author    sprhawk <465558+sprhawk@users.noreply.github.com>    2017-12-11 12:25:13 +0800
committer sprhawk <465558+sprhawk@users.noreply.github.com>    2017-12-11 12:26:19 +0800
commit    29ac31afaf627363fbc1f757aa50078d343acf1f (patch)
tree      4e74a262d2d8b922b8e7415f4a6c5947eda7e7d7
parent    d4f05d473134d7bd61b054468e6ba297cef3c88f (diff)
simply get the correct webpage; parsing it to extract information is not done yet
-rw-r--r--  youtube_dl/extractor/extractors.py |  1
-rw-r--r--  youtube_dl/extractor/weibo.py      | 97
2 files changed, 98 insertions(+), 0 deletions(-)
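The new extractor works around Weibo's visitor system: an anonymous request for a weibo.com post is redirected to the visitor pages, so the extractor first POSTs a fake browser fingerprint to the genvisitor endpoint, exchanges the returned tid for visitor cookies via the incarnate action, and only then re-requests the original page. The following stand-alone sketch restates that flow using only the Python 3 standard library; the endpoints and parameters mirror the patch below, while the explicit CookieJar is an assumption about how the handshake is persisted (youtube-dl keeps cookies on its own opener).

    # Stand-alone sketch of the Weibo visitor handshake (Python 3 stdlib only, illustrative)
    import json
    import random
    from http.cookiejar import CookieJar
    from urllib.parse import urlencode
    from urllib.request import HTTPCookieProcessor, Request, build_opener

    POST_URL = 'https://weibo.com/6275294458/Fp6RGfbff'  # example post from the test case

    opener = build_opener(HTTPCookieProcessor(CookieJar()))

    # 1. genvisitor: POST a browser fingerprint, receive a JSONP-wrapped tid
    data = urlencode({
        'cb': 'gen_callback',
        'fp': '{"os":"2","browser":"Gecko57,0,0,0","fonts":"undefined",'
              '"screenInfo":"1440*900*24","plugins":""}',
    }).encode()
    jsonp = opener.open(Request(
        'https://passport.weibo.com/visitor/genvisitor', data=data)).read().decode()
    payload = json.loads(jsonp[jsonp.find('{'):jsonp.rfind('}') + 1])
    tid = payload['data']['tid']

    # 2. incarnate: exchange the tid for visitor cookies
    query = urlencode({
        'a': 'incarnate', 't': tid, 'w': 2,
        'c': '%03d' % payload['data']['confidence'],
        'cb': 'cross_domain', 'from': 'weibo', '_rand': random.random(),
    })
    opener.open('https://passport.weibo.com/visitor/visitor?' + query).read()

    # 3. with the cookies attached, the post page now returns its real HTML
    html = opener.open(POST_URL).read().decode('utf-8', 'ignore')
    print(len(html))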
diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 2cc3bc4..12dc2e7 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -1286,6 +1286,7 @@ from .webofstories import (
WebOfStoriesIE,
WebOfStoriesPlaylistIE,
)
+from .weibo import WeiboIE
from .weiqitv import WeiqiTVIE
from .wimp import WimpIE
from .wistia import WistiaIE
diff --git a/youtube_dl/extractor/weibo.py b/youtube_dl/extractor/weibo.py
new file mode 100644
index 0000000..195508e
--- /dev/null
+++ b/youtube_dl/extractor/weibo.py
@@ -0,0 +1,97 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+import random as rnd
+
+from .common import InfoExtractor
+from ..compat import compat_urllib_parse_urlencode
+from ..utils import (
+    sanitized_Request,
+    urlencode_postdata,
+)
+
+
+class WeiboIE(InfoExtractor):
+    _VALID_URL = r'https?://weibo\.com/[0-9]+/(?P<id>[a-zA-Z0-9]+)'
+    _TEST = {
+        'url': 'https://weibo.com/6275294458/Fp6RGfbff?from=page_1005056275294458_profile&wvr=6&mod=weibotime&type=comment',
+        'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
+        'info_dict': {
+            'id': '42',
+            'ext': 'mp4',
+            'title': 'Video title goes here',
+            'thumbnail': r're:^https?://.*\.jpg$',
+            # TODO more properties, either as:
+            # * A value
+            # * MD5 checksum; start the string with md5:
+            # * A regular expression; start the string with re:
+            # * Any Python type (for example int or float)
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        headers = {
+            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
+            'Accept-Encoding': 'gzip, deflate, br',
+            'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8',
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
+            'Upgrade-Insecure-Requests': '1',
+        }
+        # to get Referer url for genvisitor
+        webpage, urlh = self._download_webpage_handle(
+            url, video_id, headers=headers, note='first visit the page')
+
+        visitor_url = urlh.geturl()
+
+        data = urlencode_postdata({
+            'cb': 'gen_callback',
+            'fp': '{"os":"2","browser":"Gecko57,0,0,0","fonts":"undefined","screenInfo":"1440*900*24","plugins":""}',
+        })
+        headers = {
+            'Accept-Encoding': 'gzip, deflate, br',
+            'Accept': '*/*',
+            'Referer': visitor_url,
+        }
+
+        # passing data makes this a POST request
+        r_genvisitor = sanitized_Request(
+            'https://passport.weibo.com/visitor/genvisitor',
+            data=data, headers=headers)
+        webpage, urlh = self._download_webpage_handle(
+            r_genvisitor, video_id, note='gen visitor')
+
+        p = webpage.split('&&')[1]  # split "gen_callback && gen_callback(...)"
+        i1 = p.find('{')
+        i2 = p.rfind('}')
+        j = p[i1:i2 + 1]  # get JSON object
+        d = json.loads(j)
+        tid = d['data']['tid']
+        cnfd = '%03d' % d['data']['confidence']
+
+        param = compat_urllib_parse_urlencode({
+            'a': 'incarnate',
+            't': tid,
+            'w': 2,
+            'c': cnfd,
+            'cb': 'cross_domain',
+            'from': 'weibo',
+            '_rand': rnd.random(),
+        })
+        gencallback_url = 'https://passport.weibo.com/visitor/visitor?' + param
+        webpage, urlh = self._download_webpage_handle(
+            gencallback_url, video_id, note='gen callback')
+
+        webpage, urlh = self._download_webpage_handle(
+            url, video_id, headers=headers, note='retry to visit the page')
+
+        # TODO more code goes here, for example ...
+        title = self._html_search_regex(r'<title>(.+?)</title>', webpage, 'title')
+
+        # capture the quoted attribute value (exact quoting on the live page may differ)
+        video_sources = self._search_regex(
+            r'video-sources=(["\'])(?P<sources>.+?)\1', webpage,
+            'video sources', group='sources')
+        return {
+            'id': video_id,
+            'title': title,
+            'description': self._og_search_description(webpage),
+            'uploader': self._search_regex(
+                r'<div[^>]+id="uploader"[^>]*>([^<]+)<',
+                webpage, 'uploader', fatal=False),
+            # TODO more properties (see youtube_dl/extractor/common.py)
+        }
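
Until the TODOs above are filled in, the extractor can at least be exercised end to end to confirm the visitor handshake returns the real page. A minimal smoke test through youtube-dl's embedded API might look like the sketch below (the URL is the one from the _TEST block; metadata is still partial at this commit). Once the _TEST fields are real, the usual per-extractor test entry point, python test/test_download.py TestDownload.test_Weibo, applies as well.

    from __future__ import unicode_literals

    import youtube_dl

    # Smoke-test the work-in-progress extractor: run extraction without downloading.
    # Expect placeholder/partial metadata until the parsing TODOs are completed.
    ydl = youtube_dl.YoutubeDL({'verbose': True, 'skip_download': True})
    info = ydl.extract_info(
        'https://weibo.com/6275294458/Fp6RGfbff', download=False)
    print(info.get('id'), info.get('title'))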