You can select up to 25 topics. Topics must start with a letter or number, can include dashes ('-'), and must be 35 characters or less.

206 行
7.7KB

  1. from __future__ import unicode_literals
  2. import datetime
  3. import re
  4. from .common import InfoExtractor
  5. from ..compat import compat_urlparse
  6. from ..utils import (
  7. ExtractorError,
  8. InAdvancePagedList,
  9. orderedSet,
  10. str_to_int,
  11. unified_strdate,
  12. )
  13. class MotherlessIE(InfoExtractor):
  14. _VALID_URL = r'https?://(?:www\.)?motherless\.com/(?:g/[a-z0-9_]+/)?(?P<id>[A-Z0-9]+)'
  15. _TESTS = [{
  16. 'url': 'http://motherless.com/AC3FFE1',
  17. 'md5': '310f62e325a9fafe64f68c0bccb6e75f',
  18. 'info_dict': {
  19. 'id': 'AC3FFE1',
  20. 'ext': 'mp4',
  21. 'title': 'Fucked in the ass while playing PS3',
  22. 'categories': ['Gaming', 'anal', 'reluctant', 'rough', 'Wife'],
  23. 'upload_date': '20100913',
  24. 'uploader_id': 'famouslyfuckedup',
  25. 'thumbnail': r're:http://.*\.jpg',
  26. 'age_limit': 18,
  27. }
  28. }, {
  29. 'url': 'http://motherless.com/532291B',
  30. 'md5': 'bc59a6b47d1f958e61fbd38a4d31b131',
  31. 'info_dict': {
  32. 'id': '532291B',
  33. 'ext': 'mp4',
  34. 'title': 'Amazing girl playing the omegle game, PERFECT!',
  35. 'categories': ['Amateur', 'webcam', 'omegle', 'pink', 'young', 'masturbate', 'teen',
  36. 'game', 'hairy'],
  37. 'upload_date': '20140622',
  38. 'uploader_id': 'Sulivana7x',
  39. 'thumbnail': r're:http://.*\.jpg',
  40. 'age_limit': 18,
  41. },
  42. 'skip': '404',
  43. }, {
  44. 'url': 'http://motherless.com/g/cosplay/633979F',
  45. 'md5': '0b2a43f447a49c3e649c93ad1fafa4a0',
  46. 'info_dict': {
  47. 'id': '633979F',
  48. 'ext': 'mp4',
  49. 'title': 'Turtlette',
  50. 'categories': ['superheroine heroine superher'],
  51. 'upload_date': '20140827',
  52. 'uploader_id': 'shade0230',
  53. 'thumbnail': r're:http://.*\.jpg',
  54. 'age_limit': 18,
  55. }
  56. }, {
  57. # no keywords
  58. 'url': 'http://motherless.com/8B4BBC1',
  59. 'only_matching': True,
  60. }]
  61. def _real_extract(self, url):
  62. video_id = self._match_id(url)
  63. webpage = self._download_webpage(url, video_id)
  64. if any(p in webpage for p in (
  65. '<title>404 - MOTHERLESS.COM<',
  66. ">The page you're looking for cannot be found.<")):
  67. raise ExtractorError('Video %s does not exist' % video_id, expected=True)
  68. if '>The content you are trying to view is for friends only.' in webpage:
  69. raise ExtractorError('Video %s is for friends only' % video_id, expected=True)
  70. title = self._html_search_regex(
  71. r'id="view-upload-title">\s+([^<]+)<', webpage, 'title')
  72. video_url = (self._html_search_regex(
  73. (r'setup\(\{\s*["\']file["\']\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1',
  74. r'fileurl\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1'),
  75. webpage, 'video URL', default=None, group='url')
  76. or 'http://cdn4.videos.motherlessmedia.com/videos/%s.mp4?fs=opencloud' % video_id)
  77. age_limit = self._rta_search(webpage)
  78. view_count = str_to_int(self._html_search_regex(
  79. r'<strong>Views</strong>\s+([^<]+)<',
  80. webpage, 'view count', fatal=False))
  81. like_count = str_to_int(self._html_search_regex(
  82. r'<strong>Favorited</strong>\s+([^<]+)<',
  83. webpage, 'like count', fatal=False))
  84. upload_date = self._html_search_regex(
  85. r'<strong>Uploaded</strong>\s+([^<]+)<', webpage, 'upload date')
  86. if 'Ago' in upload_date:
  87. days = int(re.search(r'([0-9]+)', upload_date).group(1))
  88. upload_date = (datetime.datetime.now() - datetime.timedelta(days=days)).strftime('%Y%m%d')
  89. else:
  90. upload_date = unified_strdate(upload_date)
  91. comment_count = webpage.count('class="media-comment-contents"')
  92. uploader_id = self._html_search_regex(
  93. r'"thumb-member-username">\s+<a href="/m/([^"]+)"',
  94. webpage, 'uploader_id')
  95. categories = self._html_search_meta('keywords', webpage, default=None)
  96. if categories:
  97. categories = [cat.strip() for cat in categories.split(',')]
  98. return {
  99. 'id': video_id,
  100. 'title': title,
  101. 'upload_date': upload_date,
  102. 'uploader_id': uploader_id,
  103. 'thumbnail': self._og_search_thumbnail(webpage),
  104. 'categories': categories,
  105. 'view_count': view_count,
  106. 'like_count': like_count,
  107. 'comment_count': comment_count,
  108. 'age_limit': age_limit,
  109. 'url': video_url,
  110. }
  111. class MotherlessGroupIE(InfoExtractor):
  112. _VALID_URL = r'https?://(?:www\.)?motherless\.com/gv?/(?P<id>[a-z0-9_]+)'
  113. _TESTS = [{
  114. 'url': 'http://motherless.com/g/movie_scenes',
  115. 'info_dict': {
  116. 'id': 'movie_scenes',
  117. 'title': 'Movie Scenes',
  118. 'description': 'Hot and sexy scenes from "regular" movies... '
  119. 'Beautiful actresses fully nude... A looot of '
  120. 'skin! :)Enjoy!',
  121. },
  122. 'playlist_mincount': 662,
  123. }, {
  124. 'url': 'http://motherless.com/gv/sex_must_be_funny',
  125. 'info_dict': {
  126. 'id': 'sex_must_be_funny',
  127. 'title': 'Sex must be funny',
  128. 'description': 'Sex can be funny. Wide smiles,laugh, games, fun of '
  129. 'any kind!'
  130. },
  131. 'playlist_mincount': 9,
  132. }]
  133. @classmethod
  134. def suitable(cls, url):
  135. return (False if MotherlessIE.suitable(url)
  136. else super(MotherlessGroupIE, cls).suitable(url))
  137. def _extract_entries(self, webpage, base):
  138. entries = []
  139. for mobj in re.finditer(
  140. r'href="(?P<href>/[^"]+)"[^>]*>(?:\s*<img[^>]+alt="[^-]+-\s(?P<title>[^"]+)")?',
  141. webpage):
  142. video_url = compat_urlparse.urljoin(base, mobj.group('href'))
  143. if not MotherlessIE.suitable(video_url):
  144. continue
  145. video_id = MotherlessIE._match_id(video_url)
  146. title = mobj.group('title')
  147. entries.append(self.url_result(
  148. video_url, ie=MotherlessIE.ie_key(), video_id=video_id,
  149. video_title=title))
  150. # Alternative fallback
  151. if not entries:
  152. entries = [
  153. self.url_result(
  154. compat_urlparse.urljoin(base, '/' + entry_id),
  155. ie=MotherlessIE.ie_key(), video_id=entry_id)
  156. for entry_id in orderedSet(re.findall(
  157. r'data-codename=["\']([A-Z0-9]+)', webpage))]
  158. return entries
  159. def _real_extract(self, url):
  160. group_id = self._match_id(url)
  161. page_url = compat_urlparse.urljoin(url, '/gv/%s' % group_id)
  162. webpage = self._download_webpage(page_url, group_id)
  163. title = self._search_regex(
  164. r'<title>([\w\s]+\w)\s+-', webpage, 'title', fatal=False)
  165. description = self._html_search_meta(
  166. 'description', webpage, fatal=False)
  167. page_count = self._int(self._search_regex(
  168. r'(\d+)</(?:a|span)><(?:a|span)[^>]+>\s*NEXT',
  169. webpage, 'page_count'), 'page_count')
  170. PAGE_SIZE = 80
  171. def _get_page(idx):
  172. webpage = self._download_webpage(
  173. page_url, group_id, query={'page': idx + 1},
  174. note='Downloading page %d/%d' % (idx + 1, page_count)
  175. )
  176. for entry in self._extract_entries(webpage, url):
  177. yield entry
  178. playlist = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)
  179. return {
  180. '_type': 'playlist',
  181. 'id': group_id,
  182. 'title': title,
  183. 'description': description,
  184. 'entries': playlist
  185. }