remove imported but unused module
ljhcage authored Oct 11, 2024
1 parent c4042d0 commit 094d5a0
Showing 17 changed files with 4 additions and 26 deletions.
4 changes: 2 additions & 2 deletions src/you_get/extractors/acfun.py
@@ -43,7 +43,7 @@ def prepare(self, **kwargs):
             currentVideoInfo = json_data.get('currentVideoInfo')

         else:
-            raise NotImplemented
+            raise NotImplementedError()

         if 'ksPlayJson' in currentVideoInfo:
             durationMillis = currentVideoInfo['durationMillis']
@@ -193,7 +193,7 @@ def getM3u8UrlFromCurrentVideoInfo(currentVideoInfo):
         m3u8_url = getM3u8UrlFromCurrentVideoInfo(currentVideoInfo)

     else:
-        raise NotImplemented
+        raise NotImplementedError()

     assert title and m3u8_url
     title = unescape_html(title)
4 changes: 2 additions & 2 deletions src/you_get/extractors/baidu.py
@@ -185,15 +185,15 @@ def baidu_pan_download(url):
     isprotected = False
     sign, timestamp, bdstoken, appid, primary_id, fs_id, uk = baidu_pan_parse(
         html)
-    if sign == None:
+    if sign is None:
         if re.findall(r'\baccess-code\b', html):
             isprotected = True
             sign, timestamp, bdstoken, appid, primary_id, fs_id, uk, fake_headers, psk = baidu_pan_protected_share(
                 url)
             # raise NotImplementedError("Password required!")
         if isprotected != True:
             raise AssertionError("Share not found or canceled: %s" % url)
-    if bdstoken == None:
+    if bdstoken is None:
         bdstoken = ""
     if isprotected != True:
         sign, timestamp, bdstoken, appid, primary_id, fs_id, uk = baidu_pan_parse(
1 change: 0 additions & 1 deletion src/you_get/extractors/facebook.py
@@ -3,7 +3,6 @@
 __all__ = ['facebook_download']

 from ..common import *
-import json

 def facebook_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
     url = re.sub(r'//.*?facebook.com','//facebook.com',url)
1 change: 0 additions & 1 deletion src/you_get/extractors/fc2video.py
@@ -5,7 +5,6 @@
 from ..common import *
 from hashlib import md5
 from urllib.parse import urlparse
-import re

 #----------------------------------------------------------------------
 def makeMimi(upid):
1 change: 0 additions & 1 deletion src/you_get/extractors/giphy.py
@@ -3,7 +3,6 @@
 __all__ = ['giphy_download']

 from ..common import *
-import json

 def giphy_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
     html = get_html(url)
1 change: 0 additions & 1 deletion src/you_get/extractors/iqiyi.py
@@ -10,7 +10,6 @@
 from random import random,randint
 import json
 from math import floor
-from zlib import decompress
 import hashlib
 import time

5 changes: 0 additions & 5 deletions src/you_get/extractors/ixigua.py
@@ -1,12 +1,7 @@
 #!/usr/bin/env python
-import base64
-
-import binascii
-
 from ..common import *
 import random
 import string
-import ctypes
 from json import loads
 from urllib import request

2 changes: 0 additions & 2 deletions src/you_get/extractors/kuaishou.py
@@ -2,10 +2,8 @@

 import urllib.request
 import urllib.parse
-import json
-import re

 from ..util import log
 from ..common import get_content, download_urls, print_info, playlist_not_supported, url_size

 __all__ = ['kuaishou_download_by_url']
1 change: 0 additions & 1 deletion src/you_get/extractors/kugou.py
@@ -6,7 +6,6 @@
 from json import loads
 from base64 import b64decode
 import re
-import hashlib


 def kugou_download(url, output_dir=".", merge=True, info_only=False, **kwargs):
1 change: 0 additions & 1 deletion src/you_get/extractors/pixnet.py
@@ -3,7 +3,6 @@
 __all__ = ['pixnet_download']

 from ..common import *
-import urllib.error
 from time import time
 from urllib.parse import quote
 from json import loads
3 changes: 0 additions & 3 deletions src/you_get/extractors/sohu.py
@@ -5,9 +5,6 @@
 from ..common import *

 import json
-import time
-from random import random
-from urllib.parse import urlparse

 '''
 Changelog:
1 change: 0 additions & 1 deletion src/you_get/extractors/soundcloud.py
@@ -5,7 +5,6 @@
 from ..common import *
 import re
 import json
-import urllib.error


 def get_sndcd_apikey():
1 change: 0 additions & 1 deletion src/you_get/extractors/suntv.py
@@ -27,7 +27,6 @@ def suntv_download(url, output_dir = '.', merge = True, info_only = False, **kwa
     html = html.decode('gbk')
     title = match1(html, '<title>([^<]+)').strip() #get rid of \r\n s

-    type_ = ''
     size = 0
     type, ext, size = url_info(video_url)

1 change: 0 additions & 1 deletion src/you_get/extractors/ucas.py
@@ -3,7 +3,6 @@
 __all__ = ['ucas_download', 'ucas_download_single', 'ucas_download_playlist']

 from ..common import *
-import urllib.error
 import http.client
 from time import time
 from random import random
1 change: 0 additions & 1 deletion src/you_get/extractors/veoh.py
@@ -3,7 +3,6 @@
 __all__ = ['veoh_download']

 from ..common import *
-import urllib.error

 def veoh_download(url, output_dir = '.', merge = False, info_only = False, **kwargs):
     '''Get item_id'''
1 change: 0 additions & 1 deletion src/you_get/extractors/yizhibo.py
@@ -4,7 +4,6 @@

 from ..common import *
 import json
-import time

 def yizhibo_download(url, output_dir = '.', merge = True, info_only = False, **kwargs):
     video_id = url[url.rfind('/')+1:].split(".")[0]
1 change: 0 additions & 1 deletion src/you_get/extractors/youku.py
@@ -5,7 +5,6 @@
 from ..extractor import VideoExtractor

 import time
-import traceback
 import json
 import urllib.request
 import urllib.parse
