27 changes: 18 additions & 9 deletions khinsider.py
@@ -124,6 +124,16 @@ def installRequiredModules(needed=None, verbose=True):

 BASE_URL = 'https://downloads.khinsider.com/'
 
+# A user-agent needs to be specified or else you'll get a 403.
+# Encoding needs to be identity or else you need to do additional decoding
+# of the request response.
+# Keep it as minimal as possible - if other issues appear in the future, we'll add more.
+headers = {"User-Agent": "Mozilla/5.0",
+           "Accept": "text/html,application/xhtml+xml",
+           "Accept-Encoding": "identity",
+           "Accept-Language": "en-US,en;q=0.9",
+           "Sec-Fetch-Site": "same-site"}
+
 # Although some of these are valid on Linux, keeping this the same
 # across systems is nice for consistency AND it works on WSL.
 FILENAME_INVALID_RE = re.compile(r'[<>:"/\\|?*]')
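
For context, a minimal standalone sketch (not part of the patch, assuming only `requests` is installed) of what the new headers buy: per the comment above, a request without a browser-like User-Agent gets a 403, and `Accept-Encoding: identity` keeps the response body unencoded so no extra decoding step is needed.

```python
# Standalone sketch, not part of the patch: reproduce the behaviour that
# motivates the new module-level `headers` dict.
import requests

BASE_URL = 'https://downloads.khinsider.com/'
headers = {"User-Agent": "Mozilla/5.0",
           "Accept": "text/html,application/xhtml+xml",
           "Accept-Encoding": "identity",
           "Accept-Language": "en-US,en;q=0.9",
           "Sec-Fetch-Site": "same-site"}

bare = requests.get(BASE_URL, timeout=10)                  # default python-requests UA
browserish = requests.get(BASE_URL, timeout=10, headers=headers)
# Expected per the patch comment: 403 for the bare request, 200 with the headers.
print(bare.status_code, browserish.status_code)
```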
@@ -192,7 +202,7 @@ def getAppropriateFile(song, formatOrder):
     return song.files[0]
 
 
-def friendlyDownloadFile(file, path, index, total, verbose=False):
+def friendlyDownloadFile(file, path, index, total, verbose=True):
     numberStr = "{}/{}".format(
         str(index).zfill(len(str(total))),
         str(total)
@@ -289,7 +299,7 @@ def _isLoaded(self, property):

     @lazyProperty
     def _contentSoup(self):
-        soup = getSoup(self.url)
+        soup = getSoup(self.url, headers=headers)
         contentSoup = soup.find(id='pageContent')
         if contentSoup.find('p').string == "No such album":
             # The pageContent and p exist even if the soundtrack doesn't, so no
@@ -328,10 +338,9 @@ def images(self):
         anchors = [a for a in table('a') if a.find('img')]
         urls = [a['href'] for a in anchors]
         images = [File(urljoin(self.url, url)) for url in urls]
-        print(images)
         return images
 
-    def download(self, path='', makeDirs=True, formatOrder=None, verbose=False):
+    def download(self, path='', makeDirs=True, formatOrder=None, verbose=True):
         """Download the soundtrack to the directory specified by `path`!
 
         Create any directories that are missing if `makeDirs` is set to True.
@@ -392,10 +401,10 @@ def __repr__(self):

     @lazyProperty
     def _soup(self):
-        r = requests.get(self.url, timeout=10)
+        r = requests.get(self.url, timeout=10, headers=headers)
         if r.url.rsplit('/', 1)[-1] == '404':
             raise NonexistentSongError("Nonexistent song page (404).")
-        return getSoup(self.url)
+        return getSoup(self.url, headers=headers)
 
     @lazyProperty
     def name(self):
@@ -440,12 +449,12 @@ def __repr__(self):

     def download(self, path):
         """Download the file to `path`."""
-        response = requests.get(self.url, timeout=10)
+        response = requests.get(self.url, timeout=10, headers=headers)
         with open(path, 'wb') as outFile:
             outFile.write(response.content)
 
 
-def download(soundtrackId, path='', makeDirs=True, formatOrder=None, verbose=False):
+def download(soundtrackId, path='', makeDirs=True, formatOrder=None, verbose=True):
     """Download the soundtrack with the ID `soundtrackId`.
     See Soundtrack.download for more information.
     """
@@ -466,7 +475,7 @@ def search(term):
     `term`. The first tuple contains album name results, and the second song
     name results.
     """
-    r = requests.get(urljoin(BASE_URL, 'search'), params={'search': term})
+    r = requests.get(urljoin(BASE_URL, 'search'), params={'search': term}, headers=headers)
     path = urlsplit(r.url).path
     if path.split('/', 2)[1] == 'game-soundtracks':
         return [Soundtrack(path.rsplit('/', 1)[-1])]
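
A quick usage sketch of the module-level entry points touched by this diff. This is hedged: it assumes khinsider.py is importable from the working directory, and the search term, soundtrack ID, and format list are hypothetical examples, not values from the patch.

```python
# Usage sketch, assuming khinsider.py sits on the import path.
# search() and download() are the module-level functions changed above;
# with verbose now defaulting to True, download() reports progress as it goes.
import khinsider

results = khinsider.search('mother-3')   # hypothetical search term
print(results)                           # soundtrack / song result objects

khinsider.download('mother-3',           # hypothetical soundtrack ID
                   path='Mother 3',
                   formatOrder=['flac', 'mp3'])  # assumed: lowercase extensions
```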