Skip to content

Commit

Permalink
fix: 新版m-team下载
Browse files Browse the repository at this point in the history
  • Loading branch information
linyuan0213 committed Mar 30, 2024
1 parent 4ac3eca commit 601d6e1
Show file tree
Hide file tree
Showing 7 changed files with 135 additions and 25 deletions.
5 changes: 1 addition & 4 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,12 +1,9 @@
# NAS媒体库管理工具

### 项目
Forked from [NAStool/nas-tools](https://github.com/NAStool/nas-tools)
项目详情以及使用方式请移步原项目。

### 新增
- 支持 Jackett 和 Prowlarr 索引器
- 支持 Aria2 下载器
- 支持新版馒头刷流和下载,需要添加cookie


### Docker 镜像地址:
Expand Down
2 changes: 1 addition & 1 deletion app/helper/site_helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ def is_logged_in(cls, html_text):
"""
if JsonUtils.is_valid_json(html_text):
json_data = json.loads(html_text)
if 'message' in json_data and json_data['message'] == 'SUCCESS':
if json_data.get('message') == 'SUCCESS':
return True
else:
return False
Expand Down
98 changes: 98 additions & 0 deletions app/indexer/client/_mteam.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
import re
import json

import log
from app.utils import RequestUtils, StringUtils
from config import Config


class MteamSpider(object):
    """
    Indexer client for the new M-Team (馒头) JSON API.

    Searches torrents via POST <domain>api/torrent/search and resolves the
    real download link for each hit via POST <domain>api/torrent/genDlToken.
    """
    _indexerid = None
    _domain = None
    _name = ""
    _proxy = None
    _cookie = None
    _ua = None
    _size = 100
    # Endpoint templates, formatted with the site domain.
    # NOTE(review): assumes the configured domain ends with '/' — confirm.
    _searchurl = "%sapi/torrent/search"
    _downloadurl = "%sapi/torrent/genDlToken"
    _pageurl = "%sdetail/%s"

    def __init__(self, indexer):
        """
        :param indexer: indexer configuration object providing id, domain,
                        name, proxy flag, cookie and user-agent; may be None
        """
        if indexer:
            self._indexerid = indexer.id
            self._domain = indexer.domain
            self._searchurl = self._searchurl % self._domain
            self._downloadurl = self._downloadurl % self._domain
            self._name = indexer.name
            if indexer.proxy:
                self._proxy = Config().get_proxies()
            self._cookie = indexer.cookie
            self._ua = indexer.ua
        self.init_config()

    def init_config(self):
        # Page size for search results; falls back to 100 when unset/zero.
        self._size = Config().get_config('pt').get('site_search_result_num') or 100

    def __get_torrent_url(self, mid):
        """
        Resolve the tokenized download URL for one torrent.

        :param mid: torrent id taken from a search result
        :return: download URL string on success, otherwise None
        """
        if not self._domain:
            return None

        res = RequestUtils(headers=self._ua,
                           cookies=self._cookie,
                           proxies=self._proxy,
                           timeout=15).post_res(url=self._downloadurl, data={'id': mid})
        if res and res.status_code == 200:
            return res.json().get('data', '')
        return None

    def search(self, keyword="", page=0):
        """
        Search torrents by keyword.

        :param keyword: search keyword; empty string browses the listing
        :param page: zero-based page index (the API itself is one-based)
        :return: tuple (error_flag, torrents) — error_flag is True when the
                 request failed, torrents is a list of result dicts
        """
        params = json.dumps({
            "mode": "normal",
            "categories": [],
            "visible": 1,
            "keyword": keyword,
            "pageNumber": int(page) + 1,
            "pageSize": self._size
        }, separators=(',', ':'))
        res = RequestUtils(
            headers={
                "Content-Type": "application/json; charset=utf-8",
                "User-Agent": f"{self._ua}"
            },
            cookies=self._cookie,
            proxies=self._proxy,
            timeout=30
        ).post_res(url=self._searchurl, data=params)
        torrents = []
        if res and res.status_code == 200:
            results = res.json().get('data', {}).get("data") or []
            for result in results:
                # 'imdb' may be missing or None — guard before regex matching
                imdbid = (re.findall(r'tt\d+', result.get('imdb') or '') or [''])[0]
                # 'status' may be absent on some entries — avoid AttributeError
                status = result.get('status') or {}
                enclosure = self.__get_torrent_url(result.get('id'))
                torrents.append({
                    'indexer': self._indexerid,
                    'title': result.get('name'),
                    'description': result.get('smallDescr'),
                    'enclosure': enclosure,
                    'pubdate': result.get('createdDate'),
                    'size': result.get('size'),
                    'seeders': status.get('seeders'),
                    'peers': status.get('leechers'),
                    'grabs': status.get('timesCompleted'),
                    'downloadvolumefactor': 0.0,
                    'uploadvolumefactor': 1.0,
                    'page_url': self._pageurl % (self._domain, result.get('id')),
                    'imdbid': imdbid
                })
        elif res is not None:
            log.warn(f"【INDEXER】{self._name} 搜索失败,错误码:{res.status_code}")
            return True, []
        else:
            log.warn(f"【INDEXER】{self._name} 搜索失败,无法连接 {self._domain}")
            return True, []
        return False, torrents
1 change: 1 addition & 0 deletions app/indexer/client/_rarbg.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import requests
import re

import log
from app.utils import RequestUtils
Expand Down
6 changes: 6 additions & 0 deletions app/indexer/client/builtin.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
from app.indexer.client._spider import TorrentSpider
from app.indexer.client._tnode import TNodeSpider
from app.indexer.client._torrentleech import TorrentLeech
from app.indexer.client._mteam import MteamSpider
from app.sites import Sites
from app.utils import StringUtils
from app.utils.types import SearchType, IndexerType, ProgressKey, SystemConfigKey
Expand Down Expand Up @@ -160,6 +161,8 @@ def search(self, order_seq,
mtype=match_media.type if match_media and match_media.tmdb_info else None)
elif indexer.parser == "TorrentLeech":
error_flag, result_array = TorrentLeech(indexer).search(keyword=search_word)
elif indexer.parser == "MteamSpider":
error_flag, result_array = MteamSpider(indexer).search(keyword=search_word)
else:
error_flag, result_array = self.__spider_search(
keyword=search_word,
Expand Down Expand Up @@ -219,6 +222,9 @@ def list(self, index_id, page=0, keyword=None):
elif indexer.parser == "TorrentLeech":
error_flag, result_array = TorrentLeech(indexer).search(keyword=keyword,
page=page)
elif indexer.parser == "MteamSpider":
error_flag, result_array = MteamSpider(indexer).search(keyword=keyword,
page=page)
else:
error_flag, result_array = self.__spider_search(indexer=indexer,
page=page,
Expand Down
48 changes: 28 additions & 20 deletions app/sites/siteuserinfo/mteam.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
from urllib.parse import urljoin

from app.sites.siteuserinfo._base import _ISiteUserInfo, SITE_BASE_ORDER
from app.utils import RequestUtils
from app.utils import RequestUtils, JsonUtils
from app.utils.types import SiteSchema
from config import Config

Expand Down Expand Up @@ -36,7 +36,6 @@ def parse(self):
self._parse_seeding_pages()
self.seeding_info = json.dumps(self.seeding_info)


def _parse_favicon(self, html_text):
"""
解析站点favicon,返回base64 fav图标
Expand All @@ -51,14 +50,17 @@ def _parse_favicon(self, html_text):
self.site_favicon = base64.b64encode(res.content).decode()

def _parse_user_base_info(self, html_text):
if not JsonUtils.is_valid_json(html_text):
return

json_data = json.loads(html_text)

user_profile = self._get_page_content(self._base_url + '/api/member/profile', params={})
user_profile = json.loads(user_profile)
if user_profile['message'] == 'SUCCESS' and json_data['data'] is not None:
userid = user_profile['data']['id']
self.username = json_data['data']['username']
self.bonus = json_data['data']['memberCount']['bonus']
if user_profile.get('message') == 'SUCCESS' and json_data.get('data') is not None:
userid = user_profile.get('data').get('id') or ''
self.username = json_data.get('data').get('username') or ''
self.bonus = json_data.get('data').get('memberCount').get('bonus') or ''
self._torrent_seeding_page = '/api/member/getUserTorrentList'
self._torrent_seeding_params = {"userid": userid, "type": "SEEDING", "pageNumber": 1, "pageSize": 25}

Expand All @@ -82,37 +84,43 @@ def _parse_user_detail_info(self, html_text):
'8': '總督/Ultimate User',
'9': '大臣/mTorrent Master'
}
if not JsonUtils.is_valid_json(html_text):
return
json_data = json.loads(html_text)
if json_data['data'] is not None:
if json_data.get('data') is not None:
# 用户等级
role = json_data['data']['role']
role = json_data.get('data').get('role') or ''
self.user_level = role_dict.get(role, '其他')

# 加入日期
self.join_at = json_data['data']['createdDate']
self.join_at = json_data.get('data').get('createdDate')

def _parse_user_traffic_info(self, html_text):
json_data = json.loads(html_text)
if json_data['data'] is not None:
self.upload = int(json_data['data']['memberCount']['uploaded'])
if json_data.get('data') is not None:
self.upload = int(json_data.get('data').get('memberCount').get('uploaded'))

self.download = int(json_data['data']['memberCount']['downloaded'])
self.download = int(json_data.get('data').get('memberCount').get('downloaded'))

self.ratio = json_data['data']['memberCount']['shareRate']
self.ratio = json_data.get('data').get('memberCount').get('shareRate')

def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):

if not JsonUtils.is_valid_json(html_text):
return None

json_data = json.loads(html_text)

page_seeding = 0
page_seeding_size = 0
page_seeding_info = []
next_page = None

if json_data['data'] is not None:
page_seeding = len(json_data['data']['data'])
for data in json_data['data']['data']:
size = int(data['torrent']['size'])
seeders = data['torrent']['status']['seeders']
if json_data.get('data') is not None:
page_seeding = len(json_data.get('data').get('data'))
for data in json_data.get('data').get('data'):
size = int(data.get('torrent').get('size'))
seeders = data.get('torrent').get('status').get('seeders')

page_seeding_size += size
page_seeding_info.append([seeders, size])
Expand All @@ -121,8 +129,8 @@ def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
self.seeding_size += page_seeding_size
self.seeding_info.extend(page_seeding_info)

page_num = int(json_data['data']['pageNumber'])
total_pages = int(json_data['data']['totalPages'])
page_num = int(json_data.get('data').get('pageNumber'))
total_pages = int(json_data.get('data').get('totalPages'))

next_page = page_num + 1
self._torrent_seeding_params['pageNumber'] = next_page
Expand Down
Binary file modified config/sites.dat
Binary file not shown.

0 comments on commit 601d6e1

Please sign in to comment.