Skip to content

Commit 8c97323

Browse files
committed
🕊
🕊
1 parent e770ed1 commit 8c97323

File tree

9 files changed

+548
-28
lines changed

9 files changed

+548
-28
lines changed

‎KekikStream/Core/Plugin/PluginBase.py‎

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -122,7 +122,14 @@ async def extract(self, url: str, referer: str = None, prefix: str | None = None
122122
try:
123123
data = await extractor.extract(url, referer=referer)
124124

125-
# prefix varsa name'e ekle
125+
# Liste ise her bir öğe için prefix ekle
126+
if isinstance(data, list):
127+
for item in data:
128+
if prefix and item.name:
129+
item.name = f"{prefix} | {item.name}"
130+
return data
131+
132+
# Tekil öğe ise
126133
if prefix and data.name:
127134
data.name = f"{prefix} | {data.name}"
128135

‎KekikStream/Extractors/ContentX.py‎

Lines changed: 27 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ async def extract(self, url, referer=None) -> list[ExtractResult]:
3030
istek.raise_for_status()
3131
i_source = istek.text
3232

33-
i_extract_value = HTMLHelper(i_source).regex_first(r"window\.openPlayer\('([^']+)'\)")
33+
i_extract_value = HTMLHelper(i_source).regex_first(r"window\.openPlayer\('([^']+)'")
3434
if not i_extract_value:
3535
raise ValueError("i_extract is null")
3636

@@ -47,8 +47,12 @@ async def extract(self, url, referer=None) -> list[ExtractResult]:
4747
name = sub_lang.replace("\\u0131", "ı")
4848
.replace("\\u0130", "İ")
4949
.replace("\\u00fc", "ü")
50-
.replace("\\u00e7", "ç"),
51-
url = self.fix_url(sub_url.replace("\\", ""))
50+
.replace("\\u00e7", "ç")
51+
.replace("\\u011f", "ğ")
52+
.replace("\\u015f", "ş")
53+
.replace("\\u011e", "Ğ")
54+
.replace("\\u015e", "Ş"),
55+
url = self.fix_url(sub_url.replace("\\/", "/").replace("\\", ""))
5256
)
5357
)
5458

@@ -61,7 +65,7 @@ async def extract(self, url, referer=None) -> list[ExtractResult]:
6165
if not m3u_link:
6266
raise ValueError("vidExtract is null")
6367

64-
m3u_link = m3u_link.replace("\\", "")
68+
m3u_link = m3u_link.replace("\\", "").replace("/m.php", "/master.m3u8")
6569
results = [
6670
ExtractResult(
6771
name = self.name,
@@ -71,24 +75,25 @@ async def extract(self, url, referer=None) -> list[ExtractResult]:
7175
)
7276
]
7377

74-
dublaj_value = HTMLHelper(i_source).regex_first(r',\"([^\"]+)\",\"Türkçe\"')
78+
dublaj_value = HTMLHelper(i_source).regex_first(r'["\']([^"\']+)["\'],["\']Türkçe["\']')
7579
if dublaj_value:
76-
dublaj_source_request = await self.httpx.get(f"{base_url}/source2.php?v={dublaj_value}", headers={"Referer": referer or base_url})
77-
dublaj_source_request.raise_for_status()
78-
79-
dublaj_source = dublaj_source_request.text
80-
dublaj_link = HTMLHelper(dublaj_source).regex_first(r'file":"([^\"]+)"')
81-
if not dublaj_link:
82-
raise ValueError("dublajExtract is null")
83-
84-
dublaj_link = dublaj_link.replace("\\", "")
85-
results.append(
86-
ExtractResult(
87-
name = f"{self.name} Türkçe Dublaj",
88-
url = dublaj_link,
89-
referer = url,
90-
subtitles = []
91-
)
92-
)
80+
try:
81+
dublaj_source_request = await self.httpx.get(f"{base_url}/source2.php?v={dublaj_value}", headers={"Referer": referer or base_url})
82+
dublaj_source_request.raise_for_status()
83+
84+
dublaj_source = dublaj_source_request.text
85+
dublaj_link = HTMLHelper(dublaj_source).regex_first(r'file":"([^\"]+)"')
86+
if dublaj_link:
87+
dublaj_link = dublaj_link.replace("\\", "")
88+
results.append(
89+
ExtractResult(
90+
name = f"{self.name} Türkçe Dublaj",
91+
url = dublaj_link,
92+
referer = url,
93+
subtitles = []
94+
)
95+
)
96+
except Exception:
97+
pass
9398

9499
return results[0] if len(results) == 1 else results

‎KekikStream/Extractors/MolyStream.py‎

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,16 @@ class MolyStream(ExtractorBase):
77
name = "MolyStream"
88
main_url = "https://dbx.molystream.org"
99

10+
# Birden fazla domain destekle
11+
supported_domains = [
12+
"ydx.molystream.org",
13+
"yd.sheila.stream",
14+
"ydf.popcornvakti.net",
15+
]
16+
17+
def can_handle_url(self, url: str) -> bool:
18+
return any(domain in url for domain in self.supported_domains)
19+
1020
async def extract(self, url, referer=None) -> ExtractResult:
1121
if "doctype html" in url:
1222
secici = HTMLHelper(url)
Lines changed: 217 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,217 @@
1+
# Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
2+
3+
from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode, ExtractResult, HTMLHelper
4+
import urllib.parse
5+
6+
class DiziWatch(PluginBase):
7+
name = "DiziWatch"
8+
language = "tr"
9+
main_url = "https://diziwatch.to"
10+
favicon = f"https://www.google.com/s2/favicons?domain={main_url}&sz=64"
11+
description = "Diziwatch; en güncel yabancı dizileri ve animeleri, Türkçe altyazılı ve dublaj seçenekleriyle izleyebileceğiniz platform."
12+
13+
main_page = {
14+
f"{main_url}/episodes" : "Yeni Bölümler",
15+
"9" : "Aksiyon",
16+
"17" : "Animasyon",
17+
"5" : "Bilim Kurgu",
18+
"2" : "Dram",
19+
"12" : "Fantastik",
20+
"3" : "Gizem",
21+
"4" : "Komedi",
22+
"8" : "Korku",
23+
"24" : "Macera",
24+
"14" : "Müzik",
25+
"7" : "Romantik",
26+
"23" : "Spor",
27+
"1" : "Suç",
28+
}
29+
30+
def __init__(self):
31+
super().__init__()
32+
self.c_key = None
33+
self.c_value = None
34+
35+
async def _init_session(self):
36+
if self.c_key and self.c_value:
37+
return
38+
39+
# Fetch anime-arsivi to get CSRF tokens
40+
resp = await self.httpx.get(f"{self.main_url}/anime-arsivi")
41+
sel = HTMLHelper(resp.text)
42+
43+
# form.bg-[rgba(255,255,255,.15)] > input
44+
# We can just look for the first two inputs in that specific form
45+
inputs = sel.select("form.bg-\\[rgba\\(255\\,255\\,255\\,\\.15\\)\\] input")
46+
if len(inputs) >= 2:
47+
self.c_key = inputs[0].attrs.get("value")
48+
self.c_value = inputs[1].attrs.get("value")
49+
50+
async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
51+
await self._init_session()
52+
53+
if url.startswith("https://"):
54+
full_url = f"{url}?page={page}"
55+
resp = await self.httpx.get(full_url, headers={"Referer": f"{self.main_url}/"})
56+
sel = HTMLHelper(resp.text)
57+
items = sel.select("div.swiper-slide a")
58+
else:
59+
# Category ID based
60+
full_url = f"{self.main_url}/anime-arsivi?category={url}&minImdb=&name=&release_year=&sort=date_desc&page={page}"
61+
resp = await self.httpx.get(full_url, headers={"Referer": f"{self.main_url}/"})
62+
sel = HTMLHelper(resp.text)
63+
items = sel.select("div.content-inner a")
64+
65+
results = []
66+
for item in items:
67+
title = sel.select_text("h2", item)
68+
href = item.attrs.get("href") if item.tag == "a" else sel.select_attr("a", "href", item)
69+
poster = sel.select_attr("img", "src", item) or sel.select_attr("img", "data-src", item)
70+
71+
if title and href:
72+
# If it's an episode link, clean it to get show link
73+
# Regex in Kotlin: /sezon-\d+/bolum-\d+/?$
74+
clean_href = HTMLHelper(href).regex_replace(r"/sezon-\d+/bolum-\d+/?$", "")
75+
76+
# If cleaning changed something, it was an episode link, maybe add it to title
77+
if clean_href != href:
78+
se_info = sel.select_text("div.flex.gap-1.items-center", item)
79+
if se_info:
80+
title = f"{title} - {se_info}"
81+
82+
results.append(MainPageResult(
83+
category = category,
84+
title = title,
85+
url = self.fix_url(clean_href),
86+
poster = self.fix_url(poster) if poster else None
87+
))
88+
89+
return results
90+
91+
async def search(self, query: str) -> list[SearchResult]:
92+
await self._init_session()
93+
94+
post_url = f"{self.main_url}/bg/searchcontent"
95+
data = {
96+
"cKey" : self.c_key,
97+
"cValue" : self.c_value,
98+
"searchterm" : query
99+
}
100+
101+
headers = {
102+
"X-Requested-With" : "XMLHttpRequest",
103+
"Accept" : "application/json, text/javascript, */*; q=0.01",
104+
"Referer" : f"{self.main_url}/"
105+
}
106+
107+
resp = await self.httpx.post(post_url, data=data, headers=headers)
108+
109+
try:
110+
raw = resp.json()
111+
# Kotlin maps this to ApiResponse -> DataWrapper -> Icerikler
112+
res_array = raw.get("data", {}).get("result", [])
113+
114+
results = []
115+
for item in res_array:
116+
title = item.get("object_name", "").replace("\\", "")
117+
slug = item.get("used_slug", "").replace("\\", "")
118+
poster = item.get("object_poster_url", "")
119+
120+
# Cleanup poster URL as in Kotlin
121+
if poster:
122+
poster = poster.replace("images-macellan-online.cdn.ampproject.org/i/s/", "") \
123+
.replace("file.dizilla.club", "file.macellan.online") \
124+
.replace("images.dizilla.club", "images.macellan.online") \
125+
.replace("images.dizimia4.com", "images.macellan.online") \
126+
.replace("file.dizimia4.com", "file.macellan.online")
127+
poster = HTMLHelper(poster).regex_replace(r"(file\.)[\w\.]+\/?", r"\1macellan.online/")
128+
poster = HTMLHelper(poster).regex_replace(r"(images\.)[\w\.]+\/?", r"\1macellan.online/")
129+
poster = poster.replace("/f/f/", "/630/910/")
130+
131+
if title and slug:
132+
results.append(SearchResult(
133+
title = title,
134+
url = self.fix_url(slug),
135+
poster = self.fix_url(poster) if poster else None
136+
))
137+
return results
138+
except Exception:
139+
return []
140+
141+
async def load_item(self, url: str) -> SeriesInfo:
142+
resp = await self.httpx.get(url)
143+
sel = HTMLHelper(resp.text)
144+
145+
title = sel.select_text("h2")
146+
poster = sel.select_attr("img.rounded-md", "src")
147+
description = sel.select_text("div.text-sm")
148+
149+
year = sel.regex_first(r"Yap\u0131m Y\u0131l\u0131\s*:\s*(\d+)", resp.text)
150+
151+
tags = []
152+
tags_raw = sel.regex_first(r"T\u00fcr\s*:\s*([^<]+)", resp.text)
153+
if tags_raw:
154+
tags = [t.strip() for t in tags_raw.split(",")]
155+
156+
rating = sel.select_text(".font-semibold.text-white")
157+
if rating:
158+
rating = rating.replace(",", ".").strip()
159+
160+
actors = [a.text(strip=True) for a in sel.select("span.valor a")]
161+
162+
trailer_match = sel.regex_first(r"embed\/(.*)\?rel", resp.text)
163+
trailer = f"https://www.youtube.com/embed/{trailer_match}" if trailer_match else None
164+
165+
duration_text = sel.select_text("span.runtime")
166+
duration = duration_text.split(" ")[0] if duration_text else None
167+
168+
episodes = []
169+
# ul a handles episodes
170+
for ep_link in sel.select("ul a"):
171+
href = ep_link.attrs.get("href")
172+
if not href or "/sezon-" not in href:
173+
continue
174+
175+
ep_name = sel.select_text("span.hidden.sm\\:block", ep_link)
176+
177+
season_match = sel.regex_first(r"sezon-(\d+)", href)
178+
episode_match = sel.regex_first(r"bolum-(\d+)", href)
179+
180+
season = season_match if season_match else None
181+
episode_num = episode_match if episode_match else None
182+
183+
episodes.append(Episode(
184+
season = int(season) if season and season.isdigit() else None,
185+
episode = int(episode_num) if episode_num and episode_num.isdigit() else None,
186+
title = ep_name if ep_name else f"{season}x{episode_num}",
187+
url = self.fix_url(href)
188+
))
189+
190+
return SeriesInfo(
191+
title = title,
192+
url = url,
193+
poster = self.fix_url(poster) if poster else None,
194+
description = description,
195+
rating = rating,
196+
tags = tags,
197+
actors = actors,
198+
year = year,
199+
episodes = episodes,
200+
duration = int(duration) if duration and str(duration).isdigit() else None
201+
)
202+
203+
async def load_links(self, url: str) -> list[ExtractResult]:
204+
resp = await self.httpx.get(url)
205+
sel = HTMLHelper(resp.text)
206+
207+
iframe = sel.select_attr("iframe", "src")
208+
if not iframe:
209+
return []
210+
211+
iframe_url = self.fix_url(iframe)
212+
data = await self.extract(iframe_url, referer=f"{self.main_url}/")
213+
214+
if not data:
215+
return []
216+
217+
return data if isinstance(data, list) else [data]

‎KekikStream/Plugins/Dizilla.py‎

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -248,4 +248,6 @@ async def load_links(self, url: str) -> list[ExtractResult]:
248248
return []
249249

250250
data = await self.extract(iframe_url, referer=f"{self.main_url}/", prefix=first_result.get('language_name', 'Unknown'))
251-
return [data] if data else []
251+
if not data:
252+
return []
253+
return data if isinstance(data, list) else [data]

‎KekikStream/Plugins/SezonlukDizi.py‎

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -75,9 +75,9 @@ async def search(self, query: str) -> list[SearchResult]:
7575

7676
results = []
7777
for afis in secici.select("div.afis a"):
78-
title = secici.select_text("div.description", veri)
79-
href = secici.select_attr("a", "href", veri)
80-
poster = secici.select_attr("img", "data-src", veri)
78+
title = secici.select_text("div.description", afis)
79+
href = secici.select_attr("a", "href", afis)
80+
poster = secici.select_attr("img", "data-src", afis)
8181

8282
if title and href:
8383
results.append(SearchResult(

‎KekikStream/Plugins/Sinefy.py‎

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -159,6 +159,11 @@ async def load_item(self, url: str) -> SeriesInfo:
159159
actors = [h5.text(strip=True) for h5 in sel.select("div.content h5") if h5.text(strip=True)]
160160

161161
year = sel.select_text("span.item.year")
162+
if not year and title:
163+
# Try to extract year from title like "Movie Name(2024)"
164+
year_match = sel.regex_first(r"\((\d{4})\)", title)
165+
if year_match:
166+
year = year_match
162167

163168
episodes = []
164169
episodes_box_list = sel.select("section.episodes-box")

0 commit comments

Comments
 (0)