Files
anime_downloader/mod_ohli24.py

3689 lines
170 KiB
Python
Raw Normal View History

2022-10-29 17:21:14 +09:00
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2022/02/08 3:44 PM
# @Author : yommi
# @Site :
# @File : logic_ohli24
# @Software: PyCharm
from __future__ import annotations
2022-10-29 17:21:14 +09:00
import asyncio
2022-10-29 17:21:14 +09:00
import hashlib
import importlib
import json
import os
import re
2022-10-29 17:21:14 +09:00
import subprocess
import sys
import threading
import traceback
import urllib
import unicodedata
from datetime import datetime, date
from typing import Any, Dict, List, Optional, Tuple, Union, Callable, TYPE_CHECKING
from urllib import parse
2022-10-29 17:21:14 +09:00
# third-party
import requests
2023-01-26 15:33:59 +09:00
2022-10-29 17:21:14 +09:00
# third-party
from flask import request, render_template, jsonify, Response
from lxml import html
from sqlalchemy import or_, desc
2022-10-29 17:21:14 +09:00
2025-12-27 23:27:46 +09:00
# third-party
import requests
2022-10-29 17:21:14 +09:00
# third party package
2022-10-29 17:21:14 +09:00
import aiohttp
from bs4 import BeautifulSoup
import jsbeautifier
# sjva 공용
from framework import db, scheduler, path_data, socketio
from framework.util import Util
2023-01-26 15:33:59 +09:00
2022-10-29 17:21:14 +09:00
# from framework.common.util import headers
from framework import F
2023-01-26 15:33:59 +09:00
from plugin import PluginModuleBase
2022-11-30 23:03:41 +09:00
from .lib.ffmpeg_queue_v1 import FfmpegQueueEntity, FfmpegQueue
2022-10-29 17:21:14 +09:00
from support.expand.ffmpeg import SupportFfmpeg
from .lib.util import Util
2022-11-12 23:47:21 +09:00
# from support_site import SupportKakaotv
2022-10-29 17:21:14 +09:00
from .setup import *
from .mod_base import AnimeModuleBase
from .model_base import AnimeQueueEntity
2022-10-29 17:21:14 +09:00
# GDM (gommi_downloader_manager) is an optional dependency; fall back to the
# legacy internal queue when it is not installed.
try:
    from gommi_downloader_manager.mod_queue import ModuleQueue
except ImportError:
    ModuleQueue = None

logger = P.logger

# NOTE(review): module-level debug print left in place to preserve observable
# startup output; consider removing or switching to logger.debug.
print("*=" * 50)
# Module name used for settings keys, routes and scheduler registration.
name = "ohli24"
2022-10-29 17:21:14 +09:00
class LogicOhli24(AnimeModuleBase):
    """ohli24 anime module: site scraping, download queueing and streaming."""

    # Per-request scraping state shared across the module's methods.
    current_headers: Optional[Dict[str, str]] = None
    current_data: Optional[Dict[str, Any]] = None  # last analysed series info
    referer: Optional[str] = None
    origin_url: Optional[str] = None
    episode_url: Optional[str] = None
    cookies: Optional[requests.cookies.RequestsCookieJar] = None
    # proxy = "http://192.168.0.2:3138"
    # proxies = {
    #     "http": proxy,
    #     "https": proxy,
    # }
@classmethod
def get_proxy(cls) -> str:
return P.ModelSetting.get("ohli24_proxy_url")
@classmethod
def get_proxies(cls) -> Optional[Dict[str, str]]:
proxy = cls.get_proxy()
if proxy:
return {"http": proxy, "https": proxy}
return None
    # Shared HTTP session reused for all scraping requests.
    session = requests.Session()

    # Default headers used when talking to the video host.
    headers = {
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.5249.114 Whale/3.17.145.12 Safari/537.36",
        "authority": "ndoodle.xyz",
        "accept": "*/*",
        "accept-language": "ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7",
        "cache-control": "no-cache",
        "pragma": "no-cache",
        "referer": "https://ndoodle.xyz/video/e6e31529675d0ef99d777d729c423382",
    }
    useragent = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, "
        "like Gecko) Chrome/96.0.4664.110 Whale/3.12.129.46 Safari/537.36"
    }

    download_queue = None  # legacy queue object (unused when GDM is active)
    download_thread = None
    current_download_count = 0
    zendriver_setup_done = False  # Zendriver auto-install completed flag
    zendriver_daemon_process = None  # Zendriver daemon process handle
    zendriver_daemon_port = 19876
    daemon_fail_count = 0  # consecutive daemon-failure count
    # Streaming tokens for external players (no auth required)
    _stream_tokens: Dict[str, Dict[str, Any]] = {}
    _TOKEN_TTL_SECONDS = 300  # 5 minutes
@classmethod
def _cleanup_expired_tokens(cls) -> None:
"""Remove expired streaming tokens"""
import time
now = time.time()
expired = [k for k, v in cls._stream_tokens.items() if v.get("expires", 0) < now]
for k in expired:
del cls._stream_tokens[k]
@classmethod
def _generate_stream_token(cls, file_path: str) -> str:
"""Generate a temporary streaming token for external players"""
import time
import secrets
cls._cleanup_expired_tokens()
token = secrets.token_urlsafe(32)
cls._stream_tokens[token] = {
"path": file_path,
"expires": time.time() + cls._TOKEN_TTL_SECONDS
}
return token
@classmethod
def _validate_stream_token(cls, token: str) -> Optional[str]:
"""Validate token and return file path if valid (consumes token)"""
import time
cls._cleanup_expired_tokens()
token_data = cls._stream_tokens.get(token)
if token_data and token_data.get("expires", 0) > time.time():
# Don't consume token immediately - allow multiple uses within TTL
return token_data.get("path")
return None
@classmethod
def ensure_essential_dependencies(cls) -> bool:
"""필수 패키지(jsbeautifier, loguru, botasaurus) 확인 및 자동 설치"""
target_packages = ["jsbeautifier", "loguru", "botasaurus"]
need_install = []
import importlib.util
for pkg in target_packages:
if importlib.util.find_spec(pkg) is None:
need_install.append(pkg)
if not need_install:
return True
import subprocess as sp
try:
logger.info(f"[Dependencies] Missing: {need_install}, installing via pip...")
cmd = [sys.executable, "-m", "pip", "install"] + need_install + ["-q"]
result = sp.run(cmd, capture_output=True, text=True, timeout=180)
if result.returncode == 0:
logger.info(f"[Dependencies] Successfully installed: {need_install}")
return True
else:
logger.warning(f"[Dependencies] Installation failed: {result.stderr[:200]}")
return False
except Exception as e:
logger.error(f"[Dependencies] Installation error: {e}")
return False
@classmethod
def ensure_zendriver_installed(cls) -> bool:
"""Zendriver 패키지 확인 및 자동 설치"""
if cls.zendriver_setup_done:
return True
# 필수 패키지 먼저 확인
cls.ensure_essential_dependencies()
import importlib.util
import subprocess as sp
# 라이브러리 존재 확인
lib_exists = importlib.util.find_spec("zendriver") is not None
if lib_exists:
cls.zendriver_setup_done = True
return True
# 자동 설치 시도
try:
logger.info("[Zendriver] Not found, installing via pip...")
cmd = [sys.executable, "-m", "pip", "install", "zendriver", "-q"]
result = sp.run(cmd, capture_output=True, text=True, timeout=120)
if result.returncode == 0:
cls.zendriver_setup_done = True
logger.info("[Zendriver] Successfully installed")
# 브라우저 존재 확인 안내
import shutil
browser_path = P.ModelSetting.get("ohli24_zendriver_browser_path")
if browser_path and os.path.exists(browser_path):
has_browser = True
else:
has_browser = any(shutil.which(cmd) for cmd in ["google-chrome", "google-chrome-stable", "chromium-browser", "chromium"])
if not has_browser:
logger.warning("[Zendriver] 브라우저(Chrome/Chromium)가 시스템에 설치되어 있지 않습니다. Docker 환경에서는 직접 설치가 필요할 수 있습니다.")
logger.warning("[Zendriver] Ubuntu Tip: apt-get update && apt-get install -y chromium-browser")
return True
else:
logger.warning(f"[Zendriver] Installation failed: {result.stderr[:200]}")
return False
except Exception as e:
logger.error(f"[Zendriver] Installation error: {e}")
return False
    @classmethod
    def start_zendriver_daemon(cls) -> bool:
        """Start the background Zendriver daemon process if it is not running.

        Returns:
            True when the daemon is (already) up and answering its health
            endpoint, False on any failure.
        """
        if cls.is_zendriver_daemon_running():
            logger.debug("[ZendriverDaemon] Already running")
            return True
        if not cls.ensure_zendriver_installed():
            return False
        try:
            import subprocess
            daemon_script = os.path.join(os.path.dirname(__file__), "lib", "zendriver_daemon.py")
            if not os.path.exists(daemon_script):
                logger.warning("[ZendriverDaemon] Daemon script not found")
                return False
            # Launch the daemon detached in its own session (background),
            # discarding its stdio.
            browser_path = P.ModelSetting.get("ohli24_zendriver_browser_path")
            cmd = [sys.executable, daemon_script]
            if browser_path:
                cmd.extend(["--browser_path", browser_path])
            cls.zendriver_daemon_process = subprocess.Popen(
                cmd,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
                start_new_session=True
            )
            # Poll the health endpoint for up to ~5 seconds.
            import time
            for _ in range(10):
                time.sleep(0.5)
                if cls.is_zendriver_daemon_running():
                    logger.info(f"[ZendriverDaemon] Started on port {cls.zendriver_daemon_port}")
                    return True
            logger.warning("[ZendriverDaemon] Failed to start (timeout)")
            return False
        except Exception as e:
            logger.error(f"[ZendriverDaemon] Start error: {e}")
            return False
@classmethod
def is_zendriver_daemon_running(cls) -> bool:
"""데몬 실행 상태 확인"""
try:
import requests
resp = requests.get(f"http://127.0.0.1:{cls.zendriver_daemon_port}/health", timeout=1)
return resp.status_code == 200
except:
return False
    @classmethod
    def fetch_via_daemon(cls, url: str, timeout: int = 30, headers: Optional[dict] = None) -> dict:
        """Fetch a page's rendered HTML through the local Zendriver daemon.

        Args:
            url: Target page URL.
            timeout: Seconds the daemon may spend rendering the page.
            headers: Optional extra request headers forwarded to the daemon.

        Returns:
            The daemon's JSON response dict; on any failure a dict of the
            form ``{"success": False, "error": <reason>}``.
        """
        try:
            import requests
            payload = {"url": url, "timeout": timeout}
            if headers:
                payload["headers"] = headers
            resp = requests.post(
                f"http://127.0.0.1:{cls.zendriver_daemon_port}/fetch",
                json=payload,
                # Allow some slack over the daemon-side render timeout.
                timeout=timeout + 5
            )
            if resp.status_code == 200:
                return resp.json()
            return {"success": False, "error": f"HTTP {resp.status_code}"}
        except Exception as e:
            return {"success": False, "error": str(e)}
2022-11-12 23:47:21 +09:00
@classmethod
def system_check(cls) -> Dict[str, Any]:
"""시스템 의존성 확인 (브라우저 등)"""
import shutil
import platform
res = {
"browser_found": False,
"browser_path": "",
"os": platform.system(),
"dist": "",
"can_install": False,
"install_cmd": "wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - && echo 'deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main' >> /etc/apt/sources.list.d/google.list && apt-get update && apt-get install -y google-chrome-stable"
}
# 브라우저 찾기
def is_bad_snap(path):
if not path or not os.path.exists(path): return True
if "chromium-browser" in path:
try:
# --version 실행 시 Snap 안내가 나오거나 에러가 나면 래퍼임
v_out = sp.check_output([path, "--version"], stderr=sp.STDOUT, timeout=5).decode().lower()
if "snap" in v_out: return True
except:
return True # 실행 안 되면 일단 문제 있는 것으로 간주
return False
manual_path = P.ModelSetting.get("ohli24_zendriver_browser_path")
if manual_path and os.path.exists(manual_path):
if not is_bad_snap(manual_path):
res["browser_found"] = True
res["browser_path"] = manual_path
else:
res["snap_error"] = True
if not res["browser_found"]:
# Snap 이슈를 피하기 위해 google-chrome을 최우선으로 둠
for cmd in ["google-chrome", "google-chrome-stable", "chromium", "chromium-browser"]:
found = shutil.which(cmd)
if found:
if not is_bad_snap(found):
res["browser_found"] = True
res["browser_path"] = found
break
else:
res["snap_error"] = True # 스냅 래퍼 발견 알림용
# OS 및 설치 가능 여부 확인
if res["os"] == "Linux":
try:
if os.path.exists("/etc/debian_version"):
res["dist"] = "debian/ubuntu"
res["can_install"] = True
except:
pass
return res
    @classmethod
    def install_system_browser(cls) -> Dict[str, Any]:
        """Install a system browser automatically (Ubuntu/Debian only).

        Registers Google's apt repository, installs google-chrome-stable
        (avoiding Ubuntu's Snap chromium wrapper) and stores the detected
        browser path in the plugin settings.

        Returns:
            Dict with ``ret`` ("success"/"error"), a user-facing ``msg``
            and, on full success, the resolved ``path``.
        """
        import subprocess as sp
        check = cls.system_check()
        if not check["can_install"]:
            return {"ret": "error", "msg": "자동 설치가 지원되지 않는 OS 환경입니다. 안내된 명령어를 터미널에서 직접 실행해 주세요."}
        try:
            logger.info("[Zendriver] Starting system browser installation...")
            # Register the Google Chrome repo and install (avoids Snap).
            sp.run(["apt-get", "update"], capture_output=True, text=True, timeout=300)
            sp.run(["apt-get", "install", "-y", "wget", "gnupg"], capture_output=True, text=True, timeout=300)
            # Google key & repo.
            # NOTE(review): the shell=True pipelines below run as root with
            # constant strings — keep them constant to avoid injection risk.
            sp.run("wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add -", shell=True, capture_output=True, text=True, timeout=60)
            sp.run("echo 'deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main' > /etc/apt/sources.list.d/google.list", shell=True, capture_output=True, text=True, timeout=60)
            sp.run(["apt-get", "update"], capture_output=True, text=True, timeout=300)
            # Install Chrome itself.
            process = sp.run(
                ["apt-get", "install", "-y", "google-chrome-stable"],
                capture_output=True,
                text=True,
                timeout=600
            )
            if process.returncode == 0:
                logger.info("[Zendriver] System browser installed successfully")
                # Re-detect the browser and persist its path on success.
                new_check = cls.system_check()
                if new_check["browser_found"]:
                    P.ModelSetting.set("ohli24_zendriver_browser_path", new_check["browser_path"])
                    return {"ret": "success", "msg": "브라우저 설치 및 경로 설정이 완료되었습니다.", "path": new_check["browser_path"]}
                return {"ret": "success", "msg": "설치는 완료되었으나 경로를 자동으로 찾지 못했습니다. 직접 입력해 주세요."}
            else:
                logger.error(f"[Zendriver] Installation failed: {process.stderr}")
                return {"ret": "error", "msg": f"설치 중 오류가 발생했습니다: {process.stderr[:200]}"}
        except sp.TimeoutExpired:
            return {"ret": "error", "msg": "설치 시간이 초과되었습니다. 네트워크 상태를 확인하거나 터미널에서 직접 실행해 주세요."}
        except Exception as e:
            logger.error(f"[Zendriver] Install exception: {e}")
            return {"ret": "error", "msg": f"설치 중 예외가 발생했습니다: {str(e)}"}
    def __init__(self, P: Any) -> None:
        """Register default settings and initialize the ohli24 module."""
        # Default DB settings, created on first run by the base class.
        self.db_default = {
            "ohli24_db_version": "1",
            "ohli24_proxy_url": "",
            "ohli24_discord_webhook_url": "",
            "ohli24_url": "https://ani.ohli24.com",
            "ohli24_download_path": os.path.join(path_data, P.package_name, "ohli24"),
            "ohli24_auto_make_folder": "True",
            f"{name}_recent_code": "",
            "ohli24_auto_make_season_folder": "True",
            "ohli24_finished_insert": "[완결]",
            "ohli24_max_ffmpeg_process_count": "1",
            f"{name}_download_method": "cdndania",  # cdndania (default), ffmpeg, ytdlp, aria2c
            "ohli24_download_threads": "2",  # default 2 (recommended for stability)
            "ohli24_order_desc": "False",
            "ohli24_auto_start": "False",
            "ohli24_interval": "* 5 * * *",
            "ohli24_auto_mode_all": "False",
            "ohli24_auto_code_list": "",
            "ohli24_current_code": "",
            "ohli24_uncompleted_auto_enqueue": "False",
            "ohli24_image_url_prefix_series": "https://www.jetcloud.cc/series/",
            "ohli24_image_url_prefix_episode": "https://www.jetcloud-list.cc/thumbnail/",
            "ohli24_discord_notify": "True",
            "ohli24_zendriver_browser_path": "",
            "ohli24_cache_minutes": "5",  # 0 = no caching; 5, 10, 15, 30 minutes, etc.
        }
        super(LogicOhli24, self).__init__(P, name=name, first_menu='setting', scheduler_desc="ohli24 자동 다운로드", setup_default=self.db_default)
        self.queue = None
        # default_route_socketio(P, self)
        self.web_list_model = ModelOhli24Item
        default_route_socketio_module(self, attach="/queue")
2022-10-29 17:21:14 +09:00
@staticmethod
def get_base_url():
return P.ModelSetting.get("ohli24_url").rstrip('/')
def cleanup_stale_temps(self) -> None:
"""서버 시작 시 잔여 tmp 폴더 정리"""
try:
download_path = P.ModelSetting.get("ohli24_download_path")
if not download_path or not os.path.exists(download_path):
return
logger.info(f"Checking for stale temp directories in: {download_path}")
# 다운로드 경로 순회 (1 depth만 확인해도 충분할 듯 하나, 시즌 폴더 고려하여 recursively)
for root, dirs, files in os.walk(download_path):
for dir_name in dirs:
if dir_name.startswith("tmp") and len(dir_name) > 3:
full_path = os.path.join(root, dir_name)
try:
import shutil
logger.info(f"Removing stale temp directory: {full_path}")
shutil.rmtree(full_path)
except Exception as e:
logger.error(f"Failed to remove stale temp dir {full_path}: {e}")
except Exception as e:
logger.error(f"Error during stale temp cleanup: {e}")
2022-10-29 17:21:14 +09:00
# try:
# for key, value in P.Logic.db_default.items():
# if db.session.query(ModelSetting).filter_by(key=key).count() == 0:
# db.session.add(ModelSetting(key, value))
# db.session.commit()
# except Exception as e:
# logger.error('Exception:%s', e)
# logger.error(traceback.format_exc())
2022-10-29 17:21:14 +09:00
# @staticmethod
    # @staticmethod
    def process_ajax(self, sub: str, req: Any) -> Any:
        """Dispatch AJAX requests from the web UI by *sub* command name.

        Returns a Flask JSON response; unknown subs fall through to the
        base class handler at the bottom.
        """
        try:
            cate = request.form.get("type", None)
            page = request.form.get("page", None)
            if sub == "analysis":
                # Analyse a series page and cache the parsed data on self.
                code = request.form["code"]
                # cate = request.form["type"]
                wr_id = request.form.get("wr_id", None)
                bo_table = request.form.get("bo_table", None)
                P.ModelSetting.set("ohli24_current_code", code)
                data = self.get_series_info(code, wr_id, bo_table)
                P.ModelSetting.set(f"{self.name}_recent_code", code)
                self.current_data = data
                return jsonify({"ret": "success", "data": data, "code": code})
            elif sub == "anime_list":
                sca = request.form.get("sca", None)
                data = self.get_anime_info(cate, page, sca=sca)
                if isinstance(data, dict) and data.get("ret") == "error":
                    return jsonify(data)
                return jsonify({"ret": "success", "cate": cate, "page": page, "data": data, "sca": sca})
            elif sub == "complete_list":
                logger.debug("cate:: %s", cate)
                page = request.form["page"]
                sca = request.form.get("sca", None)
                data = self.get_anime_info(cate, page, sca=sca)
                if isinstance(data, dict) and data.get("ret") == "error":
                    return jsonify(data)
                return jsonify({"ret": "success", "cate": cate, "page": page, "data": data, "sca": sca})
            elif sub == "search":
                query = request.form["query"]
                page = request.form["page"]
                data = self.get_search_result(query, page, cate)
                if isinstance(data, dict) and data.get("ret") == "error":
                    return jsonify(data)
                return jsonify(
                    {
                        "ret": "success",
                        "cate": cate,
                        "page": page,
                        "query": query,
                        "data": data,
                    }
                )
            elif sub == "add_queue":
                # Enqueue a single episode described by the posted JSON blob.
                ret = {}
                data_str = request.form.get("data")
                if not data_str:
                    logger.error("Missing 'data' in add_queue request")
                    return jsonify({"ret": "error", "msg": "Missing data"})
                try:
                    info = json.loads(data_str)
                    logger.info(f"info:: {info}")
                    ret["ret"] = self.add(info)
                except Exception as e:
                    logger.error(f"Failed to process add_queue: {e}")
                    ret["ret"] = "error"
                    ret["msg"] = str(e)
                return jsonify(ret)
            # todo: new version
            # info = json.loads(request.form["data"])
            # logger.info(info)
            # logger.info(self.current_data)
            # # 1. DB lookup
            # db_item = ModelOhli24Program.get(info['_id'])
            # logger.debug(db_item)
            #
            # if db_item is not None:
            #     print(f"db_item is not None")
            #     pass
            # else:
            #     if db_item == None:
            #         db_item = ModelOhli24Program(info['_id'], self.get_episode(info['_id']))
            #         db_item.save()
            elif sub == "entity_list":
                if ModuleQueue:
                    # Filter GDM downloads down to the ones this module requested.
                    caller_id = f"{P.package_name}_{self.name}"
                    all_items = [d.get_status() for d in ModuleQueue._downloads.values()]
                    plugin_items = [i for i in all_items if i.get('caller_plugin') == caller_id]
                    # Map GDM status data onto the ffmpeg_queue_v1-compatible UI shape.
                    mapped_items = []
                    status_map = {
                        'pending': '대기중',
                        'extracting': '추출중',
                        'downloading': '다운로드중',
                        'paused': '일시정지',
                        'completed': '완료',
                        'error': '실패',
                        'cancelled': '취소됨'
                    }
                    active_ids = set()
                    for item in plugin_items:
                        active_ids.add(item.get('callback_id'))
                        mapped = {
                            'entity_id': item['id'],  # GDM id -> entity_id
                            'filename': item['filename'],
                            'ffmpeg_percent': item['progress'],  # progress -> ffmpeg_percent
                            'ffmpeg_status_kor': status_map.get(item['status'], item['status']),
                            'current_speed': item['speed'],
                            'created_time': item.get('created_time', ''),  # blank when GDM lacks it
                            'content_title': item.get('title', ''),
                        }
                        # Additional fields required by the Ohli24 UI go here.
                        mapped_items.append(mapped)
                    # Append completed items from the most recent 50 DB rows.
                    try:
                        from framework import F
                        with F.app.app_context():
                            db_items = F.db.session.query(ModelOhli24Item).order_by(ModelOhli24Item.id.desc()).limit(50).all()
                            for db_item in db_items:
                                # Skip rows that are still active in GDM.
                                if db_item.ohli24_id in active_ids:
                                    continue
                                # Only completed rows are appended.
                                if db_item.status == 'completed':
                                    mapped = {
                                        'entity_id': f"db_{db_item.id}",
                                        'filename': db_item.filename or '파일명 없음',
                                        'ffmpeg_percent': 100,
                                        'ffmpeg_status_kor': '완료',
                                        'current_speed': '',
                                        'created_time': str(db_item.created_time) if db_item.created_time else '',
                                        'content_title': db_item.title or '',
                                    }
                                    mapped_items.append(mapped)
                    except Exception as e:
                        logger.warning(f"Failed to add DB items to entity_list: {e}")
                    return jsonify(mapped_items)
                return jsonify(self.queue.get_entity_list() if self.queue else [])
            elif sub == "queue_list":
                return jsonify([])
            elif sub == "queue_command":
                command = req.form["command"]
                entity_id = req.form["entity_id"]
                if ModuleQueue:
                    if command == "stop" or command == "cancel":
                        ModuleQueue.process_ajax('cancel', req)
                        return jsonify({'ret':'success'})
                    elif command == "reset":
                        # Cancel only this module's downloads; leave other
                        # plugins' GDM items untouched.
                        caller_id = f"{P.package_name}_{self.name}"
                        cancelled_count = 0
                        for task_id, task in list(ModuleQueue._downloads.items()):
                            if task.caller_plugin == caller_id:
                                task.cancel()
                                del ModuleQueue._downloads[task_id]
                                cancelled_count += 1
                        # Also clear the Ohli24 DB table.
                        try:
                            from framework import F
                            with F.app.app_context():
                                F.db.session.query(ModelOhli24Item).delete()
                                F.db.session.commit()
                        except Exception as e:
                            logger.error(f"Failed to clear Ohli24 DB: {e}")
                        return jsonify({'ret':'notify', 'log':f'{cancelled_count}개 Ohli24 항목이 초기화되었습니다.'})
                    elif command == "delete_completed":
                        # Delete completed rows only.
                        try:
                            from framework import F
                            with F.app.app_context():
                                F.db.session.query(ModelOhli24Item).filter(ModelOhli24Item.status == 'completed').delete()
                                F.db.session.commit()
                        except Exception as e:
                            logger.error(f"Failed to delete completed: {e}")
                        return jsonify({'ret':'success', 'log':'완료 항목이 삭제되었습니다.'})
                if self.queue:
                    ret = self.queue.command(command, int(entity_id))
                    return jsonify(ret)
                return jsonify({'ret':'error', 'msg':'Queue not initialized'})
            elif sub == "add_queue_checked_list":
                data = json.loads(request.form["data"])

                def func():
                    # Enqueue every checked episode; refresh the list per success.
                    count = 0
                    for tmp in data:
                        add_ret = self.add(tmp)
                        if add_ret.startswith("enqueue"):
                            self.socketio_callback("list_refresh", "")
                            count += 1
                    notify = {
                        "type": "success",
                        "msg": "%s 개의 에피소드를 큐에 추가 하였습니다." % count,
                    }
                    socketio.emit("notify", notify, namespace="/framework")

                # Run in the background so the HTTP request returns immediately.
                thread = threading.Thread(target=func, args=())
                thread.daemon = True
                thread.start()
                return jsonify("")
            elif sub == "web_list3":
                # print("web_list3")
                # print(request)
                # P.logger.debug(req)
                # P.logger.debug("web_list3")
                ret = ModelOhli24Item.web_list(req)
                # print(ret)
                return jsonify(ret)
            elif sub == "web_list2":
                logger.debug("web_list2")
                return jsonify(ModelOhli24Item.web_list(request))
            elif sub == "db_remove":
                db_id = request.form.get("id")
                if not db_id:
                    return jsonify({"ret": "error", "log": "No ID provided"})
                return jsonify(ModelOhli24Item.delete_by_id(db_id))
            elif sub == "add_whitelist":
                try:
                    # params = request.get_data()
                    # logger.debug(f"params: {params}")
                    # data_code = request.args.get("data_code")
                    params = request.get_json()
                    logger.debug(f"params:: {params}")
                    if params is not None:
                        code = params["data_code"]
                        logger.debug(f"params: {code}")
                        ret = LogicOhli24.add_whitelist(code)
                    else:
                        ret = LogicOhli24.add_whitelist()
                    return jsonify(ret)
                except Exception as e:
                    logger.error(f"Exception: {e}")
                    logger.error(traceback.format_exc())
                    return jsonify({"error": str(e)}), 500
            elif sub == "system_check":
                return jsonify(self.system_check())
            elif sub == "install_browser":
                return jsonify(self.install_system_browser())
            elif sub == "stream_video":
                # Serve an MP4 file directly; Range/CORS headers keep external
                # players (MXPlayer, VLC, ...) happy.
                try:
                    from flask import send_file, Response
                    import mimetypes
                    file_path = request.args.get("path", "")
                    logger.info(f"Stream video request: {file_path}")
                    if not file_path or not os.path.exists(file_path):
                        return jsonify({"error": "File not found"}), 404
                    # Security check: path must live inside the download folder.
                    download_path = P.ModelSetting.get("ohli24_download_path")
                    if not file_path.startswith(download_path):
                        return jsonify({"error": "Access denied"}), 403
                    file_size = os.path.getsize(file_path)
                    filename = os.path.basename(file_path)
                    mimetype = mimetypes.guess_type(file_path)[0] or 'video/mp4'
                    range_header = request.headers.get('Range', None)
                    # Common headers (external-player compatibility).
                    # RFC 5987: non-ASCII filenames must be UTF-8 encoded.
                    encoded_filename = urllib.parse.quote(filename)
                    common_headers = {
                        'Accept-Ranges': 'bytes',
                        'Access-Control-Allow-Origin': '*',
                        'Access-Control-Allow-Methods': 'GET, HEAD, OPTIONS',
                        'Access-Control-Allow-Headers': 'Range, Content-Type',
                        'Access-Control-Expose-Headers': 'Content-Length, Content-Range, Accept-Ranges',
                        'Content-Disposition': f"inline; filename*=UTF-8''{encoded_filename}",
                    }
                    # Handle CORS preflight (OPTIONS) requests.
                    if request.method == 'OPTIONS':
                        resp = Response('', status=200)
                        for k, v in common_headers.items():
                            resp.headers[k] = v
                        return resp
                    if range_header:
                        byte_start, byte_end = 0, None
                        match = re.search(r'bytes=(\d+)-(\d*)', range_header)
                        if match:
                            byte_start = int(match.group(1))
                            byte_end = int(match.group(2)) if match.group(2) else file_size - 1
                        if byte_end is None or byte_end >= file_size:
                            byte_end = file_size - 1
                        length = byte_end - byte_start + 1

                        def generate():
                            with open(file_path, 'rb') as f:
                                f.seek(byte_start)
                                remaining = length
                                while remaining > 0:
                                    chunk_size = min(65536, remaining)  # 64KB chunks for better streaming
                                    data = f.read(chunk_size)
                                    if not data:
                                        break
                                    remaining -= len(data)
                                    yield data

                        resp = Response(
                            generate(),
                            status=206,
                            mimetype=mimetype,
                            direct_passthrough=True
                        )
                        resp.headers['Content-Range'] = f'bytes {byte_start}-{byte_end}/{file_size}'
                        resp.headers['Content-Length'] = length
                        for k, v in common_headers.items():
                            resp.headers[k] = v
                        return resp
                    else:
                        # Non-range request: send the whole file.
                        resp = send_file(
                            file_path,
                            mimetype=mimetype,
                            as_attachment=False,
                            download_name=filename
                        )
                        for k, v in common_headers.items():
                            resp.headers[k] = v
                        return resp
                except Exception as e:
                    logger.error(f"Stream video error: {e}")
                    logger.error(traceback.format_exc())
                    return jsonify({"error": str(e)}), 500
            elif sub == "generate_stream_token":
                # Generate a temporary streaming token for external players
                try:
                    file_path = request.args.get("path", "") or request.form.get("path", "")
                    if not file_path:
                        return jsonify({"error": "No path provided"}), 400
                    # Normalize path
                    file_path = unicodedata.normalize('NFC', file_path)
                    if not os.path.exists(file_path):
                        return jsonify({"error": "File not found"}), 404
                    # Security check: must be in download folder
                    download_path = P.ModelSetting.get("ohli24_download_path")
                    norm_file_path = unicodedata.normalize('NFC', os.path.abspath(file_path))
                    norm_dl_path = unicodedata.normalize('NFC', os.path.abspath(download_path))
                    if not norm_file_path.startswith(norm_dl_path):
                        return jsonify({"error": "Access denied"}), 403
                    token = self._generate_stream_token(file_path)
                    logger.info(f"Generated stream token for: {file_path[:50]}...")
                    return jsonify({
                        "ret": "success",
                        "token": token,
                        "ttl": self._TOKEN_TTL_SECONDS
                    })
                except Exception as e:
                    logger.error(f"Generate stream token error: {e}")
                    logger.error(traceback.format_exc())
                    return jsonify({"error": str(e)}), 500
            elif sub == "stream_with_token":
                # Stream video using temporary token (NO AUTH REQUIRED)
                try:
                    from flask import send_file, Response
                    import mimetypes
                    token = request.args.get("token", "")
                    if not token:
                        return jsonify({"error": "No token provided"}), 400
                    file_path = self._validate_stream_token(token)
                    if not file_path:
                        return jsonify({"error": "Invalid or expired token"}), 403
                    logger.info(f"Token stream request: {file_path[:50]}...")
                    if not os.path.exists(file_path):
                        return jsonify({"error": "File not found"}), 404
                    file_size = os.path.getsize(file_path)
                    filename = os.path.basename(file_path)
                    mimetype = mimetypes.guess_type(file_path)[0] or 'video/mp4'
                    range_header = request.headers.get('Range', None)
                    # Common headers for external player compatibility
                    encoded_filename = urllib.parse.quote(filename)
                    common_headers = {
                        'Accept-Ranges': 'bytes',
                        'Access-Control-Allow-Origin': '*',
                        'Access-Control-Allow-Methods': 'GET, HEAD, OPTIONS',
                        'Access-Control-Allow-Headers': 'Range, Content-Type',
                        'Access-Control-Expose-Headers': 'Content-Length, Content-Range, Accept-Ranges',
                        'Content-Disposition': f"inline; filename*=UTF-8''{encoded_filename}",
                    }
                    if request.method == 'OPTIONS':
                        resp = Response('', status=200)
                        for k, v in common_headers.items():
                            resp.headers[k] = v
                        return resp
                    if range_header:
                        byte_start, byte_end = 0, None
                        match = re.search(r'bytes=(\d+)-(\d*)', range_header)
                        if match:
                            byte_start = int(match.group(1))
                            byte_end = int(match.group(2)) if match.group(2) else file_size - 1
                        if byte_end is None or byte_end >= file_size:
                            byte_end = file_size - 1
                        length = byte_end - byte_start + 1

                        def generate():
                            with open(file_path, 'rb') as f:
                                f.seek(byte_start)
                                remaining = length
                                while remaining > 0:
                                    chunk_size = min(65536, remaining)
                                    data = f.read(chunk_size)
                                    if not data:
                                        break
                                    remaining -= len(data)
                                    yield data

                        resp = Response(
                            generate(),
                            status=206,
                            mimetype=mimetype,
                            direct_passthrough=True
                        )
                        resp.headers['Content-Range'] = f'bytes {byte_start}-{byte_end}/{file_size}'
                        resp.headers['Content-Length'] = length
                        for k, v in common_headers.items():
                            resp.headers[k] = v
                        return resp
                    else:
                        resp = send_file(
                            file_path,
                            mimetype=mimetype,
                            as_attachment=False,
                            download_name=filename
                        )
                        for k, v in common_headers.items():
                            resp.headers[k] = v
                        return resp
                except Exception as e:
                    logger.error(f"Stream with token error: {e}")
                    logger.error(traceback.format_exc())
                    return jsonify({"error": str(e)}), 500
            elif sub == "get_playlist":
                # Build a same-season playlist from the current file's folder.
                try:
                    file_path = request.args.get("path", "")
                    if not file_path or not os.path.exists(file_path):
                        return jsonify({"error": "File not found", "playlist": [], "current_index": 0}), 404
                    # Security check.
                    download_path = P.ModelSetting.get("ohli24_download_path")
                    # Normalize both paths to NFC and absolute paths for comparison
                    norm_file_path = unicodedata.normalize('NFC', os.path.abspath(file_path))
                    norm_dl_path = unicodedata.normalize('NFC', os.path.abspath(download_path))
                    if not norm_file_path.startswith(norm_dl_path):
                        return jsonify({"error": "Access denied", "playlist": [], "current_index": 0}), 403
                    folder = os.path.dirname(file_path)
                    current_file = os.path.basename(file_path)
                    # Extract the SxxExx pattern (flexible separators).
                    ep_match = re.search(r'[ .\-_]S(\d+)E(\d+)[ .\-_]', current_file, re.IGNORECASE)
                    if not ep_match:
                        # Without a pattern, return just the current file.
                        return jsonify({
                            "playlist": [{"path": file_path, "name": current_file}],
                            "current_index": 0
                        })
                    current_season = int(ep_match.group(1))
                    current_episode = int(ep_match.group(2))
                    # Collect every mp4 in the same folder.
                    all_files = []
                    for f in os.listdir(folder):
                        # Normalize to NFC for consistent matching
                        f_nfc = unicodedata.normalize('NFC', f)
                        if f_nfc.endswith('.mp4'):
                            match = re.search(r'[ .\-_]S(\d+)E(\d+)[ .\-_]', f_nfc, re.IGNORECASE)
                            if match:
                                s = int(match.group(1))
                                e = int(match.group(2))
                                all_files.append({
                                    "path": os.path.join(folder, f),
                                    "name": f,
                                    "season": s,
                                    "episode": e
                                })
                    # Sort by season, then episode.
                    all_files.sort(key=lambda x: (x["season"], x["episode"]))
                    logger.debug(f"[PLAYLIST_DEBUG] Folder: {folder}")
                    logger.debug(f"[PLAYLIST_DEBUG] All files in folder: {os.listdir(folder)[:10]}...")  # First 10
                    logger.debug(f"[PLAYLIST_DEBUG] Matched SxxExx files: {len(all_files)}")
                    logger.debug(f"[PLAYLIST_DEBUG] Current: S{current_season:02d}E{current_episode:02d}")
                    # Include every episode of the current season (whole-season playback).
                    playlist = []
                    current_index = 0
                    for i, f in enumerate(all_files):
                        if f["season"] == current_season:
                            entry = {"path": f["path"], "name": f["name"]}
                            if f["episode"] == current_episode:
                                current_index = len(playlist)
                            playlist.append(entry)
                    logger.info(f"Playlist: {len(playlist)} items, current_index: {current_index}")
                    return jsonify({
                        "playlist": playlist,
                        "current_index": current_index
                    })
                except Exception as e:
                    logger.error(f"Get playlist error: {e}")
                    logger.error(traceback.format_exc())
                    return jsonify({"error": str(e), "playlist": [], "current_index": 0}), 500
        except Exception as e:
            P.logger.error(f"Exception: {e}")
            P.logger.error(traceback.format_exc())
            return jsonify({"error": str(e)}), 500
        # Directory-browsing endpoint (reached when the dispatch above matched
        # nothing and raised no exception).
        if sub == "browse_dir":
            try:
                path = request.form.get("path", "")
                # Default path: configured download path or the home directory.
                if not path or not os.path.exists(path):
                    path = P.ModelSetting.get("ohli24_download_path") or os.path.expanduser("~")
                # Normalize the path.
                path = os.path.abspath(path)
                if not os.path.isdir(path):
                    path = os.path.dirname(path)
                # List visible child directories.
                directories = []
                try:
                    for item in sorted(os.listdir(path)):
                        item_path = os.path.join(path, item)
                        if os.path.isdir(item_path) and not item.startswith('.'):
                            directories.append({
                                "name": item,
                                "path": item_path
                            })
                except PermissionError:
                    pass
                # Parent folder (None at the filesystem root).
                parent = os.path.dirname(path) if path != "/" else None
                return jsonify({
                    "ret": "success",
                    "current_path": path,
                    "parent_path": parent,
                    "directories": directories
                })
            except Exception as e:
                logger.error(f"browse_dir error: {e}")
                return jsonify({"ret": "error", "error": str(e)}), 500
        # Fallback to base class for common subs (setting_save, queue_command, entity_list, etc.)
        return super().process_ajax(sub, req)
2022-10-29 17:21:14 +09:00
def get_episode(self, clip_id: str) -> Optional[Dict[str, Any]]:
    """Return the episode dict whose title equals *clip_id*, or None.

    Looks the episode up in ``self.current_data["episode"]`` (populated by
    a prior series analysis); titles double as clip identifiers here.
    """
    episodes = self.current_data["episode"]
    matches = (item for item in episodes if item["title"] == clip_id)
    return next(matches, None)
2022-11-12 23:47:21 +09:00
def process_normal(self, sub: str, req: Any) -> Any:
    """Endpoints reachable WITHOUT authentication (for external video players).

    Currently handles only ``stream_with_token``: validates a temporary token,
    then serves the referenced local file with HTTP Range support so external
    players can seek. Returns None for any other *sub* (caller decides what
    that means); returns (json, status) tuples on errors.
    """
    try:
        if sub == "stream_with_token":
            # Stream video using a temporary token (NO AUTH REQUIRED).
            from flask import send_file, Response
            import mimetypes

            token = request.args.get("token", "")
            if not token:
                return jsonify({"error": "No token provided"}), 400
            # Token maps to an absolute file path; None means invalid/expired.
            file_path = self._validate_stream_token(token)
            if not file_path:
                return jsonify({"error": "Invalid or expired token"}), 403
            logger.info(f"Token stream request: {file_path[:50]}...")
            if not os.path.exists(file_path):
                return jsonify({"error": "File not found"}), 404

            file_size = os.path.getsize(file_path)
            filename = os.path.basename(file_path)
            mimetype = mimetypes.guess_type(file_path)[0] or 'video/mp4'
            range_header = request.headers.get('Range', None)

            # Common headers for external-player compatibility:
            # CORS is fully open and the filename is RFC 5987 percent-encoded
            # so non-ASCII titles survive in Content-Disposition.
            encoded_filename = urllib.parse.quote(filename)
            common_headers = {
                'Accept-Ranges': 'bytes',
                'Access-Control-Allow-Origin': '*',
                'Access-Control-Allow-Methods': 'GET, HEAD, OPTIONS',
                'Access-Control-Allow-Headers': 'Range, Content-Type',
                'Access-Control-Expose-Headers': 'Content-Length, Content-Range, Accept-Ranges',
                'Content-Disposition': f"inline; filename*=UTF-8''{encoded_filename}",
            }

            # CORS preflight: reply with the permissive headers and no body.
            if request.method == 'OPTIONS':
                resp = Response('', status=200)
                for k, v in common_headers.items():
                    resp.headers[k] = v
                return resp

            if range_header:
                # Partial content: parse "bytes=start-end" (end optional).
                byte_start, byte_end = 0, None
                match = re.search(r'bytes=(\d+)-(\d*)', range_header)
                if match:
                    byte_start = int(match.group(1))
                    byte_end = int(match.group(2)) if match.group(2) else file_size - 1
                # Clamp an absent or out-of-range end to the last byte.
                if byte_end is None or byte_end >= file_size:
                    byte_end = file_size - 1
                length = byte_end - byte_start + 1

                def generate():
                    # Stream the requested window in 64 KiB chunks.
                    with open(file_path, 'rb') as f:
                        f.seek(byte_start)
                        remaining = length
                        while remaining > 0:
                            chunk_size = min(65536, remaining)
                            data = f.read(chunk_size)
                            if not data:
                                break
                            remaining -= len(data)
                            yield data

                resp = Response(
                    generate(),
                    status=206,
                    mimetype=mimetype,
                    direct_passthrough=True
                )
                resp.headers['Content-Range'] = f'bytes {byte_start}-{byte_end}/{file_size}'
                resp.headers['Content-Length'] = length
                for k, v in common_headers.items():
                    resp.headers[k] = v
                return resp
            else:
                # No Range header: serve the whole file in one response.
                resp = send_file(
                    file_path,
                    mimetype=mimetype,
                    as_attachment=False,
                    download_name=filename
                )
                for k, v in common_headers.items():
                    resp.headers[k] = v
                return resp
    except Exception as e:
        logger.error(f"process_normal error: {e}")
        logger.error(traceback.format_exc())
        return jsonify({"error": str(e)}), 500
    # Unknown sub: explicit None (no matching endpoint here).
    return None
def process_command(
    self, command: str, arg1: str, arg2: str, arg3: str, req: Any
) -> Any:
    """Command dispatcher for the queue UI.

    ``list`` merges this module's own queue entities with any GDM
    (gommi_downloader_manager) tasks tagged with this plugin's name.
    ``stop``/``remove``/``cancel`` route GDM tasks (ids prefixed ``dl_``)
    to GDM, everything else to the base class. ``download_program``
    enqueues a program for download. Returns a Flask JSON response.
    """
    try:
        if command == "list":
            # 1. Entities from our own (legacy) ffmpeg queue, if present.
            ret = self.queue.get_entity_list() if self.queue else []
            # 2. GDM tasks, if GDM is installed.
            try:
                from gommi_downloader_manager.mod_queue import ModuleQueue
                if ModuleQueue:
                    gdm_tasks = ModuleQueue.get_all_downloads()
                    # Keep only tasks that this module (ohli24) enqueued.
                    ohli24_tasks = [t for t in gdm_tasks if t.caller_plugin == f"{P.package_name}_{self.name}"]
                    for task in ohli24_tasks:
                        # Convert to the template-compatible shape.
                        gdm_item = self._convert_gdm_task_to_queue_item(task)
                        ret.append(gdm_item)
            except Exception as e:
                # Best effort: a missing/broken GDM must not break listing.
                logger.debug(f"GDM tasks fetch error: {e}")
            return jsonify(ret)
        elif command in ["stop", "remove", "cancel"]:
            entity_id = arg1
            if entity_id and str(entity_id).startswith("dl_"):
                # "dl_" prefix marks a GDM-managed task.
                try:
                    from gommi_downloader_manager.mod_queue import ModuleQueue
                    if ModuleQueue:
                        if command == "stop" or command == "cancel":
                            task = ModuleQueue.get_download(entity_id)
                            if task:
                                task.cancel()
                                return jsonify({"ret": "success", "log": "GDM 작업을 중지하였습니다."})
                        elif command == "remove" or command == "delete":
                            # GDM's process_ajax expects a request-like object
                            # carrying the id in .form.
                            class DummyReq:
                                def __init__(self, id):
                                    self.form = {"id": id}
                            ModuleQueue.process_ajax("delete", DummyReq(entity_id))
                            return jsonify({"ret": "success", "log": "GDM 작업을 삭제하였습니다."})
                except Exception as e:
                    logger.error(f"GDM command error: {e}")
                    return jsonify({"ret": "error", "log": f"GDM 명령 실패: {e}"})
            # Non-GDM id: let the base class operate on our own queue.
            return super().process_command(command, arg1, arg2, arg3, req)

        if command == "download_program":
            ret: Dict[str, Any] = {"ret": "success"}
            _pass = arg2  # "true" forces past the duplicate-in-DB warning
            db_item = ModelOhli24Program.get(arg1)
            if _pass == "false" and db_item is not None:
                ret["ret"] = "warning"
                ret["msg"] = "이미 DB에 있는 항목 입니다."
            elif (
                _pass == "true"
                and db_item is not None
                and ModelOhli24Program.get_by_id_in_queue(db_item.id) is not None
            ):
                ret["ret"] = "warning"
                ret["msg"] = "이미 큐에 있는 항목 입니다."
            else:
                if db_item is None:
                    db_item = ModelOhli24Program(arg1, self.get_episode(arg1))
                    db_item.save()
                # NOTE(review): init_for_queue/put appear intended for both
                # new and existing items — confirm against original indent.
                db_item.init_for_queue()
                self.download_queue.put(db_item)
                ret["msg"] = "다운로드를 추가 하였습니다."
            return jsonify(ret)

        # Fallback: common commands handled by the base module.
        return super().process_command(command, arg1, arg2, arg3, req)
    except Exception as e:
        logger.error(f"process_command Error: {e}")
        logger.error(traceback.format_exc())
        return jsonify({'ret': 'fail', 'log': str(e)})
def _convert_gdm_task_to_queue_item(self, task):
"""GDM DownloadTask 객체를 FfmpegQueueEntity.as_dict() 호환 형식으로 변환"""
status_kor_map = {
"pending": "대기중",
"extracting": "분석중",
"downloading": "다운로드중",
"paused": "일시정지",
"completed": "완료",
"error": "실패",
"cancelled": "취소됨"
}
status_str_map = {
"pending": "WAITING",
"extracting": "ANALYZING",
"downloading": "DOWNLOADING",
"paused": "PAUSED",
"completed": "COMPLETED",
"error": "FAILED",
"cancelled": "FAILED"
}
t_dict = task.as_dict()
return {
"entity_id": t_dict["id"],
"url": t_dict["url"],
"filename": t_dict["filename"] or t_dict["title"],
"status_kor": status_kor_map.get(t_dict["status"], "알수없음"),
"percent": t_dict["progress"],
"created_time": t_dict["created_time"],
"current_speed": t_dict["speed"] or "0 B/s",
"download_time": t_dict["eta"] or "-",
"status_str": status_str_map.get(t_dict["status"], "WAITING"),
"idx": t_dict["id"],
"callback_id": "ohli24",
"start_time": t_dict["start_time"] or t_dict["created_time"],
"save_fullpath": t_dict["filepath"],
"duration_str": "GDM",
"current_pf_count": 0,
"duration": "-",
"current_duration": "-",
"current_bitrate": "-",
"max_pf_count": 0,
"is_gdm": True
}
def plugin_callback(self, data):
    """Receive a download-status update pushed by the GDM module.

    *data* is a dict carrying at least ``callback_id`` (our ohli24 item id)
    and ``status``. On ``completed`` the matching ModelOhli24Item row is
    marked completed with the final filepath. All errors are logged and
    swallowed — a callback must never crash the caller.
    """
    try:
        callback_id = data.get('callback_id')
        status = data.get('status')
        logger.info(f"[Ohli24] Received GDM callback: id={callback_id}, status={status}")
        if callback_id:
            from framework import F
            # DB access requires an active Flask application context,
            # since the callback may arrive on a non-request thread.
            with F.app.app_context():
                db_item = ModelOhli24Item.get_by_ohli24_id(callback_id)
                if db_item:
                    if status == "completed":
                        db_item.status = "completed"
                        db_item.completed_time = datetime.now()
                        db_item.filepath = data.get('filepath')
                        db_item.save()
                        logger.info(f"[Ohli24] Updated DB item {db_item.id} to COMPLETED via GDM callback")
                    elif status == "error":
                        # Error status intentionally ignored for now.
                        pass
    except Exception as e:
        logger.error(f"[Ohli24] Callback processing error: {e}")
        logger.error(traceback.format_exc())
2022-11-12 23:47:21 +09:00
2022-10-29 17:21:14 +09:00
@staticmethod
def add_whitelist(*args: str) -> Dict[str, Any]:
    """Add a content code to the ``ohli24_auto_code_list`` whitelist setting.

    With no arguments the code is read from ``LogicOhli24.current_data["code"]``
    and, on success, the whole ``current_data`` dict is returned (legacy UI
    contract); with one argument that code is used and a small status dict
    ``{"ret": bool, ...}`` is returned. Duplicate codes are rejected with
    ``ret == False``.
    """
    ret: Dict[str, Any] = {}

    logger.debug(f"args: {args}")
    try:
        if len(args) == 0:
            code = str(LogicOhli24.current_data["code"])
        else:
            code = str(args[0])
        # FIX: stray debug print() replaced with a logger call.
        logger.debug(f"add_whitelist code: {code}")

        whitelist_program = P.ModelSetting.get("ohli24_auto_code_list")
        # Setting is a "|"- or newline-separated list of codes.
        whitelist_programs = [str(x.strip()) for x in whitelist_program.replace("\n", "|").split("|")]

        if code not in whitelist_programs:
            whitelist_programs.append(code)
            # Drop empty entries (FIX: explicit list instead of a lazy
            # filter object — clearer, and safe to reuse).
            whitelist_programs = [x for x in whitelist_programs if x != ""]
            whitelist_program = "|".join(whitelist_programs)
            # Row-lock the setting while rewriting it to avoid lost updates
            # from concurrent whitelist additions.
            entity = (
                db.session.query(P.ModelSetting).filter_by(key="ohli24_auto_code_list").with_for_update().first()
            )
            entity.value = whitelist_program
            db.session.commit()
            ret["ret"] = True
            ret["code"] = code
            if len(args) == 0:
                return LogicOhli24.current_data
            else:
                return ret
        else:
            ret["ret"] = False
            ret["log"] = "이미 추가되어 있습니다."
    except Exception as e:
        logger.error(f"Exception: {str(e)}")
        logger.error(traceback.format_exc())
        ret["ret"] = False
        ret["log"] = str(e)
    return ret
def setting_save_after(self, change_list: List[str]) -> None:
    """After settings are saved, sync the queue's max-ffmpeg-process limit.

    No-op when the legacy queue is not in use (GDM mode).
    """
    queue = self.queue
    if queue is None:
        return
    desired = P.ModelSetting.get_int("ohli24_max_ffmpeg_process_count")
    if queue.get_max_ffmpeg_count() != desired:
        queue.set_max_ffmpeg_count(desired)
2022-10-29 17:21:14 +09:00
def scheduler_function(self) -> None:
    """Scheduler entry point — automatic download of whitelisted series.

    Reads the ``ohli24_auto_code_list`` setting; the special value ``all``
    scans today's airing list (that branch is currently a stub — see the
    commented-out calls), otherwise each listed content code is analyzed
    and every episode is offered to ``self.add`` for enqueueing.
    """
    logger.debug("ohli24 scheduler_function::=========================")

    content_code_list = P.ModelSetting.get_list("ohli24_auto_code_list", "|")
    logger.debug(f"content_code_list::: {content_code_list}")
    url_list = ["https://www.naver.com/", "https://www.daum.net/"]
    # Weekday names used as the site's "sca" filter (Mon..Sun, Korean).
    week = ["월요일", "화요일", "수요일", "목요일", "금요일", "토요일", "일요일"]
    today = date.today()

    url = f'{LogicOhli24.get_base_url()}/bbs/board.php?bo_table=ing&sca={week[today.weekday()]}'

    if "all" in content_code_list:
        # "all" mode: fetch today's airing page, but per-item processing is
        # currently disabled (loop body intentionally does nothing yet).
        ret_data = LogicOhli24.get_auto_anime_info(self, url=url)
        logger.debug(f"today_info:: {ret_data}")
        for item in ret_data["anime_list"]:
            # wr_id = request.form.get("wr_id", None)
            # bo_table = request.form.get("bo_table", None)
            wr_id = None
            bo_table = None
            data = []
            # Temporarily disabled:
            # data = self.get_series_info(item["code"], wr_id, bo_table)
            # logger.debug(data)
        # result = asyncio.run(LogicOhli24.main(url_list))
        # logger.debug(f"result:: {result}")
    elif len(content_code_list) > 0:
        # Explicit whitelist: analyze each series and enqueue new episodes.
        for item in content_code_list:
            url = LogicOhli24.get_base_url() + "/c/" + item

            logger.debug(f"scheduling url: {url}")

            content_info = self.get_series_info(item, "", "")

            for episode_info in content_info["episode"]:
                add_ret = self.add(episode_info)
                # Refresh the UI list only when something was enqueued.
                if add_ret.startswith("enqueue"):
                    self.socketio_callback("list_refresh", "")
    # Download-completion state is checked against the DB inside add().
2022-11-12 23:47:21 +09:00
# db 에서 다운로드 완료 유무 체크
2022-10-29 17:21:14 +09:00
@staticmethod
async def get_data(url: str) -> str:
    """Asynchronously GET *url* and return the response body as text."""
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            body = await resp.text()
    return body
@staticmethod
async def main(url_list: List[str]) -> List[str]:
    """Fetch every URL in *url_list* concurrently; results keep input order."""
    pending = [LogicOhli24.get_data(target) for target in url_list]
    return await asyncio.gather(*pending)
def get_series_info(self, code: str, wr_id: Optional[str], bo_table: Optional[str]) -> Dict[str, Any]:
    """Scrape a series detail page and return title/metadata/episode list.

    *code* may be a bare content code or a full site URL (``/c/`` series or
    ``/e/`` episode form). When *wr_id* is a non-empty string the classic
    board URL (``bo_table``/``wr_id``) is used instead. The result dict also
    carries ``list_order`` and, when a matching downloaded file is found on
    disk, ``first_exist_filepath``/``first_exist_filename`` so the UI can
    show a Play button. On failure returns ``{"ret": "error", "log": ...}``.
    """
    code_type = "c"

    code = urllib.parse.quote(code)

    try:
        # Caching removed on purpose: the analyze button should always
        # re-fetch with the current settings.
        # if self.current_data is not None and "code" in self.current_data and self.current_data["code"] == code:
        #     return self.current_data

        if code.startswith("http"):
            # Full URL given — strip down to the code and remember the type.
            if "/c/" in code:
                code = code.split("c/")[1]
                code_type = "c"
            elif "/e/" in code:
                code = code.split("e/")[1]
                code_type = "e"
        logger.info(f"code:::: {code}")

        base_url = P.ModelSetting.get("ohli24_url").rstrip("/")  # drop trailing slash

        if code_type == "c":
            url = base_url + "/c/" + code
        elif code_type == "e":
            url = base_url + "/e/" + code
        else:
            url = base_url + "/e/" + code
        if wr_id is not None:
            if len(wr_id) > 0:
                # Classic board-style URL takes precedence when wr_id given.
                url = base_url + "/bbs/board.php?bo_table=" + bo_table + "&wr_id=" + wr_id

        logger.debug("url:::> %s", url)

        response_data = LogicOhli24.get_html_cached(url, timeout=10)

        logger.debug(f"HTML length: {len(response_data)}")
        # Debug aids: a very short body usually means a block page; otherwise
        # check for the landmarks the parsers below depend on.
        if len(response_data) < 1000:
            logger.warning(f"Short HTML response: {response_data[:500]}")
        else:
            if "item-subject" in response_data:
                logger.info("Found item-subject in HTML")
            else:
                logger.warning("item-subject NOT found in HTML")
            if 'itemprop="image"' in response_data:
                logger.info("Found itemprop=image in HTML")
            else:
                logger.warning("itemprop=image NOT found in HTML")

        tree = html.fromstring(response_data)

        # --- Title: try progressively looser XPaths; reject the site name.
        title = ""
        title_xpaths = [
            '//h1[@itemprop="headline"]/text()',
            '//h1[@itemprop="headline"]//text()',
            '//div[@class="view-wrap"]//h1/text()',
            '//h1/text()',
        ]
        for xpath in title_xpaths:
            result = tree.xpath(xpath)
            if result:
                title = "".join(result).strip()
                if title and title != "OHLI24":
                    break
        if not title or "OHLI24" in title:
            # Fall back to the (decoded) code itself.
            title = urllib.parse.unquote(code)
        logger.info(f"title:: {title}")

        # --- Poster image: several candidate XPaths; skip the site logo.
        image = ""
        image_xpaths = [
            '//img[@itemprop="image"]/@src',
            '//img[@class="img-tag"]/@src',
            '//div[@class="view-wrap"]//img/@src',
            '//div[contains(@class, "view-img")]//img/@src',
        ]
        for xpath in image_xpaths:
            result = tree.xpath(xpath)
            if result:
                image = result[0]
                if image and not "logo" in image.lower():
                    break
        if image:
            # Normalize relative ("..") and scheme-less paths to absolute.
            if image.startswith(".."):
                image = image.replace("..", LogicOhli24.get_base_url())
            elif not image.startswith("http"):
                image = LogicOhli24.get_base_url() + image
        logger.info(f"image:: {image}")

        # --- Metadata labels (Korean) mapped to internal keys.
        des = {}
        description_dict = {
            "원제": "_otit",
            "원작": "_org",
            "감독": "_dir",
            "각본": "_scr",
            "캐릭터 디자인": "_character_design",
            "음악": "_sound",
            "제작사": "_pub",
            "장르": "_tag",
            "분류": "_classifi",
            "제작국가": "_country",
            "방영일": "_date",
            "등급": "_grade",
            "총화수": "_total_chapter",
            "상영시간": "_show_time",
            "상영일": "_release_date",
            "개봉년도": "_release_year",
            "개봉일": "_opening_date",
            "런타임": "_run_time",
            "작화": "_drawing",
        }

        # Metadata rows live in //div[@class="list"]/p, with a looser
        # view-field fallback for alternate layouts.
        des_items = tree.xpath('//div[@class="list"]/p')
        if not des_items:
            des_items = tree.xpath('//div[contains(@class, "view-field")]')
        for item in des_items:
            try:
                span = item.xpath(".//span//text()")
                if span and span[0] in description_dict:
                    key = description_dict[span[0]]
                    # First span is the label, second the value.
                    value = item.xpath(".//span/text()")
                    des[key] = value[1] if len(value) > 1 else ""
            except Exception:
                pass

        # --- Episode list: one <a class="item-subject"> per episode.
        episodes = []
        episode_links = tree.xpath('//a[@class="item-subject"]')
        for a_elem in episode_links:
            try:
                ep_title = "".join(a_elem.xpath(".//text()")).strip()
                href = a_elem.get("href", "")
                if not href.startswith("http"):
                    href = LogicOhli24.get_base_url() + href
                # Date lives on an ancestor row, not the link itself.
                parent = a_elem.getparent()
                _date = ""
                if parent is not None:
                    grandparent = parent.getparent()
                    if grandparent is not None:
                        date_result = grandparent.xpath('.//div[@class="wr-date"]/text()')
                        if not date_result:
                            date_result = grandparent.xpath('.//*[contains(@class, "date")]/text()')
                        _date = date_result[0].strip() if date_result else ""
                # Stable per-episode hash used as a video identifier.
                m = hashlib.md5(ep_title.encode("utf-8"))
                _vi = m.hexdigest()
                episodes.append({
                    "title": ep_title,
                    "link": href,
                    "thumbnail": image,
                    "date": _date,
                    "day": _date,
                    "_id": ep_title,
                    "va": href,
                    "_vi": _vi,
                    "content_code": code,
                })
            except Exception as ep_err:
                logger.warning(f"Episode parse error: {ep_err}")
                continue
        logger.info(f"Found {len(episodes)} episodes")
        # Debug: confirm original (site) ordering via the first title.
        if episodes:
            logger.info(f"First parsed episode: {episodes[0]['title']}")

        # --- Synopsis text, with a view-story fallback.
        ser_description_result = tree.xpath('//div[@class="view-stocon"]/div[@class="c"]/text()')
        if not ser_description_result:
            ser_description_result = tree.xpath('//div[contains(@class, "view-story")]//text()')
        ser_description = ser_description_result if ser_description_result else []

        data = {
            "title": title,
            "image": image,
            "date": "",
            "day": "",
            "ser_description": ser_description,
            "des": des,
            "episode": episodes,
            "code": code,
        }
        # Ordering: the site lists newest first (13, 12, ..., 1).
        # With ohli24_order_desc Off, reverse so episode 1 comes first.
        raw_order_desc = P.ModelSetting.get("ohli24_order_desc")
        order_desc = True if str(raw_order_desc).lower() == 'true' else False
        logger.info(f"Sorting - Raw: {raw_order_desc}, Parsed: {order_desc}")
        if not order_desc:
            logger.info("Order is set to Ascending (Off), reversing list to show episode 1 first.")
            data["episode"] = list(reversed(data['episode']))
            data["list_order"] = "asc"
        else:
            logger.info("Order is set to Descending (On), keeping site order (Newest first).")
            data["list_order"] = "desc"
        if data["episode"]:
            logger.info(f"Final episode list range: {data['episode'][0]['title']} ~ {data['episode'][-1]['title']}")

        # [FILE EXISTENCE CHECK FOR UI PLAY BUTTON]
        # Best effort: locate an already-downloaded file so the UI can offer
        # playback. Any failure here is logged and ignored.
        try:
            import glob
            # 1. Compute the expected save path (must mirror the logic in
            #    Ohli24QueueEntity so filenames match).
            savepath = P.ModelSetting.get("ohli24_download_path")
            logger.warning(f"[DEBUG_FILE_CHECK] Base savepath: {savepath}")
            # Season parsing from the title, e.g. "원펀맨 3기" -> season 3.
            season = 1
            content_title = data["title"]
            content_title_clean = content_title
            # NOTE(review): the "%s" placeholders are filled with empty
            # strings, so the season group effectively never matches a
            # season suffix — confirm against Ohli24QueueEntity's regex.
            match = re.compile(r"(?P<title>.*?)\s*((?P<season>\d+)%s)?\s*((?P<epi_no>\d+)%s)" % ("", "")).search(content_title)
            if match:
                if "season" in match.groupdict() and match.group("season") is not None:
                    season = int(match.group("season"))
                content_title_clean = match.group("title").strip()
            if P.ModelSetting.get_bool("ohli24_auto_make_folder"):
                # Folder uses the cleaned title (season suffix removed).
                folder_name = Util.change_text_for_use_filename(content_title_clean)
                savepath = os.path.join(savepath, folder_name)
                if P.ModelSetting.get_bool("ohli24_auto_make_season_folder"):
                    season_str = str(int(season))
                    savepath = os.path.join(savepath, "Season %s" % season_str)
            if not os.path.exists(savepath):
                # Nothing downloaded yet for this series.
                pass
            else:
                # 2. Glob for "Title.SxxExx.*-OHNI24.mp4" per episode; stop
                #    at the first hit and expose it to the UI.
                title_clean = Util.change_text_for_use_filename(content_title_clean)
                season_str = "0%s" % season if season < 10 else str(season)
                for episode in data["episode"]:
                    # Episode number: explicit "no" field first, else parse
                    # "24화"/"24회" style from the title.
                    epi_no = 1
                    try:
                        if "no" in episode and episode["no"]:
                            epi_no = float(episode["no"])
                            if epi_no.is_integer():
                                epi_no = int(epi_no)
                        else:
                            ematch = re.search(r"(\d+(?:\.\d+)?)(?:화|회)", episode["title"])
                            if ematch:
                                epi_no_float = float(ematch.group(1))
                                epi_no = int(epi_no_float) if epi_no_float.is_integer() else epi_no_float
                    except Exception as parse_e:
                        pass
                    epi_no_str = "0%s" % epi_no if isinstance(epi_no, int) and epi_no < 10 else str(epi_no)
                    # Glob pattern matching the Ohli24QueueEntity filename format.
                    glob_pattern = f"{title_clean}.S{season_str}E{epi_no_str}.*-OHNI24.mp4"
                    full_pattern = os.path.join(savepath, glob_pattern)
                    files = glob.glob(full_pattern)
                    if not files and episode == data["episode"][0]:
                        # Hook point for listing folder contents when the
                        # first episode misses (debug only).
                        pass
                    if files:
                        valid_file = files[0]
                        data["first_exist_filepath"] = valid_file
                        data["first_exist_filename"] = os.path.basename(valid_file)
                        logger.info(f"Play button enabled: Found {data['first_exist_filename']}")
                        break
        except Exception as e:
            logger.error(f"Error checking file existence: {e}")
            logger.error(traceback.format_exc())

        # Cache the analysis for other endpoints (e.g. get_episode).
        self.current_data = data

        return data
    except Exception as e:
        P.logger.error("Exception:%s", e)
        P.logger.error(traceback.format_exc())
        return {"ret": "error", "log": str(e)}
2022-10-29 17:21:14 +09:00
def get_anime_info(self, cate: str, page: str, sca: Optional[str] = None) -> Dict[str, Any]:
    """Fetch one page of a board category and return its anime entries.

    Returns a dict with ``anime_count`` and ``anime_list`` (each entry has
    link/code/title/image_link); on failure ``{"ret": "error", ...}``.
    """
    logger.debug(f"get_anime_info: cate={cate}, page={page}, sca={sca}")

    try:
        # Normalize the configured base URL (no trailing slash) and build
        # the board URL, appending the optional sub-category filter.
        base_url = P.ModelSetting.get("ohli24_url").rstrip('/')
        url = base_url + "/bbs/board.php?bo_table=" + cate + "&page=" + page
        if sca:
            url += "&sca=" + sca

        logger.info("url:::> %s", url)
        markup = LogicOhli24.get_html_cached(url, timeout=10)
        tree = html.fromstring(markup)
        rows = tree.xpath('//div[@class="list-row"]')

        data: Dict[str, Any] = {"anime_count": len(rows), "anime_list": []}
        for row in rows:
            link = row.xpath(".//a/@href")[0]
            entity = {
                "link": link,
                "code": link.split("/")[-1],
                "title": row.xpath(".//div[@class='post-title']/text()")[0].strip(),
            }
            # Prefer the real @src; lazy-loaded images only carry @data-ezsrc.
            src_matches = row.xpath(".//div[@class='img-item']/img/@src")
            if len(src_matches) > 0:
                entity["image_link"] = src_matches[0].replace("..", LogicOhli24.get_base_url())
            else:
                entity["image_link"] = row.xpath(".//div[@class='img-item']/img/@data-ezsrc")[0]
            data["ret"] = "success"
            data["anime_list"].append(entity)
        return data
    except Exception as e:
        P.logger.error("Exception:%s", e)
        P.logger.error(traceback.format_exc())
        return {"ret": "error", "log": str(e)}
2022-10-29 17:21:14 +09:00
def get_auto_anime_info(self, url: str = ""):
    """Fetch a listing page (used by the scheduler) and return its entries.

    Same result shape as :meth:`get_anime_info`; on failure returns
    ``{"ret": "error", "log": ...}``.
    """
    try:
        logger.info("url:::> %s", url)
        data = {}
        response_data = LogicOhli24.get_html_cached(url, timeout=10)

        tree = html.fromstring(response_data)
        tmp_items = tree.xpath('//div[@class="list-row"]')
        data["anime_count"] = len(tmp_items)
        data["anime_list"] = []
        for item in tmp_items:
            entity = {}
            entity["link"] = item.xpath(".//a/@href")[0]
            entity["code"] = entity["link"].split("/")[-1]

            entity["title"] = item.xpath(".//div[@class='post-title']/text()")[0].strip()
            # FIX: fall back to @data-ezsrc for lazy-loaded images, matching
            # get_anime_info — previously an absent @src raised IndexError
            # and aborted the whole listing.
            src_matches = item.xpath(".//div[@class='img-item']/img/@src")
            if src_matches:
                entity["image_link"] = src_matches[0].replace("..", LogicOhli24.get_base_url())
            else:
                ezsrc_matches = item.xpath(".//div[@class='img-item']/img/@data-ezsrc")
                entity["image_link"] = ezsrc_matches[0] if ezsrc_matches else ""

            data["anime_list"].append(entity)
        # FIX: set the success flag even for an empty listing (previously it
        # was only assigned inside the loop, so zero rows left "ret" unset).
        data["ret"] = "success"
        return data
    except Exception as e:
        P.logger.error("Exception:%s", e)
        P.logger.error(traceback.format_exc())
        return {"ret": "error", "log": str(e)}
2022-10-29 17:21:14 +09:00
# @staticmethod
def get_search_result(self, query: str, page: str, cate: str) -> Dict[str, Any]:
    """Run a site search and return matching anime entries.

    Image URLs are rewritten through this module's image_proxy endpoint so
    the browser never hits the origin directly (referer-protected images).
    """
    try:
        encoded_query = urllib.parse.quote(query)
        url = (
            LogicOhli24.get_base_url()
            + "/bbs/search.php?srows=24&gr_id=&sfl=wr_subject&stx="
            + encoded_query
            + "&page="
            + page
        )
        logger.info("get_search_result()::url> %s", url)
        markup = LogicOhli24.get_html_cached(url, timeout=10)
        tree = html.fromstring(markup)
        rows = tree.xpath('//div[@class="list-row"]')

        data: Dict[str, Any] = {"anime_count": len(rows), "anime_list": []}
        # Candidate image attributes, checked in order (lazy-load support).
        img_attributes = (
            ".//div[@class='img-item']/img/@src",
            ".//div[@class='img-item']/img/@data-src",
            ".//div[@class='img-item']/img/@data-ezsrc",
        )
        for row in rows:
            link = row.xpath(".//a/@href")[0]
            entity = {
                "link": link,
                # Board-style links end in "...&wr_id=NNN".
                "wr_id": link.split("=")[-1],
                "title": "".join(row.xpath(".//div[@class='post-title']/text()")).strip(),
            }
            original_img = ""
            for attr in img_attributes:
                matches = row.xpath(attr)
                if matches and matches[0].strip():
                    original_img = matches[0].replace("..", LogicOhli24.get_base_url())
                    break
            if not original_img:
                original_img = "https://via.placeholder.com/200x300?text=No+Image"
            # Route through the image proxy endpoint of this module.
            entity["image_link"] = "/%s/api/%s/image_proxy?url=%s" % (
                P.package_name,
                self.name,
                urllib.parse.quote(original_img),
            )
            entity["code"] = row.xpath(".//div[@class='img-item']/img/@alt")[0]
            data["ret"] = "success"
            data["anime_list"].append(entity)
        return data
    except Exception as e:
        P.logger.error(f"Exception: {str(e)}")
        P.logger.error(traceback.format_exc())
        return {"ret": "error", "log": str(e)}
2022-10-29 17:21:14 +09:00
def process_api(self, sub: str, req: Any) -> Any:
    """API endpoints for this module.

    ``image_proxy`` fetches a remote image with the site referer attached
    (the origin rejects hotlinking) and relays body/status/headers to the
    client. Returns None for an unknown *sub*.
    """
    try:
        if sub == "image_proxy":
            image_url = req.args.get("url")
            if not image_url:
                return Response("No URL provided", status=400)
            # FIX: request headers kept in their own variable — previously
            # the same name was reused for the outgoing response headers,
            # shadowing the dict mid-function.
            request_headers = {
                "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
                "Referer": LogicOhli24.get_base_url() + "/",
            }
            try:
                r = requests.get(image_url, headers=request_headers, stream=True, timeout=10)
                # Hop-by-hop / encoding headers must not be relayed verbatim.
                excluded_headers = ['content-encoding', 'content-length', 'transfer-encoding', 'connection']
                response_headers = [(name, value) for (name, value) in r.raw.headers.items()
                                    if name.lower() not in excluded_headers]
                return Response(r.content, r.status_code, response_headers)
            except Exception as e:
                return Response(f"Error fetching image: {e}", status=500)
        # Unknown sub: nothing handled here.
        return None
    except Exception as e:
        logger.error(f"process_api error: {e}")
        logger.error(traceback.format_exc())
        # FIX: return an explicit error response — previously the handler
        # fell through to an implicit None, which Flask cannot serve.
        return Response("process_api internal error", status=500)
2022-10-29 17:21:14 +09:00
# @staticmethod
def plugin_load(self) -> None:
    """One-time module initialization at plugin load.

    Ensures dependencies, initializes the ffmpeg support layer, and — only
    when the GDM plugin is absent — starts the legacy FfmpegQueue fallback.
    Also cleans stale temp folders and launches the Zendriver daemon in a
    background thread. All failures are logged, never raised.
    """
    try:
        # Verify/install required third-party packages.
        LogicOhli24.ensure_essential_dependencies()

        # SupportFfmpeg.initialize(ffmpeg_modelsetting.get('ffmpeg_path'), os.path.join(F.config['path_data'], 'tmp'),
        #                         self.callback_function, ffmpeg_modelsetting.get_int('max_pf_count'))

        # Create the download_queue at plugin load if it does not exist yet.
        # if self.download_queue is None:
        #     self.download_queue = queue.Queue()

        SupportFfmpeg.initialize(
            "ffmpeg",
            os.path.join(F.config["path_data"], "tmp"),
            self.callback_function,
            P.ModelSetting.get(f"{name}_max_ffmpeg_process_count"),
        )

        # FfmpegQueue init — fallback used only when GDM is not installed.
        self.queue = None
        if ModuleQueue is None:
            logger.info("GDM not found. Initializing legacy FfmpegQueue fallback.")
            self.queue = FfmpegQueue(
                P,
                P.ModelSetting.get_int(f"{name}_max_ffmpeg_process_count"),
                name,
                self,
            )
            self.queue.queue_start()
        else:
            logger.info("GDM found. FfmpegQueue fallback disabled.")

        # Remove leftover temp folders from interrupted downloads.
        self.cleanup_stale_temps()

        # Start the Zendriver daemon in the background (best effort).
        try:
            from threading import Thread
            Thread(target=LogicOhli24.start_zendriver_daemon, daemon=True).start()
        except Exception as daemon_err:
            logger.debug(f"[ZendriverDaemon] Auto-start skipped: {daemon_err}")

    except Exception as e:
        logger.error("Exception:%s", e)
        logger.error(traceback.format_exc())
# @staticmethod
def plugin_unload(self) -> None:
    """Tear down at plugin unload: drop the recent-scan scheduler job."""
    try:
        logger.debug("%s plugin_unload", P.package_name)
        job_id = "%s_recent" % P.package_name
        scheduler.remove_job(job_id)
    except Exception as e:
        logger.error("Exception:%s", e)
        logger.error(traceback.format_exc())
@staticmethod
def reset_db() -> bool:
    """Delete every ModelOhli24Item row, commit, and report success."""
    session = db.session
    session.query(ModelOhli24Item).delete()
    session.commit()
    return True
@staticmethod
def get_html(
url: str,
headers: Optional[Dict[str, str]] = None,
referer: Optional[str] = None,
stream: bool = False,
timeout: int = 60,
stealth: bool = False,
data: Optional[Dict[str, Any]] = None,
method: str = 'GET'
) -> str:
"""별도 스레드에서 curl_cffi 실행하여 gevent SSL 충돌 및 Cloudflare 우회"""
2025-12-27 23:27:46 +09:00
from concurrent.futures import ThreadPoolExecutor, TimeoutError as FuturesTimeoutError
import time
from urllib import parse
total_start = time.time()
2025-12-27 23:27:46 +09:00
# URL 인코딩 (한글 주소 대응)
if '://' in url:
try:
scheme, netloc, path, params, query, fragment = parse.urlparse(url)
# 이미 인코딩된 경우를 대비해 unquote 후 다시 quote
path = parse.quote(parse.unquote(path), safe='/')
query = parse.quote(parse.unquote(query), safe='=&%')
url = parse.urlunparse((scheme, netloc, path, params, query, fragment))
except:
pass
2023-10-05 21:41:56 +09:00
def fetch_url_with_cffi(url, headers, timeout, data, method):
"""별도 스레드에서 curl_cffi로 실행 (Chrome 124 impersonation + Enhanced Headers)"""
from curl_cffi import requests
# 프록시 설정
proxies = LogicOhli24.get_proxies()
# Chrome 124 impersonation (최신 안정 버전)
with requests.Session(impersonate="chrome124") as session:
# 헤더 설정
if headers:
session.headers.update(headers)
# 추가 보안 헤더 (Cloudflare 우회용)
enhanced_headers = {
"sec-ch-ua": '"Not_A Brand";v="8", "Chromium";v="124", "Google Chrome";v="124"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"macOS"',
"sec-fetch-dest": "document",
"sec-fetch-mode": "navigate",
"sec-fetch-site": "none",
"sec-fetch-user": "?1",
"upgrade-insecure-requests": "1",
"dnt": "1",
"cache-control": "max-age=0",
}
session.headers.update(enhanced_headers)
if method.upper() == 'POST':
response = session.post(url, data=data, timeout=timeout, proxies=proxies)
else:
response = session.get(url, timeout=timeout, proxies=proxies)
return response.text
2025-12-27 23:27:46 +09:00
response_data = ""
if headers is None:
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
"Accept-Language": "ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7",
"Accept-Encoding": "gzip, deflate, br, zstd",
2025-12-27 23:27:46 +09:00
}
if referer:
if '://' in referer:
try:
scheme, netloc, path, params, query, fragment = parse.urlparse(referer)
path = parse.quote(parse.unquote(path), safe='/')
query = parse.quote(parse.unquote(query), safe='=&%')
referer = parse.urlunparse((scheme, netloc, path, params, query, fragment))
except:
pass
headers["Referer"] = referer
elif "Referer" not in headers and "referer" not in headers:
headers["Referer"] = "https://ani.ohli24.com"
# === [Layer 3A: Zendriver Daemon (Primary - Persistent Browser)] ===
# 리눅스/도커 차단 환경 대응: 가장 확실하고 빠른 젠드라이버 데몬을 최우선으로 시도
if not response_data or len(response_data) < 10:
if LogicOhli24.is_zendriver_daemon_running():
logger.debug(f"[Layer3A] Trying Zendriver Daemon: {url}")
daemon_result = LogicOhli24.fetch_via_daemon(url, 30)
if daemon_result.get("success") and daemon_result.get("html"):
elapsed = time.time() - total_start
logger.info(f"[Layer3A] Success in {elapsed:.2f}s (HTML: {len(daemon_result['html'])})")
LogicOhli24.daemon_fail_count = 0
return daemon_result["html"]
else:
logger.warning(f"[Layer3A] Daemon failed: {daemon_result.get('error', 'Unknown')}")
LogicOhli24.daemon_fail_count += 1
# === [Layer 1: curl-cffi (Fallback 1)] ===
if not response_data or len(response_data) < 10:
try:
from concurrent.futures import ThreadPoolExecutor, TimeoutError as FuturesTimeoutError
logger.debug(f"[Layer1] Trying curl_cffi: {url}")
with ThreadPoolExecutor(max_workers=1) as executor:
future = executor.submit(fetch_url_with_cffi, url, headers, 15, data, method)
response_data = future.result(timeout=20)
if response_data and len(response_data) > 500:
logger.info(f"[Layer1] curl_cffi success, HTML len: {len(response_data)}")
return response_data
else:
response_data = ""
except Exception as e:
logger.warning(f"[Layer1] curl_cffi failed: {e}")
response_data = ""
# === [Layer 2: Botasaurus @request (Mac Subprocess / Stealth)] ===
if not response_data or len(response_data) < 10:
# 리스트/검색 페이지에서 Botasaurus 활용 (Zendriver보다 빠름)
is_list_page = any(x in url for x in ["bo_table=", "/anime/", "search"])
if is_list_page and LogicOhli24.ensure_essential_dependencies():
import platform
is_mac = platform.system() == "Darwin"
try:
if is_mac:
# Mac에서는 gevent-Trio 충돌로 인해 서브프로세스로 실행
logger.debug(f"[Layer2] Trying Botasaurus subprocess (Mac): {url}")
import subprocess
script_path = os.path.join(os.path.dirname(__file__), "lib", "botasaurus_ohli24.py")
cmd = [sys.executable, script_path, url, json.dumps(headers), LogicOhli24.get_proxy() or ""]
result = subprocess.run(
cmd,
capture_output=True,
text=True,
timeout=timeout + 15
)
if result.returncode == 0 and result.stdout.strip():
try:
b_result = json.loads(result.stdout.strip())
if b_result.get("success") and b_result.get("html"):
logger.info(f"[Layer2] Botasaurus(sub) success, HTML len: {len(b_result['html'])} (Attempt: {b_result.get('attempt', 1)})")
return b_result["html"]
else:
logger.warning(f"[Layer2] Botasaurus(sub) logic failed: {b_result.get('error')}")
if b_result.get("traceback"):
logger.debug(f"Botasaurus Traceback: {b_result.get('traceback')}")
except json.JSONDecodeError:
logger.error(f"[Layer2] Botasaurus JSON Decode Error. Output: {result.stdout[:200]}")
logger.debug(f"Botasaurus Stderr: {result.stderr}")
else:
logger.warning(f"[Layer2] Botasaurus subprocess error (RC: {result.returncode}): {result.stderr}")
else:
# Linux 등에서는 직접 실행 시도
logger.debug(f"[Layer2] Trying Botasaurus @request (Direct): {url}")
from botasaurus.request import request as b_request
@b_request(headers=headers, use_stealth=True, proxy=LogicOhli24.get_proxy())
def fetch_url(request, data):
return request.get(data)
b_resp = fetch_url(url)
if b_resp and len(b_resp) > 500:
logger.info(f"[Layer2] Botasaurus success, HTML len: {len(b_resp)}")
return b_resp
else:
logger.warning(f"[Layer2] Botasaurus short response: {len(b_resp) if b_resp else 0}")
except Exception as e:
logger.warning(f"[Layer2] Botasaurus failed: {e}")
response_data = ""
# max_retries = 3
# for attempt in range(max_retries):
# try:
# logger.debug(f"get_html (curl_cffi in thread) {method} attempt {attempt + 1}: {url}")
#
# # ThreadPoolExecutor로 별도 스레드에서 실행
# with ThreadPoolExecutor(max_workers=1) as executor:
# future = executor.submit(fetch_url_with_cffi, url, headers, timeout, data, method)
# response_data = future.result(timeout=timeout + 10)
#
# if response_data and (len(response_data) > 10 or method.upper() == 'POST'):
# logger.debug(f"get_html success, length: {len(response_data)}")
# return response_data
# else:
# logger.warning(f"Short response (len={len(response_data) if response_data else 0})")
#
# except FuturesTimeoutError:
# logger.warning(f"get_html attempt {attempt + 1} timed out")
# except Exception as e:
# logger.warning(f"get_html attempt {attempt + 1} failed: {e}")
#
# if attempt < max_retries - 1:
# time.sleep(3)
# # --- Layer 2: Cloudscraper Fallback (가벼운 JS 챌린지 해결) ---
# if not response_data or len(response_data) < 10:
# logger.info(f"[Layer2] curl_cffi failed, trying cloudscraper: {url}")
# try:
# import cloudscraper
# scraper = cloudscraper.create_scraper(
# browser={"browser": "chrome", "platform": "darwin", "mobile": False}
# )
#
# if method.upper() == 'POST':
# cs_response = scraper.post(url, data=data, headers=headers, timeout=timeout)
# else:
# cs_response = scraper.get(url, headers=headers, timeout=timeout)
#
# if cs_response and cs_response.text and len(cs_response.text) > 10:
# logger.info(f"[Layer2] Cloudscraper success, HTML len: {len(cs_response.text)}")
# return cs_response.text
# else:
# logger.warning(f"[Layer2] Cloudscraper short response: {len(cs_response.text) if cs_response else 0}")
#
# except Exception as e:
# logger.warning(f"[Layer2] Cloudscraper failed: {e}")
# (Layer 3A was moved to the top)
# --- Layer 3B: Zendriver Subprocess Fallback (데몬 실패 시) ---
if not response_data or len(response_data) < 10:
logger.info(f"[Layer3B] Trying Zendriver subprocess: {url}")
# Zendriver 자동 설치 확인
if not LogicOhli24.ensure_zendriver_installed():
logger.warning("[Layer3B] Zendriver installation failed, skipping to Layer 4")
else:
try:
import subprocess
import contextlib
script_path = os.path.join(os.path.dirname(__file__), "lib", "zendriver_ohli24.py")
browser_path = P.ModelSetting.get("ohli24_zendriver_browser_path")
cmd = [sys.executable, script_path, url, str(timeout)]
if browser_path:
cmd.append(browser_path)
# gevent fork 경고 억제 (부모 프로세스 stderr 임시 리다이렉트)
with open(os.devnull, 'w') as devnull:
old_stderr = sys.stderr
sys.stderr = devnull
try:
result = subprocess.run(
cmd,
capture_output=False,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
text=True,
timeout=timeout + 30
)
finally:
sys.stderr = old_stderr
if result.returncode == 0 and result.stdout.strip():
zd_result = json.loads(result.stdout.strip())
if zd_result.get("success") and zd_result.get("html"):
elapsed = time.time() - total_start
logger.info(f"[Ohli24] Fetch success via Layer3B: {url} in {elapsed:.2f}s (HTML: {len(zd_result['html'])})")
return zd_result["html"]
else:
logger.warning(f"[Layer3B] Zendriver failed: {zd_result.get('error', 'Unknown error')}")
else:
logger.warning(f"[Layer3B] Zendriver subprocess failed")
except subprocess.TimeoutExpired:
logger.warning(f"[Layer3B] Zendriver timed out after {timeout + 30}s")
except Exception as e:
logger.warning(f"[Layer3B] Zendriver exception: {e}")
2025-12-27 23:27:46 +09:00
# --- Layer 4: Camoufox Fallback (최후의 수단 - 풀 Firefox 브라우저) ---
if not response_data or len(response_data) < 10:
logger.info(f"[Layer4] Zendriver failed, trying Camoufox: {url}")
2025-12-27 23:27:46 +09:00
try:
import subprocess
script_path = os.path.join(os.path.dirname(__file__), "lib", "camoufox_ohli24.py")
2025-12-27 23:27:46 +09:00
# gevent fork 경고 억제 (부모 프로세스 stderr 임시 리다이렉트)
with open(os.devnull, 'w') as devnull:
old_stderr = sys.stderr
sys.stderr = devnull
try:
result = subprocess.run(
[sys.executable, script_path, url, str(timeout)],
capture_output=False,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
text=True,
timeout=timeout + 30
)
finally:
sys.stderr = old_stderr
2025-12-27 23:27:46 +09:00
if result.returncode == 0 and result.stdout.strip():
cf_result = json.loads(result.stdout.strip())
if cf_result.get("success") and cf_result.get("html"):
logger.info(f"[Layer4] Camoufox success in {cf_result.get('elapsed', '?')}s, HTML len: {len(cf_result['html'])}")
return cf_result["html"]
else:
logger.warning(f"[Layer4] Camoufox failed: {cf_result.get('error', 'Unknown error')}")
2025-12-27 23:27:46 +09:00
else:
logger.warning(f"[Layer4] Camoufox subprocess failed")
except subprocess.TimeoutExpired:
logger.warning(f"[Layer4] Camoufox timed out after {timeout + 30}s")
2025-12-27 23:27:46 +09:00
except Exception as e:
logger.warning(f"[Layer4] Camoufox exception: {e}")
2025-12-27 23:27:46 +09:00
2023-10-05 22:01:35 +09:00
return response_data
2022-10-29 17:21:14 +09:00
@staticmethod
def get_html_cached(url: str, **kwargs) -> str:
    """Cached wrapper around :meth:`get_html` for browsing pages (request/search).

    The cache TTL comes from the ``ohli24_cache_minutes`` setting
    (0 disables caching).  Download routines must NOT use this helper
    because they need fresh sessions/headers.

    :param url: page URL to fetch
    :param kwargs: forwarded verbatim to ``LogicOhli24.get_html``
    :return: HTML text, possibly served from the on-disk cache
    """
    # Uses the module-level `hashlib` import; a redundant local import was removed.
    cache_minutes = int(P.ModelSetting.get("ohli24_cache_minutes") or 0)

    # Caching disabled -> always fetch live.
    if cache_minutes <= 0:
        logger.debug(f"[Cache SKIP] Cache disabled (minutes: {cache_minutes})")
        return LogicOhli24.get_html(url, **kwargs)

    # Cache directory: <data>/<package>/cache, keyed by md5(url).
    cache_dir = os.path.join(path_data, P.package_name, "cache")
    os.makedirs(cache_dir, exist_ok=True)
    url_hash = hashlib.md5(url.encode('utf-8')).hexdigest()
    cache_file = os.path.join(cache_dir, f"{url_hash}.html")

    # Serve from cache while it is fresh and non-trivial in size.
    if os.path.exists(cache_file):
        cache_age = time.time() - os.path.getmtime(cache_file)
        if cache_age < cache_minutes * 60:
            try:
                with open(cache_file, 'r', encoding='utf-8') as f:
                    cached_html = f.read()
                if cached_html and len(cached_html) > 100:
                    logger.debug(f"[Cache HIT] {url[:60]}... (age: {cache_age:.0f}s)")
                    return cached_html
                else:
                    logger.debug(f"[Cache MISS] Cached content is empty or too short for {url[:60]}...")
            except Exception as e:
                logger.warning(f"[Cache READ ERROR] {e}")
        else:
            logger.debug(f"[Cache EXPIRED] {url[:60]}... (age: {cache_age:.0f}s, expiry: {cache_minutes * 60}s)")
    else:
        logger.debug(f"[Cache MISS] No cache file found for {url[:60]}")

    # Fresh fetch.  Named `fetched` (not `html`) so it does not shadow the
    # module-level `lxml.html` import.
    fetched = LogicOhli24.get_html(url, **kwargs)

    # Persist only plausible (non-empty, non-error) HTML.
    if fetched and len(fetched) > 100:
        try:
            with open(cache_file, 'w', encoding='utf-8') as f:
                f.write(fetched)
            logger.debug(f"[Cache SAVE] {url[:60]}...")
        except Exception as e:
            logger.warning(f"[Cache WRITE ERROR] {e}")
    return fetched
2022-10-29 17:21:14 +09:00
#########################################################
def add(self, episode_info: Dict[str, Any]) -> str:
    """Add an episode to the download queue with early skip checks.

    Order matters: cheap checks (queue membership, DB completion state,
    on-disk file existence) run BEFORE the expensive URL extraction so
    already-handled episodes return quickly without touching the site.

    :param episode_info: scraped episode dict; must contain ``_id``, and
        ``title``/``day``/``thumbnail`` are used when present
    :return: short status token consumed by the ajax caller, one of
        ``queue_exist`` / ``db_completed`` / ``file_exists`` /
        ``extract_failed`` / ``enqueue_gdm_success`` /
        ``enqueue_db_append`` / ``enqueue_db_exist``
    """
    # 1. Check if already in queue
    if self.is_exist(episode_info):
        return "queue_exist"
    # 2. Check DB for completion status FIRST (before expensive operations)
    db_entity = ModelOhli24Item.get_by_ohli24_id(episode_info["_id"])
    logger.debug(f"db_entity:::> {db_entity}")
    if db_entity is not None and db_entity.status == "completed":
        logger.info(f"[Skip] Already completed in DB: {episode_info.get('title')}")
        return "db_completed"
    # 3. Early file existence check - predict filepath before expensive extraction
    predicted_filepath = self._predict_filepath(episode_info)
    if predicted_filepath and os.path.exists(predicted_filepath):
        logger.info(f"[Skip] File already exists: {predicted_filepath}")
        # Update DB status to completed if not already
        if db_entity is not None and db_entity.status != "completed":
            db_entity.status = "completed"
            db_entity.filepath = predicted_filepath
            db_entity.save()
        return "file_exists"
    # 4. Proceed with queue addition via GDM
    logger.debug(f"episode_info:: {episode_info}")
    # Scenario: the GDM (gommi_downloader_manager) module is installed.
    if ModuleQueue:
        logger.info(f"Preparing GDM delegation for: {episode_info.get('title')}")
        # Create the entity instance to run metadata parsing and URL extraction.
        entity = Ohli24QueueEntity(P, self, episode_info)
        # Extract video URL / subtitles / cookies (synchronous — the caller,
        # ajax_process, is synchronous even though async would be preferable).
        try:
            logger.debug(f"Calling entity.prepare_extra() for {episode_info.get('_id')}")
            entity.prepare_extra()
            logger.debug(f"entity.prepare_extra() done. URL found: {entity.url is not None}")
            if not entity.url:
                logger.error(f"Failed to extract video URL for {episode_info.get('_id')}")
                return "extract_failed"
        except Exception as e:
            logger.error(f"Failed to extract video info: {e}")
            # On extraction failure, report an error (alternatively the legacy
            # full-queue path could be used).
            return "extract_failed"
        # Build GDM options from the extracted info (standardized field names).
        download_method = P.ModelSetting.get("ohli24_download_method")
        download_threads = P.ModelSetting.get_int("ohli24_download_threads")
        # Choose the GDM source type depending on multithread/aria2c usage;
        # GDM's 'general' type uses yt-dlp + aria2c.
        gdm_source_type = "ohli24"
        if download_method in ['ytdlp', 'aria2c']:
            gdm_source_type = "general"
        gdm_options = {
            "url": entity.url,  # extracted m3u8 URL
            "save_path": entity.savepath,
            "filename": entity.filename,
            "source_type": gdm_source_type,
            "caller_plugin": f"{P.package_name}_{self.name}",
            "callback_id": episode_info["_id"],
            "title": entity.filename or episode_info.get('title'),
            "thumbnail": episode_info.get('thumbnail') or episode_info.get('image'),
            "meta": {
                "series": entity.content_title,
                "season": entity.season,
                "episode": entity.epi_queue,
                "source": "ohli24"
            },
            "connections": download_threads,  # number of parallel connections
            # headers/cookies go at the top level, not nested in options
            # (GDM flattens the payload).
            "headers": entity.headers,
            "subtitles": entity.srt_url or entity.vtt,
            "cookies_file": entity.cookies_file
        }
        try:
            logger.debug(f"Calling ModuleQueue.add_download with options: {list(gdm_options.keys())}")
            task = ModuleQueue.add_download(**gdm_options)
            if task:
                logger.info(f"Delegated Ohli24 download to GDM: {entity.filename} (Task ID: {task.id})")
            else:
                logger.error("ModuleQueue.add_download returned None")
        except Exception as e:
            logger.error(f"Error calling ModuleQueue.add_download: {e}")
            logger.error(traceback.format_exc())
            task = None
        if task:
            # Update DB state (prepare_extra already syncs metadata, but make
            # the status transition explicit here).
            if db_entity is None:
                # prepare_extra usually creates the DB entry already, so
                # re-check and append only if it is still missing.
                db_entity = ModelOhli24Item.get_by_ohli24_id(episode_info["_id"])
                if not db_entity:
                    ModelOhli24Item.append(entity.as_dict())
            return "enqueue_gdm_success"
    # Fall back to the legacy FfmpegQueue when GDM is unavailable (or the
    # GDM enqueue above failed).
    logger.warning("GDM Module not found, falling back to FfmpegQueue")
    if db_entity is None:
        entity = Ohli24QueueEntity(P, self, episode_info)
        entity.proxy = LogicOhli24.get_proxy()
        ModelOhli24Item.append(entity.as_dict())
        self.queue.add_queue(entity)
        return "enqueue_db_append"
    else:
        entity = Ohli24QueueEntity(P, self, episode_info)
        entity.proxy = LogicOhli24.get_proxy()
        self.queue.add_queue(entity)
        return "enqueue_db_exist"
def _get_savepath(self, episode_info: Dict[str, Any]) -> str:
    """Compute the download directory for an episode (shared internal logic).

    Honors the auto-folder and auto-season-folder settings; completed
    series get the configured prefix prepended to the folder name.
    """
    base = P.ModelSetting.get("ohli24_download_path")
    raw_title = episode_info.get("title", "")
    parsed = re.search(r"(?P<title>.*?)\s*((?P<season>\d+)기)?\s*((?P<epi_no>\d+)화)", raw_title)
    if P.ModelSetting.get_bool("ohli24_auto_make_folder"):
        series_name = parsed.group("title").strip() if parsed else raw_title
        # Completed series ("완결" in the day field) get a configurable prefix.
        if "완결" in episode_info.get("day", ""):
            folder = "%s %s" % (P.ModelSetting.get("ohli24_finished_insert"), series_name)
        else:
            folder = series_name
        base = os.path.join(base, Util.change_text_for_use_filename(folder.strip()))
        if P.ModelSetting.get_bool("ohli24_auto_make_season_folder"):
            season_no = int(parsed.group("season")) if parsed and parsed.group("season") else 1
            base = os.path.join(base, "Season %s" % season_no)
    return base
def plugin_callback(self, data: Dict[str, Any]):
    """Handle completion notifications pushed back by GDM (or other callers).

    Marks the matching DB row as completed so the UI "view" button
    becomes active.

    :param data: callback payload with ``callback_id`` (ohli24 _id),
        ``status`` and optionally ``filepath``
    """
    try:
        callback_id = data.get('callback_id')
        status = data.get('status')
        filepath = data.get('filepath')
        logger.info(f"Plugin callback received: id={callback_id}, status={status}")
        if status == "completed" and callback_id:
            # Update the DB so the UI "view" button is enabled.
            db_entity = ModelOhli24Item.get_by_ohli24_id(callback_id)
            if db_entity:
                db_entity.status = "completed"
                # Guard: callers may omit filepath; os.path.basename(None)
                # would raise TypeError and be swallowed by the except below.
                if filepath:
                    db_entity.filepath = filepath
                    db_entity.filename = os.path.basename(filepath)
                db_entity.completed_time = datetime.now()
                db_entity.save()
                logger.info(f"Ohli24 DB updated for completed task: {db_entity.title}")
                # A socket event could be emitted here to refresh the UI.
                # self.socketio_callback('list_refresh', "")
    except Exception as e:
        logger.error(f"Error in plugin_callback: {e}")
        logger.error(traceback.format_exc())
def _predict_filepath(self, episode_info: Dict[str, Any]) -> Optional[str]:
    """Predict the output filepath from episode info WITHOUT site access.

    Builds a glob-style filename pattern with a wildcard in the quality
    slot (``*-OHNI24.mp4`` matches 720p, 1080p, ...) and searches the
    expected save directory case-insensitively for an existing file.

    :param episode_info: scraped episode dict (``title``, optional ``day``)
    :return: path of an existing non-empty matching file, else ``None``
    """
    try:
        import fnmatch
        title = episode_info.get("title", "")
        if not title:
            return None
        # Parse title pattern: "제목 N기 M화" or "제목 M화"
        match = re.compile(
            r"(?P<title>.*?)\s*((?P<season>\d+)기)?\s*((?P<epi_no>\d+)화)"
        ).search(title)
        if match:
            content_title = match.group("title").strip()
            season = int(match.group("season")) if match.group("season") else 1
            epi_no = int(match.group("epi_no"))
            # Sanitize ONLY the fixed text parts so the glob '*' survives.
            # (A redundant nested `if match:` re-check was removed here.)
            content_title_clean = Util.change_text_for_use_filename(content_title)
            filename_pattern = "%s.S%sE%s.*-OHNI24.mp4" % (
                content_title_clean,
                "0%s" % season if season < 10 else season,
                "0%s" % epi_no if epi_no < 10 else epi_no,
            )
        else:
            title_clean = Util.change_text_for_use_filename(title)
            filename_pattern = "%s.*-OHNI24.mp4" % title_clean
        # Get save path
        savepath = P.ModelSetting.get("ohli24_download_path")
        if not savepath:
            return None
        # Mirror the auto-folder logic used when queueing downloads.
        if P.ModelSetting.get_bool("ohli24_auto_make_folder"):
            day = episode_info.get("day", "")
            content_title_clean = match.group("title").strip() if match else title
            if "완결" in day:
                folder_name = "%s %s" % (
                    P.ModelSetting.get("ohli24_finished_insert"),
                    content_title_clean,
                )
            else:
                folder_name = content_title_clean
            folder_name = Util.change_text_for_use_filename(folder_name.strip())
            savepath = os.path.join(savepath, folder_name)
            if P.ModelSetting.get_bool("ohli24_auto_make_season_folder"):
                season_val = int(match.group("season")) if match and match.group("season") else 1
                savepath = os.path.join(savepath, "Season %s" % season_val)
        # Case-insensitive matching prevents duplicate downloads for
        # variants like 1080P vs 1080p.
        if os.path.isdir(savepath):
            pattern_basename = os.path.basename(filename_pattern).lower()
            for fname in os.listdir(savepath):
                if not fnmatch.fnmatch(fname.lower(), pattern_basename):
                    continue
                matched_path = os.path.join(savepath, fname)
                # Treat 0-byte leftovers as absent (best-effort delete).
                if os.path.exists(matched_path) and os.path.getsize(matched_path) == 0:
                    logger.info(f"Found 0-byte file, deleting and ignoring: {matched_path}")
                    try:
                        os.remove(matched_path)
                    except OSError:
                        pass
                    continue
                logger.debug(f"Found existing file (case-insensitive): {matched_path}")
                return matched_path
        return None
    except Exception as e:
        logger.debug(f"_predict_filepath error: {e}")
        return None
2022-10-29 17:21:14 +09:00
def is_exist(self, info: Dict[str, Any]) -> bool:
    """Return True when the episode (matched by ``_id``) is already queued.

    Checks both the GDM download registry and the legacy ffmpeg queue.
    """
    target_id = info["_id"]
    # GDM downloads expose the originating id via get_status()['callback_id'].
    if ModuleQueue:
        if any(
            d.get_status().get('callback_id') == target_id
            for d in ModuleQueue._downloads.values()
        ):
            return True
    # Legacy queue entities carry the raw scrape info dict.
    legacy_queue = self.queue
    if not legacy_queue:
        return False
    return any(en.info["_id"] == target_id for en in legacy_queue.entity_list)
2022-10-29 17:21:14 +09:00
def callback_function(self, **args: Any) -> None:
    """SupportFfmpeg progress callback for the legacy (non-GDM) queue path.

    ``args`` comes from SupportFfmpeg: ``type`` is one of
    ``status_change`` / ``last`` / ``normal``, ``status`` is a
    ``SupportFfmpeg.Status`` value, and ``data`` carries the job info
    (``save_fullpath``, ``callback_id``, ``duration_str``, ...).
    Failure statuses are mirrored into the DB via ``download_failed``,
    user notifications are emitted on the ``/framework`` socket
    namespace, and finally a refresh event is pushed to this module's UI.
    """
    if not self.queue and ModuleQueue:
        # When GDM is in charge there is no legacy queue: GDM performs its
        # own completion handling and calls plugin_callback instead, so
        # direct SupportFfmpeg callbacks are ignored here.
        return
    logger.debug(f"callback_function invoked with args: {args}")
    if 'status' in args:
        logger.debug(f"Status: {args['status']}")

    # Refresh event name forwarded to the UI at the end ("add" / "status"
    # / "status_change" / "last"); stays None for unhandled combinations.
    refresh_type = None

    if args["type"] == "status_change":
        if args["status"] == SupportFfmpeg.Status.DOWNLOADING:
            refresh_type = "status_change"
        elif args["status"] == SupportFfmpeg.Status.COMPLETED:
            refresh_type = "status_change"
            logger.debug("mod_ohli24.py:: download completed........")
        elif args["status"] == SupportFfmpeg.Status.READY:
            data = {
                "type": "info",
                "msg": "다운로드중 Duration(%s)" % args["data"]["duration_str"]
                + "<br>"
                + args["data"]["save_fullpath"],
                "url": "/ffmpeg/download/list",
            }
            # socketio.emit("notify", data, namespace='/framework')

            refresh_type = "add"
    elif args["type"] == "last":
        # Terminal statuses: look up the queue entity so failures can be
        # recorded in the DB.
        entity = self.queue.get_entity_by_entity_id(args['data']['callback_id'])

        if args["status"] == SupportFfmpeg.Status.WRONG_URL:
            if entity: entity.download_failed("WRONG_URL")

            data = {"type": "warning", "msg": "잘못된 URL입니다"}
            socketio.emit("notify", data, namespace="/framework")

            refresh_type = "add"
        elif args["status"] == SupportFfmpeg.Status.WRONG_DIRECTORY:
            if entity: entity.download_failed("WRONG_DIRECTORY")

            data = {
                "type": "warning",
                "msg": "잘못된 디렉토리입니다.<br>" + args["data"]["save_fullpath"],
            }
            socketio.emit("notify", data, namespace="/framework")

            refresh_type = "add"

        elif args["status"] == SupportFfmpeg.Status.ERROR or args["status"] == SupportFfmpeg.Status.EXCEPTION:
            if entity: entity.download_failed("ERROR/EXCEPTION")

            data = {
                "type": "warning",
                "msg": "다운로드 시작 실패.<br>" + args["data"]["save_fullpath"],
            }
            socketio.emit("notify", data, namespace="/framework")

            refresh_type = "add"
        elif args["status"] == SupportFfmpeg.Status.USER_STOP:
            if entity: entity.download_failed("USER_STOP")

            data = {
                "type": "warning",
                "msg": "다운로드가 중지 되었습니다.<br>" + args["data"]["save_fullpath"],
                "url": "/ffmpeg/download/list",
            }
            socketio.emit("notify", data, namespace="/framework")

            refresh_type = "last"
        elif args["status"] == SupportFfmpeg.Status.COMPLETED:
            # Success path: DB completion is handled by download_completed()
            # elsewhere; only notify the user here.
            logger.debug("download completed........")
            data = {
                "type": "success",
                "msg": "다운로드가 완료 되었습니다.<br>" + args["data"]["save_fullpath"],
                "url": "/ffmpeg/download/list",
            }
            socketio.emit("notify", data, namespace="/framework")

            refresh_type = "last"
        elif args["status"] == SupportFfmpeg.Status.TIME_OVER:
            if entity: entity.download_failed("TIME_OVER")

            data = {
                "type": "warning",
                "msg": "시간초과로 중단 되었습니다.<br>" + args["data"]["save_fullpath"],
                "url": "/ffmpeg/download/list",
            }
            socketio.emit("notify", data, namespace="/framework")

            refresh_type = "last"
        elif args["status"] == SupportFfmpeg.Status.PF_STOP:
            if entity: entity.download_failed("PF_STOP")

            data = {
                "type": "warning",
                "msg": "PF초과로 중단 되었습니다.<br>" + args["data"]["save_fullpath"],
                "url": "/ffmpeg/download/list",
            }
            socketio.emit("notify", data, namespace="/framework")

            refresh_type = "last"
        elif args["status"] == SupportFfmpeg.Status.FORCE_STOP:
            if entity: entity.download_failed("FORCE_STOP")

            data = {
                "type": "warning",
                "msg": "강제 중단 되었습니다.<br>" + args["data"]["save_fullpath"],
                "url": "/ffmpeg/download/list",
            }
            socketio.emit("notify", data, namespace="/framework")

            refresh_type = "last"
        elif args["status"] == SupportFfmpeg.Status.HTTP_FORBIDDEN:
            if entity: entity.download_failed("HTTP_FORBIDDEN")

            data = {
                "type": "warning",
                "msg": "403에러로 중단 되었습니다.<br>" + args["data"]["save_fullpath"],
                "url": "/ffmpeg/download/list",
            }
            socketio.emit("notify", data, namespace="/framework")

            refresh_type = "last"
        elif args["status"] == SupportFfmpeg.Status.ALREADY_DOWNLOADING:
            # Already downloading usually means logic error or race condition, maybe not fail DB?
            # Keeping as is for now unless requested.

            data = {
                "type": "warning",
                "msg": "임시파일폴더에 파일이 있습니다.<br>" + args["data"]["temp_fullpath"],
                "url": "/ffmpeg/download/list",
            }
            socketio.emit("notify", data, namespace="/framework")

            refresh_type = "last"
    elif args["type"] == "normal":
        if args["status"] == SupportFfmpeg.Status.DOWNLOADING:
            refresh_type = "status"

    # P.logger.info(refresh_type)

    self.socketio_callback(refresh_type, args["data"])
2022-10-29 17:21:14 +09:00
def send_discord_notification(
    self,
    title: str,
    desc: str,
    filename: str,
    image_url: str = ""
) -> None:
    """Send a rich-embed notification to the configured Discord webhook.

    :param title: embed title, typically the episode title (e.g. "원펀맨 3기 1화")
    :param desc: embed description text
    :param filename: downloaded file name shown in the embed field
    :param image_url: optional poster/thumbnail URL for the embed image
    """
    try:
        webhook_url = P.ModelSetting.get("ohli24_discord_webhook_url")
        if not webhook_url:
            logger.debug("Discord webhook URL is empty.")
            return
        logger.info(f"Sending Discord notification to: {webhook_url}")
        # Extract season/episode numbers from the title for the author badge.
        # (Uses the module-level `re`; the redundant local import was removed.)
        season_ep_str = ""
        match = re.search(r"(?P<season>\d+)기\s*(?P<episode>\d+)화", title)
        if not match:
            match = re.search(r"(?P<season>\d+)기", title)
        if not match:
            match = re.search(r"(?P<episode>\d+)화", title)
        if match:
            parts = []
            gd = match.groupdict()
            # groupdict() only contains the groups of whichever pattern
            # matched, so both key presence and value must be checked.
            if "season" in gd and gd["season"]:
                parts.append(f"S{int(gd['season']):02d}")
            if "episode" in gd and gd["episode"]:
                parts.append(f"E{int(gd['episode']):02d}")
            if parts:
                season_ep_str = " | ".join(parts)
        author_name = "Ohli24 Downloader"
        if season_ep_str:
            author_name = f"{season_ep_str} • Ohli24"
        embed = {
            "title": f"📺 {title}",
            "description": desc,
            "color": 0x5865F2,  # Discord Blurple
            "author": {
                "name": author_name,
                "icon_url": "https://i.imgur.com/4M34hi2.png"
            },
            "fields": [
                {
                    "name": "📁 파일명",
                    # BUGFIX: previously rendered the literal "(unknown)"
                    # instead of interpolating the actual filename.
                    "value": f"`{filename}`" if filename else "알 수 없음",
                    "inline": False
                }
            ],
            "footer": {
                "text": "FlaskFarm Ohli24",
                "icon_url": "https://i.imgur.com/4M34hi2.png"
            },
            "timestamp": datetime.now().isoformat()
        }
        if image_url:
            # "image" renders full-width at the bottom of the embed.
            embed["image"] = {"url": image_url}
            # "thumbnail" (small, top-right) intentionally unused.
            # embed["thumbnail"] = {"url": image_url}
        message = {
            "username": "Ohli24 Downloader",
            "avatar_url": "https://i.imgur.com/4M34hi2.png",
            "embeds": [embed]
        }
        # `requests` is imported at module level.
        headers = {"Content-Type": "application/json"}
        response = requests.post(webhook_url, json=message, headers=headers)
        # Discord returns 204 No Content on success.
        if response.status_code == 204:
            logger.info("Discord notification sent successfully.")
        else:
            logger.error(f"Failed to send Discord notification. Status Code: {response.status_code}, Response: {response.text}")
    except Exception as e:
        logger.error(f"Exception in send_discord_notification: {e}")
        logger.error(traceback.format_exc())
class Ohli24QueueEntity(AnimeQueueEntity):
def __init__(self, P: Any, module_logic: LogicOhli24, info: Dict[str, Any]) -> None:
    """Create a queue entity and eagerly parse title metadata for the DB/UI."""
    super().__init__(P, module_logic, info)

    # Extraction results — filled lazily by prepare_extra().
    self._vi: Optional[Any] = None
    self.url: Optional[str] = None
    self.vtt: Optional[str] = None
    self.srt_url: Optional[str] = None
    self.headers: Optional[Dict[str, str]] = None
    self.cookies_file: Optional[str] = None
    self.need_special_downloader: bool = False

    # Target file metadata — predicted in parse_metadata().
    self.epi_queue: Optional[str] = None
    self.filepath: Optional[str] = None
    self.savepath: Optional[str] = None
    self.quality: Optional[str] = "720P"
    self.filename: Optional[str] = None
    self.season: int = 1
    self.content_title: Optional[str] = None
    self._discord_sent: bool = False

    # [Early Extraction] Parse basic metadata immediately so the DB append
    # has meaningful values before any network work happens.
    self.parse_metadata()
def parse_metadata(self) -> None:
    """Derive series/season/episode and predicted paths from the raw title.

    Populates ``content_title``, ``season``, ``epi_queue``, ``filename``,
    ``savepath`` and ``filepath`` so the DB row and UI have data before
    the expensive extraction runs.
    """
    try:
        title_full = self.info.get("title", "")
        if not title_full:
            return

        matched = re.compile(
            r"(?P<title>.*?)\s*((?P<season>\d+)기)?\s*((?P<epi_no>\d+)화)"
        ).search(title_full)
        if matched:
            self.content_title = matched.group("title").strip()
            if matched.group("season"):
                self.season = int(matched.group("season"))
            self.epi_queue = int(matched.group("epi_no"))
        else:
            # No recognizable pattern: treat the whole title as the series name.
            self.content_title = title_full
            self.epi_queue = 1

        # Predict the output filename for the UI (quality may change later).
        epi_no = self.epi_queue
        season_tag = "0%s" % self.season if self.season < 10 else self.season
        epi_tag = "0%s" % epi_no if epi_no < 10 else epi_no
        ret = "%s.S%sE%s.%s-OHNI24.mp4" % (
            self.content_title,
            season_tag,
            epi_tag,
            self.quality,
        )
        self.filename = Util.change_text_for_use_filename(ret)

        # Build the save path, honoring the auto-folder settings.
        self.savepath = P.ModelSetting.get("ohli24_download_path")
        if P.ModelSetting.get_bool("ohli24_auto_make_folder"):
            folder_name = self.content_title
            if self.info.get("day", "").find("완결") != -1:
                folder_name = "%s %s" % (P.ModelSetting.get("ohli24_finished_insert"), self.content_title)
            folder_name = Util.change_text_for_use_filename(folder_name.strip())
            self.savepath = os.path.join(self.savepath, folder_name)
            if P.ModelSetting.get_bool("ohli24_auto_make_season_folder"):
                self.savepath = os.path.join(self.savepath, "Season %s" % int(self.season))
        self.filepath = os.path.join(self.savepath, self.filename)
    except Exception as e:
        logger.error(f"Error in parse_metadata: {e}")
2022-10-29 17:21:14 +09:00
def refresh_status(self) -> None:
    """Propagate a status change to the DB (on failure) and the web UI."""
    super().refresh_status()

    # ffmpeg_queue_v1.py marks a failed entity with ffmpeg_status == -1;
    # mirror that failure into the DB record.
    if getattr(self, 'ffmpeg_status', 0) == -1:
        failure_reason = getattr(self, 'ffmpeg_status_kor', 'Unknown Error')
        self.download_failed(failure_reason)

    self.module_logic.socketio_callback("status", self.as_dict())
    # Additionally emit on the explicit /queue namespace (best effort).
    try:
        from framework import socketio
        queue_ns = f"/{self.P.package_name}/{self.module_logic.name}/queue"
        socketio.emit("status", self.as_dict(), namespace=queue_ns)
    except:
        pass
2022-10-29 17:21:14 +09:00
def info_dict(self, tmp: Dict[str, Any]) -> Dict[str, Any]:
    """Merge this entity's scrape info and parsed metadata into *tmp*.

    *tmp* is mutated in place and also returned for convenience.
    """
    tmp.update(self.info)
    tmp["vtt"] = self.vtt
    tmp["season"] = self.season
    tmp["content_title"] = self.content_title
    tmp["ohli24_info"] = self.info
    tmp["epi_queue"] = self.epi_queue
    return tmp
def download_completed(self) -> None:
    """Finalize a successful download: verify the file, sync the DB, notify Discord.

    Falls back to :meth:`download_failed` when the expected output file
    is missing on disk.
    """
    super().download_completed()

    logger.debug("download completed.......!!")
    # Verify file actually exists before marking as completed.
    if not self.filepath or not os.path.exists(self.filepath):
        logger.warning(f"[DB_COMPLETE] File does not exist after download_completed: {self.filepath}")
        # Call download_failed instead
        self.download_failed("File not found after download")
        return

    logger.debug(f"[DB_COMPLETE] Looking up entity by ohli24_id: {self.info.get('_id')}")
    db_entity = ModelOhli24Item.get_by_ohli24_id(self.info["_id"])
    logger.debug(f"[DB_COMPLETE] Found db_entity: {db_entity}")

    if db_entity is not None:
        db_entity.status = "completed"
        db_entity.completed_time = datetime.now()
        # Map missing fields from queue entity to DB record
        db_entity.filepath = self.filepath
        db_entity.filename = self.filename
        db_entity.savepath = self.savepath
        db_entity.quality = self.quality
        db_entity.video_url = self.url
        db_entity.vtt_url = self.vtt
        result = db_entity.save()
        logger.debug(f"[DB_COMPLETE] Save result: {result}")
        # Discord Notification (On Complete)
        try:
            if P.ModelSetting.get_bool("ohli24_discord_notify"):
                title = self.info.get('title', 'Unknown Title')
                filename = self.filename
                poster_url = self.info.get('thumbnail', '')
                msg = "다운로드가 완료되었습니다."
                # BUGFIX: arguments were passed as (msg, title, ...), which put
                # the generic message into the embed title.  The callee
                # signature is send_discord_notification(title, desc, filename,
                # image_url), so pass the episode title first.
                self.module_logic.send_discord_notification(title, msg, filename, poster_url)
        except Exception as e:
            logger.error(f"Failed to send discord notification on complete: {e}")
    else:
        logger.warning(f"[DB_COMPLETE] No db_entity found for _id: {self.info.get('_id')}")
def download_failed(self, reason: str) -> None:
    """Mark the matching DB row as failed; *reason* is only logged."""
    logger.debug(f"download failed.......!! reason: {reason}")
    db_entity = ModelOhli24Item.get_by_ohli24_id(self.info["_id"])
    if db_entity is None:
        return
    db_entity.status = "failed"
    db_entity.save()
# [Lazy Extraction] prepare_extra() replaces make_episode_info()
def prepare_extra(self):
2022-10-29 17:21:14 +09:00
try:
base_url = LogicOhli24.get_base_url()
2025-12-27 23:27:46 +09:00
# 에피소드 페이지 URL (예: https://ani.ohli24.com/e/원펀맨 3기 1화)
2022-10-29 17:21:14 +09:00
url = self.info["va"]
2025-12-27 23:27:46 +09:00
if "//e/" in url:
url = url.replace("//e/", "/e/")
# URL Sanitization for va
if base_url in url and f"{base_url}//" in url:
url = url.replace(f"{base_url}//", f"{base_url}/")
2022-10-29 17:21:14 +09:00
ourls = parse.urlparse(url)
2025-12-27 23:27:46 +09:00
2022-10-29 17:21:14 +09:00
headers = {
2023-01-26 15:33:59 +09:00
"Referer": f"{ourls.scheme}://{ourls.netloc}",
2025-12-27 23:27:46 +09:00
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
2022-10-29 17:21:14 +09:00
}
2025-12-27 23:27:46 +09:00
logger.debug(f"make_episode_info()::url==> {url}")
2022-10-29 17:21:14 +09:00
logger.info(f"self.info:::> {self.info}")
# ------------------------------------------------------------------
# [METADATA PARSING] - Extract title, season, epi info first!
# ------------------------------------------------------------------
# [IMMEDIATE SYNC] Update DB with extracted metadata
try:
db_entity = ModelOhli24Item.get_by_ohli24_id(self.info["_id"])
if db_entity:
logger.debug(f"[SYNC] Syncing metadata for Ohli24 _id: {self.info.get('_id')}")
db_entity.title = self.content_title
db_entity.season = self.season
db_entity.episode_no = self.epi_queue
db_entity.savepath = self.savepath
db_entity.filename = self.filename
db_entity.filepath = self.filepath
db_entity.save()
except Exception as sync_err:
logger.error(f"Failed to sync metadata to DB: {sync_err}")
if not os.path.exists(self.savepath):
os.makedirs(self.savepath)
logger.info(f"self.savepath::> {self.savepath}")
# ------------------------------------------------------------------
# [VIDEO EXTRACTION]
# ------------------------------------------------------------------
2025-12-27 23:27:46 +09:00
# Step 1: 에피소드 페이지에서 cdndania.com iframe 찾기
2025-12-25 19:50:27 +09:00
text = LogicOhli24.get_html(url, headers=headers, referer=f"{ourls.scheme}://{ourls.netloc}")
2025-12-27 23:27:46 +09:00
# 디버깅: HTML에 cdndania 있는지 확인
if "cdndania" in text:
logger.info("cdndania found in HTML")
else:
logger.warning("cdndania NOT found in HTML - page may be dynamically loaded")
# logger.debug(f"HTML snippet: {text[:1000]}")
2025-12-27 23:27:46 +09:00
soup = BeautifulSoup(text, "lxml")
# mcpalyer 클래스 내의 iframe 찾기
player_div = soup.find("div", class_="mcpalyer")
# logger.debug(f"player_div (mcpalyer): {player_div is not None}")
2025-12-27 23:27:46 +09:00
if not player_div:
player_div = soup.find("div", class_="embed-responsive")
# logger.debug(f"player_div (embed-responsive): {player_div is not None}")
2025-12-27 23:27:46 +09:00
iframe = None
if player_div:
iframe = player_div.find("iframe")
# logger.debug(f"iframe in player_div: {iframe is not None}")
2025-12-27 23:27:46 +09:00
if not iframe:
iframe = soup.find("iframe", src=re.compile(r"cdndania\.com"))
# logger.debug(f"iframe with cdndania src: {iframe is not None}")
2025-12-27 23:27:46 +09:00
if not iframe:
# 모든 iframe 찾기
all_iframes = soup.find_all("iframe")
# logger.debug(f"Total iframes found: {len(all_iframes)}")
2025-12-27 23:27:46 +09:00
if all_iframes:
iframe = all_iframes[0]
if not iframe or not iframe.get("src"):
logger.error("No iframe found on episode page")
return
iframe_src = iframe.get("src")
logger.info(f"Found cdndania iframe: {iframe_src}")
self.iframe_src = iframe_src
# CDN 보안 우회 다운로더 필요 여부 - 설정에 따름
# self.need_special_downloader = True # 설정값 존중 (ffmpeg/ytdlp/aria2c 테스트 가능)
self.need_special_downloader = False
2025-12-27 23:27:46 +09:00
# Step 2: cdndania.com 페이지에서 m3u8 URL 및 해상도 추출
video_url, vtt_url, cookies_file, detected_resolution = self.extract_video_from_cdndania(iframe_src, url)
# 해상도 설정 (감지된 값 또는 기본값 720)
if detected_resolution:
self.quality = f"{detected_resolution}P"
logger.info(f"Quality set from m3u8: {self.quality}")
# [FILENAME GENERATION] - 해상도 감지 후 파일명 생성
# [FILENAME GENERATION] - Re-generate filename after quality detection
if self.epi_queue:
epi_no = self.epi_queue
ret = "%s.S%sE%s.%s-OHNI24.mp4" % (
self.content_title,
"0%s" % self.season if self.season < 10 else self.season,
"0%s" % epi_no if epi_no < 10 else epi_no,
self.quality,
)
self.filename = Util.change_text_for_use_filename(ret)
self.filepath = os.path.join(self.savepath, self.filename)
# [NFD CHECK] Mac/Docker Compatibility
if not os.path.exists(self.filepath):
nfd_filename = unicodedata.normalize('NFD', self.filename)
nfd_filepath = os.path.join(self.savepath, nfd_filename)
if os.path.exists(nfd_filepath):
logger.info(f"[NFD Match] Found existing file with NFD normalization: {nfd_filename}")
self.filename = nfd_filename
self.filepath = nfd_filepath
# [IMMEDIATE SYNC 2] Update filename/filepath after resolution detection
try:
db_entity = ModelOhli24Item.get_by_ohli24_id(self.info["_id"])
if db_entity:
db_entity.quality = self.quality
db_entity.filename = self.filename
db_entity.filepath = self.filepath
db_entity.save()
except:
pass
logger.info(f"self.filename::> {self.filename}")
2025-12-27 23:27:46 +09:00
if not video_url:
logger.error("Failed to extract video URL from cdndania")
return
self.url = video_url
self.srt_url = vtt_url
self.cookies_file = cookies_file # yt-dlp용 세션 쿠키 파일
self.iframe_src = iframe_src # CdndaniaDownloader용 원본 iframe URL
2025-12-27 23:27:46 +09:00
logger.info(f"Video URL: {self.url}")
if self.srt_url:
logger.info(f"Subtitle URL: {self.srt_url}")
if self.cookies_file:
logger.info(f"Cookies file: {self.cookies_file}")
2025-12-27 23:27:46 +09:00
# 헤더 설정 (Video Download용)
2022-10-29 17:21:14 +09:00
self.headers = {
2025-12-27 23:27:46 +09:00
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
"Referer": iframe_src,
2022-10-29 17:21:14 +09:00
}
# ------------------------------------------------------------------
# [SUBTITLE DOWNLOAD]
# ------------------------------------------------------------------
2025-12-27 23:27:46 +09:00
if self.srt_url and "thumbnails.vtt" not in self.srt_url:
try:
srt_filepath = os.path.join(self.savepath, self.filename.replace(".mp4", ".ko.srt"))
if not os.path.exists(srt_filepath):
srt_resp = requests.get(self.srt_url, headers=self.headers, timeout=30)
if srt_resp.status_code == 200:
Util.write_file(srt_resp.text, srt_filepath)
logger.info(f"Subtitle saved: {srt_filepath}")
except Exception as srt_err:
logger.warning(f"Subtitle download failed: {srt_err}")
# ------------------------------------------------------------------
# [IMMEDIATE SYNC] - Update DB with all extracted metadata
# ------------------------------------------------------------------
try:
db_entity = ModelOhli24Item.get_by_ohli24_id(self.info["_id"])
if db_entity:
logger.debug(f"[SYNC] Syncing metadata for _id: {self.info['_id']}")
db_entity.title = self.content_title
db_entity.season = self.season
db_entity.episode_no = self.epi_queue
db_entity.quality = self.quality
db_entity.savepath = self.savepath
db_entity.filename = self.filename
db_entity.filepath = self.filepath
db_entity.video_url = self.url
db_entity.vtt_url = self.srt_url or self.vtt
db_entity.save()
except Exception as sync_err:
logger.error(f"[SYNC] Failed to sync metadata in prepare_extra: {sync_err}")
2022-10-29 17:21:14 +09:00
except Exception as e:
P.logger.error("Exception:%s", e)
P.logger.error(traceback.format_exc())
2025-12-27 23:27:46 +09:00
def extract_video_from_cdndania(
self, iframe_src: str, referer_url: str
) -> Tuple[Optional[str], Optional[str], Optional[str], Optional[int]]:
"""cdndania.com 플레이어에서 API 호출을 통해 비디오(m3u8) 및 자막(vtt) URL 추출
Returns:
tuple: (video_url, vtt_url, cookies_file, resolution) - resolution은 720, 1080
"""
2025-12-27 23:27:46 +09:00
video_url = None
vtt_url = None
cookies_file = None
resolution = None # 해상도 (height: 720, 1080 등)
2025-12-27 23:27:46 +09:00
try:
from curl_cffi import requests
import tempfile
import json
2025-12-27 23:27:46 +09:00
logger.debug(f"Extracting from cdndania: {iframe_src}")
# iframe URL에서 비디오 ID(hash) 추출
video_id = ""
if "/video/" in iframe_src:
video_id = iframe_src.split("/video/")[1].split("?")[0].split("&")[0]
elif "/v/" in iframe_src:
video_id = iframe_src.split("/v/")[1].split("?")[0].split("&")[0]
if not video_id:
logger.error(f"Could not find video ID in iframe URL: {iframe_src}")
return video_url, vtt_url, cookies_file
# curl_cffi 세션 생성 (Chrome 120 TLS Fingerprint)
scraper = requests.Session(impersonate="chrome120")
proxies = LogicOhli24.get_proxies()
if proxies:
scraper.proxies = {"http": proxies["http"], "https": proxies["https"]}
2025-12-27 23:27:46 +09:00
# iframe 도메인 자동 감지 (cdndania.com -> michealcdn.com 등)
parsed_iframe = parse.urlparse(iframe_src)
iframe_domain = f"{parsed_iframe.scheme}://{parsed_iframe.netloc}"
# [CRITICAL] iframe 페이지 먼저 방문하여 세션 쿠키 획득
encoded_referer = parse.quote(referer_url, safe=":/?#[]@!$&'()*+,;=%")
iframe_headers = {
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"referer": encoded_referer,
}
logger.debug(f"Visiting iframe page for cookies: {iframe_src}")
scraper.get(iframe_src, headers=iframe_headers, timeout=30, proxies=proxies)
# getVideo API 호출
api_url = f"{iframe_domain}/player/index.php?data={video_id}&do=getVideo"
2025-12-27 23:27:46 +09:00
headers = {
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
2025-12-27 23:27:46 +09:00
"x-requested-with": "XMLHttpRequest",
"content-type": "application/x-www-form-urlencoded; charset=UTF-8",
"referer": iframe_src,
"origin": iframe_domain
2025-12-27 23:27:46 +09:00
}
post_data = {
"hash": video_id,
"r": "https://ani.ohli24.com/"
}
logger.debug(f"Calling video API with session: {api_url}")
response = scraper.post(api_url, headers=headers, data=post_data, timeout=30, proxies=proxies)
json_text = response.text
2025-12-27 23:27:46 +09:00
if json_text:
try:
data = json.loads(json_text)
video_url = data.get("videoSource")
if not video_url:
video_url = data.get("securedLink")
if video_url:
logger.info(f"Found video URL via API: {video_url}")
# [RESOLUTION PARSING] - 같은 세션으로 m3u8 파싱 (쿠키 유지)
try:
m3u8_headers = {
"referer": iframe_src,
"origin": iframe_domain,
"accept": "*/*",
}
m3u8_resp = scraper.get(video_url, headers=m3u8_headers, timeout=10, proxies=proxies)
m3u8_content = m3u8_resp.text
logger.debug(f"m3u8 content (first 300 chars): {m3u8_content[:300]}")
if "#EXT-X-STREAM-INF" in m3u8_content:
for line in m3u8_content.strip().split('\n'):
if line.startswith('#EXT-X-STREAM-INF'):
res_match = re.search(r'RESOLUTION=(\d+)x(\d+)', line)
if res_match:
resolution = int(res_match.group(2)) # height
if resolution:
logger.info(f"Detected resolution from m3u8: {resolution}p")
except Exception as res_err:
logger.warning(f"Resolution parsing failed: {res_err}")
2025-12-27 23:27:46 +09:00
# VTT 자막 확인 (있는 경우)
vtt_url = data.get("videoSubtitle")
if vtt_url:
logger.info(f"Found subtitle URL via API: {vtt_url}")
# 세션 쿠키를 파일로 저장 (yt-dlp용)
try:
# Netscape 형식 쿠키 파일 생성
fd, cookies_file = tempfile.mkstemp(suffix='.txt', prefix='cdndania_cookies_')
with os.fdopen(fd, 'w') as f:
f.write("# Netscape HTTP Cookie File\n")
f.write("# https://curl.haxx.se/docs/http-cookies.html\n\n")
# RequestsCookieJar는 반복 시 Cookie 객체를 반환하거나 이름(str)을 반환할 수 있음
for cookie in scraper.cookies:
if hasattr(cookie, 'domain'):
# Cookie 객체인 경우
domain = cookie.domain
flag = "TRUE" if domain.startswith('.') else "FALSE"
path = cookie.path or "/"
secure = "TRUE" if cookie.secure else "FALSE"
expiry = str(int(cookie.expires)) if cookie.expires else "0"
name = cookie.name
value = cookie.value
f.write(f"{domain}\t{flag}\t{path}\t{secure}\t{expiry}\t{name}\t{value}\n")
elif isinstance(cookie, str):
# 이름(str)인 경우 (dictionary-like iteration)
name = cookie
value = scraper.cookies.get(name)
# 도메인 정보가 없으므로 iframe_domain 활용
domain = parse.urlparse(iframe_src).netloc
f.write(f"{domain}\tTRUE\t/\tFALSE\t0\t{name}\t{value}\n")
logger.info(f"Saved {len(scraper.cookies)} cookies to: {cookies_file}")
except Exception as cookie_err:
logger.warning(f"Failed to save cookies: {cookie_err}")
cookies_file = None
2025-12-27 23:27:46 +09:00
except Exception as json_err:
logger.warning(f"Failed to parse API JSON: {json_err}")
logger.debug(f"API Response Text (First 1000 chars): {json_text[:1000] if json_text else 'Empty'}")
2025-12-27 23:27:46 +09:00
# API 실패 시 기존 방식(정규식)으로 폴백
if not video_url:
logger.info("API extraction failed, falling back to regex")
# Ensure referer is percent-encoded for headers (avoids UnicodeEncodeError)
encoded_referer = parse.quote(referer_url, safe=":/?#[]@!$&'()*+,;=%")
html_response = scraper.get(iframe_src, headers={"referer": encoded_referer}, timeout=30, proxies=proxies)
html_content = html_response.text
2025-12-27 23:27:46 +09:00
if html_content:
# m3u8 URL 패턴 찾기
m3u8_patterns = [
re.compile(r"file:\s*['\"]([^'\"]*(?:\.m3u8|master\.txt)[^'\"]*)['\"]"),
re.compile(r"['\"]([^'\"]*(?:\.m3u8|master\.txt)[^'\"]*)['\"]"),
2025-12-27 23:27:46 +09:00
]
for pattern in m3u8_patterns:
match = pattern.search(html_content)
if match:
tmp_url = match.group(1)
if tmp_url.startswith("//"): tmp_url = "https:" + tmp_url
elif tmp_url.startswith("/"):
parsed = parse.urlparse(iframe_src)
tmp_url = f"{parsed.scheme}://{parsed.netloc}{tmp_url}"
video_url = tmp_url
logger.info(f"Found video URL via regex: {video_url}")
break
if not video_url:
logger.warning("Regex extraction failed. Dumping HTML content.")
logger.debug(f"HTML Content (First 2000 chars): {html_content[:2000]}")
2025-12-27 23:27:46 +09:00
if not vtt_url:
vtt_match = re.search(r"['\"]([^'\"]*\.vtt[^'\"]*)['\"]", html_content)
2025-12-27 23:27:46 +09:00
if vtt_match:
vtt_url = vtt_match.group(1)
if vtt_url.startswith("//"): vtt_url = "https:" + vtt_url
elif vtt_url.startswith("/"):
parsed = parse.urlparse(iframe_src)
vtt_url = f"{parsed.scheme}://{parsed.netloc}{vtt_url}"
except Exception as e:
logger.error(f"Error in extract_video_from_cdndania: {e}")
logger.error(traceback.format_exc())
return video_url, vtt_url, cookies_file, resolution
2022-10-29 17:21:14 +09:00
2023-01-26 15:33:59 +09:00
# def callback_function(self, **args):
# refresh_type = None
# # entity = self.get_entity_by_entity_id(arg['plugin_id'])
# entity = self.get_entity_by_entity_id(args['data']['callback_id'])
#
# if args['type'] == 'status_change':
# if args['status'] == SupportFfmpeg.Status.DOWNLOADING:
# refresh_type = 'status_change'
# elif args['status'] == SupportFfmpeg.Status.COMPLETED:
# refresh_type = 'status_change'
# logger.debug('ffmpeg_queue_v1.py:: download completed........')
# elif args['status'] == SupportFfmpeg.Status.READY:
# data = {'type': 'info',
# 'msg': '다운로드중 Duration(%s)' % args['data']['duration_str'] + '<br>' + args['data'][
# 'save_fullpath'], 'url': '/ffmpeg/download/list'}
# socketio.emit("notify", data, namespace='/framework')
2023-01-26 15:33:59 +09:00
# refresh_type = 'add'
# elif args['type'] == 'last':
# if args['status'] == SupportFfmpeg.Status.WRONG_URL:
# data = {'type': 'warning', 'msg': '잘못된 URL입니다'}
# socketio.emit("notify", data, namespace='/framework')
2023-01-26 15:33:59 +09:00
# refresh_type = 'add'
# elif args['status'] == SupportFfmpeg.Status.WRONG_DIRECTORY:
# data = {'type': 'warning', 'msg': '잘못된 디렉토리입니다.<br>' + args['data']['save_fullpath']}
# socketio.emit("notify", data, namespace='/framework')
2023-01-26 15:33:59 +09:00
# refresh_type = 'add'
# elif args['status'] == SupportFfmpeg.Status.ERROR or args['status'] == SupportFfmpeg.Status.EXCEPTION:
# data = {'type': 'warning', 'msg': '다운로드 시작 실패.<br>' + args['data']['save_fullpath']}
# socketio.emit("notify", data, namespace='/framework')
2023-01-26 15:33:59 +09:00
# refresh_type = 'add'
# elif args['status'] == SupportFfmpeg.Status.USER_STOP:
# data = {'type': 'warning', 'msg': '다운로드가 중지 되었습니다.<br>' + args['data']['save_fullpath'],
# 'url': '/ffmpeg/download/list'}
# socketio.emit("notify", data, namespace='/framework')
2023-01-26 15:33:59 +09:00
# refresh_type = 'last'
# elif args['status'] == SupportFfmpeg.Status.COMPLETED:
# logger.debug('ffmpeg download completed......')
# entity.download_completed()
# data = {'type': 'success', 'msg': '다운로드가 완료 되었습니다.<br>' + args['data']['save_fullpath'],
# 'url': '/ffmpeg/download/list'}
#
#
# socketio.emit("notify", data, namespace='/framework')
2023-01-26 15:33:59 +09:00
# refresh_type = 'last'
# elif args['status'] == SupportFfmpeg.Status.TIME_OVER:
# data = {'type': 'warning', 'msg': '시간초과로 중단 되었습니다.<br>' + args['data']['save_fullpath'],
# 'url': '/ffmpeg/download/list'}
# socketio.emit("notify", data, namespace='/framework')
2023-01-26 15:33:59 +09:00
# refresh_type = 'last'
# elif args['status'] == SupportFfmpeg.Status.PF_STOP:
# data = {'type': 'warning', 'msg': 'PF초과로 중단 되었습니다.<br>' + args['data']['save_fullpath'],
# 'url': '/ffmpeg/download/list'}
# socketio.emit("notify", data, namespace='/framework')
2023-01-26 15:33:59 +09:00
# refresh_type = 'last'
# elif args['status'] == SupportFfmpeg.Status.FORCE_STOP:
# data = {'type': 'warning', 'msg': '강제 중단 되었습니다.<br>' + args['data']['save_fullpath'],
# 'url': '/ffmpeg/download/list'}
# socketio.emit("notify", data, namespace='/framework')
2023-01-26 15:33:59 +09:00
# refresh_type = 'last'
# elif args['status'] == SupportFfmpeg.Status.HTTP_FORBIDDEN:
# data = {'type': 'warning', 'msg': '403에러로 중단 되었습니다.<br>' + args['data']['save_fullpath'],
# 'url': '/ffmpeg/download/list'}
# socketio.emit("notify", data, namespace='/framework')
2023-01-26 15:33:59 +09:00
# refresh_type = 'last'
# elif args['status'] == SupportFfmpeg.Status.ALREADY_DOWNLOADING:
# data = {'type': 'warning', 'msg': '임시파일폴더에 파일이 있습니다.<br>' + args['data']['temp_fullpath'],
# 'url': '/ffmpeg/download/list'}
# socketio.emit("notify", data, namespace='/framework')
2023-01-26 15:33:59 +09:00
# refresh_type = 'last'
# elif args['type'] == 'normal':
# if args['status'] == SupportFfmpeg.Status.DOWNLOADING:
# refresh_type = 'status'
# # P.logger.info(refresh_type)
# # Todo:
# self.socketio_callback(refresh_type, args['data'])
2023-01-26 15:33:59 +09:00
class ModelOhli24Item(ModelBase):
    """DB row describing a single Ohli24 episode download."""

    P = P

    __tablename__ = "{package_name}_ohli24_item".format(package_name=P.package_name)
    __table_args__ = {"mysql_collate": "utf8_general_ci", "extend_existing": True}
    __bind_key__ = P.package_name

    id = db.Column(db.Integer, primary_key=True)
    created_time = db.Column(db.DateTime)
    completed_time = db.Column(db.DateTime)
    reserved = db.Column(db.JSON)
    content_code = db.Column(db.String)
    season = db.Column(db.Integer)
    episode_no = db.Column(db.Integer)
    title = db.Column(db.String)
    episode_title = db.Column(db.String)
    ohli24_va = db.Column(db.String)
    ohli24_vi = db.Column(db.String)
    ohli24_id = db.Column(db.String)
    quality = db.Column(db.String)
    filepath = db.Column(db.String)
    filename = db.Column(db.String)
    savepath = db.Column(db.String)
    video_url = db.Column(db.String)
    vtt_url = db.Column(db.String)
    thumbnail = db.Column(db.String)
    status = db.Column(db.String)  # "wait" -> "completed" / "failed"
    ohli24_info = db.Column(db.JSON)

    def __init__(self):
        self.created_time = datetime.now()

    def __repr__(self):
        return repr(self.as_dict())

    def as_dict(self) -> Dict[str, Any]:
        """Serialize the row to a JSON-friendly dict (datetimes as strings)."""
        ret = {x.name: getattr(self, x.name) for x in self.__table__.columns}
        ret["created_time"] = self.created_time.strftime("%Y-%m-%d %H:%M:%S")
        ret["completed_time"] = (
            self.completed_time.strftime("%Y-%m-%d %H:%M:%S") if self.completed_time is not None else None
        )
        return ret

    def save(self) -> Optional["ModelOhli24Item"]:
        """Persist this row; return self on success, None on failure."""
        try:
            with F.app.app_context():
                F.db.session.add(self)
                F.db.session.commit()
                return self
        except Exception as e:
            self.P.logger.error(f"Exception:{str(e)}")
            self.P.logger.error(traceback.format_exc())

    @classmethod
    def get_by_id(cls, id: int) -> Optional["ModelOhli24Item"]:
        """Look up an item by primary key; None when absent or on error."""
        try:
            with F.app.app_context():
                return F.db.session.query(cls).filter_by(id=int(id)).first()
        except Exception as e:
            cls.P.logger.error(f"Exception:{str(e)}")
            cls.P.logger.error(traceback.format_exc())

    @classmethod
    def get_by_ohli24_id(cls, ohli24_id: str) -> Optional["ModelOhli24Item"]:
        """Look up an item by its Ohli24 id; None when absent or on error."""
        try:
            with F.app.app_context():
                return F.db.session.query(cls).filter_by(ohli24_id=ohli24_id).first()
        except Exception as e:
            cls.P.logger.error(f"Exception:{str(e)}")
            cls.P.logger.error(traceback.format_exc())

    @classmethod
    def delete_by_id(cls, idx: int) -> bool:
        """Delete an item by primary key; return True on success.

        Consistency fix: runs inside ``F.app.app_context()`` with logged
        error handling, like every other DB method on this class.
        """
        try:
            with F.app.app_context():
                F.db.session.query(cls).filter_by(id=idx).delete()
                F.db.session.commit()
                return True
        except Exception as e:
            cls.P.logger.error(f"Exception:{str(e)}")
            cls.P.logger.error(traceback.format_exc())
            return False

    @classmethod
    def web_list(cls, req: Any) -> Dict[str, Any]:
        """Paged listing for the web UI (30 items per page)."""
        ret: Dict[str, Any] = {}
        page = int(req.form["page"]) if "page" in req.form else 1
        page_size = 30
        search = req.form["search_word"] if "search_word" in req.form else ""
        option = req.form["option"] if "option" in req.form else "all"
        order = req.form["order"] if "order" in req.form else "desc"
        query = cls.make_query(search=search, order=order, option=option)
        count = query.count()
        query = query.limit(page_size).offset((page - 1) * page_size)
        lists = query.all()
        ret["list"] = [item.as_dict() for item in lists]
        ret["paging"] = cls.get_paging_info(count, page, page_size)
        return ret

    @classmethod
    def make_query(
        cls, search: str = "", order: str = "desc", option: str = "all"
    ) -> Any:
        """Build the filtered/ordered query for ``web_list``.

        Search syntax: "a|b" matches filename a OR b; "a,b" matches a AND b;
        otherwise a plain substring match on filename.
        """
        query = db.session.query(cls)
        if search is not None and search != "":
            if search.find("|") != -1:
                tmp = search.split("|")
                conditions = []
                for tt in tmp:
                    if tt != "":
                        conditions.append(cls.filename.like("%" + tt.strip() + "%"))
                query = query.filter(or_(*conditions))
            elif search.find(",") != -1:
                tmp = search.split(",")
                for tt in tmp:
                    if tt != "":
                        query = query.filter(cls.filename.like("%" + tt.strip() + "%"))
            else:
                query = query.filter(cls.filename.like("%" + search + "%"))
        if option == "completed":
            query = query.filter(cls.status == "completed")
        query = query.order_by(desc(cls.id)) if order == "desc" else query.order_by(cls.id)
        return query

    @classmethod
    def get_list_uncompleted(cls) -> List["ModelOhli24Item"]:
        """Return every item whose status is not "completed".

        Consistency fix: runs inside ``F.app.app_context()`` like the other
        classmethods; results are fully materialized before the context exits.
        """
        with F.app.app_context():
            return F.db.session.query(cls).filter(cls.status != "completed").all()

    @classmethod
    def append(cls, q: Dict[str, Any]) -> None:
        """Create a new "wait" row from a queue-entity dict *q*."""
        try:
            logger.debug(f"[DB_APPEND] Starting append for _id: {q.get('_id')}")
            item = ModelOhli24Item()
            item.content_code = q["content_code"]
            item.season = q["season"]
            item.episode_no = q["epi_queue"]
            item.title = q["content_title"]
            item.episode_title = q["title"]
            item.ohli24_va = q["va"]
            item.ohli24_vi = q["_vi"]
            item.ohli24_id = q["_id"]
            item.quality = q["quality"]
            item.filepath = q["filepath"]
            item.filename = q["filename"]
            item.savepath = q["savepath"]
            item.video_url = q["url"]
            item.vtt_url = q["vtt"]
            item.thumbnail = q["thumbnail"]
            item.status = "wait"
            item.ohli24_info = q["ohli24_info"]
            result = item.save()
            logger.debug(f"[DB_APPEND] Save result for _id {q.get('_id')}: {result}")
        except Exception as e:
            logger.error(f"[DB_APPEND] Exception during append: {e}")
            logger.error(traceback.format_exc())
2022-11-12 23:47:21 +09:00
class ModelOhli24Program(ModelBase):
    """Queue/bookkeeping row for a single Ohli24 program clip."""

    P = P

    __tablename__ = f"{P.package_name}_{name}_program"
    __table_args__ = {"mysql_collate": "utf8_general_ci", "extend_existing": True}
    __bind_key__ = P.package_name

    id = db.Column(db.Integer, primary_key=True)
    created_time = db.Column(db.DateTime, nullable=False)
    completed_time = db.Column(db.DateTime)
    completed = db.Column(db.Boolean)
    clip_id = db.Column(db.String)
    info = db.Column(db.String)
    status = db.Column(db.String)
    call = db.Column(db.String)

    # In-memory queue shared across all instances (intentional class attribute).
    queue_list = []

    def __init__(self, clip_id, info, call="user"):
        self.clip_id = clip_id
        self.info = info
        self.completed = False
        self.created_time = datetime.now()
        self.status = "READY"
        self.call = call

    def init_for_queue(self):
        """Reset the status and register this row in the in-memory queue."""
        self.status = "READY"
        self.queue_list.append(self)

    @classmethod
    def get(cls, clip_id):
        """Return the most recent row for *clip_id*, or None."""
        with F.app.app_context():
            matches = db.session.query(cls).filter_by(clip_id=clip_id)
            return matches.order_by(desc(cls.id)).first()

    @classmethod
    def is_duplicate(cls, clip_id):
        """True when a row for *clip_id* already exists."""
        return cls.get(clip_id) is not None

    # 오버라이딩
    @classmethod
    def make_query(cls, req, order="desc", search="", option1="all", option2="all"):
        """Build the filtered/ordered list query used by the web UI."""
        with F.app.app_context():
            query = F.db.session.query(cls)
            query = query.filter(cls.info["channel_name"].like("%" + search + "%"))
            if option1 == "completed":
                query = query.filter_by(completed=True)
            elif option1 == "incompleted":
                query = query.filter_by(completed=False)
            elif option1 == "auto":
                query = query.filter_by(call="user")
            ordering = desc(cls.id) if order == "desc" else cls.id
            return query.order_by(ordering)

    @classmethod
    def remove_all(cls, is_completed=True):  # to remove_all(True/False)
        """Delete every row whose *completed* flag matches; return the count."""
        with F.app.app_context():
            deleted = db.session.query(cls).filter_by(completed=is_completed).delete()
            db.session.commit()
            return deleted

    @classmethod
    def get_failed(cls):
        """Return all rows that never completed."""
        with F.app.app_context():
            return db.session.query(cls).filter_by(completed=False).all()

    # only for queue
    @classmethod
    def get_by_id_in_queue(cls, id):
        """Find an in-memory queue entry by id; None when absent."""
        wanted = int(id)
        return next((entry for entry in cls.queue_list if entry.id == wanted), None)

    # only for queue END