Release v0.1.0: GDM Refactor, Rate Limit, Metallic UI

This commit is contained in:
2026-01-05 21:14:51 +09:00
commit fac33cff0b
22 changed files with 1829 additions and 0 deletions

30
downloader/__init__.py Normal file
View File

@@ -0,0 +1,30 @@
"""
다운로더 모듈 패키지
"""
from typing import Optional
from .base import BaseDownloader
def get_downloader(source_type: str) -> Optional[BaseDownloader]:
    """Return a downloader instance for *source_type*, or None if unsupported.

    Backend modules are imported lazily inside each branch so that a
    backend's dependencies are only loaded when that backend is used.
    """
    if source_type in ('ani24', 'linkkf', 'hls'):
        from .ffmpeg_hls import FfmpegHlsDownloader
        return FfmpegHlsDownloader()
    if source_type in ('youtube', 'general'):
        from .ytdlp_aria2 import YtdlpAria2Downloader
        return YtdlpAria2Downloader()
    if source_type == 'anilife':
        from .anilife import AnilifeDnloader
        return AnilifeDnloader()
    if source_type == 'http':
        from .http_direct import HttpDirectDownloader
        return HttpDirectDownloader()
    # Unknown source type: caller is expected to handle None.
    return None
__all__ = ['get_downloader', 'BaseDownloader']

Binary file not shown.

Binary file not shown.

Binary file not shown.

144
downloader/anilife.py Normal file
View File

@@ -0,0 +1,144 @@
"""
Anilife 전용 다운로더
- Camoufox로 _aldata 추출 후 ffmpeg 다운로드
- 기존 anime_downloader의 camoufox_anilife.py 로직 활용
"""
import os
import traceback
from typing import Dict, Any, Optional, Callable
from .base import BaseDownloader
from .ffmpeg_hls import FfmpegHlsDownloader
try:
from ..setup import P
logger = P.logger
except:
import logging
logger = logging.getLogger(__name__)
class AnilifeDnloader(BaseDownloader):
    """Anilife-specific downloader (Camoufox + FFmpeg).

    Extracts a stream URL from an Anilife page via the page's `_aldata`
    variable (base64-encoded JSON), then delegates the actual HLS
    download to FfmpegHlsDownloader.

    NOTE(review): the 'Dnloader' spelling is part of the public name
    imported by downloader/__init__.py — do not rename without updating
    that import.
    """

    def __init__(self):
        super().__init__()
        # FFmpeg backend that performs the real download; cancel() is
        # forwarded to it so an in-flight ffmpeg run also stops.
        self._ffmpeg_downloader = FfmpegHlsDownloader()

    def download(
        self,
        url: str,
        save_path: str,
        filename: Optional[str] = None,
        progress_callback: Optional[Callable] = None,
        **options
    ) -> Dict[str, Any]:
        """Extract the stream URL, then download it (extract + download).

        Args:
            url: Anilife page URL.
            save_path: directory to save into.
            filename: output file name; auto-detected when None.
            progress_callback: called as (progress, speed, eta).
            **options: passed through to the extractor and to
                FfmpegHlsDownloader.download (detail_url, episode_num,
                headers, ffmpeg_path, ...).

        Returns:
            {'success': bool, 'filepath': str, 'error': str (on failure)}.
        """
        try:
            # 1. Extract the stream URL.
            if progress_callback:
                progress_callback(0, 'Extracting...', '')
            stream_url = self._extract_stream_url(url, options)
            if not stream_url:
                return {'success': False, 'error': 'Failed to extract stream URL'}
            logger.info(f'Anilife 스트림 URL 추출 완료: {stream_url[:50]}...')
            # 2. Download with FFmpeg, forwarding the caller's callback.
            return self._ffmpeg_downloader.download(
                url=stream_url,
                save_path=save_path,
                filename=filename,
                progress_callback=progress_callback,
                **options
            )
        except Exception as e:
            logger.error(f'Anilife download error: {e}')
            logger.error(traceback.format_exc())
            return {'success': False, 'error': str(e)}

    def get_info(self, url: str) -> Dict[str, Any]:
        """Return minimal metadata; only the source tag is known here."""
        return {'source': 'anilife'}

    def cancel(self):
        """Cancel this download and the delegated ffmpeg download."""
        super().cancel()
        self._ffmpeg_downloader.cancel()

    def _extract_stream_url(self, url: str, options: Dict) -> Optional[str]:
        """Extract the stream URL using Camoufox; None on failure."""
        try:
            # Preferred path: reuse the existing extraction logic from the
            # anime_downloader plugin, if it is installed.
            try:
                from anime_downloader.lib.camoufox_anilife import extract_aldata
                import asyncio
                # detail_url / episode_num come from options, falling back
                # to the page URL and episode '1'.
                detail_url = options.get('detail_url', url)
                episode_num = options.get('episode_num', '1')
                # Run the async extraction to completion.
                # NOTE(review): asyncio.run raises RuntimeError if a loop is
                # already running; that lands in the outer except below and
                # skips the Camoufox fallback — confirm this is intended.
                result = asyncio.run(extract_aldata(detail_url, episode_num))
                if result.get('success') and result.get('aldata'):
                    # Decode aldata into the actual stream URL.
                    return self._decode_aldata(result['aldata'])
            except ImportError:
                logger.warning('anime_downloader 모듈을 찾을 수 없습니다. 기본 추출 로직 사용')
            # Fallback: drive Camoufox directly against the page.
            return self._extract_with_camoufox(url, options)
        except Exception as e:
            logger.error(f'Stream URL extraction error: {e}')
            return None

    def _decode_aldata(self, aldata: str) -> Optional[str]:
        """Decode the base64-encoded `_aldata` JSON payload into a URL."""
        try:
            import base64
            import json
            decoded = base64.b64decode(aldata).decode('utf-8')
            data = json.loads(decoded)
            # Pull the stream URL; the payload structure may vary, so try
            # the known key names in order.
            if isinstance(data, dict):
                return data.get('url') or data.get('stream') or data.get('file')
            elif isinstance(data, str):
                return data
        except Exception as e:
            logger.error(f'_aldata decode error: {e}')
        # Unrecognized payload type or decode failure.
        return None

    def _extract_with_camoufox(self, url: str, options: Dict) -> Optional[str]:
        """Fallback: open the page in Camoufox and read `_aldata` directly."""
        try:
            from camoufox.async_api import AsyncCamoufox
            import asyncio

            async def extract():
                async with AsyncCamoufox(headless=True) as browser:
                    page = await browser.new_page()
                    await page.goto(url, wait_until='domcontentloaded', timeout=30000)
                    # Read the page-global `_aldata` if the page defined it.
                    aldata = await page.evaluate("typeof _aldata !== 'undefined' ? _aldata : null")
                    await page.close()
                    return aldata

            aldata = asyncio.run(extract())
            if aldata:
                return self._decode_aldata(aldata)
        except Exception as e:
            logger.error(f'Camoufox extraction error: {e}')
        return None

77
downloader/base.py Normal file
View File

@@ -0,0 +1,77 @@
"""
다운로더 베이스 클래스
"""
from abc import ABC, abstractmethod
from typing import Dict, Any, Optional, Callable
class BaseDownloader(ABC):
    """Abstract interface shared by every downloader backend.

    Concrete subclasses implement `download` and `get_info`.  The base
    class provides cooperative cancel/pause bookkeeping: callers flip the
    flags via cancel()/pause()/resume(), and subclass work loops poll
    `is_cancelled` / `is_paused`.
    """

    def __init__(self):
        # Cooperative flags polled by subclass download loops.
        self._cancelled = False
        self._paused = False

    @abstractmethod
    def download(
        self,
        url: str,
        save_path: str,
        filename: Optional[str] = None,
        progress_callback: Optional[Callable] = None,
        **options
    ) -> Dict[str, Any]:
        """Run the download.

        Args:
            url: download URL.
            save_path: directory to save into.
            filename: output file name; auto-detected when None.
            progress_callback: called as (progress, speed, eta).
            **options: backend-specific extras.

        Returns:
            dict with 'success' (bool), 'filepath' (str, completed file
            path on success) and 'error' (str, message on failure).
        """

    @abstractmethod
    def get_info(self, url: str) -> Dict[str, Any]:
        """Return URL metadata.

        Returns:
            dict with keys such as 'title', 'thumbnail', 'duration',
            'formats', ... (backend-dependent).
        """

    def cancel(self):
        """Request cancellation; workers observe it via `is_cancelled`."""
        self._cancelled = True

    def pause(self):
        """Request a pause; workers observe it via `is_paused`."""
        self._paused = True

    def resume(self):
        """Clear a previous pause request."""
        self._paused = False

    @property
    def is_cancelled(self) -> bool:
        # True once cancel() has been called.
        return self._cancelled

    @property
    def is_paused(self) -> bool:
        # True while a pause request is outstanding.
        return self._paused

153
downloader/ffmpeg_hls.py Normal file
View File

@@ -0,0 +1,153 @@
"""
FFmpeg HLS 다운로더
- ani24, 링크애니 등 HLS 스트림용
- 기존 SupportFfmpeg 로직 재사용
"""
import os
import subprocess
import re
import traceback
from typing import Dict, Any, Optional, Callable
from .base import BaseDownloader
try:
from ..setup import P
logger = P.logger
except:
import logging
logger = logging.getLogger(__name__)
class FfmpegHlsDownloader(BaseDownloader):
    """FFmpeg HLS downloader for ani24 / linkkf style stream URLs.

    Runs ffmpeg with `-c copy` (stream copy, no transcoding) and parses
    its merged output for `time=` lines to report progress against a
    duration probed up front with ffprobe.
    """

    def __init__(self):
        super().__init__()
        # Handle to the running ffmpeg subprocess; used by cancel().
        self._process: Optional[subprocess.Popen] = None

    def download(
        self,
        url: str,
        save_path: str,
        filename: Optional[str] = None,
        progress_callback: Optional[Callable] = None,
        **options
    ) -> Dict[str, Any]:
        """Download an HLS stream with ffmpeg.

        Args:
            url: HLS (m3u8) stream URL.
            save_path: directory to save into (created if missing).
            filename: output name; timestamped `.mp4` default when None.
            progress_callback: called as (progress_percent, speed, eta).
            **options: ffmpeg_path, ffprobe_path, headers (dict of
                request headers forwarded to ffmpeg/ffprobe).

        Returns:
            {'success': bool, 'filepath': str, 'error': str (on failure)}.
        """
        try:
            os.makedirs(save_path, exist_ok=True)
            # Pick an output name (timestamped fallback).
            if not filename:
                import time
                filename = f'download_{int(time.time())}.mp4'
            filepath = os.path.join(save_path, filename)
            # Build the ffmpeg command.
            ffmpeg_path = options.get('ffmpeg_path', 'ffmpeg')
            cmd = [ffmpeg_path, '-y']
            # Optional request headers: ffmpeg expects CRLF-joined "K: V"
            # pairs, and -headers must precede the -i it applies to.
            headers = options.get('headers', {})
            if headers:
                header_str = '\r\n'.join(f'{k}: {v}' for k, v in headers.items())
                cmd.extend(['-headers', header_str])
            # Input URL.
            cmd.extend(['-i', url])
            # Stream copy: no transcoding, so the job is I/O bound.
            cmd.extend(['-c', 'copy'])
            # Output file.
            cmd.append(filepath)
            logger.debug(f'ffmpeg 명령어: {" ".join(cmd[:10])}...')
            # Probe the duration first so progress can be computed.
            duration = self._get_duration(url, options.get('ffprobe_path', 'ffprobe'), headers)
            # Launch ffmpeg; its log goes to stderr, merged into stdout.
            self._process = subprocess.Popen(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                text=True,
                bufsize=1
            )
            # Parse output lines for progress.
            for line in self._process.stdout:
                if self._cancelled:
                    self._process.terminate()
                    return {'success': False, 'error': 'Cancelled'}
                line = line.strip()
                # Progress from "time=HH:MM:SS" (only when duration known).
                if duration > 0 and progress_callback:
                    time_match = re.search(r'time=(\d+):(\d+):(\d+)', line)
                    if time_match:
                        h, m, s = map(int, time_match.groups())
                        current_time = h * 3600 + m * 60 + s
                        # Cap at 99%; 100% is reported only on clean exit.
                        progress = min(int(current_time / duration * 100), 99)
                        # Speed from "speed=1.23x".
                        speed = ''
                        speed_match = re.search(r'speed=\s*([\d.]+)x', line)
                        if speed_match:
                            speed = f'{speed_match.group(1)}x'
                        progress_callback(progress, speed, '')
            self._process.wait()
            if self._process.returncode == 0 and os.path.exists(filepath):
                if progress_callback:
                    progress_callback(100, '', '')
                return {'success': True, 'filepath': filepath}
            return {'success': False, 'error': f'FFmpeg exit code: {self._process.returncode}'}
        except Exception as e:
            logger.error(f'FfmpegHls download error: {e}')
            logger.error(traceback.format_exc())
            return {'success': False, 'error': str(e)}

    def get_info(self, url: str) -> Dict[str, Any]:
        """Return {'duration', 'type'} for the stream; {} on probe failure."""
        try:
            duration = self._get_duration(url, 'ffprobe', {})
            return {
                'duration': duration,
                'type': 'hls',
            }
        except Exception:
            # Best-effort metadata; callers treat {} as "unknown".
            return {}

    def cancel(self):
        """Cancel the download and terminate a running ffmpeg process."""
        super().cancel()
        if self._process:
            self._process.terminate()

    def _get_duration(self, url: str, ffprobe_path: str, headers: Dict) -> float:
        """Probe the stream duration in seconds with ffprobe; 0.0 on failure."""
        try:
            cmd = [ffprobe_path, '-v', 'error', '-show_entries', 'format=duration',
                   '-of', 'default=noprint_wrappers=1:nokey=1']
            if headers:
                header_str = '\r\n'.join(f'{k}: {v}' for k, v in headers.items())
                cmd.extend(['-headers', header_str])
            cmd.append(url)
            result = subprocess.run(cmd, capture_output=True, text=True, timeout=30)
            if result.returncode == 0:
                return float(result.stdout.strip())
        except (OSError, subprocess.SubprocessError, ValueError):
            # ffprobe missing, timed out, or emitted unparseable output —
            # treat the duration as unknown rather than failing the download.
            pass
        return 0.0

91
downloader/http_direct.py Normal file
View File

@@ -0,0 +1,91 @@
"""
HTTP 직접 다운로더
- 단순 HTTP 파일 다운로드
- aiohttp 비동기 사용 (고성능)
"""
import os
import traceback
from typing import Dict, Any, Optional, Callable
from .base import BaseDownloader
try:
from ..setup import P
logger = P.logger
except:
import logging
logger = logging.getLogger(__name__)
class HttpDirectDownloader(BaseDownloader):
    """Plain HTTP file downloader (streaming, 1 MiB chunks)."""

    def download(
        self,
        url: str,
        save_path: str,
        filename: Optional[str] = None,
        progress_callback: Optional[Callable] = None,
        **options
    ) -> Dict[str, Any]:
        """Download *url* directly over HTTP.

        Args:
            url: file URL.
            save_path: directory to save into (created if missing).
            filename: output name; derived from the URL path when None.
            progress_callback: called as (progress_percent, speed, eta).
            **options: headers (dict of request headers).

        Returns:
            {'success': bool, 'filepath': str, 'error': str (on failure)}.
        """
        try:
            import requests
            import time
            os.makedirs(save_path, exist_ok=True)
            # Derive a file name from the URL path, ignoring the query string.
            if not filename:
                filename = url.split('/')[-1].split('?')[0] or f'download_{int(time.time())}'
            filepath = os.path.join(save_path, filename)
            # Default User-Agent to avoid trivial bot blocking.
            headers = options.get('headers', {})
            if 'User-Agent' not in headers:
                headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
            chunk_size = 1024 * 1024  # 1 MiB chunks
            downloaded = 0
            start_time = time.time()
            # The context manager closes the connection on every exit path
            # (success, cancel, exception) — the previous version leaked it.
            with requests.get(url, headers=headers, stream=True, timeout=60) as response:
                response.raise_for_status()
                total_size = int(response.headers.get('content-length', 0))
                with open(filepath, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=chunk_size):
                        if self._cancelled:
                            # Partial file is left on disk (same as before).
                            return {'success': False, 'error': 'Cancelled'}
                        if chunk:
                            f.write(chunk)
                            downloaded += len(chunk)
                            if total_size > 0 and progress_callback:
                                progress = int(downloaded / total_size * 100)
                                speed = self._format_speed(downloaded, start_time)
                                progress_callback(progress, speed, '')
            if progress_callback:
                progress_callback(100, '', '')
            return {'success': True, 'filepath': filepath}
        except Exception as e:
            logger.error(f'HTTP download error: {e}')
            logger.error(traceback.format_exc())
            return {'success': False, 'error': str(e)}

    @staticmethod
    def _format_speed(downloaded: int, start_time: float) -> str:
        """Average download speed so far as a short string ('' if unknown)."""
        import time
        elapsed = time.time() - start_time
        if elapsed <= 0 or downloaded <= 0:
            return ''
        rate = downloaded / elapsed  # bytes per second
        if rate >= 1024 * 1024:
            return f'{rate / (1024 * 1024):.2f}MB/s'
        if rate >= 1024:
            return f'{rate / 1024:.1f}KB/s'
        return f'{rate:.0f}B/s'

    def get_info(self, url: str) -> Dict[str, Any]:
        """HEAD the URL and return content-length / content-type; {} on error."""
        try:
            import requests
            response = requests.head(url, timeout=10)
            return {
                'content_length': response.headers.get('content-length'),
                'content_type': response.headers.get('content-type'),
            }
        except Exception:
            # Best-effort metadata; callers treat {} as "unknown".
            return {}

222
downloader/ytdlp_aria2.py Normal file
View File

@@ -0,0 +1,222 @@
"""
yt-dlp + aria2c 다운로더 (최고속)
- aria2c 16개 연결로 3-5배 속도 향상
- YouTube 및 yt-dlp 지원 사이트 전용
"""
import os
import re
import subprocess
import traceback
from typing import Dict, Any, Optional, Callable
from .base import BaseDownloader
# 상위 모듈에서 로거 가져오기
try:
from ..setup import P
logger = P.logger
except:
import logging
logger = logging.getLogger(__name__)
class YtdlpAria2Downloader(BaseDownloader):
    """yt-dlp downloader for YouTube and other yt-dlp-supported sites.

    Runs the `yt-dlp` CLI as a subprocess and parses its stdout for
    progress / speed / ETA.  aria2c multi-connection support is wired up
    but currently disabled in download() (`if False and ...`).
    """

    def __init__(self):
        super().__init__()
        # Handle to the running yt-dlp subprocess; used by cancel().
        self._process: Optional[subprocess.Popen] = None

    def download(
        self,
        url: str,
        save_path: str,
        filename: Optional[str] = None,
        progress_callback: Optional[Callable] = None,
        **options
    ) -> Dict[str, Any]:
        """Download with yt-dlp (+ aria2c when re-enabled).

        Args:
            url: video/page URL.
            save_path: directory to save into (created if missing).
            filename: output name; yt-dlp's %(title)s template when None.
            progress_callback: called as (progress_percent, speed, eta).
            **options: aria2c_path, connections, format,
                merge_output_format, cookiefile, proxy.

        Returns:
            {'success': bool, 'filepath': str, 'error': str (on failure)}.
        """
        try:
            os.makedirs(save_path, exist_ok=True)
            # Output template.
            if filename:
                output_template = os.path.join(save_path, filename)
            else:
                output_template = os.path.join(save_path, '%(title)s.%(ext)s')
            # Build the yt-dlp command line.
            cmd = [
                'yt-dlp',
                '--newline',  # one progress line per update, for parsing
                '-o', output_template,
            ]
            # aria2c settings (only used when the aria2c branch is enabled).
            aria2c_path = options.get('aria2c_path', 'aria2c')
            # TODO: make the thread/connection count configurable (default changed to 4).
            connections = options.get('connections', 4)
            # Download-rate limit from plugin settings ('0' = unlimited).
            # NOTE(review): `P` is only bound when the `..setup` import at
            # module top succeeded; in the logging-fallback case this line
            # raises NameError, which is caught by the outer handler below.
            max_rate = P.ModelSetting.get('max_download_rate')
            if max_rate == '0':
                max_rate_arg = ''
                log_rate_msg = '무제한'
            else:
                max_rate_arg = f'--max-download-limit={max_rate}'
                log_rate_msg = max_rate
                # NOTE(review): original indentation was lost; this limit is
                # assumed to apply only when a non-zero rate is configured.
                cmd.extend(['--limit-rate', max_rate])  # Native downloader limit
            # aria2c external downloader — disabled for now until the
            # progress-line parsing issue is resolved (Native is used).
            if False and self._check_aria2c(aria2c_path):
                cmd.extend([
                    '--downloader', 'aria2c',
                    '--downloader-args', f'aria2c:-x {connections} -s {connections} -k 1M {max_rate_arg}',
                ])
                logger.debug(f'aria2c 사용: {connections}개 연결 (속도제한 {log_rate_msg})')
            # Format selection.
            format_spec = options.get('format', 'bestvideo+bestaudio/best')
            cmd.extend(['-f', format_spec])
            # Merge container for separate video+audio streams.
            merge_format = options.get('merge_output_format', 'mp4')
            cmd.extend(['--merge-output-format', merge_format])
            # Cookie file.
            if options.get('cookiefile'):
                cmd.extend(['--cookies', options['cookiefile']])
            # Proxy.
            if options.get('proxy'):
                cmd.extend(['--proxy', options['proxy']])
            # Target URL goes last.
            cmd.append(url)
            logger.debug(f'yt-dlp 명령어: {" ".join(cmd)}')
            # Launch yt-dlp; stderr is merged into stdout for one stream.
            self._process = subprocess.Popen(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                text=True,
                bufsize=1
            )
            final_filepath = ''
            # Parse output lines for progress and the final file path.
            for line in self._process.stdout:
                if self._cancelled:
                    self._process.terminate()
                    return {'success': False, 'error': 'Cancelled'}
                line = line.strip()
                # logger.debug(line)
                # yt-dlp native progress: "[download]  42.0% ...".
                progress_match = re.search(r'\[download\]\s+(\d+\.?\d*)%', line)
                # aria2c progress: "... ( 7%) ... DL:... ETA:...".
                if not progress_match:
                    # logger.error(f'DEBUG LINE: {line}') # Log raw line to debug
                    aria2_match = re.search(r'\(\s*([\d.]+)%\)', line)  # Allow spaces ( 7%)
                    if aria2_match and (('DL:' in line) or ('CN:' in line)):  # DL or CN must be present
                        try:
                            progress = int(float(aria2_match.group(1)))
                            # logger.error(f'MATCHED PROGRESS: {progress}%')
                            speed_match = re.search(r'DL:(\S+)', line)
                            speed = speed_match.group(1) if speed_match else ''
                            # Strip color codes from speed if needed? output is usually clean text if no TTY
                            eta_match = re.search(r'ETA:(\S+)', line)
                            eta = eta_match.group(1) if eta_match else ''
                            if progress_callback:
                                progress_callback(progress, speed, eta)
                            continue
                        except Exception as e:
                            logger.error(f'Parsing Error: {e}')
                if progress_match and progress_callback:
                    progress = int(float(progress_match.group(1)))
                    # Speed from "at 1.23MiB/s".
                    speed = ''
                    speed_match = re.search(r'at\s+([\d.]+\s*[KMG]?i?B/s)', line)
                    if speed_match:
                        speed = speed_match.group(1)
                    # ETA from "ETA 00:12".
                    eta = ''
                    eta_match = re.search(r'ETA\s+([\d:]+)', line)
                    if eta_match:
                        eta = eta_match.group(1)
                    progress_callback(progress, speed, eta)
                # Capture the final output path from Merger/Destination lines.
                if '[Merger]' in line or 'Destination:' in line:
                    path_match = re.search(r'(?:Destination:|into\s+["\'])(.+?)(?:["\']|$)', line)
                    if path_match:
                        final_filepath = path_match.group(1).strip('"\'')
            self._process.wait()
            if self._process.returncode == 0:
                if progress_callback:
                    progress_callback(100, '', '')
                return {'success': True, 'filepath': final_filepath}
            else:
                return {'success': False, 'error': f'Exit code: {self._process.returncode}'}
        except Exception as e:
            logger.error(f'YtdlpAria2 download error: {e}')
            logger.error(traceback.format_exc())
            return {'success': False, 'error': str(e)}

    def get_info(self, url: str) -> Dict[str, Any]:
        """Extract URL metadata with the yt_dlp Python API (no download).

        Returns:
            dict with title/thumbnail/duration/formats/uploader/view_count;
            {} when extraction fails.
        """
        try:
            import yt_dlp
            ydl_opts = {
                'quiet': True,
                'no_warnings': True,
                'extract_flat': False,
            }
            with yt_dlp.YoutubeDL(ydl_opts) as ydl:
                info = ydl.extract_info(url, download=False)
                return {
                    'title': info.get('title', ''),
                    'thumbnail': info.get('thumbnail', ''),
                    'duration': info.get('duration', 0),
                    'formats': info.get('formats', []),
                    'uploader': info.get('uploader', ''),
                    'view_count': info.get('view_count', 0),
                }
        except Exception as e:
            logger.error(f'get_info error: {e}')
            return {}

    def cancel(self):
        """Cancel the download and terminate a running yt-dlp process."""
        super().cancel()
        if self._process:
            self._process.terminate()

    def _check_aria2c(self, aria2c_path: str) -> bool:
        """Return True when the aria2c binary runs and reports a version."""
        try:
            result = subprocess.run(
                [aria2c_path, '--version'],
                capture_output=True,
                timeout=5
            )
            return result.returncode == 0
        except:
            # Binary missing or timed out — treat aria2c as unavailable.
            return False