Add: Персистентный выбор модели, динамические модели OpenCode
This commit is contained in:
parent
1f1e65bcdf
commit
1c904320dd
|
|
@ -0,0 +1,28 @@
|
|||
import json
|
||||
import os
|
||||
|
||||
CONFIG_FILE = "./valera_config.json"

# Default persisted selection used when no config file exists or it is unreadable.
DEFAULT_CONFIG = {"tool": "qwen", "model": None}


def load_config():
    """Load the persisted tool/model selection from CONFIG_FILE.

    Returns:
        dict: the stored configuration, or a fresh copy of DEFAULT_CONFIG
        when the file is missing, unreadable, or contains invalid JSON.
    """
    if os.path.exists(CONFIG_FILE):
        try:
            with open(CONFIG_FILE, "r", encoding="utf-8") as f:
                return json.load(f)
        except (OSError, json.JSONDecodeError):
            # A corrupted or unreadable config must not crash the bot at
            # startup; fall back to the defaults instead.
            pass
    # Return a copy so callers mutating the result don't alter the default.
    return dict(DEFAULT_CONFIG)
|
||||
|
||||
def save_config(config):
    """Persist *config* as a JSON document at CONFIG_FILE."""
    serialized = json.dumps(config)
    with open(CONFIG_FILE, "w") as f:
        f.write(serialized)
|
||||
|
||||
def get_selected_tool():
    """Return the persisted tool name, defaulting to "qwen"."""
    return load_config().get("tool", "qwen")
|
||||
|
||||
def get_selected_model():
    """Return the persisted model name, or None when no model was chosen."""
    return load_config().get("model")
|
||||
|
||||
def set_tool(tool, model=None):
    """Persist *tool* (and the optional *model*) as the active selection."""
    config = load_config()
    config.update({"tool": tool, "model": model})
    save_config(config)
|
||||
428
src/bot/main.py
428
src/bot/main.py
|
|
@ -1,8 +1,9 @@
|
|||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
import tempfile
|
||||
from telegram import Update
|
||||
import re
|
||||
import subprocess
|
||||
from telegram import Update, BotCommand
|
||||
from telegram.ext import (
|
||||
Application, CommandHandler, MessageHandler, filters,
|
||||
ContextTypes, CallbackQueryHandler
|
||||
|
|
@ -11,8 +12,7 @@ from telegram import InlineKeyboardButton, InlineKeyboardMarkup
|
|||
from config.config import get_settings
|
||||
from src.tools.orchestrator import Orchestrator
|
||||
from src.bot.states import chat_state, ChatMode
|
||||
from src.scheduler.scheduler import SchedulerManager
|
||||
from src.speech.speech import SpeechRecognizer
|
||||
from src.bot.config_manager import get_selected_tool, get_selected_model, set_tool
|
||||
|
||||
logging.basicConfig(
|
||||
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
|
||||
|
|
@ -22,41 +22,71 @@ logger = logging.getLogger(__name__)
|
|||
|
||||
settings = get_settings()
|
||||
orchestrator = Orchestrator()
|
||||
scheduler_manager = None
|
||||
speech_recognizer = SpeechRecognizer()
|
||||
|
||||
# Regexes that mark a prompt as potentially destructive (file writes,
# deletions, package installs, shell execution), in English and Russian.
DANGEROUS_PATTERNS = [
    r'\bwrite\b', r'\bedit\b', r'\bcopy\b', r'\bmove\b', r'\bdelete\b',
    r'\bсоздать\b', r'\bзаписать\b', r'\bудалить\b', r'\bизменить\b',
    r'\.write\(', r'\.save\(', r'\brm\b', r'\bmkdir\b', r'\bcp\b',
    r'\bсделать\b', r'\bвыполнить\b', r'\brun\b', r'\bзапустить\b',
    r'sudo', r'pip install', r'apt install', r'yum install',
]


def is_dangerous(prompt: str) -> bool:
    """Return True if *prompt* matches any pattern in DANGEROUS_PATTERNS.

    Matching is case-insensitive via re.IGNORECASE (which handles Unicode
    case folding, so Cyrillic patterns match too); pre-lowercasing the
    prompt was redundant and has been removed.
    """
    return any(
        re.search(pattern, prompt, re.IGNORECASE)
        for pattern in DANGEROUS_PATTERNS
    )
|
||||
|
||||
|
||||
async def get_opencode_models():
    """Return the model names reported by the `opencode models` CLI.

    Keeps only non-indented stdout lines, which are the model identifiers
    (indented lines are assumed to be details — TODO confirm against the
    actual CLI output format). Falls back to a small hard-coded list of
    free models when the command fails, times out, or yields nothing.

    NOTE(review): subprocess.run blocks the event loop for up to 30s;
    consider asyncio.create_subprocess_exec here.
    """
    try:
        result = subprocess.run(
            ["opencode", "models"],
            capture_output=True,
            text=True,
            timeout=30
        )
        if result.returncode == 0:
            # Bug fix: test indentation on the raw line. The original code
            # stripped first, which made the startswith(" ") check dead and
            # accepted every non-empty line.
            models = [
                line.strip()
                for line in result.stdout.strip().split("\n")
                if line.strip() and not line.startswith(" ")
            ]
            if models:
                return models
    except Exception as e:
        logger.error(f"Error fetching opencode models: {e}")
    return ["minimax-m2.5-free", "gpt-5-nano", "mimo-v2-flash-free"]
|
||||
|
||||
|
||||
async def start(update: Update, context: ContextTypes.DEFAULT_TYPE):
|
||||
current_tool = get_selected_tool()
|
||||
model = get_selected_model()
|
||||
status = f"Текущая модель: {current_tool}"
|
||||
if model:
|
||||
status += f" ({model})"
|
||||
|
||||
await update.message.reply_text(
|
||||
f"Привет! Я {settings.bot_name}, ваш ИИ-ассистент.\n"
|
||||
"Я помогу вам с программированием и не только."
|
||||
f"Привет! Я {settings.bot_name}, ваш ИИ-ассистент.\n\n"
|
||||
f"{status}\n\n"
|
||||
"Напишите ваш вопрос или используйте /qwen / /open"
|
||||
)
|
||||
|
||||
|
||||
async def help_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
|
||||
mode = chat_state.get_mode(update.effective_chat.id)
|
||||
current_tool = orchestrator.get_default_tool()
|
||||
stt_status = "включено" if speech_recognizer.is_enabled() else "отключено"
|
||||
current_tool = get_selected_tool()
|
||||
model = get_selected_model()
|
||||
help_text = (
|
||||
f"Я {settings.bot_name}, ваш ИИ-ассистент.\n\n"
|
||||
"Доступные команды:\n"
|
||||
"/start - Начать работу\n"
|
||||
"/help - Показать эту справку\n"
|
||||
"/mode confirm - Режим с подтверждением\n"
|
||||
"/mode auto - Автономный режим\n"
|
||||
"/use qwen|open|gigachat|yandex - Выбрать инструмент\n"
|
||||
"/cancel - Отменить текущее действие\n"
|
||||
"/qwen <текст> - Задать вопрос qwen-code\n"
|
||||
"/open <текст> - Задать вопрос opencode\n"
|
||||
"/gigachat <текст> - Задать вопрос Gigachat\n"
|
||||
"/yandex <текст> - Задать вопрос YandexGPT\n"
|
||||
"/forget - Очистить историю чата\n"
|
||||
"/remind <текст> <время> - Создать напоминание\n"
|
||||
"/stt on|off - Включить/выключить распознавание речи\n\n"
|
||||
f"Текущий режим: {'с подтверждением' if mode == ChatMode.CONFIRM else 'автономный'}\n"
|
||||
f"Инструмент по умолчанию: {current_tool}\n"
|
||||
f"Распознавание речи: {stt_status}"
|
||||
"📋 Команды:\n"
|
||||
"/qwen - Использовать Qwen (все запросы через Qwen)\n"
|
||||
"/open - Выбрать модель OpenCode\n"
|
||||
"/mode confirm/auto - Режим подтверждения\n"
|
||||
"/forget - Очистить историю\n\n"
|
||||
f"🔧 Текущая модель: {current_tool}"
|
||||
)
|
||||
if model:
|
||||
help_text += f"\n📌 Выбранная модель: {model}"
|
||||
await update.message.reply_text(help_text)
|
||||
|
||||
|
||||
|
|
@ -70,66 +100,14 @@ async def mode_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
|
|||
mode_arg = context.args[0].lower()
|
||||
if mode_arg == "confirm":
|
||||
chat_state.set_mode(update.effective_chat.id, ChatMode.CONFIRM)
|
||||
await update.message.reply_text("Режим изменён: с подтверждением")
|
||||
await update.message.reply_text("Режим: подтверждение для опасных действий")
|
||||
elif mode_arg == "auto":
|
||||
chat_state.set_mode(update.effective_chat.id, ChatMode.AUTO)
|
||||
await update.message.reply_text("Режим изменён: автономный")
|
||||
await update.message.reply_text("Режим: автономный")
|
||||
else:
|
||||
await update.message.reply_text("Использование: /mode confirm | auto")
|
||||
|
||||
|
||||
async def use_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
|
||||
if not context.args:
|
||||
current = orchestrator.get_default_tool()
|
||||
available = ", ".join(orchestrator.get_available_tools())
|
||||
await update.message.reply_text(
|
||||
f"Текущий инструмент: {current}\n"
|
||||
f"Доступные: {available}"
|
||||
)
|
||||
return
|
||||
|
||||
tool = context.args[0].lower()
|
||||
if tool in ["qwen", "open", "gigachat", "yandex"]:
|
||||
tool_map = {"qwen": "qwen", "open": "opencode", "gigachat": "gigachat", "yandex": "yandex"}
|
||||
tool = tool_map.get(tool, tool)
|
||||
orchestrator.set_default_tool(tool)
|
||||
await update.message.reply_text(f"Инструмент изменён на {tool}")
|
||||
else:
|
||||
await update.message.reply_text("Использование: /use qwen | open | gigachat | yandex")
|
||||
|
||||
|
||||
async def cancel_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
|
||||
chat_id = update.effective_chat.id
|
||||
|
||||
if chat_state.is_waiting_confirmation(chat_id):
|
||||
chat_state.set_waiting_confirmation(chat_id, False)
|
||||
await update.message.reply_text("Ожидание подтверждения отменено.")
|
||||
|
||||
task_id = chat_state.get_current_task(chat_id)
|
||||
if task_id:
|
||||
chat_state.set_current_task(chat_id, None)
|
||||
await update.message.reply_text(f"Задача {task_id} отменена.")
|
||||
else:
|
||||
await update.message.reply_text("Нет активных задач для отмены.")
|
||||
|
||||
|
||||
async def stt_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
|
||||
if not context.args:
|
||||
status = "включено" if speech_recognizer.is_enabled() else "отключено"
|
||||
await update.message.reply_text(f"Распознавание речи: {status}")
|
||||
return
|
||||
|
||||
arg = context.args[0].lower()
|
||||
if arg == "on":
|
||||
speech_recognizer.toggle(True)
|
||||
await update.message.reply_text("Распознавание речи включено.")
|
||||
elif arg == "off":
|
||||
speech_recognizer.toggle(False)
|
||||
await update.message.reply_text("Распознавание речи отключено.")
|
||||
else:
|
||||
await update.message.reply_text("Использование: /stt on | off")
|
||||
|
||||
|
||||
async def confirm_callback(update: Update, context: ContextTypes.DEFAULT_TYPE):
|
||||
query = update.callback_query
|
||||
await query.answer()
|
||||
|
|
@ -137,25 +115,33 @@ async def confirm_callback(update: Update, context: ContextTypes.DEFAULT_TYPE):
|
|||
chat_id = query.message.chat.id
|
||||
data = query.data
|
||||
|
||||
if data.startswith("model_"):
|
||||
model = data.replace("model_", "")
|
||||
set_tool("opencode", model)
|
||||
await query.edit_message_text(f"✅ Выбрана модель OpenCode: {model}\nВсе запросы будут идти через эту модель.")
|
||||
return
|
||||
|
||||
if data == "confirm_yes":
|
||||
await query.edit_message_text("Подтверждено. Выполняю...")
|
||||
await query.edit_message_text("✅ Подтверждено. Выполняю...")
|
||||
pending = chat_state.get_pending_action(chat_id)
|
||||
if pending:
|
||||
action_type = pending.get("type")
|
||||
if action_type == "tool":
|
||||
prompt = pending.get("prompt")
|
||||
tool = pending.get("tool")
|
||||
chat_state.set_waiting_confirmation(chat_id, False)
|
||||
await execute_tool_query(query, tool, prompt)
|
||||
prompt = pending.get("prompt")
|
||||
tool = pending.get("tool")
|
||||
chat_state.set_waiting_confirmation(chat_id, False)
|
||||
await execute_tool_query(update, tool, prompt)
|
||||
elif data == "confirm_no":
|
||||
chat_state.set_waiting_confirmation(chat_id, False)
|
||||
await query.edit_message_text("Отменено.")
|
||||
await query.edit_message_text("❌ Отменено.")
|
||||
|
||||
|
||||
async def execute_tool_query(update, tool: str, prompt: str):
|
||||
chat_id = update.message.chat.id if hasattr(update, 'message') else update.effective_chat.id
|
||||
|
||||
result, success = await orchestrator.ask(prompt, chat_id, tool)
|
||||
await update.message.reply_text("🤔 Думаю...")
|
||||
|
||||
model = get_selected_model() if tool == "opencode" else None
|
||||
|
||||
result, success = await orchestrator.ask(prompt, chat_id, tool, model)
|
||||
|
||||
text = result[:4096] if len(result) > 4096 else result
|
||||
|
||||
|
|
@ -166,270 +152,112 @@ async def execute_tool_query(update, tool: str, prompt: str):
|
|||
|
||||
|
||||
async def qwen_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
|
||||
prompt = " ".join(context.args)
|
||||
if not prompt:
|
||||
await update.message.reply_text("Использование: /qwen <текст>")
|
||||
set_tool("qwen", None)
|
||||
|
||||
if not context.args:
|
||||
await update.message.reply_text(
|
||||
"✅ Теперь все запросы будут отправляться в Qwen.\n"
|
||||
"Использование: /qwen <ваш вопрос>\n"
|
||||
"Пример: /qwen Привет, как дела?"
|
||||
)
|
||||
return
|
||||
|
||||
chat_id = update.effective_chat.id
|
||||
mode = chat_state.get_mode(chat_id)
|
||||
prompt = " ".join(context.args)
|
||||
await execute_tool_query(update, "qwen", prompt)
|
||||
|
||||
|
||||
async def open_menu(update: Update, context: ContextTypes.DEFAULT_TYPE):
|
||||
await update.message.reply_text("📥 Загружаю список моделей OpenCode...")
|
||||
|
||||
if mode == ChatMode.CONFIRM:
|
||||
keyboard = [
|
||||
[
|
||||
InlineKeyboardButton("Да", callback_data="confirm_yes"),
|
||||
InlineKeyboardButton("Нет", callback_data="confirm_no")
|
||||
]
|
||||
]
|
||||
reply_markup = InlineKeyboardMarkup(keyboard)
|
||||
|
||||
chat_state.set_waiting_confirmation(chat_id, True, {
|
||||
"type": "tool",
|
||||
"tool": "qwen",
|
||||
"prompt": prompt
|
||||
})
|
||||
|
||||
await update.message.reply_text(
|
||||
f"Выполнить запрос к qwen-code?\n\n{prompt[:200]}...",
|
||||
reply_markup=reply_markup
|
||||
)
|
||||
else:
|
||||
await update.message.reply_text("Думаю...")
|
||||
await execute_tool_query(update, "qwen", prompt)
|
||||
models = await get_opencode_models()
|
||||
|
||||
keyboard = []
|
||||
for model in models:
|
||||
keyboard.append([InlineKeyboardButton(model, callback_data=f"model_{model}")])
|
||||
|
||||
reply_markup = InlineKeyboardMarkup(keyboard)
|
||||
|
||||
await update.message.reply_text(
|
||||
"Выберите модель для OpenCode (бесплатные):",
|
||||
reply_markup=reply_markup
|
||||
)
|
||||
|
||||
|
||||
async def open_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
|
||||
prompt = " ".join(context.args)
|
||||
if not prompt:
|
||||
await update.message.reply_text("Использование: /open <текст>")
|
||||
if not context.args:
|
||||
await open_menu(update, context)
|
||||
return
|
||||
|
||||
chat_id = update.effective_chat.id
|
||||
mode = chat_state.get_mode(chat_id)
|
||||
|
||||
if mode == ChatMode.CONFIRM:
|
||||
keyboard = [
|
||||
[
|
||||
InlineKeyboardButton("Да", callback_data="confirm_yes"),
|
||||
InlineKeyboardButton("Нет", callback_data="confirm_no")
|
||||
]
|
||||
]
|
||||
reply_markup = InlineKeyboardMarkup(keyboard)
|
||||
|
||||
chat_state.set_waiting_confirmation(chat_id, True, {
|
||||
"type": "tool",
|
||||
"tool": "opencode",
|
||||
"prompt": prompt
|
||||
})
|
||||
|
||||
await update.message.reply_text(
|
||||
f"Выполнить запрос к opencode?\n\n{prompt[:200]}...",
|
||||
reply_markup=reply_markup
|
||||
)
|
||||
else:
|
||||
await update.message.reply_text("Думаю...")
|
||||
await execute_tool_query(update, "opencode", prompt)
|
||||
|
||||
|
||||
async def gigachat_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
|
||||
prompt = " ".join(context.args)
|
||||
if not prompt:
|
||||
await update.message.reply_text("Использование: /gigachat <текст>")
|
||||
return
|
||||
tool = get_selected_tool()
|
||||
model = get_selected_model()
|
||||
|
||||
chat_id = update.effective_chat.id
|
||||
mode = chat_state.get_mode(chat_id)
|
||||
if tool != "opencode":
|
||||
set_tool("opencode", "minimax-m2.5-free")
|
||||
model = "minimax-m2.5-free"
|
||||
|
||||
if mode == ChatMode.CONFIRM:
|
||||
keyboard = [
|
||||
[
|
||||
InlineKeyboardButton("Да", callback_data="confirm_yes"),
|
||||
InlineKeyboardButton("Нет", callback_data="confirm_no")
|
||||
]
|
||||
]
|
||||
reply_markup = InlineKeyboardMarkup(keyboard)
|
||||
|
||||
chat_state.set_waiting_confirmation(chat_id, True, {
|
||||
"type": "tool",
|
||||
"tool": "gigachat",
|
||||
"prompt": prompt
|
||||
})
|
||||
|
||||
await update.message.reply_text(
|
||||
f"Выполнить запрос к Gigachat?\n\n{prompt[:200]}...",
|
||||
reply_markup=reply_markup
|
||||
)
|
||||
else:
|
||||
await update.message.reply_text("Думаю...")
|
||||
await execute_tool_query(update, "gigachat", prompt)
|
||||
|
||||
|
||||
async def yandex_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
|
||||
prompt = " ".join(context.args)
|
||||
if not prompt:
|
||||
await update.message.reply_text("Использование: /yandex <текст>")
|
||||
return
|
||||
if not model:
|
||||
model = "minimax-m2.5-free"
|
||||
|
||||
chat_id = update.effective_chat.id
|
||||
mode = chat_state.get_mode(chat_id)
|
||||
|
||||
if mode == ChatMode.CONFIRM:
|
||||
keyboard = [
|
||||
[
|
||||
InlineKeyboardButton("Да", callback_data="confirm_yes"),
|
||||
InlineKeyboardButton("Нет", callback_data="confirm_no")
|
||||
]
|
||||
]
|
||||
reply_markup = InlineKeyboardMarkup(keyboard)
|
||||
|
||||
chat_state.set_waiting_confirmation(chat_id, True, {
|
||||
"type": "tool",
|
||||
"tool": "yandex",
|
||||
"prompt": prompt
|
||||
})
|
||||
|
||||
await update.message.reply_text(
|
||||
f"Выполнить запрос к YandexGPT?\n\n{prompt[:200]}...",
|
||||
reply_markup=reply_markup
|
||||
)
|
||||
else:
|
||||
await update.message.reply_text("Думаю...")
|
||||
await execute_tool_query(update, "yandex", prompt)
|
||||
await execute_tool_query(update, "opencode", prompt)
|
||||
|
||||
|
||||
async def forget_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
|
||||
chat_id = update.effective_chat.id
|
||||
orchestrator.memory.clear_chat(chat_id)
|
||||
await update.message.reply_text("История чата очищена.")
|
||||
await update.message.reply_text("🗑️ История чата очищена.")
|
||||
|
||||
|
||||
async def remind_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle /remind: parse a free-form reminder like "Позвонить через 1 час".

    Finds the first "<number> <unit>" pair in the text (Russian unit words
    for minutes/hours/days) and computes the target time; defaults to 60
    minutes when no pair is recognised. Actual scheduling is currently
    disabled (the scheduler call is commented out), so the user is only
    told the computed time.
    """
    if not context.args:
        await update.message.reply_text(
            "Использование: /remind <текст> <время>\n"
            "Пример: /remind Позвонить врачу через 1 час"
        )
        return

    text = " ".join(context.args)
    chat_id = update.effective_chat.id

    from datetime import datetime, timedelta

    # Minutes represented by each (declined) Russian unit keyword.
    time_keywords = {
        "минут": 1,
        "минуту": 1,
        "час": 60,
        "часа": 60,
        "часов": 60,
        "день": 1440,
        "дня": 1440,
        "дней": 1440
    }

    lowered = text.lower()
    minutes = 60  # fallback when no "<number> <unit>" pair is found
    for keyword, per_unit in time_keywords.items():
        if keyword in lowered:
            # Uses the module-level `re`; the original re-imported re on
            # every loop iteration.
            match = re.search(r'(\d+)\s*' + keyword, lowered)
            if match:
                minutes = int(match.group(1)) * per_unit
                break

    run_at = datetime.now() + timedelta(minutes=minutes)

    # scheduler_manager.add_reminder(chat_id, text, run_at)
    await update.message.reply_text(
        f"Напоминание установлено на {run_at.strftime('%H:%M %d.%m.%Y')} (временно недоступно)"
    )
|
||||
|
||||
|
||||
async def handle_voice(update: Update, context: ContextTypes.DEFAULT_TYPE):
|
||||
if not speech_recognizer.is_enabled():
|
||||
await update.message.reply_text("Распознавание речи отключено.")
|
||||
return
|
||||
|
||||
await update.message.reply_text("Распознаю голос...")
|
||||
|
||||
voice = update.message.voice
|
||||
file = await context.bot.get_file(voice.file_id)
|
||||
|
||||
with tempfile.NamedTemporaryFile(delete=False, suffix=".ogg") as tmp:
|
||||
await file.download_to_drive(tmp.name)
|
||||
audio_path = tmp.name
|
||||
|
||||
text = await speech_recognizer.recognize(audio_path)
|
||||
|
||||
if text:
|
||||
await update.message.reply_text(f"Распознано: {text}")
|
||||
await handle_message(update, context, text)
|
||||
else:
|
||||
await update.message.reply_text("Не удалось распознать речь.")
|
||||
|
||||
|
||||
async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE, override_text: str = None):
|
||||
prompt = override_text or update.message.text
|
||||
async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE):
|
||||
prompt = update.message.text
|
||||
chat_id = update.effective_chat.id
|
||||
mode = chat_state.get_mode(chat_id)
|
||||
tool = get_selected_tool()
|
||||
|
||||
if mode == ChatMode.CONFIRM:
|
||||
dangerous = is_dangerous(prompt)
|
||||
|
||||
if mode == ChatMode.CONFIRM and dangerous:
|
||||
keyboard = [
|
||||
[
|
||||
InlineKeyboardButton("Да", callback_data="confirm_yes"),
|
||||
InlineKeyboardButton("Нет", callback_data="confirm_no")
|
||||
InlineKeyboardButton("✅ Да", callback_data="confirm_yes"),
|
||||
InlineKeyboardButton("❌ Нет", callback_data="confirm_no")
|
||||
]
|
||||
]
|
||||
reply_markup = InlineKeyboardMarkup(keyboard)
|
||||
|
||||
chat_state.set_waiting_confirmation(chat_id, True, {
|
||||
"type": "tool",
|
||||
"tool": orchestrator.get_default_tool(),
|
||||
"tool": tool,
|
||||
"prompt": prompt
|
||||
})
|
||||
|
||||
await update.message.reply_text(
|
||||
f"Выполнить запрос?\n\n{prompt[:200]}...",
|
||||
f"⚠️ Это действие может внести изменения. Выполнить?\n\n{prompt[:200]}...",
|
||||
reply_markup=reply_markup
|
||||
)
|
||||
else:
|
||||
await update.message.reply_text("Думаю...")
|
||||
tool = orchestrator.get_default_tool()
|
||||
await update.message.reply_text("🤔 Думаю...")
|
||||
model = get_selected_model() if tool == "opencode" else None
|
||||
await execute_tool_query(update, tool, prompt)
|
||||
|
||||
|
||||
def main():
|
||||
global scheduler_manager
|
||||
|
||||
speech_recognizer.load_model()
|
||||
|
||||
builder = Application.builder()
|
||||
builder.token(settings.telegram_bot_token)
|
||||
|
||||
if settings.telegram_proxy_url:
|
||||
from telegram.request import HTTPXRequest
|
||||
request = HTTPXRequest(proxy=settings.telegram_proxy_url)
|
||||
builder.request(request)
|
||||
builder = builder.proxy(settings.telegram_proxy_url)
|
||||
logger.info(f"Используется прокси: {settings.telegram_proxy_url}")
|
||||
|
||||
application = builder.build()
|
||||
|
||||
# Scheduler temporarily disabled
|
||||
# scheduler_manager = SchedulerManager(application.bot, orchestrator)
|
||||
# scheduler_manager.start()
|
||||
|
||||
application.add_handler(CommandHandler("start", start))
|
||||
application.add_handler(CommandHandler("help", help_command))
|
||||
application.add_handler(CommandHandler("mode", mode_command))
|
||||
application.add_handler(CommandHandler("use", use_command))
|
||||
application.add_handler(CommandHandler("cancel", cancel_command))
|
||||
application.add_handler(CommandHandler("stt", stt_command))
|
||||
application.add_handler(CommandHandler("qwen", qwen_command))
|
||||
application.add_handler(CommandHandler("open", open_command))
|
||||
application.add_handler(CommandHandler("gigachat", gigachat_command))
|
||||
application.add_handler(CommandHandler("yandex", yandex_command))
|
||||
application.add_handler(CommandHandler("forget", forget_command))
|
||||
application.add_handler(CommandHandler("remind", remind_command))
|
||||
application.add_handler(CallbackQueryHandler(confirm_callback))
|
||||
application.add_handler(MessageHandler(filters.VOICE, handle_voice))
|
||||
application.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, handle_message))
|
||||
|
||||
logger.info("Бот запущен")
|
||||
|
|
|
|||
|
|
@ -45,9 +45,12 @@ class Orchestrator:
|
|||
|
||||
return full_prompt
|
||||
|
||||
async def ask(self, prompt: str, chat_id: int, tool: Optional[str] = None) -> Tuple[str, bool]:
|
||||
async def ask(self, prompt: str, chat_id: int, tool: Optional[str] = None, model: Optional[str] = None) -> Tuple[str, bool]:
|
||||
selected_tool = tool or self.default_tool
|
||||
|
||||
if selected_tool == "opencode" and model:
|
||||
selected_tool = f"opencode:{model}"
|
||||
|
||||
full_prompt = self._build_prompt(prompt, chat_id)
|
||||
|
||||
if selected_tool == "gigachat":
|
||||
|
|
@ -55,7 +58,7 @@ class Orchestrator:
|
|||
elif selected_tool == "yandex":
|
||||
result, success = await self.yandex.ask(full_prompt)
|
||||
else:
|
||||
result, success = await self.tool_runner.run_tool(selected_tool, full_prompt)
|
||||
result, success = await self.tool_runner.run_tool(selected_tool, full_prompt, model)
|
||||
|
||||
if not success and self._check_rate_limit_error(result):
|
||||
logger.warning(f"Лимит превышен для {selected_tool}, пробую другой инструмент")
|
||||
|
|
|
|||
|
|
@ -16,13 +16,19 @@ class ToolRunner:
|
|||
async def run_qwen(self, prompt: str) -> Tuple[str, bool]:
|
||||
return await self._run_tool(self.qwen_command, prompt)
|
||||
|
||||
async def run_opencode(self, prompt: str) -> Tuple[str, bool]:
|
||||
return await self._run_tool(self.opencode_command, prompt)
|
||||
async def run_opencode(self, prompt: str, model: Optional[str] = None) -> Tuple[str, bool]:
|
||||
cmd = self.opencode_command
|
||||
if model:
|
||||
cmd = f"{self.opencode_command}:{model}"
|
||||
return await self._run_tool(cmd, prompt)
|
||||
|
||||
async def _run_tool(self, command: str, prompt: str) -> Tuple[str, bool]:
|
||||
cmd_parts = command.split(":")
|
||||
actual_cmd = cmd_parts[0]
|
||||
|
||||
try:
|
||||
process = await asyncio.create_subprocess_exec(
|
||||
command,
|
||||
actual_cmd,
|
||||
prompt,
|
||||
stdout=asyncio.subprocess.PIPE,
|
||||
stderr=asyncio.subprocess.PIPE
|
||||
|
|
@ -53,10 +59,13 @@ class ToolRunner:
|
|||
logger.exception("Ошибка при выполнении инструмента")
|
||||
return f"Ошибка: {str(e)}", False
|
||||
|
||||
async def run_tool(self, tool_name: str, prompt: str) -> Tuple[str, bool]:
|
||||
async def run_tool(self, tool_name: str, prompt: str, model: Optional[str] = None) -> Tuple[str, bool]:
|
||||
if tool_name == "qwen":
|
||||
return await self.run_qwen(prompt)
|
||||
elif tool_name == "opencode":
|
||||
return await self.run_opencode(prompt)
|
||||
return await self.run_opencode(prompt, model)
|
||||
elif tool_name.startswith("opencode:"):
|
||||
model = tool_name.split(":", 1)[1]
|
||||
return await self.run_opencode(prompt, model)
|
||||
else:
|
||||
return f"Неизвестный инструмент: {tool_name}", False
|
||||
|
|
|
|||
|
|
@ -0,0 +1,16 @@
|
|||
[Unit]
|
||||
Description=Valera Telegram Bot
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=mirivlad
|
||||
WorkingDirectory=/home/mirivlad/git/valera
|
||||
ExecStart=/home/mirivlad/git/valera/venv/bin/python -m src.bot.main
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
StandardOutput=append:/home/mirivlad/git/valera/valera.log
|
||||
StandardError=append:/home/mirivlad/git/valera/valera.log
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
Loading…
Reference in New Issue