
SmolLLM

LLM utility library

This is the version submitted on 2025-03-04.

This script should not be installed directly. It is a library for other scripts to use. To use it, add the following to your script's metadata block: // @require https://update.greasyfork.icu/scripts/528704/1546715/SmolLLM.js
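A minimal sketch of a dependent script (the model name, API key, and prompt below are placeholders; the SimpleBalancer @require is also needed because the SmolLLM constructor checks for it):

// ==UserScript==
// @name         Example SmolLLM consumer
// @grant        GM_xmlhttpRequest
// @require      https://update.greasyfork.org/scripts/528703/1546610/SimpleBalancer.js
// @require      https://update.greasyfork.icu/scripts/528704/1546715/SmolLLM.js
// ==/UserScript==

const llm = new SmolLLM();
llm.askLLM({
  prompt: 'Say hello in one sentence.',     // placeholder prompt
  providerName: 'openai',                   // 'anthropic', 'gemini', or any OpenAI-compatible provider
  model: 'gpt-4o-mini',                     // placeholder model name
  apiKey: 'sk-...',                         // your API key
  baseUrl: 'https://api.openai.com',        // resolved to .../v1/chat/completions
  handler: (chunk) => console.log(chunk),   // called with each streamed text fragment
}).then((fullText) => console.log('Full response:', fullText))
  .catch((err) => console.error(err));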

// ==UserScript==
// @name         SmolLLM
// @namespace    http://tampermonkey.net/
// @version      0.1.7
// @description  LLM utility library
// @author       RoCry
// @grant        GM_xmlhttpRequest
// @require https://update.greasyfork.org/scripts/528703/1546610/SimpleBalancer.js
// @license MIT
// ==/UserScript==

class SmolLLM {
  constructor() {
    // Ensure SimpleBalancer is available
    if (typeof SimpleBalancer === 'undefined') {
      throw new Error('SimpleBalancer is required for SmolLLM to work');
    }

    // Verify GM_xmlhttpRequest is available
    if (typeof GM_xmlhttpRequest === 'undefined') {
      throw new Error('GM_xmlhttpRequest is required for SmolLLM to work');
    }

    this.balancer = new SimpleBalancer();
    this.logger = console;
  }

  /**
   * Prepares request data based on the provider
   * 
   * @param {string} prompt - User prompt
   * @param {string} systemPrompt - System prompt 
   * @param {string} modelName - Model name
   * @param {string} providerName - Provider name ('anthropic', 'gemini', or any OpenAI-compatible provider)
   * @param {string} baseUrl - API base URL
   * @returns {Object} - {url, data} for the request
   */
  prepareRequestData(prompt, systemPrompt, modelName, providerName, baseUrl) {
    let url, data;

    if (providerName === 'anthropic') {
      url = `${baseUrl}/v1/messages`;
      data = {
        model: modelName,
        max_tokens: 4096,
        messages: [{ role: 'user', content: prompt }],
        stream: true
      };
      if (systemPrompt) {
        data.system = systemPrompt;
      }
    } else if (providerName === 'gemini') {
      url = `${baseUrl}/v1beta/models/${modelName}:streamGenerateContent?alt=sse`;
      data = {
        contents: [{ parts: [{ text: prompt }] }]
      };
      if (systemPrompt) {
        data.system_instruction = { parts: [{ text: systemPrompt }] };
      }
    } else {
      // OpenAI compatible APIs
      const messages = [];
      if (systemPrompt) {
        messages.push({ role: 'system', content: systemPrompt });
      }
      messages.push({ role: 'user', content: prompt });

      data = {
        messages: messages,
        model: modelName,
        stream: true
      };

      // Handle URL based on suffix
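      // Examples (hypothetical base URLs):
      //   'https://api.example.com/custom#' -> 'https://api.example.com/custom'
      //   'https://api.example.com/'        -> 'https://api.example.com/chat/completions'
      //   'https://api.example.com'         -> 'https://api.example.com/v1/chat/completions'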
      if (baseUrl.endsWith('#')) {
        url = baseUrl.slice(0, -1); // Remove the # and use exact URL
      } else if (baseUrl.endsWith('/')) {
        url = `${baseUrl}chat/completions`; // Skip v1 prefix
      } else {
        url = `${baseUrl}/v1/chat/completions`; // Default pattern
      }
    }

    return { url, data };
  }

  /**
   * Prepares headers for authentication based on the provider
   * 
   * @param {string} providerName - Provider name
   * @param {string} apiKey - API key
   * @returns {Object} - Request headers
   */
  prepareHeaders(providerName, apiKey) {
    const headers = {
      'Content-Type': 'application/json'
    };

    if (providerName === 'anthropic') {
      headers['X-API-Key'] = apiKey;
      headers['Anthropic-Version'] = '2023-06-01';
    } else if (providerName === 'gemini') {
      headers['X-Goog-Api-Key'] = apiKey;
    } else {
      headers['Authorization'] = `Bearer ${apiKey}`;
    }

    return headers;
  }

  /**
   * Process SSE stream data for different providers
   * 
   * @param {string} chunk - Data chunk from SSE
   * @param {string} providerName - Provider name
   * @returns {string|null} - Extracted text content or null
   */
  processStreamChunk(chunk, providerName) {
    if (!chunk || chunk === '[DONE]') return null;

    try {
      this.logger.debug(`Processing chunk for ${providerName}:`, chunk.substring(0, 100) + (chunk.length > 100 ? '...' : ''));
      const data = JSON.parse(chunk);

      // Follow the Python implementation pattern for cleaner provider-specific handling
      if (providerName === 'gemini') {
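        // Expected Gemini stream chunk shape (abridged):
        //   {"candidates":[{"content":{"parts":[{"text":"..."}]}}]}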
        const candidates = data.candidates || [];
        if (candidates.length > 0 && candidates[0].content) {
          if (candidates[0].content.parts && candidates[0].content.parts.length > 0) {
            return candidates[0].content.parts[0].text || '';
          }
        }
      } else if (providerName === 'anthropic') {
        // Handle content_block_delta which contains the actual text
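        // Expected Anthropic stream event shape:
        //   {"type":"content_block_delta","delta":{"type":"text_delta","text":"..."}}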
        if (data.type === 'content_block_delta') {
          const delta = data.delta || {};
          if (delta.type === 'text_delta' || delta.text) {
            return delta.text || '';
          }
        }
        // Anthropic sends various event types - only some contain text
        return null;
      } else {
        // OpenAI compatible format
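        // Expected OpenAI-style chunk shape (abridged):
        //   {"choices":[{"delta":{"content":"..."},"finish_reason":null}]}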
        const choice = (data.choices || [{}])[0];
        if (choice.finish_reason !== null && choice.finish_reason !== undefined) {
          return null; // End of generation
        }
        return choice.delta && choice.delta.content ? choice.delta.content : null;
      }
    } catch (e) {
      this.logger.error(`Error parsing chunk: ${e.message}, chunk: ${chunk}`);
      return null;
    }

    return null;
  }

  /**
   * Makes a request to the LLM API and handles streaming responses
   * 
   * @param {Object} params - Request parameters
   * @returns {Promise<string>} - Full response text
   */
  async askLLM({
    prompt,
    providerName,
    systemPrompt = '',
    model,
    apiKey,
    baseUrl,
    handler = null,
    timeout = 60000
  }) {
    if (!prompt || !providerName || !model || !apiKey || !baseUrl) {
      throw new Error('Missing required parameters: prompt, providerName, model, apiKey, and baseUrl must all be provided');
    }

    // Use balancer to choose API key and base URL pair
    [apiKey, baseUrl] = this.balancer.choosePair(apiKey, baseUrl);

    const { url, data } = this.prepareRequestData(
      prompt, systemPrompt, model, providerName, baseUrl
    );

    const headers = this.prepareHeaders(providerName, apiKey);

    // Log request info (with masked API key)
    const apiKeyPreview = `${apiKey.slice(0, 5)}...${apiKey.slice(-4)}`;
    this.logger.info(
      `Sending request to ${url} with model=${model}, api_key=${apiKeyPreview}, prompt_length=${prompt.length}`
    );

    // Additional debug info
    this.logger.debug(`Provider: ${providerName}, Request data:`, JSON.stringify(data).substring(0, 500));

    return new Promise((resolve, reject) => {
      let responseText = '';
      let buffer = '';   // everything received so far
      let pending = '';  // partial SSE line carried over between progress events
      let timeoutId;

      // Safety-net timer, in addition to the request-level timeout option below
      if (timeout) {
        timeoutId = setTimeout(() => {
          reject(new Error(`Request timed out after ${timeout}ms`));
        }, timeout);
      }

      GM_xmlhttpRequest({
        method: 'POST',
        url: url,
        headers: headers,
        data: JSON.stringify(data),
        responseType: 'stream',
        timeout: timeout, // required for the ontimeout handler below to ever fire
        onload: (response) => {
          // Streamed data arrives via onprogress; onload only surfaces HTTP errors
          if (response.status !== 200) {
            clearTimeout(timeoutId);
            reject(new Error(`API request failed: ${response.status} - ${response.responseText}`));
          }
        },
        onreadystatechange: (response) => {
          if (response.readyState === 4) {
            // Request completed
            clearTimeout(timeoutId);
            if (response.status && response.status !== 200) {
              // Let onload/onerror report the failure rather than resolving early
              return;
            }
            this.logger.debug(`Request completed with response text length: ${responseText.length}`);
            resolve(responseText);
          }
        },
        onprogress: (response) => {
          // Handle streaming response: responseText grows as data arrives,
          // so take only the part we have not yet processed
          const chunk = response.responseText.substring(buffer.length);
          buffer = response.responseText;

          this.logger.debug(`Received chunk size: ${chunk.length}`);

          if (!chunk) return;

          // Process SSE format: newline-terminated lines such as
          //   data: {...json...}
          // A network chunk may end mid-line, so carry the partial tail
          // over to the next progress event instead of losing it
          pending += chunk;
          const lines = pending.split('\n');
          pending = lines.pop(); // the last element may be an incomplete line

          for (const line of lines) {
            const trimmed = line.trim();
            if (!trimmed || trimmed === 'data: [DONE]' || !trimmed.startsWith('data: ')) continue;

            try {
              // Remove the 'data: ' prefix (6 characters)
              const content = trimmed.substring(6);
              const textChunk = this.processStreamChunk(content, providerName);

              if (textChunk) {
                responseText += textChunk;
                if (handler && typeof handler === 'function') {
                  handler(textChunk);
                }
              }
            } catch (e) {
              this.logger.error('Error processing line:', e, trimmed);
            }
          }
        },
        onerror: (error) => {
          clearTimeout(timeoutId);
          this.logger.error('Request error:', error);
          reject(new Error(`Request failed: ${error.error || JSON.stringify(error)}`));
        },
        ontimeout: () => {
          clearTimeout(timeoutId);
          reject(new Error(`Request timed out after ${timeout}ms`));
        }
      });
    });
  }
}

// Make it available globally
if (typeof window !== 'undefined') {
  window.SmolLLM = SmolLLM;
}

// Export for module systems if needed
if (typeof module !== 'undefined' && module.exports) {
  module.exports = SmolLLM;
}