// src/generator.ts
import { configSchematics, globalConfigSchematics } from "./config";
import { type Chat, type GeneratorController } from "@lmstudio/sdk";
import OpenAI from "openai";
import {
  type ChatCompletionMessageParam,
  type ChatCompletionMessageToolCall,
  type ChatCompletionTool,
  type ChatCompletionToolMessageParam,
} from "openai/resources/index";
import type { ChatCompletionCreateParamsStreaming } from "openai/resources/chat/completions";

/* -------------------------------------------------------------------------- */
/*                              Global Vars                                   */
/* -------------------------------------------------------------------------- */

const MAX_REQUESTS = 25;

/** Returns the current date and time formatted for the America/Los_Angeles time zone. */
function getFormattedTime(): string {
  const now = new Date();
  const timeZone = "America/Los_Angeles";
  const dateStr = now.toLocaleDateString("en-US", { timeZone });
  const timeStr = now.toLocaleTimeString("en-US", {
    timeZone,
    hour12: false,
    hour: "2-digit",
    minute: "2-digit",
    timeZoneName: "short",
  });
  return `${dateStr}, ${timeStr}`;
}
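
// Example output (illustrative; exact formatting depends on the runtime's locale data):
//   "12/3/2025, 14:07 PST"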

/* -------------------------------------------------------------------------- */
/*                                   Types                                    */
/* -------------------------------------------------------------------------- */

type ToolCallState = {
  id: string;
  name: string | null;
  index: number;
  arguments: string;
};
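
// Illustrative delta sequence (typical OpenAI-style streaming; exact chunking varies
// by provider) and the ToolCallState it accumulates into:
//   { index: 0, id: "call_abc", function: { name: "get_weather", arguments: "" } }
//   { index: 0, function: { arguments: "{\"city\":" } }
//   { index: 0, function: { arguments: "\"Berlin\"}" } }
// -> { id: "call_abc", name: "get_weather", index: 0, arguments: "{\"city\":\"Berlin\"}" }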

/* -------------------------------------------------------------------------- */
/*                               Build helpers                                */
/* -------------------------------------------------------------------------- */

// TODO: type `globalConfig` from globalConfigSchematics instead of `any`.
function createOpenAI(globalConfig: any) {
  const baseURL = globalConfig?.get("baseUrl") || "https://openrouter.ai/api/v1";
  const apiKey = globalConfig?.get("apiKey");
  return new OpenAI({ apiKey, baseURL });
}
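
// Example (hypothetical model id): with no baseUrl configured, requests go to OpenRouter:
//   const openai = createOpenAI(globalConfig);
//   await openai.chat.completions.create({ model: "openai/gpt-4o-mini", messages, stream: true });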

/** Convert internal chat history to the format expected by OpenAI. */
function toOpenAIMessages(history: Chat): ChatCompletionMessageParam[] {
  const messages: ChatCompletionMessageParam[] = [];

  for (const message of history) {
    switch (message.getRole()) {
      case "system":
        messages.push({ role: "system", content: message.getText() });
        break;

      case "user":
        messages.push({ role: "user", content: message.getText() });
        break;

      case "assistant": {
        const toolCalls: ChatCompletionMessageToolCall[] = message
          .getToolCallRequests()
          .map(toolCall => ({
            id: toolCall.id ?? "",
            type: "function",
            function: {
              name: toolCall.name,
              arguments: JSON.stringify(toolCall.arguments ?? {}),
            },
          }));

        messages.push({
          role: "assistant",
          content: message.getText(),
          ...(toolCalls.length ? { tool_calls: toolCalls } : {}),
        });
        break;
      }

      case "tool": {
        message.getToolCallResults().forEach(toolCallResult => {
          messages.push({
            role: "tool",
            tool_call_id: toolCallResult.toolCallId ?? "",
            content: toolCallResult.content,
          } as ChatCompletionToolMessageParam);
        });
        break;
      }
    }
  }

  return messages;
}
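
// Illustrative mapping (hypothetical tool call): an assistant turn that requested a
// tool, followed by its result, becomes two OpenAI messages:
//   { role: "assistant", content: "", tool_calls: [{ id: "call_1", type: "function",
//       function: { name: "get_weather", arguments: "{\"city\":\"Berlin\"}" } }] }
//   { role: "tool", tool_call_id: "call_1", content: "7°C, clear" }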

/** Convert LM Studio tool definitions to OpenAI function-tool descriptors. */
function toOpenAITools(ctl: GeneratorController): ChatCompletionTool[] | undefined {
  const tools = ctl.getToolDefinitions().map<ChatCompletionTool>(t => ({
    type: "function",
    function: {
      name: t.function.name,
      description: t.function.description,
      parameters: t.function.parameters ?? {},
    },
  }));
  return tools.length ? tools : undefined;
}
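
// Illustrative result (hypothetical tool): each LM Studio tool definition becomes a
// function-tool descriptor such as:
//   { type: "function", function: { name: "get_weather",
//       description: "Look up current weather",
//       parameters: { type: "object", properties: { city: { type: "string" } } } } }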

function extractProviderError(error: unknown) {
  const err = error && typeof error === "object" ? (error as any) : null;
  const status =
    err?.status ??
    err?.response?.status ??
    (typeof err?.code === "number" ? err.code : undefined);
  const headers = err?.headers ?? err?.response?.headers;
  const requestId =
    err?.request_id ??
    err?.requestId ??
    headers?.["x-request-id"] ??
    headers?.["X-Request-Id"];
  const apiMessage = typeof err?.message === "string" ? err.message : undefined;
  const errorObj = err?.error ?? err?.response?.data?.error ?? err?.response?.error;
  const responseData = err?.response?.data;
  const providerMessage =
    (typeof errorObj?.message === "string" && errorObj.message) ||
    (typeof responseData?.message === "string" && responseData.message) ||
    apiMessage;

  return {
    status,
    requestId,
    providerMessage,
    apiMessage,
    type: errorObj?.type ?? err?.type,
    code: errorObj?.code ?? err?.code,
    param: errorObj?.param,
    error: errorObj ?? responseData,
  };
}
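
// Illustrative normalization (error shapes vary widely; the fields below are hypothetical):
//   input:  APIError { status: 429, error: { message: "Rate limit exceeded", code: "rate_limited" } }
//   output: { status: 429, providerMessage: "Rate limit exceeded", code: "rate_limited", ... }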

/* -------------------------------------------------------------------------- */
/*                            Stream-handling utils                           */
/* -------------------------------------------------------------------------- */

function wireAbort(ctl: GeneratorController, stream: { controller: AbortController }) {
  ctl.onAborted(() => {
    console.info("Generation aborted by user.");
    stream.controller.abort();
  });
}

async function consumeStream(stream: AsyncIterable<any>, ctl: GeneratorController) {
  let current: ToolCallState | null = null;

  function maybeFlushCurrentToolCall() {
    if (current === null || current.name === null) {
      return;
    }
    ctl.toolCallGenerationEnded({
      type: "function",
      name: current.name,
      // Some providers emit no argument fragments for zero-argument tools.
      arguments: current.arguments ? JSON.parse(current.arguments) : {},
      id: current.id,
    });
    current = null;
  }

  for await (const chunk of stream) {
    //console.info("Received chunk:", JSON.stringify(chunk));            //DEBUG in LOG - not my
    const delta = chunk.choices?.[0]?.delta as
      | {
          content?: string;
          tool_calls?: Array<{
            index: number;
            id?: string;
            function?: { name?: string; arguments?: string };
          }>;
        }
      | undefined;

    if (!delta) continue;

    /* Text streaming */
    if (delta.content) {
      ctl.fragmentGenerated(delta.content);
    }

    /* Tool-call streaming */
    for (const toolCall of delta.tool_calls ?? []) {
      if (toolCall.id !== undefined) {
        maybeFlushCurrentToolCall();
        current = { id: toolCall.id, name: null, index: toolCall.index, arguments: "" };
        ctl.toolCallGenerationStarted();
      }

      if (toolCall.function?.name && current) {
        current.name = toolCall.function.name;
        ctl.toolCallGenerationNameReceived(toolCall.function.name);
      }

      if (toolCall.function?.arguments && current) {
        current.arguments += toolCall.function.arguments;
        ctl.toolCallGenerationArgumentFragmentGenerated(toolCall.function.arguments);
      }
    }

    /* Finalize tool call */
    if (chunk.choices?.[0]?.finish_reason === "tool_calls" && current?.name) {
      maybeFlushCurrentToolCall();
    }
  }

  // Flush any tool call still open if the stream ended without a tool_calls
  // finish_reason (some providers omit it).
  maybeFlushCurrentToolCall();

  console.info("Generation completed.");
}

/* -------------------------------------------------------------------------- */
/*                                     API                                    */
/* -------------------------------------------------------------------------- */

export async function generate(ctl: GeneratorController, history: Chat) {
  const config = ctl.getPluginConfig(configSchematics as any) as any;
  const globalConfig = ctl.getGlobalPluginConfig(globalConfigSchematics as any) as any;
  const debug = Boolean(config.get("debug"));

  if (debug) {
    console.log(`[DEBUG] [ENTER] generate() run=${Math.random().toString(36).slice(-4)}`);
  }

  // Stateless request counter: recover the last count by scanning the chat history
  // backwards for the status line appended after each successful run (see below).
  let requestCounter = 1;
  const allMessages = Array.from(history);

  for (let i = allMessages.length - 1; i >= 0; i--) {
    const msg = allMessages[i];
    if (msg.getRole() === "assistant") {
      const content = msg.getText();
      if (content) {
        const match = content.match(/📊 Request #(\d+)/);
        if (match) {
          requestCounter = parseInt(match[1], 10) + 1;
          break;
        }
      }
    }
  }
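
  // Example: if the most recent assistant message ends with
  //   "📊 Request #7/25 at 12/3/2025, 14:07 PST"
  // the regex above parses 7 and sets requestCounter = 8 for this run.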

  if (debug) {
    console.log("[PLUGIN] History length:", allMessages.length);
    const lastMsg = allMessages[allMessages.length - 1];
    if (lastMsg) {
      console.log("[PLUGIN] Last role:", lastMsg.getRole(), "preview:", lastMsg.getText()?.substring(0, 50));
    }
    console.log("[PLUGIN] Parsed counter:", requestCounter);
  }

  let model = config.get("model");                // user-supplied model id
  model = typeof model === "string" ? model.trim() : "";

  if (!model) {
    ctl.fragmentGenerated("\u0192?O Missing model. Set a model ID in plugin settings.\n");
    return;
  }

  const openai = createOpenAI(globalConfig);
  const messages = toOpenAIMessages(history);
  const tools = toOpenAITools(ctl);
  const requestPayload: ChatCompletionCreateParamsStreaming = {
    model,
    messages,
    tools,
    stream: true,
  };

  try {
    if (debug) {
      const baseUrl = globalConfig?.get("baseUrl") || "https://openrouter.ai/api/v1";
      console.info("[PLUGIN] Request target:", { baseUrl, model });
      try {
        console.info("[PLUGIN] Request payload:", JSON.stringify(requestPayload, null, 2));
      } catch {
        console.info("[PLUGIN] Request payload (non-JSON):", requestPayload);
      }
    }

    const stream = await openai.chat.completions.create(requestPayload);

    wireAbort(ctl, stream);
    await consumeStream(stream, ctl);

    // Post-generation status line. This is also the marker the stateless counter
    // above parses to recover the request count on the next run.
    if (debug) {
      const timeStr = getFormattedTime();
      ctl.fragmentGenerated(`\n📊 Request #${requestCounter}/${MAX_REQUESTS} at ${timeStr}\n`);
    }

  } catch (error: unknown) {
    let msg = "❌ Generation failed.";
    const info = extractProviderError(error);

    const errorPayload = info.error ?? info;
    try {
      console.error("[PLUGIN] Upstream error:", JSON.stringify(errorPayload, null, 2));
    } catch {
      console.error("[PLUGIN] Upstream error (non-JSON):", errorPayload);
    }

    if (info.status === 429) {
      msg = `❌ 429 Rate Limit Exceeded. You've used ${requestCounter}/${MAX_REQUESTS} free requests. Try again later or add your own API key.`;
      if (info.providerMessage) {
        msg += ` Provider: ${info.providerMessage}`;
      }
    } else if (info.providerMessage) {
      if (info.providerMessage.includes("API Key")) {
        msg = "❌ Invalid or missing API key.";
      } else {
        msg = `❌ API error: ${info.providerMessage}`;
      }
    } else if (info.apiMessage) {
      msg = `❌ API error: ${info.apiMessage}`;
    }

    ctl.fragmentGenerated(`${msg}\n`);
    return;
  }

}
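
/*
 * Usage sketch (hypothetical wiring, based on the lmstudio-js generator-plugin
 * scaffold as we understand it; verify the entry-point names against your SDK
 * version):
 *
 *   // src/index.ts
 *   import { type PluginContext } from "@lmstudio/sdk";
 *   import { configSchematics, globalConfigSchematics } from "./config";
 *   import { generate } from "./generator";
 *
 *   export async function main(context: PluginContext) {
 *     context.withConfigSchematics(configSchematics);
 *     context.withGlobalConfigSchematics(globalConfigSchematics);
 *     context.withGenerator(generate);
 *   }
 */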