From 53afcf5eb29bea47c9f055e3b53873f94f98ad3f Mon Sep 17 00:00:00 2001
From: renolation
Date: Sun, 12 Apr 2026 19:07:31 +0700
Subject: [PATCH] Move the GLM writing check into a Supabase Edge Function

Call the existing writing-check Edge Function from the hook via
supabase.functions.invoke instead of hitting the GLM API directly from
the browser, so the GLM key no longer has to be shipped to the client
as VITE_GLM_API_KEY. Also switch the function's default model to
GLM-4.5-Flash.
---
 src/hooks/use-writing-check.ts            | 55 ++++-------------------
 supabase/functions/writing-check/index.ts |  2 +-
 2 files changed, 10 insertions(+), 47 deletions(-)

diff --git a/src/hooks/use-writing-check.ts b/src/hooks/use-writing-check.ts
index 41b6804..8a60946 100644
--- a/src/hooks/use-writing-check.ts
+++ b/src/hooks/use-writing-check.ts
@@ -1,58 +1,22 @@
 import { useMutation } from "@tanstack/react-query"
 import { canUseWritingCheck, recordWritingCheckUsage } from "@/utils/rate-limiter"
 import { useAuthStore } from "@/store/auth-store"
+import { supabase } from "@/lib/supabase"
 import { saveWritingSubmission, countTodayWritingSubmissions } from "@/lib/progress-service"
 import type { WritingFeedback } from "@/types"
 
 const AUTH_DAILY_LIMIT = 10
 const GUEST_DAILY_LIMIT = 3
 
-const GLM_BASE_URL = "https://open.bigmodel.cn/api/paas/v4"
-const GLM_API_KEY = import.meta.env.VITE_GLM_API_KEY as string
-const GLM_MODEL = (import.meta.env.VITE_GLM_MODEL as string) || "GLM-4-32B-0414-128K"
-
-// Keep system prompt concise — fewer tokens = more room for output.
-// improved_version omitted from schema to reduce output length; added back as optional.
-const SYSTEM_PROMPT = `You are an expert English writing teacher for TOEIC and IELTS.
-Respond ONLY with valid JSON, no markdown:
-{"score":"6.5","grammar":["issue + fix in Vietnamese"],"vocabulary":["observation in Vietnamese"],"structure":"2 sentences in Vietnamese","improved_version":"full improved text","summary":"2 sentences in Vietnamese"}`
-
-async function callGlm(content: string): Promise<WritingFeedback> {
-  const res = await fetch(`${GLM_BASE_URL}/chat/completions`, {
-    method: "POST",
-    headers: {
-      "Content-Type": "application/json",
-      Authorization: `Bearer ${GLM_API_KEY}`,
-    },
-    body: JSON.stringify({
-      model: GLM_MODEL,
-      messages: [
-        { role: "system", content: SYSTEM_PROMPT },
-        { role: "user", content: `Analyse:\n\n${content.slice(0, 1500)}` },
-      ],
-      temperature: 0.3,
-      max_tokens: 2500,
-      // Force JSON output mode (OpenAI-compatible, supported by GLM)
-      response_format: { type: "json_object" },
-    }),
+async function callEdgeFunction(content: string): Promise<WritingFeedback> {
+  const { data, error } = await supabase.functions.invoke("writing-check", {
+    body: { content },
   })
 
-  if (!res.ok) {
-    const err = await res.json().catch(() => ({}))
-    throw new Error((err as { error?: { message?: string } }).error?.message ?? `GLM error ${res.status}`)
-  }
+  if (error) throw new Error(error.message ?? "Đã có lỗi khi chấm bài. Vui lòng thử lại.")
+  if (!data) throw new Error("Phản hồi từ AI không hợp lệ. Vui lòng thử lại.")
 
-  const data = await res.json() as { choices: { message: { content: string } }[] }
-  const raw = data.choices[0]?.message?.content ?? "{}"
-
-  // Strip markdown code fences defensively
-  const cleaned = raw.replace(/^```(?:json)?\s*/i, "").replace(/\s*```$/, "").trim()
-
-  try {
-    return JSON.parse(cleaned) as WritingFeedback
-  } catch {
-    throw new Error("Phản hồi từ AI không hợp lệ. Vui lòng thử lại.")
-  }
+  return data
 }
 
 export function useWritingCheck() {
@@ -73,13 +37,12 @@ export function useWritingCheck() {
       }
     }
 
-    const feedback = await callGlm(content)
+    const feedback = await callEdgeFunction(content)
 
     if (user) {
-      // Save to DB (fire-and-forget)
+      // Save submission to DB (fire-and-forget)
       saveWritingSubmission(user.id, content, feedback)
     } else {
-      // Persist guest usage in localStorage
       recordWritingCheckUsage()
     }
 
diff --git a/supabase/functions/writing-check/index.ts b/supabase/functions/writing-check/index.ts
index bf58962..3615593 100644
--- a/supabase/functions/writing-check/index.ts
+++ b/supabase/functions/writing-check/index.ts
@@ -47,7 +47,7 @@ Deno.serve(async (req: Request) => {
   const completion = await glm.chat.completions.create({
     // GLM-4-32B-0414-128K: cheapest paid model at $0.1/$0.1 per 1M tokens.
     // Override via: supabase secrets set GLM_MODEL=
-    model: Deno.env.get("GLM_MODEL") ?? "GLM-4-32B-0414-128K",
+    model: Deno.env.get("GLM_MODEL") ?? "GLM-4.5-Flash",
     messages: [
       { role: "system", content: SYSTEM_PROMPT },
       { role: "user", content: `Analyse this writing:\n\n${content.slice(0, 2000)}` },
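
Note on the Edge Function side: the hunk above only touches the model line of supabase/functions/writing-check/index.ts, so the rest of the handler is not visible in this patch. The sketch below is a minimal illustration of how such a handler could be shaped for readers following the hook-side change; it assumes the function uses the npm:openai client against the GLM endpoint (the base URL is taken from the constant removed from the hook) and reads an assumed GLM_API_KEY secret. The SYSTEM_PROMPT wording, the request validation, and the CORS headers are likewise assumptions, not the repository's actual code; only the Deno.serve signature, the glm.chat.completions.create call, the GLM_MODEL override, and the 2000-character slice are confirmed by the diff.

// Hypothetical sketch only; the real supabase/functions/writing-check/index.ts
// is not shown by this patch. Names flagged below are assumptions.
import OpenAI from "npm:openai"

// Assumed secret name (set via: supabase secrets set GLM_API_KEY=...).
// Base URL copied from the GLM_BASE_URL constant removed from the hook.
const glm = new OpenAI({
  apiKey: Deno.env.get("GLM_API_KEY"),
  baseURL: "https://open.bigmodel.cn/api/paas/v4",
})

// Stand-in for the function's real grading prompt (not shown in the patch).
const SYSTEM_PROMPT = "You are an expert English writing teacher. Respond ONLY with valid JSON."

// CORS headers are an assumption; supabase.functions.invoke sends an
// Authorization header from the browser, so it must be allowed here.
const corsHeaders = {
  "Access-Control-Allow-Origin": "*",
  "Access-Control-Allow-Headers": "authorization, x-client-info, apikey, content-type",
}

Deno.serve(async (req: Request) => {
  if (req.method === "OPTIONS") {
    return new Response("ok", { headers: corsHeaders })
  }

  // The hook sends { content } as the invoke body.
  const { content } = await req.json()
  if (typeof content !== "string" || content.trim() === "") {
    return new Response(JSON.stringify({ error: "content is required" }), {
      status: 400,
      headers: { ...corsHeaders, "Content-Type": "application/json" },
    })
  }

  // The model and messages lines match the hunk in the patch; temperature and
  // response_format are carried over from the removed client-side call.
  const completion = await glm.chat.completions.create({
    model: Deno.env.get("GLM_MODEL") ?? "GLM-4.5-Flash",
    messages: [
      { role: "system", content: SYSTEM_PROMPT },
      { role: "user", content: `Analyse this writing:\n\n${content.slice(0, 2000)}` },
    ],
    temperature: 0.3,
    response_format: { type: "json_object" },
  })

  // Return the parsed feedback as the JSON body; on the client,
  // supabase.functions.invoke resolves it into `data`.
  const feedback = JSON.parse(completion.choices[0]?.message?.content ?? "{}")
  return new Response(JSON.stringify(feedback), {
    headers: { ...corsHeaders, "Content-Type": "application/json" },
  })
})

With a handler shaped like this, the updated hook's supabase.functions.invoke("writing-check", { body: { content } }) resolves the JSON response into data and returns it as WritingFeedback, and the GLM key only needs to exist as a function secret rather than as a VITE_ variable baked into the client build.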