feat(llm-gateway): add system prompt injection to OpenAI chat proxy

- Add injectSystemPromptOpenAI() for OpenAI messages format (role: system)
- Integrate injection into createOpenAIChatProxy before upstream call
- Update audit logs to track injection status
- Enables brand identity override for both Anthropic and OpenAI endpoints

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Author: hailin
Date:   2026-02-26 02:01:29 -08:00
Parent: a4fa4f47d6
Commit: 5683185a47
2 changed files with 57 additions and 6 deletions
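
For context, a minimal sketch of what the new injection does to an OpenAI-format request (the model name, key id, and rule content here are illustrative, not taken from the gateway's config):

    const { messages, applied, ruleIds } = await injectSystemPromptOpenAI(
      [{ role: 'user', content: 'Hi' }],
      'gpt-4o',    // illustrative model
      'key_abc',   // illustrative API key id
    );
    // With a single applicable prepend rule whose content is 'You are AcmeBot.',
    // the result gains a leading system message:
    //   [{ role: 'system', content: 'You are AcmeBot.' },
    //    { role: 'user', content: 'Hi' }]
    // and applied === true, ruleIds === [the matching rule's id].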

@@ -113,6 +113,52 @@ export async function injectSystemPrompt(
   return { system, applied: false, ruleIds: [] };
 }
 
+// ─── Inject into OpenAI messages format ───
+export async function injectSystemPromptOpenAI(
+  messages: Array<{ role: string; content: any }>,
+  model: string,
+  apiKeyId: string,
+): Promise<{ messages: Array<{ role: string; content: any }>; applied: boolean; ruleIds: string[] }> {
+  const allRules = await loadRules();
+  const applicableRules = allRules.filter(
+    (r) => matchesModel(r.matchModels, model) && matchesKey(r.matchKeyIds, apiKeyId),
+  );
+  if (applicableRules.length === 0) {
+    return { messages, applied: false, ruleIds: [] };
+  }
+
+  const prependRules = applicableRules.filter((r) => r.position === 'prepend');
+  const appendRules = applicableRules.filter((r) => r.position === 'append');
+  const prependText = prependRules.map((r) => r.content).join('\n\n');
+  const appendText = appendRules.map((r) => r.content).join('\n\n');
+  const ruleIds = applicableRules.map((r) => r.id);
+
+  if (!prependText && !appendText) {
+    return { messages, applied: false, ruleIds: [] };
+  }
+
+  const injectedContent = [prependText, appendText].filter(Boolean).join('\n\n');
+  const result = [...messages];
+
+  // Find existing system message
+  const systemIdx = result.findIndex((m) => m.role === 'system');
+  if (systemIdx >= 0) {
+    // Merge into existing system message
+    const existing = typeof result[systemIdx].content === 'string' ? result[systemIdx].content : '';
+    const parts = [prependText, existing, appendText].filter(Boolean);
+    result[systemIdx] = { ...result[systemIdx], content: parts.join('\n\n') };
+  } else {
+    // Insert new system message at the beginning
+    result.unshift({ role: 'system', content: injectedContent });
+  }
+
+  return { messages: result, applied: true, ruleIds };
+}
+
 export function clearInjectionRulesCache(): void {
   cachedRules = [];
   cacheTimestamp = 0;
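
When the incoming request already carries a system message, the rules merge into it rather than adding a second one. A sketch of the expected shape (inputs illustrative, as above):

    const { messages } = await injectSystemPromptOpenAI(
      [
        { role: 'system', content: 'Existing instructions.' },
        { role: 'user', content: 'Hi' },
      ],
      'gpt-4o',
      'key_abc',
    );
    // With a prepend rule 'Brand identity.' and an append rule 'Compliance note.',
    // the lone system message becomes:
    //   'Brand identity.\n\nExisting instructions.\n\nCompliance note.'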

@@ -4,6 +4,7 @@ import { ApiKeyRecord } from '../types';
 import { isModelAllowed } from '../middleware/auth';
 import { recordFromOpenAIResponse } from '../logging/usage-tracker';
 import { recordAudit } from '../logging/audit-logger';
+import { injectSystemPromptOpenAI } from '../injection/system-prompt-injector';
 import { pipeSSEStream } from './stream-pipe';
 import { sanitizeOpenAIResponse, sanitizeOpenAIEmbeddingResponse, buildOpenAIStreamTransform } from './response-sanitizer';
@@ -63,7 +64,7 @@ export function createOpenAIEmbeddingsProxy(config: GatewayConfig) {
       requestIp: clientIp,
       contentFiltered: false,
       filterRuleId: null,
-      injectionApplied: false,
+      injectionApplied: injection.applied,
       responseStatus: 502,
       durationMs: Date.now() - startTime,
     });
@@ -105,7 +106,7 @@ export function createOpenAIEmbeddingsProxy(config: GatewayConfig) {
       requestIp: clientIp,
       contentFiltered: false,
       filterRuleId: null,
-      injectionApplied: false,
+      injectionApplied: injection.applied,
       responseStatus: upstreamResponse.status,
       durationMs,
     });
@@ -175,6 +176,10 @@ export function createOpenAIChatProxy(config: GatewayConfig) {
     // Replace model for upstream
     body.model = effectiveModel;
 
+    // Inject system prompt (identity, regulatory content)
+    const injection = await injectSystemPromptOpenAI(body.messages || [], effectiveModel, apiKeyRecord.id);
+    body.messages = injection.messages;
+
     let upstreamResponse: Response;
     try {
       // openaiUpstreamUrl may already include /v1 (e.g., "https://host:8443/v1")
@@ -196,7 +201,7 @@ export function createOpenAIChatProxy(config: GatewayConfig) {
       requestIp: clientIp,
       contentFiltered: false,
       filterRuleId: null,
-      injectionApplied: false,
+      injectionApplied: injection.applied,
       responseStatus: 502,
       durationMs: Date.now() - startTime,
     });
@@ -236,7 +241,7 @@ export function createOpenAIChatProxy(config: GatewayConfig) {
       requestIp: clientIp,
       contentFiltered: false,
       filterRuleId: null,
-      injectionApplied: false,
+      injectionApplied: injection.applied,
       responseStatus: upstreamResponse.status,
       durationMs: Date.now() - startTime,
     });
@@ -259,7 +264,7 @@ export function createOpenAIChatProxy(config: GatewayConfig) {
       requestIp: clientIp,
       contentFiltered: false,
       filterRuleId: null,
-      injectionApplied: false,
+      injectionApplied: injection.applied,
       responseStatus: upstreamResponse.status,
       durationMs,
     });
@@ -282,7 +287,7 @@ export function createOpenAIChatProxy(config: GatewayConfig) {
       requestIp: clientIp,
       contentFiltered: false,
       filterRuleId: null,
-      injectionApplied: false,
+      injectionApplied: injection.applied,
       responseStatus: upstreamResponse.status,
       durationMs,
     });