chore: add metadata on openai content generator

This commit is contained in:
tanzhenxin 2025-08-08 14:57:13 +08:00
parent ce632725b0
commit f503be14e9
5 changed files with 85 additions and 73 deletions

View file

@ -87,7 +87,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
mockOpenAIClient.chat.completions.create.mockRejectedValueOnce(error);
try {
await generator.generateContent(request);
await generator.generateContent(request, 'test-prompt-id');
} catch (thrownError: unknown) {
// Should contain timeout-specific messaging and troubleshooting tips
const errorMessage =
@ -119,7 +119,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
mockOpenAIClient.chat.completions.create.mockRejectedValueOnce(error);
try {
await generator.generateContent(request);
await generator.generateContent(request, 'test-prompt-id');
} catch (thrownError: unknown) {
// Should NOT contain timeout-specific messaging
const errorMessage =
@ -146,7 +146,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
model: 'gpt-4',
};
await expect(generator.generateContent(request)).rejects.toThrow(
await expect(generator.generateContent(request, 'test-prompt-id')).rejects.toThrow(
/Request timeout after \d+s\. Try reducing input length or increasing timeout in config\./,
);
});
@ -161,7 +161,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
model: 'gpt-4',
};
await expect(generator.generateContent(request)).rejects.toThrow(
await expect(generator.generateContent(request, 'test-prompt-id')).rejects.toThrow(
'Invalid API key',
);
});
@ -176,7 +176,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
};
try {
await generator.generateContent(request);
await generator.generateContent(request, 'test-prompt-id');
} catch (error: unknown) {
const errorMessage =
error instanceof Error ? error.message : String(error);
@ -199,7 +199,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
model: 'gpt-4',
};
await expect(generator.generateContentStream(request)).rejects.toThrow(
await expect(generator.generateContentStream(request, 'test-prompt-id')).rejects.toThrow(
/Streaming setup timeout after \d+s\. Try reducing input length or increasing timeout in config\./,
);
});
@ -214,7 +214,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
};
try {
await generator.generateContentStream(request);
await generator.generateContentStream(request, 'test-prompt-id');
} catch (error: unknown) {
const errorMessage =
error instanceof Error ? error.message : String(error);
@ -300,7 +300,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
};
try {
await generator.generateContent(request);
await generator.generateContent(request, 'test-prompt-id');
} catch (_error) {
// Verify that countTokens was called for estimation
expect(mockCountTokens).toHaveBeenCalledWith({
@ -324,7 +324,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
};
// Should not throw due to token counting failure
await expect(generator.generateContent(request)).rejects.toThrow(
await expect(generator.generateContent(request, 'test-prompt-id')).rejects.toThrow(
/Request timeout after \d+s/,
);
});

View file

@ -189,7 +189,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
const result = await generator.generateContent(request);
const result = await generator.generateContent(request, 'test-prompt-id');
expect(result.candidates).toHaveLength(1);
if (
@ -236,7 +236,7 @@ describe('OpenAIContentGenerator', () => {
},
};
await generator.generateContent(request);
await generator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({
@ -302,7 +302,7 @@ describe('OpenAIContentGenerator', () => {
},
};
const result = await generator.generateContent(request);
const result = await generator.generateContent(request, 'test-prompt-id');
if (
result.candidates &&
@ -345,7 +345,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
await generator.generateContent(request);
await generator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({
@ -381,7 +381,7 @@ describe('OpenAIContentGenerator', () => {
},
};
await generator.generateContent(request);
await generator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({
@ -439,7 +439,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
const stream = await generator.generateContentStream(request);
const stream = await generator.generateContentStream(request, 'test-prompt-id');
const responses = [];
for await (const response of stream) {
responses.push(response);
@ -528,7 +528,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
const stream = await generator.generateContentStream(request);
const stream = await generator.generateContentStream(request, 'test-prompt-id');
const responses = [];
for await (const response of stream) {
responses.push(response);
@ -668,7 +668,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
await expect(generator.generateContent(request)).rejects.toThrow(
await expect(generator.generateContent(request, 'test-prompt-id')).rejects.toThrow(
'Invalid API key',
);
});
@ -683,7 +683,7 @@ describe('OpenAIContentGenerator', () => {
};
try {
await generator.generateContent(request);
await generator.generateContent(request, 'test-prompt-id');
} catch (error) {
// Error should be thrown but token estimation should have been attempted
expect(error).toBeInstanceOf(Error);
@ -703,7 +703,7 @@ describe('OpenAIContentGenerator', () => {
};
try {
await generator.generateContent(request);
await generator.generateContent(request, 'test-prompt-id');
expect.fail('Expected error to be thrown');
} catch (error: unknown) {
// Should throw the original error object with status preserved
@ -763,7 +763,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
await generator.generateContent(request);
await generator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({
@ -828,7 +828,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
await generator.generateContent(request);
await generator.generateContent(request, 'test-prompt-id');
// Should not include the orphaned tool call
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
@ -872,7 +872,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
const result = await generator.generateContent(request);
const result = await generator.generateContent(request, 'test-prompt-id');
if (
result.candidates &&
result.candidates.length > 0 &&
@ -919,7 +919,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
await loggingGenerator.generateContent(request);
await loggingGenerator.generateContent(request, 'test-prompt-id');
// Verify logging was called
const { openaiLogger } = await import('../utils/openaiLogger.js');
@ -949,7 +949,7 @@ describe('OpenAIContentGenerator', () => {
};
try {
await generator.generateContent(request);
await generator.generateContent(request, 'test-prompt-id');
// Should not reach here
expect(true).toBe(false);
} catch (error) {
@ -969,7 +969,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
await expect(generator.generateContent(request)).rejects.toThrow(
await expect(generator.generateContent(request, 'test-prompt-id')).rejects.toThrow(
/Troubleshooting tips.*Reduce input length.*Increase timeout.*Check network/s,
);
});
@ -985,7 +985,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
await expect(generator.generateContentStream(request)).rejects.toThrow(
await expect(generator.generateContentStream(request, 'test-prompt-id')).rejects.toThrow(
'Streaming setup failed',
);
});
@ -999,7 +999,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
await expect(generator.generateContentStream(request)).rejects.toThrow(
await expect(generator.generateContentStream(request, 'test-prompt-id')).rejects.toThrow(
/Streaming setup timeout troubleshooting.*Reduce input length/s,
);
});
@ -1042,7 +1042,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
const stream = await loggingGenerator.generateContentStream(request);
const stream = await loggingGenerator.generateContentStream(request, 'test-prompt-id');
// Consume the stream and expect error
await expect(async () => {
@ -1113,7 +1113,7 @@ describe('OpenAIContentGenerator', () => {
},
};
await generator.generateContent(request);
await generator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({
@ -1191,7 +1191,7 @@ describe('OpenAIContentGenerator', () => {
},
};
await generator.generateContent(request);
await generator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({
@ -1296,7 +1296,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
await generator.generateContent(request);
await generator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({
@ -1404,7 +1404,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
await generator.generateContent(request);
await generator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({
@ -1460,7 +1460,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
await generator.generateContent(request);
await generator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({
@ -1495,7 +1495,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
await expect(testGenerator.generateContent(request)).rejects.toThrow();
await expect(testGenerator.generateContent(request, 'test-prompt-id')).rejects.toThrow();
// Error logging should be suppressed
expect(consoleSpy).not.toHaveBeenCalledWith(
@ -1519,7 +1519,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
await expect(generator.generateContent(request)).rejects.toThrow();
await expect(generator.generateContent(request, 'test-prompt-id')).rejects.toThrow();
// Error logging should occur by default
expect(consoleSpy).toHaveBeenCalledWith(
@ -1566,7 +1566,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
const result = await generator.generateContent(request);
const result = await generator.generateContent(request, 'test-prompt-id');
// Should handle malformed JSON gracefully
if (
@ -1643,7 +1643,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
const stream = await generator.generateContentStream(request);
const stream = await generator.generateContentStream(request, 'test-prompt-id');
const responses = [];
for await (const response of stream) {
responses.push(response);
@ -1692,7 +1692,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
const result = await generator.generateContent(request);
const result = await generator.generateContent(request, 'test-prompt-id');
expect(result.candidates).toHaveLength(1);
if (
@ -1733,7 +1733,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
const result = await generator.generateContent(request);
const result = await generator.generateContent(request, 'test-prompt-id');
expect(result.usageMetadata).toEqual({
promptTokenCount: 70, // 70% of 100
@ -1772,7 +1772,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
const result = await generator.generateContent(request);
const result = await generator.generateContent(request, 'test-prompt-id');
expect(result.usageMetadata).toEqual({
promptTokenCount: 50,
@ -1884,7 +1884,7 @@ describe('OpenAIContentGenerator', () => {
},
};
await loggingGenerator.generateContent(request);
await loggingGenerator.generateContent(request, 'test-prompt-id');
// Verify that logging was called with properly converted request/response
const { openaiLogger } = await import('../utils/openaiLogger.js');
@ -2039,7 +2039,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
const stream = await loggingGenerator.generateContentStream(request);
const stream = await loggingGenerator.generateContentStream(request, 'test-prompt-id');
const responses = [];
for await (const response of stream) {
responses.push(response);
@ -2083,7 +2083,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
const stream = await generator.generateContentStream(request);
const stream = await generator.generateContentStream(request, 'test-prompt-id');
const responses = [];
for await (const response of stream) {
responses.push(response);
@ -2166,7 +2166,7 @@ describe('OpenAIContentGenerator', () => {
},
};
await generator.generateContent(request);
await generator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({
@ -2204,7 +2204,7 @@ describe('OpenAIContentGenerator', () => {
},
};
await generator.generateContent(request);
await generator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({
@ -2260,7 +2260,7 @@ describe('OpenAIContentGenerator', () => {
},
};
await testGenerator.generateContent(request);
await testGenerator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({
@ -2312,7 +2312,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4',
};
await testGenerator.generateContent(request);
await testGenerator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({

View file

@ -187,6 +187,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
async generateContent(
request: GenerateContentParameters,
userPromptId: string,
): Promise<GenerateContentResponse> {
const startTime = Date.now();
const messages = this.convertToOpenAIFormat(request);
@ -204,6 +205,10 @@ export class OpenAIContentGenerator implements ContentGenerator {
model: this.model,
messages,
...samplingParams,
metadata: {
sessionId: this.config.getSessionId?.(),
promptId: userPromptId,
},
};
if (request.config?.tools) {
@ -223,7 +228,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
const responseEvent = new ApiResponseEvent(
this.model,
durationMs,
`openai-${Date.now()}`, // Generate a prompt ID
userPromptId,
this.config.getContentGeneratorConfig()?.authType,
response.usageMetadata,
);
@ -277,7 +282,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
const errorEvent = new ApiResponseEvent(
this.model,
durationMs,
`openai-${Date.now()}`, // Generate a prompt ID
userPromptId,
this.config.getContentGeneratorConfig()?.authType,
estimatedUsage,
undefined,
@ -317,6 +322,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
async generateContentStream(
request: GenerateContentParameters,
userPromptId: string,
): Promise<AsyncGenerator<GenerateContentResponse>> {
const startTime = Date.now();
const messages = this.convertToOpenAIFormat(request);
@ -333,6 +339,10 @@ export class OpenAIContentGenerator implements ContentGenerator {
...samplingParams,
stream: true,
stream_options: { include_usage: true },
metadata: {
sessionId: this.config.getSessionId?.(),
promptId: userPromptId,
},
};
if (request.config?.tools) {
@ -372,7 +382,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
const responseEvent = new ApiResponseEvent(
this.model,
durationMs,
`openai-stream-${Date.now()}`, // Generate a prompt ID
userPromptId,
this.config.getContentGeneratorConfig()?.authType,
finalUsageMetadata,
);
@ -428,7 +438,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
const errorEvent = new ApiResponseEvent(
this.model,
durationMs,
`openai-stream-${Date.now()}`, // Generate a prompt ID
userPromptId,
this.config.getContentGeneratorConfig()?.authType,
estimatedUsage,
undefined,
@ -501,7 +511,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
const errorEvent = new ApiResponseEvent(
this.model,
durationMs,
`openai-stream-${Date.now()}`, // Generate a prompt ID
userPromptId,
this.config.getContentGeneratorConfig()?.authType,
estimatedUsage,
undefined,

View file

@ -154,7 +154,7 @@ describe('QwenContentGenerator', () => {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
};
const result = await qwenContentGenerator.generateContent(request);
const result = await qwenContentGenerator.generateContent(request, 'test-prompt-id');
expect(result.text).toBe('Generated content');
expect(mockQwenClient.getAccessToken).toHaveBeenCalled();
@ -171,7 +171,7 @@ describe('QwenContentGenerator', () => {
contents: [{ role: 'user', parts: [{ text: 'Hello stream' }] }],
};
const stream = await qwenContentGenerator.generateContentStream(request);
const stream = await qwenContentGenerator.generateContentStream(request, 'test-prompt-id');
const chunks: string[] = [];
for await (const chunk of stream) {
@ -238,7 +238,7 @@ describe('QwenContentGenerator', () => {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
};
const result = await qwenContentGenerator.generateContent(request);
const result = await qwenContentGenerator.generateContent(request, 'test-prompt-id');
expect(result.text).toBe('Generated content');
expect(mockQwenClient.refreshAccessToken).toHaveBeenCalled();
@ -258,7 +258,7 @@ describe('QwenContentGenerator', () => {
};
await expect(
qwenContentGenerator.generateContent(request),
qwenContentGenerator.generateContent(request, 'test-prompt-id'),
).rejects.toThrow(
'Failed to obtain valid Qwen access token. Please re-authenticate.',
);
@ -278,7 +278,7 @@ describe('QwenContentGenerator', () => {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
};
await qwenContentGenerator.generateContent(request);
await qwenContentGenerator.generateContent(request, 'test-prompt-id');
expect(mockQwenClient.getCredentials).toHaveBeenCalled();
});
@ -315,7 +315,7 @@ describe('QwenContentGenerator', () => {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
};
await qwenContentGenerator.generateContent(request);
await qwenContentGenerator.generateContent(request, 'test-prompt-id');
// Should use default endpoint with /v1 suffix
expect(capturedBaseURL).toBe(
@ -355,7 +355,7 @@ describe('QwenContentGenerator', () => {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
};
await qwenContentGenerator.generateContent(request);
await qwenContentGenerator.generateContent(request, 'test-prompt-id');
// Should add https:// and /v1
expect(capturedBaseURL).toBe('https://custom-endpoint.com/v1');
@ -393,7 +393,7 @@ describe('QwenContentGenerator', () => {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
};
await qwenContentGenerator.generateContent(request);
await qwenContentGenerator.generateContent(request, 'test-prompt-id');
// Should preserve https:// and add /v1
expect(capturedBaseURL).toBe('https://custom-endpoint.com/v1');
@ -431,7 +431,7 @@ describe('QwenContentGenerator', () => {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
};
await qwenContentGenerator.generateContent(request);
await qwenContentGenerator.generateContent(request, 'test-prompt-id');
// Should not duplicate /v1
expect(capturedBaseURL).toBe('https://custom-endpoint.com/v1');
@ -464,7 +464,7 @@ describe('QwenContentGenerator', () => {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
};
await qwenContentGenerator.generateContent(request);
await qwenContentGenerator.generateContent(request, 'test-prompt-id');
// Should restore original values after operation
expect(client.apiKey).toBe(originalApiKey);
@ -499,7 +499,7 @@ describe('QwenContentGenerator', () => {
};
try {
await qwenContentGenerator.generateContent(request);
await qwenContentGenerator.generateContent(request, 'test-prompt-id');
} catch (error) {
expect(error).toBe(mockError);
}
@ -545,7 +545,7 @@ describe('QwenContentGenerator', () => {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
};
const result = await qwenContentGenerator.generateContent(request);
const result = await qwenContentGenerator.generateContent(request, 'test-prompt-id');
expect(result.text).toBe('Success after retry');
expect(mockGenerateContent).toHaveBeenCalledTimes(2);
@ -576,7 +576,7 @@ describe('QwenContentGenerator', () => {
};
await expect(
qwenContentGenerator.generateContent(request),
qwenContentGenerator.generateContent(request, 'test-prompt-id'),
).rejects.toThrow('Network timeout');
expect(mockGenerateContent).toHaveBeenCalledTimes(1);
expect(mockQwenClient.refreshAccessToken).not.toHaveBeenCalled();
@ -600,7 +600,7 @@ describe('QwenContentGenerator', () => {
};
await expect(
qwenContentGenerator.generateContent(request),
qwenContentGenerator.generateContent(request, 'test-prompt-id'),
).rejects.toThrow('Failed to obtain valid Qwen access token');
});
});
@ -691,9 +691,9 @@ describe('QwenContentGenerator', () => {
// Make multiple concurrent requests - should all use the same refresh promise
const promises = [
qwenContentGenerator.generateContent(request),
qwenContentGenerator.generateContent(request),
qwenContentGenerator.generateContent(request),
qwenContentGenerator.generateContent(request, 'test-prompt-id'),
qwenContentGenerator.generateContent(request, 'test-prompt-id'),
qwenContentGenerator.generateContent(request, 'test-prompt-id'),
];
const results = await Promise.all(promises);
@ -795,7 +795,7 @@ describe('QwenContentGenerator', () => {
contents: [{ role: 'user', parts: [{ text: 'Test message' }] }],
};
const result = await qwenContentGenerator.generateContent(request);
const result = await qwenContentGenerator.generateContent(request, 'test-prompt-id');
expect(result.text).toBe('Success after refresh');
expect(mockQwenClient.getAccessToken).toHaveBeenCalled();

View file

@ -78,6 +78,7 @@ export class QwenContentGenerator extends OpenAIContentGenerator {
*/
async generateContent(
request: GenerateContentParameters,
userPromptId: string,
): Promise<GenerateContentResponse> {
return this.withValidToken(async (token) => {
// Temporarily update the API key and base URL
@ -87,7 +88,7 @@ export class QwenContentGenerator extends OpenAIContentGenerator {
this.client.baseURL = this.getCurrentEndpoint();
try {
return await super.generateContent(request);
return await super.generateContent(request, userPromptId);
} finally {
// Restore original values
this.client.apiKey = originalApiKey;
@ -101,6 +102,7 @@ export class QwenContentGenerator extends OpenAIContentGenerator {
*/
async generateContentStream(
request: GenerateContentParameters,
userPromptId: string,
): Promise<AsyncGenerator<GenerateContentResponse>> {
return this.withValidTokenForStream(async (token) => {
// Update the API key and base URL before streaming
@ -110,7 +112,7 @@ export class QwenContentGenerator extends OpenAIContentGenerator {
this.client.baseURL = this.getCurrentEndpoint();
try {
return await super.generateContentStream(request);
return await super.generateContentStream(request, userPromptId);
} catch (error) {
// Restore original values on error
this.client.apiKey = originalApiKey;