dev: migrate openai to new controller (last one)

This commit is contained in:
KernelDeimos 2024-11-25 15:54:07 -05:00
parent 3f4efb9948
commit 871416f461
2 changed files with 72 additions and 5 deletions

View File

@ -135,6 +135,10 @@ class AIChatService extends BaseService {
test_mode = true;
}
if ( ! test_mode ) {
Context.set('moderated', true);
}
if ( test_mode ) {
intended_service = 'fake-chat';
}
@ -145,6 +149,7 @@ class AIChatService extends BaseService {
const svc_driver = this.services.get('driver');
let ret, error, errors = [];
let service_used = intended_service;
try {
ret = await svc_driver.call_new_({
actor: Context.get('actor'),
@ -199,6 +204,7 @@ class AIChatService extends BaseService {
},
});
error = null;
service_used = fallback_service_name;
response_metadata.fallback = {
service: fallback_service_name,
model: fallback_model_name,
@ -217,6 +223,7 @@ class AIChatService extends BaseService {
}
}
ret.result.via_ai_chat_service = true;
response_metadata.service_used = service_used;
const username = Context.get('actor').type?.user?.username;

View File

@ -19,19 +19,74 @@ class OpenAICompletionService extends BaseService {
this.openai = new this.modules.openai.OpenAI({
apiKey: sk_key
});
const svc_aiChat = this.services.get('ai-chat');
svc_aiChat.register_provider({
service_name: this.service_name,
alias: true,
});
}
get_default_model () {
return 'gpt-4o-mini';
}
async models_ () {
return [
{
id: 'gpt-4o',
cost: {
currency: 'usd-cents',
tokens: 1_000_000,
input: 250,
output: 500,
}
},
{
id: 'gpt-4o-mini',
cost: {
currency: 'usd-cents',
tokens: 1_000_000,
input: 15,
output: 30,
}
},
// {
// id: 'o1-preview',
// cost: {
// currency: 'usd-cents',
// tokens: 1_000_000,
// input: 1500,
// output: 6000,
// },
// }
{
id: 'o1-mini',
cost: {
currency: 'usd-cents',
tokens: 1_000_000,
input: 300,
output: 1200,
}
},
];
}
static IMPLEMENTS = {
['puter-chat-completion']: {
// Driver-interface entry point: delegates to models_() so the
// catalogue lives in one place on the service class.
async models () {
return await this.models_();
},
async list () {
return [
'gpt-4o',
'gpt-4o-mini',
];
const models = await this.models_();
const model_names = [];
for ( const model of models ) {
model_names.push(model.id);
if ( model.aliases ) {
model_names.push(...model.aliases);
}
}
return model_names;
},
async complete ({ messages, test_mode, stream, model }) {
@ -134,7 +189,12 @@ class OpenAICompletionService extends BaseService {
else texts.push(msg.content.text);
}
if ( moderation ) {
this.log.noticeme('OPENAI MODERATION CHECK', {
moderation,
context_value: Context.get('moderated'),
})
if ( moderation && ! Context.get('moderated') ) {
console.log('RAN MODERATION');
for ( const text of texts ) {
const moderation_result = await this.check_moderation(text);
if ( moderation_result.flagged ) {