{
  "name": "Codettes",
  "description": "An advanced AI assistant designed to assist users with a wide range of tasks by providing insightful responses.",
  "strict": true,
  "parameters": {
    "type": "object",
    "required": [
      "Config",
      "fewShotExamples",
      "chatParameters",
      "systemPrompt"
    ],
    "properties": {
      "Config": {
        "type": "object",
        "required": [
          "max_input_length",
          "max_retries",
          "model_name",
          "perspectives",
          "safety_thresholds"
        ],
        "properties": {
          "max_input_length": {
            "type": "number",
            "description": "Maximum length of user input"
          },
          "max_retries": {
            "type": "number",
            "description": "Maximum number of retries for processing requests"
          },
          "model_name": {
            "type": "string",
            "description": "The name of the model being used"
          },
          "perspectives": {
            "type": "array",
            "description": "Array of perspectives to utilize in processing queries",
            "items": {
              "type": "string",
              "description": "Different perspectives for cognitive processing"
            }
          },
          "safety_thresholds": {
            "type": "object",
            "required": [
              "memory",
              "cpu",
              "response_time"
            ],
            "properties": {
              "memory": {
                "type": "number",
                "description": "Memory usage threshold percentage"
              },
              "cpu": {
                "type": "number",
                "description": "CPU usage threshold percentage"
              },
              "response_time": {
                "type": "number",
                "description": "Maximum acceptable response time in seconds"
              }
            },
            "additionalProperties": false
          }
        },
        "additionalProperties": false
      },
      "fewShotExamples": {
        "type": "array",
        "description": "Examples of interactions to aid in understanding function usage",
        "items": {
          "type": "object",
          "properties": {
            "input": {
              "type": "string",
              "description": "Input from the user"
            },
            "output": {
              "type": "string",
              "description": "Assistant's response to the user input"
            }
          },
          "additionalProperties": false,
          "required": [
            "input",
            "output"
          ]
        }
      },
      "chatParameters": {
        "type": "object",
        "required": [
          "deploymentName",
          "frequencyPenalty",
          "maxResponseLength",
          "pastMessagesToInclude",
          "presencePenalty",
          "temperature",
          "topProbablities",
          "stopSequences"
        ],
        "properties": {
          "deploymentName": {
            "type": "string",
            "description": "Name of the deployment for the AI model"
          },
          "frequencyPenalty": {
            "type": "number",
            "description": "Penalty for word repetition"
          },
          "maxResponseLength": {
            "type": "number",
            "description": "Maximum length of the response that the assistant can generate"
          },
          "pastMessagesToInclude": {
            "type": "number",
            "description": "Number of past messages to include in context for generating responses"
          },
          "presencePenalty": {
            "type": "number",
            "description": "Penalty applied to promote new topic introduction"
          },
          "temperature": {
            "type": "number",
            "description": "Sampling temperature controlling randomness in responses"
          },
          "topProbablities": {
            "type": "number",
            "description": "Sampling parameter influencing response diversity"
          },
          "stopSequences": {
            "type": "array",
            "description": "List of sequences to stop generating further tokens",
            "items": {
              "type": "string",
              "description": "Sequence indicating completion of response"
            }
          }
        },
        "additionalProperties": false
      },
      "systemPrompt": {
        "type": "string",
        "description": "Initial prompt to set the behavior and capabilities of the AI assistant"
      }
    },
    "additionalProperties": false
  }
}