flexeval.schema.config_schema#
- pydantic model flexeval.schema.config_schema.Config[source]#
Bases: BaseModel
JSON schema:
{
  "title": "Config",
  "type": "object",
  "properties": {
    "logs_path": {
      "anyOf": [
        { "format": "path", "type": "string" },
        { "type": "null" }
      ],
      "default": null,
      "description": "Log directory path.",
      "title": "Logs Path"
    },
    "env_filepath": {
      "anyOf": [
        { "format": "path", "type": "string" },
        { "type": "null" }
      ],
      "default": null,
      "description": "A .env file to be processed by python-dotenv before running evals with this config.",
      "title": "Env Filepath"
    },
    "env": {
      "additionalProperties": true,
      "description": "Any additional environment variables.",
      "title": "Env",
      "type": "object"
    },
    "clear_tables": {
      "default": false,
      "description": "Clear any existing tables, if the output SQLite database already exists.",
      "title": "Clear Tables",
      "type": "boolean"
    },
    "max_workers": {
      "default": 1,
      "description": "Max worker count. Multiple threads will be used if set to > 1. This may have usage limit implications if you are calling APIs.",
      "title": "Max Workers",
      "type": "integer"
    },
    "random_seed_conversation_sampling": {
      "default": 42,
      "title": "Random Seed Conversation Sampling",
      "type": "integer"
    },
    "max_n_conversation_threads": {
      "default": 50,
      "title": "Max N Conversation Threads",
      "type": "integer"
    },
    "nb_evaluations_per_thread": {
      "default": 1,
      "title": "Nb Evaluations Per Thread",
      "type": "integer"
    },
    "raise_on_completion_error": {
      "default": false,
      "description": "If False (default), metrics will be run even if one or more completions fails.",
      "title": "Raise On Completion Error",
      "type": "boolean"
    },
    "raise_on_metric_error": {
      "default": false,
      "description": "If False (default), no exception will be thrown if a metric function raises an exception.",
      "title": "Raise On Metric Error",
      "type": "boolean"
    }
  }
}
- Config:
extra: str = ignore
validate_assignment: bool = True
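For orientation, a minimal usage sketch (assuming flexeval is installed; the import path is the one documented above). Because extra = ignore and validate_assignment = True, unknown keyword arguments are dropped and later assignments are re-validated:

from pathlib import Path

from flexeval.schema.config_schema import Config

# Unknown keys are ignored (extra = "ignore"); unspecified fields keep their defaults.
config = Config(logs_path=Path("logs"), not_a_real_option=123)
assert not hasattr(config, "not_a_real_option")
assert config.max_workers == 1

# Assignment is re-validated (validate_assignment = True), so a bad type raises.
config.max_workers = 4            # fine: validated as int
# config.max_workers = "many"     # would raise a pydantic ValidationError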
- Fields:
- field clear_tables: bool = False#
Clear any existing tables if the output SQLite database already exists.
- field env: Annotated[dict, BeforeValidator(func=convert_none_or_empty_string_to_dict, json_schema_input_type=PydanticUndefined)] [Optional]#
Any additional environment variables.
- Constraints:
func = convert_none_or_empty_string_to_dict
json_schema_input_type = PydanticUndefined
- field env_filepath: Path | None = None#
A .env file to be processed by python-dotenv before running evals with this config.
- field max_workers: int = 1#
Max worker count. Multiple threads will be used if set to > 1. This may have usage limit implications if you are calling APIs.
- field raise_on_completion_error: bool = False#
If False (default), metrics will still be run even if one or more completions fail.
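A short sketch of how the documented fields fit together. The values below are illustrative, and the behavior shown for env (coercing None or an empty string to an empty dict) is inferred from the validator's name, so treat it as an assumption:

from pathlib import Path

from flexeval.schema.config_schema import Config

config = Config(
    logs_path=Path("output/logs"),        # directory for run logs
    env_filepath=Path(".env"),            # processed by python-dotenv before evals run
    env={"MY_API_KEY": "example-value"},  # additional environment variables (illustrative)
    clear_tables=True,                    # drop existing tables in the output SQLite database
    max_workers=4,                        # >1 uses multiple threads; watch API usage limits
    raise_on_completion_error=True,       # raise instead of still running metrics on failure
)

# Assumed behavior of the env BeforeValidator: None or "" becomes an empty dict.
print(Config(env=None).env)   # expected: {}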