diff --git a/.cargo/config.toml b/.cargo/config.toml index 3b8bc0d..45bf408 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,3 +1,3 @@ [alias] xtask = "run --package xtask --" -test-clean = "run --package xtask -- test-clean" +test-clean = "run --package xtask --features typesense -- test-clean" diff --git a/Cargo.toml b/Cargo.toml index 626ce20..fd09242 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,6 +24,7 @@ bon = "3" clap = { version = "4", features = ["derive"] } debug_unsafe = "0.1" hmac = "0.12" +indexmap = { version = "2", features = ["serde"] } reqwest-retry = "0.7" serde = { version = "1", features = ["derive"] } serde_json = "1.0" diff --git a/openapi-generator-template/model.mustache b/openapi-generator-template/model.mustache index 627362a..ab15a35 100644 --- a/openapi-generator-template/model.mustache +++ b/openapi-generator-template/model.mustache @@ -1,5 +1,6 @@ {{>partial_header}} use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; {{#models}} {{#model}} @@ -123,11 +124,12 @@ impl Default for {{classname}} { {{^discriminator}} {{#vendorExtensions.x-rust-builder}} #[derive(bon::Builder)] +#[builder(on(Cow<'_, str>, into))] #[builder(on(String, into))] -{{/vendorExtensions.x-rust-builder}} -{{#vendorExtensions.x-rust-has-byte-array}}#[serde_as] -{{/vendorExtensions.x-rust-has-byte-array}}{{#oneOf.isEmpty}}#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct {{{classname}}}{{{vendorExtensions.x-rust-generic-parameter}}} { +{{/vendorExtensions.x-rust-builder}}{{! +}}{{#vendorExtensions.x-rust-has-byte-array}}#[serde_as]{{/vendorExtensions.x-rust-has-byte-array}}{{! 
+}}{{#oneOf.isEmpty}}#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct {{{classname}}}{{#vendorExtensions.x-rust-has-borrowed-data}}<'a{{#vendorExtensions.x-rust-generic-parameter}}, {{{.}}}{{/vendorExtensions.x-rust-generic-parameter}}>{{/vendorExtensions.x-rust-has-borrowed-data}}{{^vendorExtensions.x-rust-has-borrowed-data}}{{#vendorExtensions.x-rust-generic-parameter}}<{{{.}}}>{{/vendorExtensions.x-rust-generic-parameter}}{{/vendorExtensions.x-rust-has-borrowed-data}} { {{#vars}} {{#description}} /// {{{.}}} @@ -145,21 +147,32 @@ pub struct {{{classname}}}{{{vendorExtensions.x-rust-generic-parameter}}} { }}{{#isNullable}}Option<{{/isNullable}}{{^required}}Option<{{/required}}{{! ### Enums }}{{#isEnum}}{{#isArray}}{{#uniqueItems}}std::collections::HashSet<{{/uniqueItems}}{{^uniqueItems}}Vec<{{/uniqueItems}}{{/isArray}}{{{enumName}}}{{#isArray}}>{{/isArray}}{{/isEnum}}{{! - ### Non-Enums Start - }}{{^isEnum}}{{! ### Models - }}{{#isModel}}{{^avoidBoxedModels}}Box<{{/avoidBoxedModels}}{{{dataType}}}{{^avoidBoxedModels}}>{{/avoidBoxedModels}}{{/isModel}}{{! - ### Primative datatypes - }}{{^isModel}}{{#isByteArray}}Vec{{/isByteArray}}{{^isByteArray}}{{{dataType}}}{{/isByteArray}}{{/isModel}}{{! - ### Non-Enums End - }}{{/isEnum}}{{! + }}{{^isEnum}}{{#isModel}}{{^avoidBoxedModels}}Box<{{/avoidBoxedModels}}{{{dataType}}}{{#vendorExtensions.x-rust-has-borrowed-data}}{{#model}}{{#vendorExtensions.x-rust-has-borrowed-data}}<'a>{{/vendorExtensions.x-rust-has-borrowed-data}}{{^vendorExtensions.x-rust-has-borrowed-data}}<'static>{{/vendorExtensions.x-rust-has-borrowed-data}}{{/model}}{{/vendorExtensions.x-rust-has-borrowed-data}}{{^avoidBoxedModels}}>{{/avoidBoxedModels}}{{/isModel}}{{! + ### ByteArray + }}{{^isModel}}{{#isByteArray}}Vec{{/isByteArray}}{{! 
+ ### String + }}{{^isByteArray}}{{#isString}}{{#model}}{{#vendorExtensions.x-rust-has-borrowed-data}}Cow<'a, str>{{/vendorExtensions.x-rust-has-borrowed-data}}{{^vendorExtensions.x-rust-has-borrowed-data}}String{{/vendorExtensions.x-rust-has-borrowed-data}}{{/model}}{{/isString}}{{! + ### Arrays + }}{{^isString}}{{#isArray}}Vec<{{#items}}{{! + ### Array Models + }}{{#isModel}}{{{dataType}}}{{#vendorExtensions.x-rust-has-borrowed-data}}{{#model}}{{#vendorExtensions.x-rust-has-borrowed-data}}<'a>{{/vendorExtensions.x-rust-has-borrowed-data}}{{^vendorExtensions.x-rust-has-borrowed-data}}<'static>{{/vendorExtensions.x-rust-has-borrowed-data}}{{/model}}{{/vendorExtensions.x-rust-has-borrowed-data}}{{/isModel}}{{! + ### Array other datatypes + }}{{^isModel}}{{{dataType}}}{{/isModel}}{{/items}}>{{/isArray}}{{! + ### Primitive datatypes + }}{{^isArray}}{{#isPrimitiveType}}{{{dataType}}}{{/isPrimitiveType}}{{! + ### Any other Non-Primitive datatypes + }}{{^isPrimitiveType}}{{{dataType}}}{{! + ### Closing tags + }}{{/isPrimitiveType}}{{/isArray}}{{/isString}}{{/isByteArray}}{{/isModel}}{{/isEnum}}{{! ### Option End (and trailing comma) }}{{#isNullable}}>{{/isNullable}}{{^required}}>{{/required}}, {{/vendorExtensions.x-rust-type}} {{/vars}} } -impl{{{vendorExtensions.x-rust-generic-parameter}}} {{{classname}}}{{{vendorExtensions.x-rust-generic-parameter}}} { +impl{{#vendorExtensions.x-rust-has-borrowed-data}}<'a{{#vendorExtensions.x-rust-generic-parameter}}, {{{.}}}{{/vendorExtensions.x-rust-generic-parameter}}>{{/vendorExtensions.x-rust-has-borrowed-data}}{{^vendorExtensions.x-rust-has-borrowed-data}}{{#vendorExtensions.x-rust-generic-parameter}}<{{{.}}}>{{/vendorExtensions.x-rust-generic-parameter}}{{/vendorExtensions.x-rust-has-borrowed-data}} {{! 
+ }}{{{classname}}}{{#vendorExtensions.x-rust-has-borrowed-data}}<'a{{#vendorExtensions.x-rust-generic-parameter}}, {{{.}}}{{/vendorExtensions.x-rust-generic-parameter}}>{{/vendorExtensions.x-rust-has-borrowed-data}}{{^vendorExtensions.x-rust-has-borrowed-data}}{{#vendorExtensions.x-rust-generic-parameter}}<{{{.}}}>{{/vendorExtensions.x-rust-generic-parameter}}{{/vendorExtensions.x-rust-has-borrowed-data}} { {{#description}} /// {{{.}}} {{/description}} @@ -167,10 +180,19 @@ impl{{{vendorExtensions.x-rust-generic-parameter}}} {{{classname}}}{{{vendorExte ### CHANGE 1: If x-rust-type is available for a required var, use it for the parameter type. }}{{#vendorExtensions.x-rust-type}}{{{.}}}{{/vendorExtensions.x-rust-type}}{{! ### Fallback to original logic if x-rust-type is not present. - }}{{^vendorExtensions.x-rust-type}}{{#isNullable}}Option<{{/isNullable}}{{#isEnum}}{{#isArray}}{{#uniqueItems}}std::collections::HashSet<{{/uniqueItems}}{{^uniqueItems}}Vec<{{/uniqueItems}}{{/isArray}}{{{enumName}}}{{#isArray}}>{{/isArray}}{{/isEnum}}{{^isEnum}}{{#isByteArray}}Vec{{/isByteArray}}{{^isByteArray}}{{{dataType}}}{{/isByteArray}}{{/isEnum}}{{#isNullable}}>{{/isNullable}}{{/vendorExtensions.x-rust-type}}{{! + }}{{^vendorExtensions.x-rust-type}}{{#isNullable}}Option<{{/isNullable}}{{! + }}{{#isEnum}}{{#isArray}}{{#uniqueItems}}std::collections::HashSet<{{/uniqueItems}}{{^uniqueItems}}Vec<{{/uniqueItems}}{{/isArray}}{{{enumName}}}{{#isArray}}>{{/isArray}}{{/isEnum}}{{! + }}{{^isEnum}}{{#isByteArray}}Vec{{/isByteArray}}{{! + }}{{^isByteArray}}{{#isString}}{{#model}}{{#vendorExtensions.x-rust-has-borrowed-data}}Cow<'a, str>{{/vendorExtensions.x-rust-has-borrowed-data}}{{^vendorExtensions.x-rust-has-borrowed-data}}String{{/vendorExtensions.x-rust-has-borrowed-data}}{{/model}}{{/isString}}{{! + }}{{^isString}}{{#isArray}}Vec<{{#items}}{{! 
+ }}{{#isModel}}{{{dataType}}}{{#vendorExtensions.x-rust-has-borrowed-data}}{{#model}}{{#vendorExtensions.x-rust-has-borrowed-data}}<'a>{{/vendorExtensions.x-rust-has-borrowed-data}}{{^vendorExtensions.x-rust-has-borrowed-data}}<'static>{{/vendorExtensions.x-rust-has-borrowed-data}}{{/model}}{{/vendorExtensions.x-rust-has-borrowed-data}}{{/isModel}}{{! + }}{{^isModel}}{{{dataType}}}{{/isModel}}{{/items}}>{{/isArray}}{{! + }}{{^isArray}}{{{dataType}}}{{#isModel}}{{#vendorExtensions.x-rust-has-borrowed-data}}{{#model}}{{#vendorExtensions.x-rust-has-borrowed-data}}<'a>{{/vendorExtensions.x-rust-has-borrowed-data}}{{^vendorExtensions.x-rust-has-borrowed-data}}<'static>{{/vendorExtensions.x-rust-has-borrowed-data}}{{/model}}{{/vendorExtensions.x-rust-has-borrowed-data}}{{/isModel}}{{! + }}{{/isArray}}{{/isString}}{{/isByteArray}}{{/isEnum}}{{! + }}{{#isNullable}}>{{/isNullable}}{{/vendorExtensions.x-rust-type}}{{! ### Comma for next arguement - }}{{^-last}}, {{/-last}}{{/requiredVars}}) -> {{{classname}}}{{{vendorExtensions.x-rust-generic-parameter}}}{ - {{{classname}}} { + }}{{^-last}}, {{/-last}}{{/requiredVars}}) -> Self { + Self { {{#vars}} {{#vendorExtensions.x-rust-type}} {{! 
Differentiate between required and optional fields with x-rust-type.}} @@ -196,16 +218,16 @@ impl{{{vendorExtensions.x-rust-generic-parameter}}} {{{classname}}}{{{vendorExte {{/description}} #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] #[serde(untagged)] -pub enum {{classname}} { +pub enum {{classname}}{{#vendorExtensions.x-rust-has-borrowed-data}}<'a>{{/vendorExtensions.x-rust-has-borrowed-data}} { {{#composedSchemas.oneOf}} {{#description}} /// {{{.}}} {{/description}} - {{{name}}}({{#isModel}}{{^avoidBoxedModels}}Box<{{/avoidBoxedModels}}{{/isModel}}{{{dataType}}}{{#isModel}}{{^avoidBoxedModels}}>{{/avoidBoxedModels}}{{/isModel}}), + {{{name}}}({{#isModel}}{{^avoidBoxedModels}}Box<{{/avoidBoxedModels}}{{/isModel}}{{#isArray}}Vec<{{{items.dataType}}}{{#vendorExtensions.x-rust-has-borrowed-data}}<'a>{{/vendorExtensions.x-rust-has-borrowed-data}}>{{/isArray}}{{^isArray}}{{{dataType}}}{{#vendorExtensions.x-rust-has-borrowed-data}}<'a>{{/vendorExtensions.x-rust-has-borrowed-data}}{{/isArray}}{{#isModel}}{{^avoidBoxedModels}}>{{/avoidBoxedModels}}{{/isModel}}), {{/composedSchemas.oneOf}} } -impl Default for {{classname}} { +impl Default for {{classname}}{{#vendorExtensions.x-rust-has-borrowed-data}}<'_>{{/vendorExtensions.x-rust-has-borrowed-data}} { fn default() -> Self { {{#composedSchemas.oneOf}}{{#-first}}Self::{{{name}}}(Default::default()){{/-first}}{{/composedSchemas.oneOf}} } diff --git a/openapi-generator-template/reqwest/api.mustache b/openapi-generator-template/reqwest/api.mustache index 9734291..038480e 100644 --- a/openapi-generator-template/reqwest/api.mustache +++ b/openapi-generator-template/reqwest/api.mustache @@ -1,9 +1,10 @@ {{>partial_header}} +use super::{Error, configuration, ContentType}; +use crate::{apis::ResponseContent, models}; +use ::std::borrow::Cow; use reqwest; use serde::{Deserialize, Serialize, de::Error as _}; -use crate::{apis::ResponseContent, models}; -use super::{Error, configuration, ContentType}; {{#operations}} 
{{#operation}} @@ -12,15 +13,15 @@ use super::{Error, configuration, ContentType}; {{#-first}} /// struct for passing parameters to the method [`{{operationId}}`] #[derive(Clone, Debug)] -pub struct {{{operationIdCamelCase}}}Params{{! +pub struct {{{operationIdCamelCase}}}Params<'p{{! Iterate through ALL parameters in the operation. Only the requestBody has this extension defined, so it will print "". The other parameters have nothing, so they will print nothing. This effectively extract the generic parameter from the requestBody and places it on the struct definition line. }}{{#allParams}} -{{{vendorExtensions.x-rust-params-generic-parameter}}} -{{/allParams}} { +{{#vendorExtensions.x-rust-params-generic-parameter}}, {{{.}}}{{/vendorExtensions.x-rust-params-generic-parameter}} +{{/allParams}}> { {{/-first}} {{#description}} /// {{{.}}} @@ -33,15 +34,15 @@ pub struct {{{operationIdCamelCase}}}Params{{! ### Option Start }}{{^required}}Option<{{/required}}{{#required}}{{#isNullable}}Option<{{/isNullable}}{{/required}}{{! ### &str and Vec<&str> - }}{{^isUuid}}{{#isString}}{{#isArray}}Vec<{{/isArray}}String{{#isArray}}>{{/isArray}}{{/isString}}{{/isUuid}}{{! + }}{{^isUuid}}{{#isString}}{{#isArray}}Vec<{{/isArray}}Cow<'p, str>{{#isArray}}>{{/isArray}}{{/isString}}{{/isUuid}}{{! ### UUIDs }}{{#isUuid}}{{#isArray}}Vec<{{/isArray}}String{{#isArray}}>{{/isArray}}{{/isUuid}}{{! ### Models and primative types - }}{{^isString}}{{^isUuid}}{{^isPrimitiveType}}{{^isContainer}}models::{{/isContainer}}{{/isPrimitiveType}}{{{dataType}}}{{/isUuid}}{{/isString}}{{! + }}{{^isString}}{{^isUuid}}{{^isPrimitiveType}}{{^isContainer}}models::{{/isContainer}}{{/isPrimitiveType}}{{{dataType}}}{{#isModel}}<'p>{{/isModel}}{{/isUuid}}{{/isString}}{{! ### Option End }}{{^required}}>{{/required}}{{#required}}{{#isNullable}}>{{/isNullable}}{{/required}}{{! 
### Comma for next arguement - }}{{^-last}},{{/-last}} + }}, {{/vendorExtensions.x-rust-type}} {{#-last}} } @@ -105,11 +106,17 @@ pub enum {{{operationIdCamelCase}}}Error { /// {{{.}}} {{/notes}} {{#vendorExtensions.x-group-parameters}} -pub {{#supportAsync}}async {{/supportAsync}}fn {{{operationId}}}{{{vendorExtensions.x-rust-generic-parameter}}}(configuration: &configuration::Configuration{{#allParams}}{{#-first}}, {{! +pub {{#supportAsync}}async {{/supportAsync}}fn {{{operationId}}}{{#vendorExtensions.x-rust-generic-parameter}}<{{{.}}}>{{/vendorExtensions.x-rust-generic-parameter}}(configuration: &configuration::Configuration{{#allParams}}{{#-first}}, {{! ### Params -}}params: &{{{operationIdCamelCase}}}Params{{#allParams}}{{{vendorExtensions.x-rust-params-generic-parameter}}}{{/allParams}}{{/-first}}{{/allParams}}{{! +}}params: &{{{operationIdCamelCase}}}Params<'_{{#allParams}}{{#vendorExtensions.x-rust-params-generic-parameter}}, {{{.}}}{{/vendorExtensions.x-rust-params-generic-parameter}}{{/allParams}}>{{/-first}}{{/allParams}}{{! ### Function return type -}}) -> Result<{{#vendorExtensions.x-rust-return-type}}{{{.}}}{{/vendorExtensions.x-rust-return-type}}{{^vendorExtensions.x-rust-return-type}}{{#isResponseFile}}{{#supportAsync}}reqwest::Response{{/supportAsync}}{{^supportAsync}}reqwest::blocking::Response{{/supportAsync}}{{/isResponseFile}}{{^isResponseFile}}{{#supportMultipleResponses}}ResponseContent<{{{operationIdCamelCase}}}Success>{{/supportMultipleResponses}}{{^supportMultipleResponses}}{{^returnType}}(){{/returnType}}{{{returnType}}}{{/supportMultipleResponses}}{{/isResponseFile}}{{/vendorExtensions.x-rust-return-type}}, Error<{{{operationIdCamelCase}}}Error>> { +}}) -> Result<{{#vendorExtensions.x-rust-return-type}}{{{.}}}{{/vendorExtensions.x-rust-return-type}}{{^vendorExtensions.x-rust-return-type}}{{! 
+}}{{#isResponseFile}}{{#supportAsync}}reqwest::Response{{/supportAsync}}{{^supportAsync}}reqwest::blocking::Response{{/supportAsync}}{{/isResponseFile}}{{! +}}{{^isResponseFile}}{{#supportMultipleResponses}}ResponseContent<{{{operationIdCamelCase}}}Success>{{/supportMultipleResponses}}{{^supportMultipleResponses}}{{! +}}{{^returnType}}(){{/returnType}}{{! +}}{{#isArray}}Vec<{{#returnProperty.items}}{{{dataType}}}{{#isModel}}{{#vendorExtensions.x-rust-has-borrowed-data}}<'static>{{/vendorExtensions.x-rust-has-borrowed-data}}{{/isModel}}{{/returnProperty.items}}>{{/isArray}}{{! +}}{{^isArray}}{{#returnProperty}}{{{dataType}}}{{#isModel}}{{#vendorExtensions.x-rust-has-borrowed-data}}<'static>{{/vendorExtensions.x-rust-has-borrowed-data}}{{/isModel}}{{/returnProperty}}{{! +}}{{/isArray}}{{/supportMultipleResponses}}{{/isResponseFile}}{{/vendorExtensions.x-rust-return-type}}, Error<{{{operationIdCamelCase}}}Error>> { {{/vendorExtensions.x-group-parameters}} let uri_str = format!("{}{{{path}}}", configuration.base_path{{#pathParams}}, {{{baseName}}}={{#isString}}crate::apis::urlencode(&{{/isString}}{{{vendorExtensions.x-rust-param-identifier}}}{{^required}}.unwrap(){{/required}}{{#required}}{{#isNullable}}.unwrap(){{/isNullable}}{{/required}}{{#isArray}}.join(",").as_ref(){{/isArray}}{{^isString}}{{^isUuid}}{{^isPrimitiveType}}{{^isContainer}}.to_string(){{/isContainer}}{{/isPrimitiveType}}{{/isUuid}}{{/isString}}{{#isString}}){{/isString}}{{/pathParams}}); let mut req_builder = configuration.client.request(reqwest::Method::{{{httpMethod}}}, &uri_str); @@ -416,7 +423,7 @@ pub {{#supportAsync}}async {{/supportAsync}}fn {{{operationId}}}{{{vendorExtensi {{#vendorExtensions.x-rust-body-is-raw-text}} {{! 
If the flag is true, we generate only the raw text body logic }} {{#bodyParams}} - req_builder = req_builder.header(reqwest::header::CONTENT_TYPE, "text/plain").body({{{vendorExtensions.x-rust-param-identifier}}}.to_owned()); + req_builder = req_builder.header(reqwest::header::CONTENT_TYPE, "text/plain").body({{{vendorExtensions.x-rust-param-identifier}}}.clone().into_owned()); {{/bodyParams}} {{/vendorExtensions.x-rust-body-is-raw-text}} {{! Now, handle the case where the extension is NOT present. This is the "else" block }} diff --git a/openapi-generator-template/reqwest/api_mod.mustache b/openapi-generator-template/reqwest/api_mod.mustache index 6a78390..9e9a2fe 100644 --- a/openapi-generator-template/reqwest/api_mod.mustache +++ b/openapi-generator-template/reqwest/api_mod.mustache @@ -1,4 +1,4 @@ -use std::{error, fmt}; +use ::std::{error, fmt}; {{#withAWSV4Signature}} use aws_sigv4; diff --git a/preprocessed_openapi.yml b/preprocessed_openapi.yml index d0fc19d..fad52d6 100644 --- a/preprocessed_openapi.yml +++ b/preprocessed_openapi.yml @@ -96,19 +96,19 @@ paths: - name: exclude_fields in: query schema: - description: Comma-separated list of fields from the collection to exclude from the response type: string + description: Comma-separated list of fields from the collection to exclude from the response - name: limit in: query schema: + type: integer description: | Number of collections to fetch. Default: returns all collections. - type: integer - name: offset in: query schema: - description: Identifies the starting point to return collections when paginating. type: integer + description: Identifies the starting point to return collections when paginating. 
responses: '200': description: List of all collections @@ -116,9 +116,9 @@ paths: application/json: schema: type: array - x-go-type: '[]*CollectionResponse' items: $ref: '#/components/schemas/CollectionResponse' + x-go-type: '[]*CollectionResponse' post: tags: - collections @@ -126,12 +126,12 @@ paths: description: When a collection is created, we give it a name and describe the fields that will be indexed from the documents added to the collection. operationId: createCollection requestBody: + required: true description: The collection object to be created content: application/json: schema: $ref: '#/components/schemas/CollectionSchema' - required: true responses: '201': description: Collection successfully created @@ -160,9 +160,9 @@ paths: operationId: getCollection parameters: - name: collectionName + required: true in: path description: The name of the collection to retrieve - required: true schema: type: string responses: @@ -186,18 +186,18 @@ paths: operationId: updateCollection parameters: - name: collectionName + required: true in: path description: The name of the collection to update - required: true schema: type: string requestBody: + required: true description: The document object with fields to be updated content: application/json: schema: $ref: '#/components/schemas/CollectionUpdateSchema' - required: true responses: '200': description: The updated partial collection schema @@ -225,9 +225,9 @@ paths: operationId: deleteCollection parameters: - name: collectionName + required: true in: path description: The name of the collection to delete - required: true schema: type: string responses: @@ -252,9 +252,9 @@ paths: operationId: indexDocument parameters: - name: collectionName + required: true in: path description: The name of the collection to add the document to - required: true schema: type: string - name: action @@ -270,6 +270,7 @@ paths: schema: $ref: '#/components/schemas/DirtyValues' requestBody: + required: true description: The document object to 
be indexed content: application/json: @@ -277,7 +278,7 @@ paths: type: object description: Can be any key-value pair x-go-type: interface{} - required: true + x-rust-has-borrowed-data: true responses: '201': description: Document successfully created/indexed @@ -300,9 +301,9 @@ paths: operationId: updateDocuments parameters: - name: collectionName + required: true in: path description: The name of the collection to update documents in - required: true schema: type: string - name: filter_by @@ -310,6 +311,18 @@ paths: schema: type: string example: 'num_employees:>100 && country: [USA, UK]' + requestBody: + required: true + description: The document fields to be updated + content: + application/json: + schema: + type: object + description: Can be any key-value pair + x-go-type: interface{} + x-rust-has-borrowed-data: true + x-rust-params-generic-parameter: B + x-rust-type: B responses: '200': description: The response contains a single field, `num_updated`, indicating the number of documents affected. @@ -336,18 +349,7 @@ paths: application/json: schema: $ref: '#/components/schemas/ApiResponse' - requestBody: - description: The document fields to be updated - content: - application/json: - schema: - type: object - description: Can be any key-value pair - x-go-type: interface{} - required: true - x-rust-params-generic-parameter: - x-rust-type: B - x-rust-generic-parameter: '' + x-rust-generic-parameter: 'B: Serialize' delete: tags: - documents @@ -356,9 +358,9 @@ paths: operationId: deleteDocuments parameters: - name: collectionName + required: true in: path description: The name of the collection to delete documents from - required: true schema: type: string - name: filter_by @@ -369,8 +371,8 @@ paths: - name: batch_size in: query schema: - description: Batch size parameter controls the number of documents that should be deleted at a time. A larger value will speed up deletions, but will impact performance of other operations running on the server. 
type: integer + description: Batch size parameter controls the number of documents that should be deleted at a time. A larger value will speed up deletions, but will impact performance of other operations running on the server. - name: ignore_not_found in: query schema: @@ -378,8 +380,8 @@ paths: - name: truncate in: query schema: - description: When true, removes all documents from the collection while preserving the collection and its schema. type: boolean + description: When true, removes all documents from the collection while preserving the collection and its schema. responses: '200': description: Documents successfully deleted @@ -407,190 +409,190 @@ paths: operationId: searchCollection parameters: - name: collectionName + required: true in: path description: The name of the collection to search for the document under - required: true schema: type: string - name: q in: query schema: - description: The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. type: string + description: The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. - name: query_by in: query schema: - description: A list of `string` fields that should be queried against. Multiple fields are separated with a comma. type: string + description: A list of `string` fields that should be queried against. Multiple fields are separated with a comma. - name: nl_query in: query schema: - description: Whether to use natural language processing to parse the query. type: boolean + description: Whether to use natural language processing to parse the query. - name: nl_model_id in: query schema: - description: The ID of the natural language model to use. type: string + description: The ID of the natural language model to use. 
- name: query_by_weights in: query schema: - description: The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. type: string + description: The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. - name: text_match_type in: query schema: - description: In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. type: string + description: In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. - name: prefix in: query schema: - description: Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. type: string + description: Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. - name: infix in: query schema: - description: If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. 
This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results type: string + description: If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results - name: max_extra_prefix in: query schema: - description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. type: integer + description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. - name: max_extra_suffix in: query schema: - description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. 
type: integer + description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. - name: filter_by in: query schema: - description: Filter conditions for refining your open api validator search results. Separate multiple conditions with &&. type: string + description: Filter conditions for refining your open api validator search results. Separate multiple conditions with &&. example: 'num_employees:>100 && country: [USA, UK]' - name: max_filter_by_candidates in: query schema: - description: Controls the number of similar words that Typesense considers during fuzzy search on filter_by values. Useful for controlling prefix matches like company_name:Acm*. type: integer + description: Controls the number of similar words that Typesense considers during fuzzy search on filter_by values. Useful for controlling prefix matches like company_name:Acm*. - name: sort_by in: query schema: - description: A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` type: string + description: A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. 
If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` example: num_employees:desc - name: facet_by in: query schema: - description: A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. type: string + description: A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. - name: max_facet_values in: query schema: - description: Maximum number of facet values to be returned. type: integer + description: Maximum number of facet values to be returned. - name: facet_query in: query schema: - description: Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix "shoe". type: string + description: Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix "shoe". - name: num_typos in: query schema: + type: string description: | The number of typographical errors (1 or 2) that would be tolerated. Default: 2 - type: string - name: page in: query schema: - description: Results from this specific page number would be fetched. type: integer + description: Results from this specific page number would be fetched. - name: per_page in: query schema: - description: 'Number of results to fetch per page. Default: 10' type: integer + description: 'Number of results to fetch per page. Default: 10' - name: limit in: query schema: + type: integer description: | Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. 
- type: integer - name: offset in: query schema: - description: Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. type: integer + description: Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. - name: group_by in: query schema: - description: You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. type: string + description: You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. - name: group_limit in: query schema: + type: integer description: | Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 - type: integer - name: group_missing_values in: query schema: + type: boolean description: | Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. 
Default: true - type: boolean - name: include_fields in: query schema: - description: List of fields from the document to include in the search result type: string + description: List of fields from the document to include in the search result - name: exclude_fields in: query schema: - description: List of fields from the document to exclude in the search result type: string + description: List of fields from the document to exclude in the search result - name: highlight_full_fields in: query schema: - description: List of fields which should be highlighted fully without snippeting type: string + description: List of fields which should be highlighted fully without snippeting - name: highlight_affix_num_tokens in: query schema: + type: integer description: | The number of tokens that should surround the highlighted text on each side. Default: 4 - type: integer - name: highlight_start_tag in: query schema: + type: string description: | The start tag used for the highlighted snippets. Default: `` - type: string - name: highlight_end_tag in: query schema: + type: string description: | The end tag used for the highlighted snippets. Default: `` - type: string - name: enable_highlight_v1 in: query schema: - description: | - Flag for enabling/disabling the deprecated, old highlight structure in the response. Default: true type: boolean default: true + description: | + Flag for enabling/disabling the deprecated, old highlight structure in the response. Default: true - name: enable_analytics in: query schema: - description: | - Flag for enabling/disabling analytics aggregation for specific search queries (for e.g. those originating from a test script). type: boolean default: true + description: | + Flag for enabling/disabling analytics aggregation for specific search queries (for e.g. those originating from a test script). 
- name: snippet_threshold in: query schema: + type: integer description: | Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 - type: integer - name: synonym_sets in: query schema: @@ -600,9 +602,9 @@ paths: - name: drop_tokens_threshold in: query schema: + type: integer description: | If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 - type: integer - name: drop_tokens_mode in: query schema: @@ -610,9 +612,9 @@ paths: - name: typo_tokens_threshold in: query schema: + type: integer description: | If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 - type: integer - name: enable_typos_for_alpha_numerical_tokens in: query schema: @@ -646,184 +648,184 @@ paths: - name: pinned_hits in: query schema: + type: string description: | A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. - type: string - name: hidden_hits in: query schema: + type: string description: | A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. 
Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. - type: string - name: override_tags in: query schema: - description: Comma separated list of tags to trigger the curations rules that match the tags. type: string + description: Comma separated list of tags to trigger the curations rules that match the tags. - name: highlight_fields in: query schema: + type: string description: | A list of custom fields that must be highlighted even if you don't query for them - type: string - name: split_join_tokens in: query schema: + type: string description: | Treat space as typo: search for q=basket ball if q=basketball is not found or vice-versa. Splitting/joining of tokens will only be attempted if the original query produces no results. To always trigger this behavior, set value to `always``. To disable, set value to `off`. Default is `fallback`. - type: string - name: pre_segmented_query in: query schema: + type: boolean description: | You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same - type: boolean - name: preset in: query schema: + type: string description: | Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. 
- type: string - name: enable_overrides in: query schema: - description: | - If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false type: boolean default: false + description: | + If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false - name: prioritize_exact_match in: query schema: - description: | - Set this parameter to true to ensure that an exact match is ranked above the others type: boolean default: true + description: | + Set this parameter to true to ensure that an exact match is ranked above the others - name: max_candidates in: query schema: + type: integer description: | Control the number of words that Typesense considers for typo and prefix searching. - type: integer - name: prioritize_token_position in: query schema: - description: | - Make Typesense prioritize documents where the query words appear earlier in the text. type: boolean default: false + description: | + Make Typesense prioritize documents where the query words appear earlier in the text. - name: prioritize_num_matching_fields in: query schema: - description: | - Make Typesense prioritize documents where the query words appear in more number of fields. type: boolean default: true + description: | + Make Typesense prioritize documents where the query words appear in more number of fields. - name: enable_typos_for_numerical_tokens in: query schema: - description: | - Make Typesense disable typos for numerical tokens. type: boolean default: true + description: | + Make Typesense disable typos for numerical tokens. - name: exhaustive_search in: query schema: + type: boolean description: | Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). 
- type: boolean - name: search_cutoff_ms in: query schema: + type: integer description: | Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. - type: integer - name: use_cache in: query schema: + type: boolean description: | Enable server side caching of search query results. By default, caching is disabled. - type: boolean - name: cache_ttl in: query schema: + type: integer description: | The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. - type: integer - name: min_len_1typo in: query schema: + type: integer description: | Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. - type: integer - name: min_len_2typo in: query schema: + type: integer description: | Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. - type: integer - name: vector_query in: query schema: + type: string description: | Vector query expression for fetching documents "closest" to a given query/document vector. - type: string - name: remote_embedding_timeout_ms in: query schema: + type: integer description: | Timeout (in milliseconds) for fetching remote embeddings. - type: integer - name: remote_embedding_num_tries in: query schema: + type: integer description: | Number of times to retry fetching remote embeddings. - type: integer - name: facet_strategy in: query schema: + type: string description: | Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). - type: string - name: stopwords in: query schema: + type: string description: | Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. 
- type: string - name: facet_return_parent in: query schema: + type: string description: | Comma separated string of nested facet fields whose parent object should be returned in facet response. - type: string - name: voice_query in: query schema: + type: string description: | The base64 encoded audio file in 16 khz 16-bit WAV format. - type: string - name: conversation in: query schema: + type: boolean description: | Enable conversational search. - type: boolean - name: conversation_model_id in: query schema: + type: string description: | The Id of Conversation Model to be used. - type: string - name: conversation_id in: query schema: + type: string description: | The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. - type: string responses: '200': description: Search results @@ -843,7 +845,7 @@ paths: application/json: schema: $ref: '#/components/schemas/ApiResponse' - x-rust-generic-parameter: ' serde::Deserialize<''de> + Serialize>' + x-rust-generic-parameter: 'D: for<''de> serde::Deserialize<''de> + Serialize' x-rust-return-type: models::SearchResult /synonym_sets: get: @@ -870,9 +872,9 @@ paths: operationId: retrieveSynonymSet parameters: - name: synonymSetName + required: true in: path description: The name of the synonym set to retrieve - required: true schema: type: string responses: @@ -896,18 +898,18 @@ paths: operationId: upsertSynonymSet parameters: - name: synonymSetName + required: true in: path description: The name of the synonym set to create/update - required: true schema: type: string requestBody: + required: true description: The synonym set to be created/updated content: application/json: schema: $ref: '#/components/schemas/SynonymSetCreateSchema' - required: true responses: '200': description: Synonym set successfully created/updated @@ -929,9 +931,9 @@ paths: operationId: deleteSynonymSet parameters: - name: synonymSetName + required: true in: path description: The name 
of the synonym set to delete - required: true schema: type: string responses: @@ -956,9 +958,9 @@ paths: operationId: retrieveSynonymSetItems parameters: - name: synonymSetName + required: true in: path description: The name of the synonym set to retrieve items for - required: true schema: type: string responses: @@ -985,15 +987,15 @@ paths: operationId: retrieveSynonymSetItem parameters: - name: synonymSetName + required: true in: path description: The name of the synonym set - required: true schema: type: string - name: itemId + required: true in: path description: The id of the synonym item to retrieve - required: true schema: type: string responses: @@ -1017,24 +1019,24 @@ paths: operationId: upsertSynonymSetItem parameters: - name: synonymSetName + required: true in: path description: The name of the synonym set - required: true schema: type: string - name: itemId + required: true in: path description: The id of the synonym item to upsert - required: true schema: type: string requestBody: + required: true description: The synonym item to be created/updated content: application/json: schema: $ref: '#/components/schemas/SynonymItemSchema' - required: true responses: '200': description: Synonym item successfully created/updated @@ -1056,15 +1058,15 @@ paths: operationId: deleteSynonymSetItem parameters: - name: synonymSetName + required: true in: path description: The name of the synonym set - required: true schema: type: string - name: itemId + required: true in: path description: The id of the synonym item to delete - required: true schema: type: string responses: @@ -1105,9 +1107,9 @@ paths: operationId: retrieveCurationSet parameters: - name: curationSetName + required: true in: path description: The name of the curation set to retrieve - required: true schema: type: string responses: @@ -1131,18 +1133,18 @@ paths: operationId: upsertCurationSet parameters: - name: curationSetName + required: true in: path description: The name of the curation set to 
create/update - required: true schema: type: string requestBody: + required: true description: The curation set to be created/updated content: application/json: schema: $ref: '#/components/schemas/CurationSetCreateSchema' - required: true responses: '200': description: Curation set successfully created/updated @@ -1164,9 +1166,9 @@ paths: operationId: deleteCurationSet parameters: - name: curationSetName + required: true in: path description: The name of the curation set to delete - required: true schema: type: string responses: @@ -1191,9 +1193,9 @@ paths: operationId: retrieveCurationSetItems parameters: - name: curationSetName + required: true in: path description: The name of the curation set to retrieve items for - required: true schema: type: string responses: @@ -1220,15 +1222,15 @@ paths: operationId: retrieveCurationSetItem parameters: - name: curationSetName + required: true in: path description: The name of the curation set - required: true schema: type: string - name: itemId + required: true in: path description: The id of the curation item to retrieve - required: true schema: type: string responses: @@ -1252,24 +1254,24 @@ paths: operationId: upsertCurationSetItem parameters: - name: curationSetName + required: true in: path description: The name of the curation set - required: true schema: type: string - name: itemId + required: true in: path description: The id of the curation item to upsert - required: true schema: type: string requestBody: + required: true description: The curation item to be created/updated content: application/json: schema: $ref: '#/components/schemas/CurationItemCreateSchema' - required: true responses: '200': description: Curation item successfully created/updated @@ -1291,15 +1293,15 @@ paths: operationId: deleteCurationSetItem parameters: - name: curationSetName + required: true in: path description: The name of the curation set - required: true schema: type: string - name: itemId + required: true in: path description: The id 
of the curation item to delete - required: true schema: type: string responses: @@ -1324,26 +1326,26 @@ paths: operationId: exportDocuments parameters: - name: collectionName + required: true in: path description: The name of the collection - required: true schema: type: string - name: filter_by in: query schema: - description: Filter conditions for refining your search results. Separate multiple conditions with &&. type: string + description: Filter conditions for refining your search results. Separate multiple conditions with &&. - name: include_fields in: query schema: - description: List of fields from the document to include in the search result type: string + description: List of fields from the document to include in the search result - name: exclude_fields in: query schema: - description: List of fields from the document to exclude in the search result type: string + description: List of fields from the document to exclude in the search result responses: '200': description: Exports all the documents in a given collection. @@ -1371,9 +1373,9 @@ paths: operationId: importDocuments parameters: - name: collectionName + required: true in: path description: The name of the collection - required: true schema: type: string - name: batch_size @@ -1402,13 +1404,13 @@ paths: schema: $ref: '#/components/schemas/DirtyValues' requestBody: + required: true description: The json array of documents or the JSONL file to import content: application/octet-stream: schema: type: string description: The JSONL file to import - required: true responses: '200': description: Result of the import operation. Each line of the response indicates the result of each document present in the request body (in the same order). If the import of a single document fails, it does not affect the other documents. If there is a failure, the response line will include a corresponding error message and as well as the actual document content. 
@@ -1442,15 +1444,15 @@ paths: operationId: getDocument parameters: - name: collectionName + required: true in: path description: The name of the collection to search for the document under - required: true schema: type: string - name: documentId + required: true in: path description: The Document ID - required: true schema: type: string responses: @@ -1475,15 +1477,15 @@ paths: operationId: updateDocument parameters: - name: collectionName + required: true in: path description: The name of the collection to search for the document under - required: true schema: type: string - name: documentId + required: true in: path description: The Document ID - required: true schema: type: string - name: dirty_values @@ -1492,6 +1494,7 @@ paths: schema: $ref: '#/components/schemas/DirtyValues' requestBody: + required: true description: The document object with fields to be updated content: application/json: @@ -1499,8 +1502,8 @@ paths: type: object description: Can be any key-value pair x-go-type: interface{} - required: true - x-rust-params-generic-parameter: + x-rust-has-borrowed-data: true + x-rust-params-generic-parameter: B x-rust-type: B responses: '200': @@ -1516,7 +1519,7 @@ paths: application/json: schema: $ref: '#/components/schemas/ApiResponse' - x-rust-generic-parameter: '' + x-rust-generic-parameter: 'B: Serialize' delete: tags: - documents @@ -1525,15 +1528,15 @@ paths: operationId: deleteDocument parameters: - name: collectionName + required: true in: path description: The name of the collection to search for the document under - required: true schema: type: string - name: documentId + required: true in: path description: The Document ID - required: true schema: type: string responses: @@ -1552,113 +1555,113 @@ paths: $ref: '#/components/schemas/ApiResponse' /conversations/models: get: + tags: + - conversations + summary: List all conversation models description: Retrieve all conversation models operationId: retrieveAllConversationModels responses: '200': + 
description: List of all conversation models content: application/json: schema: + type: array items: $ref: '#/components/schemas/ConversationModelSchema' - type: array x-go-type: '[]*ConversationModelSchema' - description: List of all conversation models - summary: List all conversation models + post: tags: - conversations - post: summary: Create a conversation model description: Create a Conversation Model operationId: createConversationModel requestBody: + required: true content: application/json: schema: $ref: '#/components/schemas/ConversationModelCreateSchema' - required: true responses: '201': + description: Created Conversation Model content: application/json: schema: $ref: '#/components/schemas/ConversationModelSchema' - description: Created Conversation Model '400': + description: Bad request, see error message for details content: application/json: schema: $ref: '#/components/schemas/ApiResponse' - description: Bad request, see error message for details - tags: - - conversations /conversations/models/{modelId}: get: + tags: + - conversations + summary: Retrieve a conversation model description: Retrieve a conversation model operationId: retrieveConversationModel parameters: - name: modelId + required: true in: path description: The id of the conversation model to retrieve - required: true schema: type: string responses: '200': + description: A conversation model content: application/json: schema: $ref: '#/components/schemas/ConversationModelSchema' - description: A conversation model - summary: Retrieve a conversation model + put: tags: - conversations - put: + summary: Update a conversation model description: Update a conversation model operationId: updateConversationModel - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ConversationModelUpdateSchema' - required: true parameters: - name: modelId + required: true in: path description: The id of the conversation model to update - required: true schema: type: string + 
requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ConversationModelUpdateSchema' responses: '200': + description: The conversation model was successfully updated content: application/json: schema: $ref: '#/components/schemas/ConversationModelSchema' - description: The conversation model was successfully updated - summary: Update a conversation model + delete: tags: - conversations - delete: + summary: Delete a conversation model description: Delete a conversation model operationId: deleteConversationModel parameters: - name: modelId + required: true in: path description: The id of the conversation model to delete - required: true schema: type: string responses: '200': + description: The conversation model was successfully deleted content: application/json: schema: $ref: '#/components/schemas/ConversationModelSchema' - description: The conversation model was successfully deleted - summary: Delete a conversation model - tags: - - conversations /keys: get: tags: @@ -1712,9 +1715,9 @@ paths: operationId: getKey parameters: - name: keyId + required: true in: path description: The ID of the key to retrieve - required: true schema: type: integer format: int64 @@ -1738,9 +1741,9 @@ paths: operationId: deleteKey parameters: - name: keyId + required: true in: path description: The ID of the key to delete - required: true schema: type: integer format: int64 @@ -1786,9 +1789,9 @@ paths: operationId: upsertAlias parameters: - name: aliasName + required: true in: path description: The name of the alias to create/update - required: true schema: type: string requestBody: @@ -1824,9 +1827,9 @@ paths: operationId: getAlias parameters: - name: aliasName + required: true in: path description: The name of the alias to retrieve - required: true schema: type: string responses: @@ -1849,9 +1852,9 @@ paths: operationId: deleteAlias parameters: - name: aliasName + required: true in: path description: The name of the alias to delete - 
required: true schema: type: string responses: @@ -1924,9 +1927,9 @@ paths: operationId: takeSnapshot parameters: - name: snapshot_path + required: true in: query description: The directory on the server where the snapshot should be saved. - required: true schema: type: string responses: @@ -1990,13 +1993,14 @@ paths: application/json: schema: type: object - properties: - log-slow-requests-time-ms: - type: integer required: - log-slow-requests-time-ms example: | {"log-slow-requests-time-ms": 2000} + properties: + log-slow-requests-time-ms: + type: integer + x-rust-has-borrowed-data: true responses: '200': description: Compacting the on-disk database succeeded. @@ -2006,167 +2010,167 @@ paths: $ref: '#/components/schemas/SuccessStatus' /multi_search: post: - operationId: multiSearch tags: - documents summary: send multiple search requests in a single HTTP request description: This is especially useful to avoid round-trip network latencies incurred otherwise if each of these requests are sent in separate HTTP requests. You can also use this feature to do a federated search across multiple collections in a single HTTP request. + operationId: multiSearch parameters: - name: q in: query schema: - description: The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. type: string + description: The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. - name: query_by in: query schema: - description: A list of `string` fields that should be queried against. Multiple fields are separated with a comma. type: string + description: A list of `string` fields that should be queried against. Multiple fields are separated with a comma. - name: query_by_weights in: query schema: - description: The relative weight to give each `query_by` field when ranking results. 
This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. type: string + description: The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. - name: text_match_type in: query schema: - description: In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. type: string + description: In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. - name: prefix in: query schema: - description: Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. type: string + description: Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. - name: infix in: query schema: - description: If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results type: string + description: If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. 
This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results - name: max_extra_prefix in: query schema: - description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. type: integer + description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. - name: max_extra_suffix in: query schema: - description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. type: integer + description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. - name: filter_by in: query schema: - description: Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. 
type: string + description: Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. example: 'num_employees:>100 && country: [USA, UK]' - name: sort_by in: query schema: - description: A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` type: string + description: A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` - name: facet_by in: query schema: - description: A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. type: string + description: A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. - name: max_facet_values in: query schema: - description: Maximum number of facet values to be returned. type: integer + description: Maximum number of facet values to be returned. - name: facet_query in: query schema: - description: Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix "shoe". type: string + description: Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. 
For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix "shoe". - name: num_typos in: query schema: + type: string description: | The number of typographical errors (1 or 2) that would be tolerated. Default: 2 - type: string - name: page in: query schema: - description: Results from this specific page number would be fetched. type: integer + description: Results from this specific page number would be fetched. - name: per_page in: query schema: - description: 'Number of results to fetch per page. Default: 10' type: integer + description: 'Number of results to fetch per page. Default: 10' - name: limit in: query schema: + type: integer description: | Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. - type: integer - name: offset in: query schema: - description: Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. type: integer + description: Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. - name: group_by in: query schema: - description: You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. type: string + description: You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. - name: group_limit in: query schema: + type: integer description: | Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. 
Default: 3 - type: integer - name: group_missing_values in: query schema: + type: boolean description: | Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true - type: boolean - name: include_fields in: query schema: - description: List of fields from the document to include in the search result type: string + description: List of fields from the document to include in the search result - name: exclude_fields in: query schema: - description: List of fields from the document to exclude in the search result type: string + description: List of fields from the document to exclude in the search result - name: highlight_full_fields in: query schema: - description: List of fields which should be highlighted fully without snippeting type: string + description: List of fields which should be highlighted fully without snippeting - name: highlight_affix_num_tokens in: query schema: + type: integer description: | The number of tokens that should surround the highlighted text on each side. Default: 4 - type: integer - name: highlight_start_tag in: query schema: + type: string description: | The start tag used for the highlighted snippets. Default: `` - type: string - name: highlight_end_tag in: query schema: + type: string description: | The end tag used for the highlighted snippets. Default: `` - type: string - name: snippet_threshold in: query schema: + type: integer description: | Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 - type: integer - name: drop_tokens_threshold in: query schema: + type: integer description: | If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. 
Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 - type: integer - name: drop_tokens_mode in: query schema: @@ -2174,9 +2178,9 @@ paths: - name: typo_tokens_threshold in: query schema: + type: integer description: | If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 - type: integer - name: enable_typos_for_alpha_numerical_tokens in: query schema: @@ -2198,10 +2202,10 @@ paths: - name: enable_analytics in: query schema: - description: | - Flag for enabling/disabling analytics aggregation for specific search queries (for e.g. those originating from a test script). type: boolean default: true + description: | + Flag for enabling/disabling analytics aggregation for specific search queries (for e.g. those originating from a test script). - name: synonym_prefix in: query schema: @@ -2217,173 +2221,173 @@ paths: - name: pinned_hits in: query schema: + type: string description: | A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. - type: string - name: hidden_hits in: query schema: + type: string description: | A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. 
- type: string - name: override_tags in: query schema: - description: Comma separated list of tags to trigger the curations rules that match the tags. type: string + description: Comma separated list of tags to trigger the curations rules that match the tags. - name: highlight_fields in: query schema: + type: string description: | A list of custom fields that must be highlighted even if you don't query for them - type: string - name: pre_segmented_query in: query schema: + type: boolean + default: false description: | You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same - type: boolean - default: false - name: preset in: query schema: + type: string description: | Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. - type: string - name: enable_overrides in: query schema: - description: | - If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false type: boolean default: false + description: | + If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false - name: prioritize_exact_match in: query schema: - description: | - Set this parameter to true to ensure that an exact match is ranked above the others type: boolean default: true + description: | + Set this parameter to true to ensure that an exact match is ranked above the others - name: prioritize_token_position in: query schema: - description: | - Make Typesense prioritize documents where the query words appear earlier in the text. type: boolean default: false + description: | + Make Typesense prioritize documents where the query words appear earlier in the text. 
- name: prioritize_num_matching_fields in: query schema: - description: | - Make Typesense prioritize documents where the query words appear in more number of fields. type: boolean default: true + description: | + Make Typesense prioritize documents where the query words appear in more number of fields. - name: enable_typos_for_numerical_tokens in: query schema: - description: | - Make Typesense disable typos for numerical tokens. type: boolean default: true + description: | + Make Typesense disable typos for numerical tokens. - name: exhaustive_search in: query schema: + type: boolean description: | Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). - type: boolean - name: search_cutoff_ms in: query schema: + type: integer description: | Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. - type: integer - name: use_cache in: query schema: + type: boolean description: | Enable server side caching of search query results. By default, caching is disabled. - type: boolean - name: cache_ttl in: query schema: + type: integer description: | The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. - type: integer - name: min_len_1typo in: query schema: + type: integer description: | Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. - type: integer - name: min_len_2typo in: query schema: + type: integer description: | Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. 
- type: integer - name: vector_query in: query schema: + type: string description: | Vector query expression for fetching documents "closest" to a given query/document vector. - type: string - name: remote_embedding_timeout_ms in: query schema: + type: integer description: | Timeout (in milliseconds) for fetching remote embeddings. - type: integer - name: remote_embedding_num_tries in: query schema: + type: integer description: | Number of times to retry fetching remote embeddings. - type: integer - name: facet_strategy in: query schema: + type: string description: | Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). - type: string - name: stopwords in: query schema: + type: string description: | Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. - type: string - name: facet_return_parent in: query schema: + type: string description: | Comma separated string of nested facet fields whose parent object should be returned in facet response. - type: string - name: voice_query in: query schema: + type: string description: | The base64 encoded audio file in 16 khz 16-bit WAV format. - type: string - name: conversation in: query schema: + type: boolean description: | Enable conversational search. - type: boolean - name: conversation_model_id in: query schema: + type: string description: | The Id of Conversation Model to be used. - type: string - name: conversation_id in: query schema: + type: string description: | The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. - type: string requestBody: content: application/json: @@ -2411,12 +2415,12 @@ paths: description: Submit a single analytics event. The event must correspond to an existing analytics rule by name. 
operationId: createAnalyticsEvent requestBody: + required: true description: The analytics event to be created content: application/json: schema: $ref: '#/components/schemas/AnalyticsEvent' - required: true responses: '200': description: Analytics event successfully created @@ -2438,20 +2442,20 @@ paths: operationId: getAnalyticsEvents parameters: - name: user_id - in: query required: true + in: query schema: type: string - name: name + required: true in: query description: Analytics rule name - required: true schema: type: string - name: n + required: true in: query description: Number of events to return (max 1000) - required: true schema: type: integer responses: @@ -2503,6 +2507,7 @@ paths: description: Create one or more analytics rules. You can send a single rule object or an array of rule objects. operationId: createAnalyticsRule requestBody: + required: true description: The analytics rule(s) to be created content: application/json: @@ -2512,7 +2517,7 @@ paths: - type: array items: $ref: '#/components/schemas/AnalyticsRuleCreate' - required: true + x-rust-has-borrowed-data: true responses: '200': description: Analytics rule(s) successfully created @@ -2542,12 +2547,12 @@ paths: description: Retrieve all analytics rules. Use the optional rule_tag filter to narrow down results. operationId: retrieveAnalyticsRules parameters: - - in: query - name: rule_tag - schema: - type: string + - name: rule_tag required: false + in: query description: Filter rules by rule_tag + schema: + type: string responses: '200': description: Analytics rules fetched @@ -2565,19 +2570,19 @@ paths: description: Upserts an analytics rule with the given name. 
operationId: upsertAnalyticsRule parameters: - - in: path - name: ruleName + - name: ruleName + required: true + in: path description: The name of the analytics rule to upsert schema: type: string - required: true requestBody: + required: true description: The Analytics rule to be upserted content: application/json: schema: $ref: '#/components/schemas/AnalyticsRuleUpdate' - required: true responses: '200': description: Analytics rule successfully upserted @@ -2598,12 +2603,12 @@ paths: description: Retrieve the details of an analytics rule, given it's name operationId: retrieveAnalyticsRule parameters: - - in: path - name: ruleName + - name: ruleName + required: true + in: path description: The name of the analytics rule to retrieve schema: type: string - required: true responses: '200': description: Analytics rule fetched @@ -2624,12 +2629,12 @@ paths: description: Permanently deletes an analytics rule, given it's name operationId: deleteAnalyticsRule parameters: - - in: path - name: ruleName + - name: ruleName + required: true + in: path description: The name of the analytics rule to delete schema: type: string - required: true responses: '200': description: Analytics rule deleted @@ -2693,20 +2698,20 @@ paths: description: When an analytics rule is created, we give it a name and describe the type, the source collections and the destination collection. operationId: upsertStopwordsSet parameters: - - in: path - name: setId + - name: setId + required: true + in: path description: The ID of the stopwords set to upsert. schema: type: string - required: true example: countries requestBody: + required: true description: The stopwords set to upsert. content: application/json: schema: $ref: '#/components/schemas/StopwordsSetUpsertSchema' - required: true responses: '200': description: Stopwords set successfully upserted. @@ -2727,12 +2732,12 @@ paths: description: Retrieve the details of a stopwords set, given it's name. 
operationId: retrieveStopwordsSet parameters: - - in: path - name: setId + - name: setId + required: true + in: path description: The ID of the stopwords set to retrieve. schema: type: string - required: true example: countries responses: '200': @@ -2754,12 +2759,12 @@ paths: description: Permanently deletes a stopwords set, given it's name. operationId: deleteStopwordsSet parameters: - - in: path - name: setId + - name: setId + required: true + in: path description: The ID of the stopwords set to delete. schema: type: string - required: true example: countries responses: '200': @@ -2768,13 +2773,13 @@ paths: application/json: schema: type: object - properties: - id: - type: string required: - id example: | {"id": "countries"} + properties: + id: + type: string '404': description: Stopwords set not found. content: @@ -2803,12 +2808,12 @@ paths: description: Retrieve the details of a preset, given it's name. operationId: retrievePreset parameters: - - in: path - name: presetId + - name: presetId + required: true + in: path description: The ID of the preset to retrieve. schema: type: string - required: true example: listing_view responses: '200': @@ -2830,20 +2835,20 @@ paths: description: Create or update an existing preset. operationId: upsertPreset parameters: - - in: path - name: presetId + - name: presetId + required: true + in: path description: The name of the preset set to upsert. schema: type: string - required: true example: listing_view requestBody: + required: true description: The stopwords set to upsert. content: application/json: schema: $ref: '#/components/schemas/PresetUpsertSchema' - required: true responses: '200': description: Preset successfully upserted. @@ -2864,12 +2869,12 @@ paths: description: Permanently deletes a preset, given it's name. operationId: deletePreset parameters: - - in: path - name: presetId + - name: presetId + required: true + in: path description: The ID of the preset to delete. 
schema: type: string - required: true example: listing_view responses: '200': @@ -2901,11 +2906,11 @@ paths: properties: dictionaries: type: array - items: - type: string example: - irregular-plurals - company-terms + items: + type: string /stemming/dictionaries/{dictionaryId}: get: tags: @@ -2915,9 +2920,9 @@ paths: operationId: getStemmingDictionary parameters: - name: dictionaryId + required: true in: path description: The ID of the dictionary to retrieve - required: true schema: type: string example: irregular-plurals @@ -2943,15 +2948,15 @@ paths: operationId: importStemmingDictionary parameters: - name: id + required: true in: query description: The ID to assign to the dictionary - required: true schema: type: string example: irregular-plurals requestBody: - description: The JSONL file containing word mappings required: true + description: The JSONL file containing word mappings content: application/json: schema: @@ -2999,12 +3004,12 @@ paths: description: Create a new NL search model. 
operationId: createNLSearchModel requestBody: + required: true description: The NL search model to be created content: application/json: schema: $ref: '#/components/schemas/NLSearchModelCreateSchema' - required: true responses: '201': description: NL search model successfully created @@ -3027,9 +3032,9 @@ paths: operationId: retrieveNLSearchModel parameters: - name: modelId + required: true in: path description: The ID of the NL search model to retrieve - required: true schema: type: string responses: @@ -3053,18 +3058,18 @@ paths: operationId: updateNLSearchModel parameters: - name: modelId + required: true in: path description: The ID of the NL search model to update - required: true schema: type: string requestBody: + required: true description: The NL search model fields to update content: application/json: schema: $ref: '#/components/schemas/NLSearchModelUpdateSchema' - required: true responses: '200': description: NL search model successfully updated @@ -3092,9 +3097,9 @@ paths: operationId: deleteNLSearchModel parameters: - name: modelId + required: true in: path description: The ID of the NL search model to delete - required: true schema: type: string responses: @@ -3113,10 +3118,10 @@ paths: components: schemas: CollectionSchema: + type: object required: - name - fields - type: object properties: name: type: string @@ -3139,18 +3144,18 @@ components: $ref: '#/components/schemas/Field' default_sorting_field: type: string + default: '' description: The name of an int32 / float field that determines the order in which the search results are ranked when a sort_by clause is not provided during searching. This field must indicate some kind of popularity. example: num_employees - default: '' token_separators: type: array + default: [] description: | List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. 
items: type: string minLength: 1 maxLength: 1 - default: [] synonym_sets: type: array description: List of synonym set names to associate with this collection @@ -3159,29 +3164,31 @@ components: example: synonym_set_1 enable_nested_fields: type: boolean - description: Enables experimental support at a collection level for nested object or object array fields. This field is only available if the Typesense server is version `0.24.0.rcn34` or later. default: false + description: Enables experimental support at a collection level for nested object or object array fields. This field is only available if the Typesense server is version `0.24.0.rcn34` or later. example: true symbols_to_index: type: array + default: [] description: | List of symbols or special characters to be indexed. items: type: string minLength: 1 maxLength: 1 - default: [] voice_query_model: $ref: '#/components/schemas/VoiceQueryModelCollectionConfig' metadata: type: object description: | Optional details about the collection, e.g., when it was created, who created it etc. + x-rust-has-borrowed-data: true x-rust-builder: true + x-rust-has-borrowed-data: true CollectionUpdateSchema: + type: object required: - fields - type: object properties: fields: type: array @@ -3208,6 +3215,8 @@ components: type: object description: | Optional details about the collection, e.g., when it was created, who created it etc. 
+ x-rust-has-borrowed-data: true + x-rust-has-borrowed-data: true CollectionResponse: allOf: - $ref: '#/components/schemas/CollectionSchema' @@ -3227,10 +3236,10 @@ components: format: int64 readOnly: true Field: + type: object required: - name - type - type: object properties: name: type: string @@ -3246,8 +3255,8 @@ components: example: false index: type: boolean - example: true default: true + example: true locale: type: string example: el @@ -3256,8 +3265,8 @@ components: example: true infix: type: boolean - example: true default: false + example: true reference: type: string description: | @@ -3290,22 +3299,22 @@ components: example: irregular-plurals token_separators: type: array + default: [] description: | List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. items: type: string minLength: 1 maxLength: 1 - default: [] symbols_to_index: type: array + default: [] description: | List of symbols or special characters to be indexed. 
items: type: string minLength: 1 maxLength: 1 - default: [] embed: type: object required: @@ -3358,6 +3367,7 @@ components: collection_name: type: string description: Name of the collection you wish to map the alias to + x-rust-has-borrowed-data: true CollectionAlias: type: object required: @@ -3366,8 +3376,8 @@ components: properties: name: type: string - readOnly: true description: Name of the collection alias + readOnly: true collection_name: type: string description: Name of the collection the alias mapped to @@ -3378,9 +3388,9 @@ components: properties: aliases: type: array - x-go-type: '[]*CollectionAlias' items: $ref: '#/components/schemas/CollectionAlias' + x-go-type: '[]*CollectionAlias' SearchResult: type: object properties: @@ -3429,7 +3439,7 @@ components: type: object description: Custom JSON object that can be returned in the search response additionalProperties: true - x-rust-generic-parameter: + x-rust-generic-parameter: D SearchRequestParams: type: object required: @@ -3483,9 +3493,20 @@ components: items: $ref: '#/components/schemas/SearchResultHit' x-rust-type: Vec> - x-rust-generic-parameter: + x-rust-generic-parameter: D SearchResultHit: type: object + example: + highlights: + company_name: + field: company_name + snippet: Stark Industries + document: + id: '124' + company_name: Stark Industries + num_employees: 5215 + country: USA + text_match: 1234556 properties: highlights: type: array @@ -3531,31 +3552,20 @@ components: type: integer vector_distance: type: number - format: float description: Distance between the query vector and matching document's vector value + format: float hybrid_search_info: type: object description: Information about hybrid search scoring properties: rank_fusion_score: type: number - format: float description: Combined score from rank fusion of text and vector search + format: float search_index: type: integer description: Returned only for union query response. 
Indicates the index of the query which this document matched to. - example: - highlights: - company_name: - field: company_name - snippet: Stark Industries - document: - id: '124' - company_name: Stark Industries - num_employees: 5215 - country: USA - text_match: 1234556 - x-rust-generic-parameter: + x-rust-generic-parameter: D SearchHighlight: type: object properties: @@ -3643,9 +3653,9 @@ components: properties: synonyms: type: array - x-go-type: '[]*SearchSynonym' items: $ref: '#/components/schemas/SearchSynonym' + x-go-type: '[]*SearchSynonym' HealthStatus: type: object required: @@ -3701,6 +3711,7 @@ components: expires_at: type: integer format: int64 + x-rust-has-borrowed-data: true ApiKey: allOf: - $ref: '#/components/schemas/ApiKeySchema' @@ -3720,8 +3731,8 @@ components: properties: id: type: integer - format: int64 description: The id of the API key that was deleted + format: int64 ApiKeysResponse: type: object required: @@ -3729,9 +3740,9 @@ components: properties: keys: type: array - x-go-type: '[]*ApiKey' items: $ref: '#/components/schemas/ApiKey' + x-go-type: '[]*ApiKey' MultiSearchResult: type: object required: @@ -3744,7 +3755,7 @@ components: x-rust-type: Vec> conversation: $ref: '#/components/schemas/SearchResultConversation' - x-rust-generic-parameter: + x-rust-generic-parameter: D MultiSearchResultItem: allOf: - $ref: '#/components/schemas/SearchResult' @@ -3757,137 +3768,137 @@ components: error: type: string description: Error description - x-rust-generic-parameter: + x-rust-generic-parameter: D SearchParameters: type: object properties: q: - description: The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. type: string + description: The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. 
query_by: - description: A list of `string` fields that should be queried against. Multiple fields are separated with a comma. type: string + description: A list of `string` fields that should be queried against. Multiple fields are separated with a comma. nl_query: - description: Whether to use natural language processing to parse the query. type: boolean + description: Whether to use natural language processing to parse the query. nl_model_id: - description: The ID of the natural language model to use. type: string + description: The ID of the natural language model to use. query_by_weights: - description: The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. type: string + description: The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. text_match_type: - description: In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. type: string + description: In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. prefix: - description: Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. type: string + description: Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. 
infix: - description: If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results type: string + description: If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results max_extra_prefix: - description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. type: integer - max_extra_suffix: description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. + max_extra_suffix: type: integer + description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". 
By default, any number of prefixes/suffixes can be present for a match. filter_by: - description: Filter conditions for refining your open api validator search results. Separate multiple conditions with &&. type: string + description: Filter conditions for refining your open api validator search results. Separate multiple conditions with &&. example: 'num_employees:>100 && country: [USA, UK]' max_filter_by_candidates: - description: Controls the number of similar words that Typesense considers during fuzzy search on filter_by values. Useful for controlling prefix matches like company_name:Acm*. type: integer + description: Controls the number of similar words that Typesense considers during fuzzy search on filter_by values. Useful for controlling prefix matches like company_name:Acm*. sort_by: - description: A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` type: string + description: A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` example: num_employees:desc facet_by: - description: A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. type: string + description: A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. max_facet_values: - description: Maximum number of facet values to be returned. 
type: integer + description: Maximum number of facet values to be returned. facet_query: - description: Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix "shoe". type: string + description: Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix "shoe". num_typos: + type: string description: | The number of typographical errors (1 or 2) that would be tolerated. Default: 2 - type: string page: - description: Results from this specific page number would be fetched. type: integer + description: Results from this specific page number would be fetched. per_page: - description: 'Number of results to fetch per page. Default: 10' type: integer + description: 'Number of results to fetch per page. Default: 10' limit: + type: integer description: | Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. - type: integer offset: - description: Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. type: integer + description: Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. group_by: - description: You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. type: string + description: You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. 
group_limit: + type: integer description: | Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 - type: integer group_missing_values: + type: boolean description: | Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true - type: boolean include_fields: - description: List of fields from the document to include in the search result type: string + description: List of fields from the document to include in the search result exclude_fields: - description: List of fields from the document to exclude in the search result type: string + description: List of fields from the document to exclude in the search result highlight_full_fields: - description: List of fields which should be highlighted fully without snippeting type: string + description: List of fields which should be highlighted fully without snippeting highlight_affix_num_tokens: + type: integer description: | The number of tokens that should surround the highlighted text on each side. Default: 4 - type: integer highlight_start_tag: + type: string description: | The start tag used for the highlighted snippets. Default: `` - type: string highlight_end_tag: + type: string description: | The end tag used for the highlighted snippets. Default: `` - type: string enable_highlight_v1: + type: boolean + default: true description: | Flag for enabling/disabling the deprecated, old highlight structure in the response. Default: true + enable_analytics: type: boolean default: true - enable_analytics: description: | Flag for enabling/disabling analytics aggregation for specific search queries (for e.g. those originating from a test script). 
- type: boolean - default: true snippet_threshold: + type: integer description: | Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 - type: integer synonym_sets: type: string description: List of synonym set names to associate with this search query example: synonym_set_1,synonym_set_2 drop_tokens_threshold: + type: integer description: | If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 - type: integer drop_tokens_mode: $ref: '#/components/schemas/DropTokensMode' typo_tokens_threshold: + type: integer description: | If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 - type: integer enable_typos_for_alpha_numerical_tokens: type: boolean description: | @@ -3909,237 +3920,238 @@ components: description: | Allow synonym resolution on typo-corrected words in the query. Default: 0 pinned_hits: + type: string description: | A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. - type: string hidden_hits: + type: string description: | A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. 
You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. - type: string override_tags: - description: Comma separated list of tags to trigger the curations rules that match the tags. type: string + description: Comma separated list of tags to trigger the curations rules that match the tags. highlight_fields: + type: string description: | A list of custom fields that must be highlighted even if you don't query for them - type: string split_join_tokens: + type: string description: | Treat space as typo: search for q=basket ball if q=basketball is not found or vice-versa. Splitting/joining of tokens will only be attempted if the original query produces no results. To always trigger this behavior, set value to `always``. To disable, set value to `off`. Default is `fallback`. - type: string pre_segmented_query: + type: boolean description: | You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same - type: boolean preset: + type: string description: | Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. 
- type: string enable_overrides: - description: | - If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false type: boolean default: false - prioritize_exact_match: description: | - Set this parameter to true to ensure that an exact match is ranked above the others + If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false + prioritize_exact_match: type: boolean default: true + description: | + Set this parameter to true to ensure that an exact match is ranked above the others max_candidates: + type: integer description: | Control the number of words that Typesense considers for typo and prefix searching. - type: integer prioritize_token_position: - description: | - Make Typesense prioritize documents where the query words appear earlier in the text. type: boolean default: false + description: | + Make Typesense prioritize documents where the query words appear earlier in the text. prioritize_num_matching_fields: + type: boolean + default: true description: | Make Typesense prioritize documents where the query words appear in more number of fields. + enable_typos_for_numerical_tokens: type: boolean default: true - enable_typos_for_numerical_tokens: description: | Make Typesense disable typos for numerical tokens. - type: boolean - default: true exhaustive_search: + type: boolean description: | Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). - type: boolean search_cutoff_ms: + type: integer description: | Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. 
- type: integer use_cache: + type: boolean description: | Enable server side caching of search query results. By default, caching is disabled. - type: boolean cache_ttl: + type: integer description: | The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. - type: integer min_len_1typo: + type: integer description: | Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. - type: integer min_len_2typo: + type: integer description: | Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. - type: integer vector_query: + type: string description: | Vector query expression for fetching documents "closest" to a given query/document vector. - type: string remote_embedding_timeout_ms: + type: integer description: | Timeout (in milliseconds) for fetching remote embeddings. - type: integer remote_embedding_num_tries: + type: integer description: | Number of times to retry fetching remote embeddings. - type: integer facet_strategy: + type: string description: | Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). - type: string stopwords: + type: string description: | Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. - type: string facet_return_parent: + type: string description: | Comma separated string of nested facet fields whose parent object should be returned in facet response. - type: string voice_query: + type: string description: | The base64 encoded audio file in 16 khz 16-bit WAV format. - type: string conversation: + type: boolean description: | Enable conversational search. - type: boolean conversation_model_id: + type: string description: | The Id of Conversation Model to be used. 
- type: string conversation_id: + type: string description: | The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. - type: string x-rust-builder: true + x-rust-has-borrowed-data: true MultiSearchParameters: + type: object description: | Parameters for the multi search API. - type: object properties: q: - description: The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. type: string + description: The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. query_by: - description: A list of `string` fields that should be queried against. Multiple fields are separated with a comma. type: string + description: A list of `string` fields that should be queried against. Multiple fields are separated with a comma. query_by_weights: - description: The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. type: string + description: The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. text_match_type: - description: In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. type: string + description: In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. 
prefix: - description: Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. type: string + description: Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. infix: - description: If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results type: string + description: If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results max_extra_prefix: - description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. type: integer - max_extra_suffix: description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. 
For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. + max_extra_suffix: type: integer + description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. filter_by: - description: Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. type: string + description: Filter conditions for refining your search results. Separate multiple conditions with &&. example: 'num_employees:>100 && country: [USA, UK]' sort_by: - description: A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` type: string + description: A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` facet_by: - description: A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. type: string + description: A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. max_facet_values: - description: Maximum number of facet values to be returned. 
type: integer + description: Maximum number of facet values to be returned. facet_query: - description: Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix "shoe". type: string + description: Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix "shoe". num_typos: + type: string description: | The number of typographical errors (1 or 2) that would be tolerated. Default: 2 - type: string page: - description: Results from this specific page number would be fetched. type: integer + description: Results from this specific page number would be fetched. per_page: - description: 'Number of results to fetch per page. Default: 10' type: integer + description: 'Number of results to fetch per page. Default: 10' limit: + type: integer description: | Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. - type: integer offset: - description: Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. type: integer + description: Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. group_by: - description: You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. type: string + description: You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. 
group_limit: + type: integer description: | Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 - type: integer group_missing_values: + type: boolean description: | Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true - type: boolean include_fields: - description: List of fields from the document to include in the search result type: string + description: List of fields from the document to include in the search result exclude_fields: - description: List of fields from the document to exclude in the search result type: string + description: List of fields from the document to exclude in the search result highlight_full_fields: - description: List of fields which should be highlighted fully without snippeting type: string + description: List of fields which should be highlighted fully without snippeting highlight_affix_num_tokens: + type: integer description: | The number of tokens that should surround the highlighted text on each side. Default: 4 - type: integer highlight_start_tag: + type: string description: | The start tag used for the highlighted snippets. Default: `` - type: string highlight_end_tag: + type: string description: | The end tag used for the highlighted snippets. Default: `` - type: string snippet_threshold: + type: integer description: | Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 - type: integer drop_tokens_threshold: + type: integer description: | If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. 
Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 - type: integer drop_tokens_mode: $ref: '#/components/schemas/DropTokensMode' typo_tokens_threshold: + type: integer description: | If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 - type: integer enable_typos_for_alpha_numerical_tokens: type: boolean description: | @@ -4153,10 +4165,10 @@ components: description: | If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true enable_analytics: - description: | - Flag for enabling/disabling analytics aggregation for specific search queries (for e.g. those originating from a test script). type: boolean default: true + description: | + Flag for enabling/disabling analytics aggregation for specific search queries (for e.g. those originating from a test script). synonym_prefix: type: boolean description: | @@ -4166,122 +4178,123 @@ components: description: | Allow synonym resolution on typo-corrected words in the query. Default: 0 pinned_hits: + type: string description: | A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. - type: string hidden_hits: + type: string description: | A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. 
You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. - type: string override_tags: - description: Comma separated list of tags to trigger the curations rules that match the tags. type: string + description: Comma separated list of tags to trigger the curations rules that match the tags. highlight_fields: + type: string description: | A list of custom fields that must be highlighted even if you don't query for them - type: string pre_segmented_query: + type: boolean + default: false description: | You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same - type: boolean - default: false preset: + type: string description: | Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. - type: string enable_overrides: - description: | - If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false type: boolean default: false - prioritize_exact_match: description: | - Set this parameter to true to ensure that an exact match is ranked above the others + If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false + prioritize_exact_match: type: boolean default: true - prioritize_token_position: description: | - Make Typesense prioritize documents where the query words appear earlier in the text. + Set this parameter to true to ensure that an exact match is ranked above the others + prioritize_token_position: type: boolean default: false + description: | + Make Typesense prioritize documents where the query words appear earlier in the text. 
prioritize_num_matching_fields: + type: boolean + default: true description: | Make Typesense prioritize documents where the query words appear in more number of fields. + enable_typos_for_numerical_tokens: type: boolean default: true - enable_typos_for_numerical_tokens: description: | Make Typesense disable typos for numerical tokens. - type: boolean - default: true exhaustive_search: + type: boolean description: | Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). - type: boolean search_cutoff_ms: + type: integer description: | Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. - type: integer use_cache: + type: boolean description: | Enable server side caching of search query results. By default, caching is disabled. - type: boolean cache_ttl: + type: integer description: | The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. - type: integer min_len_1typo: + type: integer description: | Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. - type: integer min_len_2typo: + type: integer description: | Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. - type: integer vector_query: + type: string description: | Vector query expression for fetching documents "closest" to a given query/document vector. - type: string remote_embedding_timeout_ms: + type: integer description: | Timeout (in milliseconds) for fetching remote embeddings. 
- type: integer remote_embedding_num_tries: + type: integer description: | Number of times to retry fetching remote embeddings. - type: integer facet_strategy: + type: string description: | Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). - type: string stopwords: + type: string description: | Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. - type: string facet_return_parent: + type: string description: | Comma separated string of nested facet fields whose parent object should be returned in facet response. - type: string voice_query: + type: string description: | The base64 encoded audio file in 16 khz 16-bit WAV format. - type: string conversation: + type: boolean description: | Enable conversational search. - type: boolean conversation_model_id: + type: string description: | The Id of Conversation Model to be used. - type: string conversation_id: + type: string description: | The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. - type: string x-rust-builder: true + x-rust-has-borrowed-data: true MultiSearchSearchesParameter: type: object required: @@ -4295,6 +4308,7 @@ components: type: array items: $ref: '#/components/schemas/MultiSearchCollectionParameters' + x-rust-has-borrowed-data: true MultiSearchCollectionParameters: allOf: - $ref: '#/components/schemas/MultiSearchParameters' @@ -4313,6 +4327,7 @@ components: When true, computes both text match and vector distance scores for all matches in hybrid search. Documents found only through keyword search will get a vector distance score, and documents found only through vector search will get a text match score. 
default: false x-rust-builder: true + x-rust-has-borrowed-data: true FacetCounts: type: object properties: @@ -4384,6 +4399,8 @@ components: type: string analytics_tag: type: string + x-rust-has-borrowed-data: true + x-rust-has-borrowed-data: true AnalyticsEventsResponse: type: object required: @@ -4455,6 +4472,8 @@ components: type: string weight: type: integer + x-rust-has-borrowed-data: true + x-rust-has-borrowed-data: true AnalyticsRuleUpdate: type: object description: Fields allowed to update on an analytics rule @@ -4482,6 +4501,8 @@ components: type: string weight: type: integer + x-rust-has-borrowed-data: true + x-rust-has-borrowed-data: true AnalyticsRule: allOf: - $ref: '#/components/schemas/AnalyticsRuleCreate' @@ -4547,6 +4568,10 @@ components: format: double StopwordsSetUpsertSchema: type: object + required: + - stopwords + example: | + {"stopwords": ["Germany", "France", "Italy"], "locale": "en"} properties: stopwords: type: array @@ -4554,12 +4579,14 @@ components: type: string locale: type: string + x-rust-has-borrowed-data: true + StopwordsSetSchema: + type: object required: + - id - stopwords example: | - {"stopwords": ["Germany", "France", "Italy"], "locale": "en"} - StopwordsSetSchema: - type: object + {"id": "countries", "stopwords": ["Germany", "France", "Italy"], "locale": "en"} properties: id: type: string @@ -4569,39 +4596,36 @@ components: type: string locale: type: string + StopwordsSetRetrieveSchema: + type: object required: - - id - stopwords example: | - {"id": "countries", "stopwords": ["Germany", "France", "Italy"], "locale": "en"} - StopwordsSetRetrieveSchema: - type: object + {"stopwords": {"id": "countries", "stopwords": ["Germany", "France", "Italy"], "locale": "en"}} properties: stopwords: $ref: '#/components/schemas/StopwordsSetSchema' + StopwordsSetsRetrieveAllSchema: + type: object required: - stopwords example: | - {"stopwords": {"id": "countries", "stopwords": ["Germany", "France", "Italy"], "locale": "en"}} - 
StopwordsSetsRetrieveAllSchema: - type: object + {"stopwords": [{"id": "countries", "stopwords": ["Germany", "France", "Italy"], "locale": "en"}]} properties: stopwords: type: array items: $ref: '#/components/schemas/StopwordsSetSchema' - required: - - stopwords - example: | - {"stopwords": [{"id": "countries", "stopwords": ["Germany", "France", "Italy"], "locale": "en"}]} PresetUpsertSchema: + required: + - value properties: value: oneOf: - $ref: '#/components/schemas/SearchParameters' - $ref: '#/components/schemas/MultiSearchSearchesParameter' - required: - - value + x-rust-has-borrowed-data: true + x-rust-has-borrowed-data: true PresetSchema: allOf: - $ref: '#/components/schemas/PresetUpsertSchema' @@ -4635,6 +4659,7 @@ components: - coerce_or_drop - drop - reject + x-rust-has-borrowed-data: true IndexAction: type: string enum: @@ -4642,14 +4667,16 @@ components: - update - upsert - emplace + x-rust-has-borrowed-data: true DropTokensMode: type: string + description: | + Dictates the direction in which the words in the query must be dropped when the original words in the query do not appear in any document. Values: right_to_left (default), left_to_right, both_sides:3 A note on both_sides:3 - for queries up to 3 tokens (words) in length, this mode will drop tokens from both sides and exhaustively rank all matching results. If query length is greater than 3 words, Typesense will just fallback to default behavior of right_to_left enum: - right_to_left - left_to_right - both_sides:3 - description: | - Dictates the direction in which the words in the query must be dropped when the original words in the query do not appear in any document. Values: right_to_left (default), left_to_right, both_sides:3 A note on both_sides:3 - for queries up to 3 tokens (words) in length, this mode will drop tokens from both sides and exhaustively rank all matching results. 
If query length is greater than 3 words, Typesense will just fallback to default behavior of right_to_left + x-rust-has-borrowed-data: true ConversationModelCreateSchema: required: - model_name @@ -4672,6 +4699,7 @@ components: history_collection: type: string description: Typesense collection that stores the historical conversations + x-rust-has-borrowed-data: true ConversationModelUpdateSchema: type: object properties: @@ -4679,31 +4707,32 @@ components: type: string description: An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. model_name: - description: Name of the LLM model offered by OpenAI, Cloudflare or vLLM type: string + description: Name of the LLM model offered by OpenAI, Cloudflare or vLLM api_key: - description: The LLM service's API Key type: string + description: The LLM service's API Key history_collection: type: string description: Typesense collection that stores the historical conversations account_id: - description: LLM service's account ID (only applicable for Cloudflare) type: string + description: LLM service's account ID (only applicable for Cloudflare) system_prompt: - description: The system prompt that contains special instructions to the LLM type: string + description: The system prompt that contains special instructions to the LLM ttl: type: integer description: | Time interval in seconds after which the messages would be deleted. Default: 86400 (24 hours) max_bytes: + type: integer description: | The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. 
- type: integer vllm_url: - description: URL of vLLM service type: string + description: URL of vLLM service + x-rust-has-borrowed-data: true ConversationModelSchema: allOf: - $ref: '#/components/schemas/ConversationModelCreateSchema' @@ -4770,9 +4799,9 @@ components: description: Top-k parameter for the NL model (Google-specific) stop_sequences: type: array + description: Stop sequences for the NL model (Google-specific) items: type: string - description: Stop sequences for the NL model (Google-specific) api_version: type: string description: API version for the NL model service @@ -4808,6 +4837,7 @@ components: id: type: string description: Optional ID for the NL search model + x-rust-has-borrowed-data: true NLSearchModelSchema: allOf: - $ref: '#/components/schemas/NLSearchModelCreateSchema' @@ -4820,6 +4850,7 @@ components: description: ID of the NL search model NLSearchModelUpdateSchema: $ref: '#/components/schemas/NLSearchModelCreateSchema' + x-rust-has-borrowed-data: true NLSearchModelDeleteSchema: type: object required: @@ -4853,6 +4884,7 @@ components: description: By default, special characters are dropped from synonyms. Use this attribute to specify which special characters should be indexed as is items: type: string + x-rust-has-borrowed-data: true SynonymSetCreateSchema: type: object required: @@ -4863,6 +4895,7 @@ components: description: Array of synonym items items: $ref: '#/components/schemas/SynonymItemSchema' + x-rust-has-borrowed-data: true SynonymSetSchema: allOf: - $ref: '#/components/schemas/SynonymSetCreateSchema' @@ -4930,6 +4963,7 @@ components: type: object description: | Return a custom JSON object in the Search API response, when this rule is triggered. This can can be used to display a pre-defined message (eg: a promotion banner) on the front-end when a particular rule is triggered. 
+ x-rust-has-borrowed-data: true sort_by: type: string description: | @@ -4957,6 +4991,7 @@ components: id: type: string description: ID of the curation item + x-rust-has-borrowed-data: true CurationItemSchema: allOf: - $ref: '#/components/schemas/CurationItemCreateSchema' @@ -4979,6 +5014,7 @@ components: description: type: string description: Optional description for the curation set + x-rust-has-borrowed-data: true CurationSetSchema: allOf: - $ref: '#/components/schemas/CurationSetCreateSchema' @@ -5064,24 +5100,27 @@ components: $ref: '#/components/schemas/IndexAction' dirty_values: $ref: '#/components/schemas/DirtyValues' + x-rust-has-borrowed-data: true ExportDocumentsParameters: type: object properties: filter_by: - description: Filter conditions for refining your search results. Separate multiple conditions with &&. type: string + description: Filter conditions for refining your search results. Separate multiple conditions with &&. include_fields: - description: List of fields from the document to include in the search result type: string + description: List of fields from the document to include in the search result exclude_fields: - description: List of fields from the document to exclude in the search result type: string + description: List of fields from the document to exclude in the search result + x-rust-has-borrowed-data: true UpdateDocumentsParameters: type: object properties: filter_by: type: string example: 'num_employees:>100 && country: [USA, UK]' + x-rust-has-borrowed-data: true DeleteDocumentsParameters: type: object required: @@ -5091,26 +5130,28 @@ components: type: string example: 'num_employees:>100 && country: [USA, UK]' batch_size: - description: Batch size parameter controls the number of documents that should be deleted at a time. A larger value will speed up deletions, but will impact performance of other operations running on the server. 
type: integer + description: Batch size parameter controls the number of documents that should be deleted at a time. A larger value will speed up deletions, but will impact performance of other operations running on the server. ignore_not_found: type: boolean truncate: - description: When true, removes all documents from the collection while preserving the collection and its schema. type: boolean + description: When true, removes all documents from the collection while preserving the collection and its schema. + x-rust-has-borrowed-data: true GetCollectionsParameters: type: object properties: exclude_fields: - description: Comma-separated list of fields from the collection to exclude from the response type: string + description: Comma-separated list of fields from the collection to exclude from the response limit: + type: integer description: | Number of collections to fetch. Default: returns all collections. - type: integer offset: - description: Identifies the starting point to return collections when paginating. type: integer + description: Identifies the starting point to return collections when paginating. 
+ x-rust-has-borrowed-data: true securitySchemes: api_key_header: type: apiKey diff --git a/typesense/src/client/alias.rs b/typesense/src/client/alias.rs index 65c9a68..840bd23 100644 --- a/typesense/src/client/alias.rs +++ b/typesense/src/client/alias.rs @@ -25,7 +25,7 @@ impl<'a> Alias<'a> { &self, ) -> Result> { let params = collections_api::GetAliasParams { - alias_name: self.alias_name.to_owned(), + alias_name: self.alias_name.into(), }; execute_wrapper!(self, collections_api::get_alias, params) @@ -36,7 +36,7 @@ impl<'a> Alias<'a> { &self, ) -> Result> { let params = collections_api::DeleteAliasParams { - alias_name: self.alias_name.to_owned(), + alias_name: self.alias_name.into(), }; execute_wrapper!(self, collections_api::delete_alias, params) } diff --git a/typesense/src/client/aliases.rs b/typesense/src/client/aliases.rs index 43d3270..ec5b884 100644 --- a/typesense/src/client/aliases.rs +++ b/typesense/src/client/aliases.rs @@ -3,6 +3,7 @@ //! An `Aliases` instance is created via the main `client.aliases()` method. use crate::{Client, Error, execute_wrapper}; +use ::std::borrow::Cow; use typesense_codegen::{apis::collections_api, models}; /// Provides methods for interacting with Typesense collection aliases. @@ -30,8 +31,8 @@ impl<'a> Aliases<'a> { /// * `schema` - A `CollectionAliasSchema` pointing to the target collection. pub async fn upsert( &self, - alias_name: impl Into, - schema: models::CollectionAliasSchema, + alias_name: impl Into>, + schema: models::CollectionAliasSchema<'_>, ) -> Result> { let params = collections_api::UpsertAliasParams { alias_name: alias_name.into(), diff --git a/typesense/src/client/collection/document.rs b/typesense/src/client/collection/document.rs index 9a159ec..03fe09f 100644 --- a/typesense/src/client/collection/document.rs +++ b/typesense/src/client/collection/document.rs @@ -5,6 +5,7 @@ //! 
`client.collection::().document("123")` use crate::{Client, Error, execute_wrapper, traits}; +use ::std::borrow::Cow; use serde::{Serialize, de::DeserializeOwned}; use typesense_codegen::apis::documents_api; @@ -14,28 +15,32 @@ use typesense_codegen::apis::documents_api; /// or `client.collection::().document("document_id")`. /// The generic `D` represents the shape of the document and must implement `Serialize` and `DeserializeOwned`. /// If `D` is not specified, it defaults to `serde_json::Value` for schemaless interactions. -pub struct Document<'c, 'n, D = serde_json::Value> +pub struct Document<'d, D = serde_json::Value> where D: DeserializeOwned + Serialize, { - client: &'c Client, - collection_name: &'n str, - document_id: String, - _phantom: std::marker::PhantomData, + client: &'d Client, + collection_name: &'d str, + document_id: Cow<'d, str>, + _phantom: core::marker::PhantomData, } -impl<'c, 'n, D> Document<'c, 'n, D> +impl<'d, D> Document<'d, D> where D: DeserializeOwned + Serialize, { /// Creates a new `Document` instance for a specific document ID. #[inline] - pub(super) fn new(client: &'c Client, collection_name: &'n str, document_id: String) -> Self { + pub(super) fn new( + client: &'d Client, + collection_name: &'d str, + document_id: impl Into>, + ) -> Self { Self { client, collection_name, - document_id, - _phantom: std::marker::PhantomData, + document_id: document_id.into(), + _phantom: core::marker::PhantomData, } } @@ -45,8 +50,8 @@ where /// A `Result` containing the strongly-typed document `D` if successful. 
pub async fn retrieve(&self) -> Result> { let params = documents_api::GetDocumentParams { - collection_name: self.collection_name.to_owned(), - document_id: self.document_id.to_owned(), + collection_name: self.collection_name.into(), + document_id: self.document_id.as_ref().into(), }; let result_value = execute_wrapper!(self, documents_api::get_document, params)?; @@ -62,8 +67,8 @@ where /// A `Result` containing the deleted document deserialized into `D`. pub async fn delete(&self) -> Result> { let params = documents_api::DeleteDocumentParams { - collection_name: self.collection_name.to_owned(), - document_id: self.document_id.to_owned(), + collection_name: self.collection_name.into(), + document_id: self.document_id.as_ref().into(), }; let result_value = execute_wrapper!(self, documents_api::delete_document, params)?; @@ -73,7 +78,7 @@ where } } -impl<'c, 'n, D> Document<'c, 'n, D> +impl<'d, D> Document<'d, D> where D: traits::Document, { @@ -125,8 +130,8 @@ where params: Option, ) -> Result> { let params = documents_api::UpdateDocumentParams { - collection_name: self.collection_name.to_owned(), - document_id: self.document_id.to_owned(), + collection_name: self.collection_name.into(), + document_id: self.document_id.as_ref().into(), body: partial_document, dirty_values: params.and_then(|d| d.dirty_values), }; diff --git a/typesense/src/client/collection/documents.rs b/typesense/src/client/collection/documents.rs index b394475..9010d2e 100644 --- a/typesense/src/client/collection/documents.rs +++ b/typesense/src/client/collection/documents.rs @@ -9,6 +9,7 @@ use crate::{ models::{DocumentIndexParameters, SearchResult}, traits, }; +use ::std::borrow::Cow; use serde::{Serialize, de::DeserializeOwned}; use typesense_codegen::{ apis::documents_api, @@ -22,26 +23,26 @@ use typesense_codegen::{ /// This struct is generic over the document type `D`. If created via `client.collection_schemaless(...)`, /// `D` defaults to `serde_json::Value`. 
If created via `client.collection_named::(...)`, /// `D` will be `MyType`. -pub struct Documents<'c, 'n, D = serde_json::Value> +pub struct Documents<'d, D = serde_json::Value> where D: DeserializeOwned + Serialize, { - client: &'c Client, - collection_name: &'n str, - _phantom: std::marker::PhantomData, + client: &'d Client, + collection_name: &'d str, + _phantom: core::marker::PhantomData, } -impl<'c, 'n, D> Documents<'c, 'n, D> +impl<'d, D> Documents<'d, D> where D: DeserializeOwned + Serialize, { /// Creates a new `Documents` instance. #[inline] - pub(super) fn new(client: &'c Client, collection_name: &'n str) -> Self { + pub(super) fn new(client: &'d Client, collection_name: &'d str) -> Self { Self { client, collection_name, - _phantom: std::marker::PhantomData, + _phantom: core::marker::PhantomData, } } @@ -56,9 +57,9 @@ where params: Option, ) -> Result> { let params = documents_api::IndexDocumentParams { - collection_name: self.collection_name.to_owned(), + collection_name: self.collection_name.into(), body: document, - action: Some(action.to_owned()), + action: Some(action.into()), dirty_values: params.and_then(|d| d.dirty_values), // Or expose this as an argument if needed }; execute_wrapper!(self, documents_api::index_document, params) @@ -75,12 +76,12 @@ where /// * `params` - An `ImportDocumentsParameters` struct containing options like `action` and `batch_size`. pub async fn import_jsonl( &self, - documents_jsonl: String, + documents_jsonl: impl Into>, params: ImportDocumentsParameters, ) -> Result> { let params = documents_api::ImportDocumentsParams { - body: documents_jsonl, - collection_name: self.collection_name.to_owned(), + body: documents_jsonl.into(), + collection_name: self.collection_name.into(), action: params.action, batch_size: params.batch_size, @@ -98,10 +99,10 @@ where /// * `params` - An `ExportDocumentsParameters` struct containing options like `filter_by` and `include_fields`. 
pub async fn export_jsonl( &self, - params: ExportDocumentsParameters, + params: ExportDocumentsParameters<'_>, ) -> Result> { let params = documents_api::ExportDocumentsParams { - collection_name: self.collection_name.to_owned(), + collection_name: self.collection_name.into(), exclude_fields: params.exclude_fields, filter_by: params.filter_by, include_fields: params.include_fields, @@ -115,11 +116,11 @@ where /// * `params` - A `DeleteDocumentsParameters` describing the conditions for deleting documents. pub async fn delete( &self, - params: DeleteDocumentsParameters, + params: DeleteDocumentsParameters<'_>, ) -> Result> { let params = documents_api::DeleteDocumentsParams { - collection_name: self.collection_name.to_owned(), + collection_name: self.collection_name.into(), filter_by: Some(params.filter_by), batch_size: params.batch_size, ignore_not_found: params.ignore_not_found, @@ -135,10 +136,10 @@ where /// * `params` - A `SearchParameters` struct containing all search parameters. pub async fn search( &self, - params: raw_models::SearchParameters, + params: raw_models::SearchParameters<'_>, ) -> Result, Error> { let search_params = documents_api::SearchCollectionParams { - collection_name: self.collection_name.to_owned(), + collection_name: self.collection_name.into(), // Map all corresponding fields directly. 
cache_ttl: params.cache_ttl, @@ -217,7 +218,7 @@ where } } -impl<'c, 'n, D> Documents<'c, 'n, D> +impl<'d, D> Documents<'d, D> where D: traits::Document, { @@ -266,11 +267,11 @@ where pub async fn update( &self, document: &D::Partial, - params: UpdateDocumentsParameters, + params: UpdateDocumentsParameters<'_>, ) -> Result> { let params = documents_api::UpdateDocumentsParams { - collection_name: self.collection_name.to_owned(), + collection_name: self.collection_name.into(), filter_by: params.filter_by, body: document, }; diff --git a/typesense/src/client/collection/mod.rs b/typesense/src/client/collection/mod.rs index ba06950..a7a9cef 100644 --- a/typesense/src/client/collection/mod.rs +++ b/typesense/src/client/collection/mod.rs @@ -4,47 +4,51 @@ mod document; mod documents; -use crate::{Client, Error, execute_wrapper}; +use crate::{Client, Error, execute_wrapper}; +use ::std::borrow::Cow; use serde::{Serialize, de::DeserializeOwned}; use typesense_codegen::{apis::collections_api, models}; /// Provides methods for interacting with a Typesense collection. /// /// This struct is created by calling `client.collection()`. -pub struct Collection<'c, 'n, D = serde_json::Value> +pub struct Collection<'c, D = serde_json::Value> where D: DeserializeOwned + Serialize, { client: &'c Client, - collection_name: &'n str, - _phantom: std::marker::PhantomData, + collection_name: Cow<'c, str>, + _phantom: core::marker::PhantomData, } -impl<'c, 'n, D> Collection<'c, 'n, D> +impl<'c, D> Collection<'c, D> where D: DeserializeOwned + Serialize, { /// Creates a new `Collection` instance. 
#[inline] - pub(super) fn new(client: &'c Client, collection_name: &'n str) -> Self { + pub(super) fn new(client: &'c Client, collection_name: impl Into>) -> Self { Self { client, - collection_name, - _phantom: std::marker::PhantomData, + collection_name: collection_name.into(), + _phantom: core::marker::PhantomData, } } /// Provides access to the document-related API endpoints for a specific collection. #[inline] - pub fn documents(&self) -> documents::Documents<'c, 'n, D> { - documents::Documents::new(self.client, self.collection_name) + pub fn documents<'d>(&'d self) -> documents::Documents<'d, D> { + documents::Documents::new(self.client, &self.collection_name) } /// Provides access to the API endpoints for a single document within a Typesense collection. #[inline] - pub fn document(&self, document_id: impl Into) -> document::Document<'c, 'n, D> { - document::Document::new(self.client, self.collection_name, document_id.into()) + pub fn document<'d>( + &'d self, + document_id: impl Into>, + ) -> document::Document<'d, D> { + document::Document::new(self.client, &self.collection_name, document_id) } /// Retrieves the details of a collection, given its name. 
@@ -53,7 +57,7 @@ where &self, ) -> Result> { let params = collections_api::GetCollectionParams { - collection_name: self.collection_name.to_owned(), + collection_name: self.collection_name.as_ref().into(), }; execute_wrapper!(self, collections_api::get_collection, params) } @@ -64,7 +68,7 @@ where &self, ) -> Result> { let params = collections_api::DeleteCollectionParams { - collection_name: self.collection_name.to_owned(), + collection_name: self.collection_name.as_ref().into(), }; execute_wrapper!(self, collections_api::delete_collection, params) } @@ -79,7 +83,7 @@ where update_schema: models::CollectionUpdateSchema, ) -> Result> { let params = collections_api::UpdateCollectionParams { - collection_name: self.collection_name.to_owned(), + collection_name: self.collection_name.as_ref().into(), collection_update_schema: update_schema, }; execute_wrapper!(self, collections_api::update_collection, params) diff --git a/typesense/src/client/collections.rs b/typesense/src/client/collections.rs index 060bf94..2c0d19b 100644 --- a/typesense/src/client/collections.rs +++ b/typesense/src/client/collections.rs @@ -28,7 +28,7 @@ impl<'c> Collections<'c> { /// * `schema` - A `CollectionSchema` object describing the collection to be created. pub async fn create( &self, - schema: models::CollectionSchema, + schema: models::CollectionSchema<'_>, ) -> Result> { let params = collections_api::CreateCollectionParams { collection_schema: schema, @@ -39,7 +39,7 @@ impl<'c> Collections<'c> { /// List the existing Typesense collections. 
pub async fn retrieve( &self, - params: GetCollectionsParameters, + params: GetCollectionsParameters<'_>, ) -> Result, Error> { let params = GetCollectionsParams { exclude_fields: params.exclude_fields, diff --git a/typesense/src/client/conversations/model.rs b/typesense/src/client/conversations/model.rs index e57faf2..1192f09 100644 --- a/typesense/src/client/conversations/model.rs +++ b/typesense/src/client/conversations/model.rs @@ -28,7 +28,7 @@ impl<'a> Model<'a> { Error, > { let params = conversations_api::RetrieveConversationModelParams { - model_id: self.model_id.to_owned(), + model_id: self.model_id.into(), }; execute_wrapper!(self, conversations_api::retrieve_conversation_model, params) } @@ -39,13 +39,13 @@ impl<'a> Model<'a> { /// * `schema` - A `ConversationModelUpdateSchema` object with the fields to update. pub async fn update( &self, - schema: models::ConversationModelUpdateSchema, + schema: models::ConversationModelUpdateSchema<'_>, ) -> Result< models::ConversationModelSchema, Error, > { let params = conversations_api::UpdateConversationModelParams { - model_id: self.model_id.to_owned(), + model_id: self.model_id.into(), conversation_model_update_schema: schema, }; execute_wrapper!(self, conversations_api::update_conversation_model, params) @@ -59,7 +59,7 @@ impl<'a> Model<'a> { Error, > { let params = conversations_api::DeleteConversationModelParams { - model_id: self.model_id.to_owned(), + model_id: self.model_id.into(), }; execute_wrapper!(self, conversations_api::delete_conversation_model, params) } diff --git a/typesense/src/client/conversations/models.rs b/typesense/src/client/conversations/models.rs index be6960e..1cf566a 100644 --- a/typesense/src/client/conversations/models.rs +++ b/typesense/src/client/conversations/models.rs @@ -25,7 +25,7 @@ impl<'a> Models<'a> { /// * `schema` - A `ConversationModelCreateSchema` object describing the model. 
pub async fn create( &self, - schema: models::ConversationModelCreateSchema, + schema: models::ConversationModelCreateSchema<'_>, ) -> Result< models::ConversationModelSchema, Error, diff --git a/typesense/src/client/keys.rs b/typesense/src/client/keys.rs index a7979f0..5949832 100644 --- a/typesense/src/client/keys.rs +++ b/typesense/src/client/keys.rs @@ -35,7 +35,7 @@ impl<'c> Keys<'c> { #[inline] pub async fn create( &self, - schema: models::ApiKeySchema, + schema: models::ApiKeySchema<'_>, ) -> Result> { let params = keys_api::CreateKeyParams { api_key_schema: Some(schema), @@ -55,7 +55,7 @@ impl<'c> Keys<'c> { pub fn generate_scoped_search_key( &self, key: impl AsRef, - params: &ScopedKeyParameters, + params: &ScopedKeyParameters<'_>, ) -> anyhow::Result { let params = serde_json::to_string(params)?; diff --git a/typesense/src/client/mod.rs b/typesense/src/client/mod.rs index 5b15871..773abd8 100644 --- a/typesense/src/client/mod.rs +++ b/typesense/src/client/mod.rs @@ -38,8 +38,8 @@ //! //! // Search for a document //! let search_params = models::SearchParameters { -//! q: Some("phone".to_owned()), -//! query_by: Some("name".to_owned()), +//! q: Some("phone".into()), +//! query_by: Some("name".into()), //! ..Default::default() //! }; //! @@ -91,8 +91,8 @@ //! //! // Search for a document //! let search_params = models::SearchParameters { -//! q: Some("phone".to_owned()), -//! query_by: Some("name".to_owned()), +//! q: Some("phone".into()), +//! query_by: Some("name".into()), //! ..Default::default() //! }; //! 
@@ -142,15 +142,16 @@ use reqwest_middleware::ClientBuilder as ReqwestMiddlewareClientBuilder; use reqwest_retry::RetryTransientMiddleware; pub use reqwest_retry::policies::ExponentialBackoff; -use debug_unsafe::{option::OptionUnwrapper, slice::SliceGetter}; -use serde::{Serialize, de::DeserializeOwned}; -use std::{ +use ::std::{ + borrow::Cow, future::Future, sync::{ RwLock, atomic::{AtomicBool, AtomicUsize, Ordering}, }, }; +use debug_unsafe::{option::OptionUnwrapper, slice::SliceGetter}; +use serde::{Serialize, de::DeserializeOwned}; use typesense_codegen::apis::{self, configuration}; use web_time::{Duration, Instant}; @@ -480,7 +481,10 @@ impl Client { /// # } /// ``` #[inline] - pub fn collection_named<'c, 'n, D>(&'c self, collection_name: &'n str) -> Collection<'c, 'n, D> + pub fn collection_named<'c, D>( + &'c self, + collection_name: impl Into>, + ) -> Collection<'c, D> where D: DeserializeOwned + Serialize, { @@ -525,7 +529,7 @@ impl Client { /// # } /// ``` #[inline] - pub fn collection<'c, 'n, D>(&'c self) -> Collection<'c, 'n, D> + pub fn collection<'c, D>(&'c self) -> Collection<'c, D> where D: Document, { @@ -561,10 +565,10 @@ impl Client { /// # } /// ``` #[inline] - pub fn collection_schemaless<'c, 'n>( + pub fn collection_schemaless<'c>( &'c self, - collection_name: &'n str, - ) -> Collection<'c, 'n, serde_json::Value> { + collection_name: impl Into>, + ) -> Collection<'c, serde_json::Value> { Collection::new(self, collection_name) } @@ -608,7 +612,7 @@ impl Client { /// # .build() /// # .unwrap(); /// # let schema = models::ApiKeySchema { - /// # description: "Search-only key.".to_owned(), + /// # description: "Search-only key.".into(), /// # actions: vec!["documents:search".to_owned()], /// # collections: vec!["*".to_owned()], /// # ..Default::default() @@ -668,9 +672,9 @@ impl Client { /// # .unwrap(); /// # let search_requests = models::MultiSearchBody { /// # searches: vec![models::MultiSearchCollectionParameters { - /// # collection: 
Some("products".to_owned()), - /// # q: Some("phone".to_owned()), - /// # query_by: Some("name".to_owned()), + /// # collection: Some("products".into()), + /// # q: Some("phone".into()), + /// # query_by: Some("name".into()), /// # ..Default::default() /// # }], /// # ..Default::default() diff --git a/typesense/src/client/multi_search.rs b/typesense/src/client/multi_search.rs index 3188a01..e26778d 100644 --- a/typesense/src/client/multi_search.rs +++ b/typesense/src/client/multi_search.rs @@ -69,16 +69,16 @@ impl<'c> MultiSearch<'c> { /// searches: vec![ /// // Search #0 targets the 'products' collection /// models::MultiSearchCollectionParameters { - /// collection: Some("products".to_owned()), - /// q: Some("shoe".to_owned()), - /// query_by: Some("name".to_owned()), + /// collection: Some("products".into()), + /// q: Some("shoe".into()), + /// query_by: Some("name".into()), /// ..Default::default() /// }, /// // Search #1 targets the 'brands' collection /// models::MultiSearchCollectionParameters { - /// collection: Some("brands".to_owned()), - /// q: Some("nike".to_owned()), - /// query_by: Some("company_name".to_owned()), + /// collection: Some("brands".into()), + /// q: Some("nike".into()), + /// query_by: Some("company_name".into()), /// ..Default::default() /// }, /// ], @@ -112,8 +112,8 @@ impl<'c> MultiSearch<'c> { /// * `common_search_params` - A `MultiSearchParameters` struct describing search parameters that are common to all searches. 
pub async fn perform( &self, - search_requests: MultiSearchBody, - common_search_params: raw_models::MultiSearchParameters, + search_requests: MultiSearchBody<'_>, + common_search_params: raw_models::MultiSearchParameters<'_>, ) -> Result< raw_models::MultiSearchResult, Error, @@ -164,16 +164,16 @@ impl<'c> MultiSearch<'c> { /// searches: vec![ /// // Search #0 targets the 'products' collection /// models::MultiSearchCollectionParameters { - /// collection: Some("products".to_owned()), - /// q: Some("shoe".to_owned()), - /// query_by: Some("name".to_owned()), + /// collection: Some("products".into()), + /// q: Some("shoe".into()), + /// query_by: Some("name".into()), /// ..Default::default() /// }, /// // Search #1 targets the 'brands' collection /// models::MultiSearchCollectionParameters { - /// collection: Some("brands".to_owned()), - /// q: Some("nike".to_owned()), - /// query_by: Some("company_name".to_owned()), + /// collection: Some("brands".into()), + /// q: Some("nike".into()), + /// query_by: Some("company_name".into()), /// ..Default::default() /// }, /// ], @@ -216,15 +216,15 @@ impl<'c> MultiSearch<'c> { /// let search_requests = models::MultiSearchBody { /// searches: vec![ /// models::MultiSearchCollectionParameters { - /// collection: Some("products".to_owned()), - /// q: Some("shoe".to_owned()), - /// query_by: Some("name".to_owned()), + /// collection: Some("products".into()), + /// q: Some("shoe".into()), + /// query_by: Some("name".into()), /// ..Default::default() /// }, /// models::MultiSearchCollectionParameters { - /// collection: Some("products".to_owned()), - /// q: Some("sock".to_owned()), - /// query_by: Some("name".to_owned()), + /// collection: Some("products".into()), + /// q: Some("sock".into()), + /// query_by: Some("name".into()), /// ..Default::default() /// }, /// ], @@ -251,8 +251,8 @@ impl<'c> MultiSearch<'c> { /// A `Result` containing a `SearchResult` on success, or an `Error` on failure. 
pub async fn perform_union serde::Deserialize<'de>>( &self, - search_requests: MultiSearchBody, - common_search_params: raw_models::MultiSearchParameters, + search_requests: MultiSearchBody<'_>, + common_search_params: raw_models::MultiSearchParameters<'_>, ) -> Result, Error> { // Explicitly set `union: true` for the request body let request_body = raw_models::MultiSearchSearchesParameter { @@ -273,10 +273,10 @@ impl<'c> MultiSearch<'c> { } // Private helper function to construct the final search parameters object. // This encapsulates the repetitive mapping logic. -fn build_multi_search_params( - request_body: raw_models::MultiSearchSearchesParameter, - params: raw_models::MultiSearchParameters, -) -> MultiSearchParams { +fn build_multi_search_params<'a>( + request_body: raw_models::MultiSearchSearchesParameter<'a>, + params: raw_models::MultiSearchParameters<'a>, +) -> MultiSearchParams<'a> { MultiSearchParams { multi_search_searches_parameter: Some(request_body), // Common URL search params diff --git a/typesense/src/client/operations.rs b/typesense/src/client/operations.rs index d1c6060..2979818 100644 --- a/typesense/src/client/operations.rs +++ b/typesense/src/client/operations.rs @@ -105,7 +105,7 @@ impl<'a> Operations<'a> { /// Docs: pub async fn take_snapshot( &self, - params: operations_api::TakeSnapshotParams, + params: operations_api::TakeSnapshotParams<'_>, ) -> Result> { execute_wrapper!(self, operations_api::take_snapshot, params) } diff --git a/typesense/src/client/preset.rs b/typesense/src/client/preset.rs index fa6d3cb..0ba4c87 100644 --- a/typesense/src/client/preset.rs +++ b/typesense/src/client/preset.rs @@ -25,7 +25,7 @@ impl<'a> Preset<'a> { &self, ) -> Result> { let params = presets_api::RetrievePresetParams { - preset_id: self.preset_id.to_owned(), + preset_id: self.preset_id.into(), }; execute_wrapper!(self, presets_api::retrieve_preset, params) } @@ -35,7 +35,7 @@ impl<'a> Preset<'a> { &self, ) -> Result> { let params = 
presets_api::DeletePresetParams { - preset_id: self.preset_id.to_owned(), + preset_id: self.preset_id.into(), }; execute_wrapper!(self, presets_api::delete_preset, params) } diff --git a/typesense/src/client/presets.rs b/typesense/src/client/presets.rs index 0cce7b0..dd8fd4a 100644 --- a/typesense/src/client/presets.rs +++ b/typesense/src/client/presets.rs @@ -5,6 +5,7 @@ //! A `Presets` instance is created via the main `client.presets()` method. use crate::{Client, Error, execute_wrapper}; +use ::std::borrow::Cow; use typesense_codegen::{apis::presets_api, models}; /// Provides methods for managing all of your Typesense presets. @@ -35,8 +36,8 @@ impl<'a> Presets<'a> { /// * `schema` - A `PresetUpsertSchema` object with the preset's value. pub async fn upsert( &self, - preset_id: impl Into, - schema: models::PresetUpsertSchema, + preset_id: impl Into>, + schema: models::PresetUpsertSchema<'_>, ) -> Result> { let params = presets_api::UpsertPresetParams { preset_id: preset_id.into(), diff --git a/typesense/src/client/stemming/dictionaries.rs b/typesense/src/client/stemming/dictionaries.rs index 72854d1..8eb944f 100644 --- a/typesense/src/client/stemming/dictionaries.rs +++ b/typesense/src/client/stemming/dictionaries.rs @@ -6,6 +6,7 @@ use crate::{ client::{Client, Error}, execute_wrapper, }; +use ::std::borrow::Cow; use typesense_codegen::{apis::stemming_api, models}; /// Provides methods for interacting with the collection of stemming dictionaries. @@ -31,12 +32,12 @@ impl<'a> Dictionaries<'a> { /// * `dictionary_jsonl` - A string containing the word mappings in JSONL format. 
pub async fn import( &self, - dictionary_id: impl Into, - dictionary_jsonl: String, + dictionary_id: impl Into>, + dictionary_jsonl: impl Into>, ) -> Result> { let params = stemming_api::ImportStemmingDictionaryParams { id: dictionary_id.into(), - body: dictionary_jsonl, + body: dictionary_jsonl.into(), }; execute_wrapper!(self, stemming_api::import_stemming_dictionary, params) } diff --git a/typesense/src/client/stemming/dictionary.rs b/typesense/src/client/stemming/dictionary.rs index bbd8212..d93d64c 100644 --- a/typesense/src/client/stemming/dictionary.rs +++ b/typesense/src/client/stemming/dictionary.rs @@ -31,7 +31,7 @@ impl<'a> Dictionary<'a> { &self, ) -> Result> { let params = stemming_api::GetStemmingDictionaryParams { - dictionary_id: self.dictionary_id.to_owned(), + dictionary_id: self.dictionary_id.into(), }; execute_wrapper!(self, stemming_api::get_stemming_dictionary, params) } diff --git a/typesense/src/client/stopword.rs b/typesense/src/client/stopword.rs index f1fce68..41f58f1 100644 --- a/typesense/src/client/stopword.rs +++ b/typesense/src/client/stopword.rs @@ -26,7 +26,7 @@ impl<'a> Stopword<'a> { ) -> Result> { let params = stopwords_api::RetrieveStopwordsSetParams { - set_id: self.set_id.to_owned(), + set_id: self.set_id.into(), }; execute_wrapper!(self, stopwords_api::retrieve_stopwords_set, params) } @@ -37,7 +37,7 @@ impl<'a> Stopword<'a> { ) -> Result> { let params = stopwords_api::DeleteStopwordsSetParams { - set_id: self.set_id.to_owned(), + set_id: self.set_id.into(), }; execute_wrapper!(self, stopwords_api::delete_stopwords_set, params) } diff --git a/typesense/src/client/stopwords.rs b/typesense/src/client/stopwords.rs index a19bf29..98bf9a7 100644 --- a/typesense/src/client/stopwords.rs +++ b/typesense/src/client/stopwords.rs @@ -3,6 +3,7 @@ //! A `Stopwords` instance is created via the main `client.stopwords()` method. 
use crate::{Client, Error, execute_wrapper}; +use ::std::borrow::Cow; use typesense_codegen::{apis::stopwords_api, models}; /// Provides methods for managing Typesense stopwords sets. @@ -26,8 +27,8 @@ impl<'a> Stopwords<'a> { /// * `schema` - A `StopwordsSetUpsertSchema` object with the stopwords to upsert. pub async fn upsert( &self, - set_id: impl Into, - schema: models::StopwordsSetUpsertSchema, + set_id: impl Into>, + schema: models::StopwordsSetUpsertSchema<'_>, ) -> Result> { let params = stopwords_api::UpsertStopwordsSetParams { set_id: set_id.into(), diff --git a/typesense/src/models/mod.rs b/typesense/src/models/mod.rs index 689ca79..3902ecb 100644 --- a/typesense/src/models/mod.rs +++ b/typesense/src/models/mod.rs @@ -5,7 +5,6 @@ mod scoped_key_parameters; pub use document_index_parameters::*; pub use scoped_key_parameters::*; -pub use typesense_codegen::apis::operations_api::TakeSnapshotParams; -pub use typesense_codegen::models::*; +pub use typesense_codegen::{apis::operations_api::TakeSnapshotParams, models::*}; pub use multi_search::MultiSearchBody; diff --git a/typesense/src/models/multi_search.rs b/typesense/src/models/multi_search.rs index 4efe08c..439a3e1 100644 --- a/typesense/src/models/multi_search.rs +++ b/typesense/src/models/multi_search.rs @@ -7,8 +7,8 @@ use typesense_codegen::models::MultiSearchCollectionParameters; /// sent to the Typesense multi-search endpoint. Each search query is defined in a /// `MultiSearchCollectionParameters` struct within the `searches` vector. #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize, bon::Builder)] -pub struct MultiSearchBody { +pub struct MultiSearchBody<'a> { /// A vector of individual search queries to be executed. The order of the search results returned by Typesense will match the order of these queries. 
#[serde(rename = "searches")] - pub searches: Vec, + pub searches: Vec>, } diff --git a/typesense/src/models/scoped_key_parameters.rs b/typesense/src/models/scoped_key_parameters.rs index 0d2e168..8a8222d 100644 --- a/typesense/src/models/scoped_key_parameters.rs +++ b/typesense/src/models/scoped_key_parameters.rs @@ -7,12 +7,12 @@ use serde::Serialize; /// search restrictions and an optional expiration time embedded within it. It allows /// you to delegate search permissions securely without exposing your main API key. #[derive(Debug, Clone, Default, Serialize)] -pub struct ScopedKeyParameters { +pub struct ScopedKeyParameters<'a> { /// The search parameters to embed in the key. These parameters will be /// enforced for all searches made with the generated key. /// For example, you can use `filter_by` to restrict searches to a subset of documents. #[serde(flatten, skip_serializing_if = "Option::is_none")] - pub search_params: Option, + pub search_params: Option>, /// The number of `multi_search` requests that can be performed using this key. #[serde(skip_serializing_if = "Option::is_none")] diff --git a/typesense/src/traits/document.rs b/typesense/src/traits/document.rs index dd019bd..dbc2803 100644 --- a/typesense/src/traits/document.rs +++ b/typesense/src/traits/document.rs @@ -14,9 +14,10 @@ pub trait DocumentPartial: Serialize {} pub trait Document: DeserializeOwned + Serialize { /// Collection name const COLLECTION_NAME: &'static str; + /// A struct for partial updates type Partial: DocumentPartial; /// Collection schema associated with the document. 
- fn collection_schema() -> CollectionSchema; + fn collection_schema() -> CollectionSchema<'static>; } diff --git a/typesense/src/traits/field_type.rs b/typesense/src/traits/field_type.rs index da2b52d..e994e8d 100644 --- a/typesense/src/traits/field_type.rs +++ b/typesense/src/traits/field_type.rs @@ -11,6 +11,7 @@ pub trait ToTypesenseField { } /// Generic implementation for any type that is also a Typesense document. impl ToTypesenseField for T { + #[inline(always)] fn to_typesense_type() -> &'static str { "object" } @@ -18,6 +19,7 @@ impl ToTypesenseField for T { /// Generic implementation for a Vec of any type that is also a Typesense document. impl ToTypesenseField for Vec { + #[inline(always)] fn to_typesense_type() -> &'static str { "object[]" } diff --git a/typesense/tests/client/aliases_test.rs b/typesense/tests/client/aliases_test.rs index 7faf018..c094a38 100644 --- a/typesense/tests/client/aliases_test.rs +++ b/typesense/tests/client/aliases_test.rs @@ -9,10 +9,10 @@ async fn logic_test_aliases_and_alias_lifecycle() { // --- 1. Create a collection to alias to --- let collection_schema = CollectionSchema { - name: collection_name.clone(), + name: collection_name.as_str().into(), fields: vec![Field { - name: "name".to_owned(), - r#type: "string".to_owned(), + name: "name".into(), + r#type: "string".into(), ..Default::default() }], ..Default::default() @@ -26,7 +26,7 @@ async fn logic_test_aliases_and_alias_lifecycle() { // --- 2. 
Create (Upsert) an alias --- let alias_schema = CollectionAliasSchema { - collection_name: collection_name.clone(), + collection_name: collection_name.as_str().into(), }; let upsert_result = client.aliases().upsert(&alias_name, alias_schema).await; diff --git a/typesense/tests/client/client_test.rs b/typesense/tests/client/client_test.rs index 05f3883..8365883 100644 --- a/typesense/tests/client/client_test.rs +++ b/typesense/tests/client/client_test.rs @@ -11,7 +11,7 @@ use wiremock::{ // Helper to create a mock Typesense server for a successful collection retrieval. async fn setup_mock_server_ok(server: &MockServer, collection_name: &str) { let response_body = CollectionResponse { - name: collection_name.to_owned(), + name: collection_name.into(), ..Default::default() }; diff --git a/typesense/tests/client/collections_test.rs b/typesense/tests/client/collections_test.rs index 70d471c..7c2ba47 100644 --- a/typesense/tests/client/collections_test.rs +++ b/typesense/tests/client/collections_test.rs @@ -10,16 +10,16 @@ async fn logic_test_collections_and_collection_lifecycle() { // --- 1. 
Create a Collection (via `collections`) --- let schema = CollectionSchema { - name: collection_name.clone(), + name: collection_name.as_str().into(), fields: vec![ Field { - name: "name".to_owned(), - r#type: "string".to_owned(), + name: "name".into(), + r#type: "string".into(), ..Default::default() }, Field { - name: "price".to_owned(), - r#type: "int32".to_owned(), + name: "price".into(), + r#type: "int32".into(), ..Default::default() }, ], @@ -67,14 +67,14 @@ async fn logic_test_collections_and_collection_lifecycle() { fields: vec![ // Add a new field Field { - name: "description".to_owned(), - r#type: "string".to_owned(), + name: "description".into(), + r#type: "string".into(), optional: Some(true), ..Default::default() }, // Drop an existing field Field { - name: "price".to_owned(), + name: "price".into(), drop: Some(true), ..Default::default() }, diff --git a/typesense/tests/client/conversation_models_test.rs b/typesense/tests/client/conversation_models_test.rs index d85e469..be5cd6f 100644 --- a/typesense/tests/client/conversation_models_test.rs +++ b/typesense/tests/client/conversation_models_test.rs @@ -19,32 +19,32 @@ async fn test_create_model_with_invalid_key_fails_as_expected() { // --- 1. 
Setup: Create the prerequisite collection for history --- let schema = CollectionSchema { - name: collection_name.clone(), + name: collection_name.as_str().into(), fields: vec![ Field { - name: "conversation_id".to_owned(), - r#type: "string".to_owned(), + name: "conversation_id".into(), + r#type: "string".into(), ..Default::default() }, Field { - name: "model_id".to_owned(), - r#type: "string".to_owned(), + name: "model_id".into(), + r#type: "string".into(), ..Default::default() }, Field { - name: "timestamp".to_owned(), - r#type: "int32".to_owned(), + name: "timestamp".into(), + r#type: "int32".into(), ..Default::default() }, Field { - name: "role".to_owned(), - r#type: "string".to_owned(), + name: "role".into(), + r#type: "string".into(), index: Some(false), ..Default::default() }, Field { - name: "message".to_owned(), - r#type: "string".to_owned(), + name: "message".into(), + r#type: "string".into(), index: Some(false), ..Default::default() }, @@ -59,10 +59,10 @@ async fn test_create_model_with_invalid_key_fails_as_expected() { // --- 2. 
Action: Attempt to create a model with a deliberately invalid API key --- let create_schema = ConversationModelCreateSchema { - id: Some(model_id.clone()), - model_name: "openai/gpt-4".to_owned(), - api_key: Some("THIS_IS_AN_INVALID_KEY".to_owned()), - history_collection: collection_name.clone(), + id: Some(model_id.into()), + model_name: "openai/gpt-4".into(), + api_key: Some("THIS_IS_AN_INVALID_KEY".into()), + history_collection: collection_name.into(), max_bytes: 10000, ..Default::default() }; @@ -134,11 +134,11 @@ async fn test_create_model_with_wiremock() { let collection_name = new_id("history-collection"); let create_schema = ConversationModelCreateSchema { - id: Some(model_id.clone()), - model_name: "openai/gpt-4".to_owned(), - api_key: Some("A-FAKE-BUT-VALID-LOOKING-KEY".to_owned()), - history_collection: collection_name.clone(), - system_prompt: Some("You are a helpful assistant.".to_owned()), + id: Some(model_id.as_str().into()), + model_name: "openai/gpt-4".into(), + api_key: Some("A-FAKE-BUT-VALID-LOOKING-KEY".into()), + history_collection: collection_name.as_str().into(), + system_prompt: Some("You are a helpful assistant.".into()), ..Default::default() }; @@ -180,7 +180,7 @@ async fn test_create_model_with_wiremock() { assert_eq!(created_model.history_collection, collection_name); assert_eq!( created_model.system_prompt, - Some("You are a helpful assistant.".to_owned()) + Some("You are a helpful assistant.".into()) ); } @@ -270,7 +270,7 @@ async fn test_update_single_model_with_wiremock() { let model_id = new_id("conv-model"); let update_schema = ConversationModelUpdateSchema { - system_prompt: Some("A new, updated prompt.".to_owned()), + system_prompt: Some("A new, updated prompt.".into()), ..Default::default() }; diff --git a/typesense/tests/client/derive_integration_test.rs b/typesense/tests/client/derive_integration_test.rs index 89178f9..ef0c14a 100644 --- a/typesense/tests/client/derive_integration_test.rs +++ 
b/typesense/tests/client/derive_integration_test.rs @@ -1,8 +1,9 @@ use serde::{Deserialize, Serialize}; -use typesense::Typesense; -use typesense::models::Field; -use typesense::models::SearchParameters; -use typesense::prelude::*; +use typesense::{ + Typesense, + models::{Field, SearchParameters}, + prelude::*, +}; use crate::{get_client, new_id}; @@ -122,7 +123,7 @@ async fn logic_test_derive_macro_with_generic_client_lifecycle() { // Create Collection using the schema from the derive macro let schema = MegaProduct::collection_schema(); let mut schema_for_creation = schema.clone(); - schema_for_creation.name = collection_name.clone(); // Use the unique name + schema_for_creation.name = collection_name.as_str().into(); // Use the unique name let create_res = client.collections().create(schema_for_creation).await; assert!( @@ -146,8 +147,8 @@ async fn logic_test_derive_macro_with_generic_client_lifecycle() { .collect(); // Iterate through our *expected* fields and assert only the attributes we set. - for expected_field in schema.fields { - let field_name = &expected_field.name; + for expected_field in schema.fields.into_iter() { + let field_name = &expected_field.name as &str; // The 'id' field is a special primary key and not listed in the schema's "fields" array. 
if field_name == "id" { continue; @@ -160,7 +161,7 @@ async fn logic_test_derive_macro_with_generic_client_lifecycle() { }); // Perform targeted checks based on the attributes set in MegaProduct struct - match field_name.as_str() { + match field_name { "title" => { assert_eq!( actual_field.infix, @@ -343,7 +344,7 @@ async fn logic_test_derive_macro_with_generic_client_lifecycle() { "locale" => { assert_eq!( actual_field.locale, - Some("vi".to_owned()), + Some("vi".into()), "Field 'locale' should have locale of 'vi'" ); } @@ -439,8 +440,8 @@ async fn logic_test_derive_macro_with_generic_client_lifecycle() { typesense::Error, > = documents_client .search(SearchParameters { - q: Some("Wrench".to_owned()), - query_by: Some("title".to_owned()), + q: Some("Wrench".into()), + query_by: Some("title".into()), ..Default::default() }) .await; @@ -449,8 +450,8 @@ async fn logic_test_derive_macro_with_generic_client_lifecycle() { // B. Search a renamed field let search_res2 = documents_client .search(SearchParameters { - q: Some("Wrenchmaster".to_owned()), - query_by: Some("product_name".to_owned()), + q: Some("Wrenchmaster".into()), + query_by: Some("product_name".into()), ..Default::default() }) .await; @@ -458,9 +459,9 @@ async fn logic_test_derive_macro_with_generic_client_lifecycle() { // C. Filter by a facet let search_params3 = SearchParameters { - q: Some("*".to_owned()), - query_by: Some("title".to_owned()), - filter_by: Some("brand:='MegaTools'".to_owned()), + q: Some("*".into()), + query_by: Some("title".into()), + filter_by: Some("brand:='MegaTools'".into()), ..Default::default() }; let search_res3 = documents_client.search(search_params3).await; @@ -468,9 +469,9 @@ async fn logic_test_derive_macro_with_generic_client_lifecycle() { // D. 
Filter by a range_index let search_params4 = SearchParameters { - q: Some("*".to_owned()), - query_by: Some("title".to_owned()), - filter_by: Some("review_score:>4.5".to_owned()), + q: Some("*".into()), + query_by: Some("title".into()), + filter_by: Some("review_score:>4.5".into()), ..Default::default() }; let search_res4 = documents_client.search(search_params4).await; @@ -478,8 +479,8 @@ async fn logic_test_derive_macro_with_generic_client_lifecycle() { // E. Search a flattened field let search_params5 = SearchParameters { - q: Some("MT-WM-3000".to_owned()), - query_by: Some("details.part_number".to_owned()), + q: Some("MT-WM-3000".into()), + query_by: Some("details.part_number".into()), ..Default::default() }; let search_res5 = documents_client.search(search_params5).await; @@ -487,9 +488,9 @@ async fn logic_test_derive_macro_with_generic_client_lifecycle() { // F. Filter by a deep nested field let search_params_deep = SearchParameters { - q: Some("*".to_owned()), - query_by: Some("title".to_owned()), - filter_by: Some("details.extra_details.color:='Red'".to_owned()), + q: Some("*".into()), + query_by: Some("title".into()), + filter_by: Some("details.extra_details.color:='Red'".into()), ..Default::default() }; let search_res_deep = documents_client.search(search_params_deep).await; @@ -500,8 +501,8 @@ async fn logic_test_derive_macro_with_generic_client_lifecycle() { ); let search_params6 = SearchParameters { - q: Some("WH-US-WEST-05".to_owned()), - query_by: Some("logistics_data.warehouse_code".to_owned()), + q: Some("WH-US-WEST-05".into()), + query_by: Some("logistics_data.warehouse_code".into()), ..Default::default() }; let search_res6 = documents_client.search(search_params6).await; @@ -513,8 +514,8 @@ async fn logic_test_derive_macro_with_generic_client_lifecycle() { // G. 
Search a field in a nested object array let search_params7 = SearchParameters { - q: Some("p-01".to_owned()), - query_by: Some("parts.part_id".to_owned()), + q: Some("p-01".into()), + query_by: Some("parts.part_id".into()), ..Default::default() }; let search_res7 = documents_client.search(search_params7).await; @@ -526,8 +527,8 @@ async fn logic_test_derive_macro_with_generic_client_lifecycle() { // H. Search a field in a flattened nested object array let search_params8 = SearchParameters { - q: Some("Supplier A".to_owned()), - query_by: Some("parts.supplier.name".to_owned()), + q: Some("Supplier A".into()), + query_by: Some("parts.supplier.name".into()), ..Default::default() }; let search_res8 = documents_client.search(search_params8).await; @@ -617,10 +618,10 @@ async fn logic_test_manual_flattening_lifecycle() { // 1. Create collection from the schema derived from `ManualFlattenedProduct` let mut schema = ManualFlattenedProduct::collection_schema(); - schema.name = collection_name.clone(); + schema.name = collection_name.as_str().into(); // Verify the generated schema is correct *before* creating it - let schema_fields: Vec<_> = schema.fields.iter().map(|f| f.name.as_str()).collect(); + let schema_fields: Vec<_> = schema.fields.iter().map(|f| &f.name as &str).collect(); assert!( !schema_fields.contains(&"details"), "Schema should not contain the skipped 'details' field" @@ -674,8 +675,8 @@ async fn logic_test_manual_flattening_lifecycle() { let search_res_indexed = typed_collection .documents() .search(SearchParameters { - q: Some("PG-123".to_owned()), - query_by: Some("details.part_number".to_owned()), + q: Some("PG-123".into()), + query_by: Some("details.part_number".into()), ..Default::default() }) .await diff --git a/typesense/tests/client/documents_test.rs b/typesense/tests/client/documents_test.rs index a98cce1..474c1c3 100644 --- a/typesense/tests/client/documents_test.rs +++ b/typesense/tests/client/documents_test.rs @@ -14,22 +14,22 @@ async fn 
run_test_schemaless_document_lifecycle() { // --- Setup: Create a Collection --- let schema = CollectionSchema { - name: collection_name.clone(), + name: collection_name.as_str().into(), fields: vec![ Field { - name: "title".to_owned(), - r#type: "string".to_owned(), + name: "title".into(), + r#type: "string".into(), ..Default::default() }, Field { - name: "author".to_owned(), - r#type: "string".to_owned(), + name: "author".into(), + r#type: "string".into(), facet: Some(true), ..Default::default() }, Field { - name: "publication_year".to_owned(), - r#type: "int32".to_owned(), + name: "publication_year".into(), + r#type: "int32".into(), ..Default::default() }, ], @@ -106,7 +106,7 @@ async fn run_test_schemaless_document_lifecycle() { // --- Export documents (via `documents().export_jsonl()`) --- let export_params = ExportDocumentsParameters { - filter_by: Some("author:John".to_owned()), + filter_by: Some("author:John".into()), ..Default::default() }; let exported_jsonl = documents_client @@ -122,7 +122,7 @@ async fn run_test_schemaless_document_lifecycle() { // --- Bulk Delete --- let delete_params = DeleteDocumentsParameters { - filter_by: "publication_year:>1960".to_owned(), + filter_by: "publication_year:>1960".into(), ..Default::default() }; let bulk_delete_response = documents_client @@ -152,27 +152,27 @@ async fn run_test_generic_document_lifecycle() { // --- 1. 
Setup: Create a Collection matching the Book struct --- let schema = CollectionSchema { - name: collection_name.clone(), + name: collection_name.as_str().into(), fields: vec![ Field { - name: "title".to_owned(), - r#type: "string".to_owned(), + name: "title".into(), + r#type: "string".into(), ..Default::default() }, Field { - name: "author".to_owned(), - r#type: "string".to_owned(), + name: "author".into(), + r#type: "string".into(), facet: Some(true), ..Default::default() }, Field { - name: "publication_year".to_owned(), - r#type: "int32".to_owned(), + name: "publication_year".into(), + r#type: "int32".into(), ..Default::default() }, Field { - name: "in_stock".to_owned(), - r#type: "bool".to_owned(), + name: "in_stock".into(), + r#type: "bool".into(), optional: Some(true), ..Default::default() }, @@ -232,8 +232,8 @@ async fn run_test_generic_document_lifecycle() { // --- 5. Search for documents with strongly-typed results --- let search_params = SearchParameters { - q: Some("dune".to_owned()), - query_by: Some("title".to_owned()), + q: Some("dune".into()), + query_by: Some("title".into()), ..Default::default() }; let search_results = typed_collection @@ -277,7 +277,7 @@ async fn run_test_generic_document_lifecycle() { // --- 7. Bulk Update (via `documents().update()`) --- let bulk_update_params = UpdateDocumentsParameters { - filter_by: Some("publication_year:>1965".to_owned()), + filter_by: Some("publication_year:>1965".into()), }; let bulk_update_response = typed_collection .documents() diff --git a/typesense/tests/client/keys_test.rs b/typesense/tests/client/keys_test.rs index a2f4a21..551275a 100644 --- a/typesense/tests/client/keys_test.rs +++ b/typesense/tests/client/keys_test.rs @@ -7,7 +7,7 @@ async fn run_test_keys_lifecycle() { // --- 1. 
Create a new API Key (via `keys`) --- let key_schema = ApiKeySchema { - description: key_description.to_owned(), + description: key_description.into(), actions: vec!["documents:search".to_owned()], // Grant only search permissions collections: vec!["*".to_owned()], // For all collections ..Default::default() @@ -88,7 +88,7 @@ fn test_generate_scoped_search_key_with_example_values() { // The parameters to be embedded in the new scoped key. let params = ScopedKeyParameters { search_params: Some(SearchParameters { - filter_by: Some("company_id:124".to_owned()), + filter_by: Some("company_id:124".into()), ..Default::default() }), expires_at: Some(1906054106), diff --git a/typesense/tests/client/multi_search_test.rs b/typesense/tests/client/multi_search_test.rs index 643ff45..38a7b22 100644 --- a/typesense/tests/client/multi_search_test.rs +++ b/typesense/tests/client/multi_search_test.rs @@ -16,20 +16,20 @@ async fn setup_multi_search_tests( ) { // --- Create collections --- let products_schema = CollectionSchema { - name: products_collection_name.to_owned(), + name: products_collection_name.to_owned().into(), fields: vec![ - Field::new("name".to_owned(), "string".to_owned()), - Field::new("price".to_owned(), "int32".to_owned()), + Field::new("name".into(), "string".into()), + Field::new("price".into(), "int32".into()), ], ..Default::default() }; client.collections().create(products_schema).await.unwrap(); let brands_schema = CollectionSchema { - name: brands_collection_name.to_owned(), + name: brands_collection_name.to_owned().into(), fields: vec![ - Field::new("company_name".to_owned(), "string".to_owned()), - Field::new("country".to_owned(), "string".to_owned()), + Field::new("company_name".into(), "string".into()), + Field::new("country".into(), "string".into()), ], ..Default::default() }; @@ -94,13 +94,13 @@ async fn run_test_multi_search_federated() { MultiSearchCollectionParameters { q: Some("pro".into()), query_by: Some("name".into()), - collection: 
Some(products_collection_name.clone()), + collection: Some(products_collection_name.into()), ..Default::default() }, MultiSearchCollectionParameters { q: Some("USA".into()), query_by: Some("country".into()), - collection: Some(brands_collection_name.clone()), + collection: Some(brands_collection_name.into()), ..Default::default() }, ], @@ -161,13 +161,13 @@ async fn run_test_multi_search_with_common_params() { let search_requests = MultiSearchBody { searches: vec![ MultiSearchCollectionParameters { - collection: Some(products_collection_name.clone()), + collection: Some(products_collection_name.into()), q: Some("pro".into()), // This should find "Macbook Pro" query_by: Some("name".into()), // Specific to the products schema ..Default::default() }, MultiSearchCollectionParameters { - collection: Some(brands_collection_name.clone()), + collection: Some(brands_collection_name.into()), q: Some("inc".into()), // This should find "Apple Inc." query_by: Some("company_name".into()), // Specific to the brands schema ..Default::default() @@ -249,14 +249,14 @@ async fn run_test_multi_search_generic_parsing() { MultiSearchCollectionParameters { q: Some("pro".into()), query_by: Some("name".into()), - collection: Some(products_collection_name.clone()), + collection: Some(products_collection_name.into()), ..Default::default() }, // Search #1 for brands MultiSearchCollectionParameters { q: Some("USA".into()), query_by: Some("country".into()), - collection: Some(brands_collection_name.clone()), + collection: Some(brands_collection_name.into()), ..Default::default() }, ], @@ -345,13 +345,13 @@ async fn run_test_multi_search_union_heterogeneous() { MultiSearchCollectionParameters { q: Some("pro".into()), query_by: Some("name".into()), - collection: Some(products_collection_name.clone()), + collection: Some(products_collection_name.into()), ..Default::default() }, MultiSearchCollectionParameters { q: Some("samsung".into()), query_by: Some("company_name".into()), - collection: 
Some(brands_collection_name.clone()), + collection: Some(brands_collection_name.into()), ..Default::default() }, ], @@ -427,14 +427,14 @@ async fn run_test_multi_search_union_homogeneous_and_typed_conversion() { MultiSearchCollectionParameters { q: Some("iphone".into()), query_by: Some("name".into()), - collection: Some(products_collection_name.clone()), + collection: Some(products_collection_name.as_str().into()), ..Default::default() }, // This query should find "MacBook Pro" MultiSearchCollectionParameters { q: Some("macbook".into()), query_by: Some("name".into()), - collection: Some(products_collection_name.clone()), + collection: Some(products_collection_name.as_str().into()), ..Default::default() }, ], diff --git a/typesense/tests/client/operations_test.rs b/typesense/tests/client/operations_test.rs index 9a0f1a6..f9a5c10 100644 --- a/typesense/tests/client/operations_test.rs +++ b/typesense/tests/client/operations_test.rs @@ -66,7 +66,7 @@ async fn run_test_take_snapshot() { // Note: This requires a directory that Typesense can write to. // In a typical Docker setup, `/tmp` is a safe choice. let params = typesense::models::TakeSnapshotParams { - snapshot_path: "/tmp/typesense-snapshots-rust-test".to_string(), + snapshot_path: "/tmp/typesense-snapshots-rust-test".into(), }; let snapshot_result = client.operations().take_snapshot(params).await; diff --git a/typesense/tests/client/presets_test.rs b/typesense/tests/client/presets_test.rs index 4cd2c62..1f02863 100644 --- a/typesense/tests/client/presets_test.rs +++ b/typesense/tests/client/presets_test.rs @@ -9,8 +9,8 @@ async fn run_test_presets_lifecycle() { // --- 1. Define the Preset's value using the strong types --- // This will be the expected value in the response as well. 
let search_params = SearchParameters { - query_by: Some("title,authors".to_owned()), - sort_by: Some("_text_match:desc,publication_year:desc".to_owned()), + query_by: Some("title,authors".into()), + sort_by: Some("_text_match:desc,publication_year:desc".into()), ..Default::default() }; let expected_preset_value = PresetUpsertSchemaValue::SearchParameters(Box::new(search_params)); diff --git a/typesense/tests/client/stopwords_test.rs b/typesense/tests/client/stopwords_test.rs index 9f89abe..f0310e6 100644 --- a/typesense/tests/client/stopwords_test.rs +++ b/typesense/tests/client/stopwords_test.rs @@ -12,9 +12,11 @@ async fn run_test_stopwords_and_stopword_lifecycle() { ..Default::default() }; - let upsert_result = client.stopwords().upsert(&set_id, schema).await; - assert!(upsert_result.is_ok(), "Failed to upsert stopwords set"); - let upserted_set = upsert_result.unwrap(); + let upserted_set = client + .stopwords() + .upsert(&set_id, schema) + .await + .expect("Failed to upsert stopwords set"); assert_eq!(upserted_set.id, set_id); assert_eq!(upserted_set.stopwords, vec!["a", "the", "an"]); diff --git a/typesense/tests/derive/collection.rs b/typesense/tests/derive/collection.rs index 9cc6821..85e4b2c 100644 --- a/typesense/tests/derive/collection.rs +++ b/typesense/tests/derive/collection.rs @@ -1,10 +1,7 @@ -use std::collections::BTreeMap; -use std::collections::HashMap; - +use ::std::collections::{BTreeMap, HashMap}; use serde::{Deserialize, Serialize}; use serde_json::json; -use typesense::Typesense; -use typesense::prelude::*; +use typesense::{Typesense, prelude::*}; // Test 1: Basic Schema Generation (keeping the old test to ensure backward compatibility) #[allow(dead_code)] @@ -218,10 +215,19 @@ fn derived_document_handles_nested_and_flattened_fields() { // --- Sub-fields indexing --- { "name": "profile", "type": "object" }, + + { "name": "previous_addresses", "type": "object[]" }, + + { "name": "nested_struct_vec", "type": "object[]"}, + + // --- Manually 
flattened object --- + // correctly skipped `data` + { "name": "primary_address.city", "type": "string" }, + { "name": "work_addresses.zip", "type": "string[]" }, + { "name": "profile.name", "type": "string", "facet": true, "sort": true}, { "name": "profile.email", "type": "string", "optional": true }, - { "name": "previous_addresses", "type": "object[]" }, { "name": "previous_addresses.line_1", "type": "string[]" }, { "name": "previous_addresses.number", "type": "int32[]" }, { "name": "previous_addresses.optional_field", "type": "string[]", "optional": true}, @@ -235,16 +241,10 @@ fn derived_document_handles_nested_and_flattened_fields() { { "name": "nested_struct.address.primary_city", "type": "string" }, { "name": "nested_struct.address.work_zips", "type": "string[]" }, - { "name": "nested_struct_vec", "type": "object[]"}, { "name": "nested_struct_vec.name", "type": "string[]"}, { "name": "nested_struct_vec.address", "type": "object[]" }, { "name": "nested_struct_vec.address.primary_city", "type": "string[]" }, - { "name": "nested_struct_vec.address.work_zips", "type": "string[]" }, - - // --- Manually flattened object --- - // correctly skipped `data` - { "name": "primary_address.city", "type": "string" }, - { "name": "work_addresses.zip", "type": "string[]" } + { "name": "nested_struct_vec.address.work_zips", "type": "string[]" } ] }); diff --git a/typesense_codegen/.openapi-generator/VERSION b/typesense_codegen/.openapi-generator/VERSION index e465da4..2fb556b 100644 --- a/typesense_codegen/.openapi-generator/VERSION +++ b/typesense_codegen/.openapi-generator/VERSION @@ -1 +1 @@ -7.14.0 +7.18.0-SNAPSHOT diff --git a/typesense_codegen/README.md b/typesense_codegen/README.md index d324f6f..250ae46 100644 --- a/typesense_codegen/README.md +++ b/typesense_codegen/README.md @@ -9,7 +9,7 @@ This API client was generated by the [OpenAPI Generator](https://openapi-generat - API version: 30.0 - Package version: 30.0 -- Generator version: 7.14.0 +- Generator version: 
7.18.0-SNAPSHOT - Build package: `org.openapitools.codegen.languages.RustClientCodegen` ## Installation diff --git a/typesense_codegen/src/apis/analytics_api.rs b/typesense_codegen/src/apis/analytics_api.rs index ef17e52..eed196b 100644 --- a/typesense_codegen/src/apis/analytics_api.rs +++ b/typesense_codegen/src/apis/analytics_api.rs @@ -10,61 +10,62 @@ use super::{ContentType, Error, configuration}; use crate::{apis::ResponseContent, models}; +use ::std::borrow::Cow; use reqwest; use serde::{Deserialize, Serialize, de::Error as _}; /// struct for passing parameters to the method [`create_analytics_event`] #[derive(Clone, Debug)] -pub struct CreateAnalyticsEventParams { +pub struct CreateAnalyticsEventParams<'p> { /// The analytics event to be created - pub analytics_event: models::AnalyticsEvent, + pub analytics_event: models::AnalyticsEvent<'p>, } /// struct for passing parameters to the method [`create_analytics_rule`] #[derive(Clone, Debug)] -pub struct CreateAnalyticsRuleParams { +pub struct CreateAnalyticsRuleParams<'p> { /// The analytics rule(s) to be created - pub create_analytics_rule_request: models::CreateAnalyticsRuleRequest, + pub create_analytics_rule_request: models::CreateAnalyticsRuleRequest<'p>, } /// struct for passing parameters to the method [`delete_analytics_rule`] #[derive(Clone, Debug)] -pub struct DeleteAnalyticsRuleParams { +pub struct DeleteAnalyticsRuleParams<'p> { /// The name of the analytics rule to delete - pub rule_name: String, + pub rule_name: Cow<'p, str>, } /// struct for passing parameters to the method [`get_analytics_events`] #[derive(Clone, Debug)] -pub struct GetAnalyticsEventsParams { - pub user_id: String, +pub struct GetAnalyticsEventsParams<'p> { + pub user_id: Cow<'p, str>, /// Analytics rule name - pub name: String, + pub name: Cow<'p, str>, /// Number of events to return (max 1000) pub n: i32, } /// struct for passing parameters to the method [`retrieve_analytics_rule`] #[derive(Clone, Debug)] -pub struct 
RetrieveAnalyticsRuleParams { +pub struct RetrieveAnalyticsRuleParams<'p> { /// The name of the analytics rule to retrieve - pub rule_name: String, + pub rule_name: Cow<'p, str>, } /// struct for passing parameters to the method [`retrieve_analytics_rules`] #[derive(Clone, Debug)] -pub struct RetrieveAnalyticsRulesParams { +pub struct RetrieveAnalyticsRulesParams<'p> { /// Filter rules by rule_tag - pub rule_tag: Option, + pub rule_tag: Option>, } /// struct for passing parameters to the method [`upsert_analytics_rule`] #[derive(Clone, Debug)] -pub struct UpsertAnalyticsRuleParams { +pub struct UpsertAnalyticsRuleParams<'p> { /// The name of the analytics rule to upsert - pub rule_name: String, + pub rule_name: Cow<'p, str>, /// The Analytics rule to be upserted - pub analytics_rule_update: models::AnalyticsRuleUpdate, + pub analytics_rule_update: models::AnalyticsRuleUpdate<'p>, } /// struct for typed errors of method [`create_analytics_event`] @@ -139,7 +140,7 @@ pub enum UpsertAnalyticsRuleError { /// Submit a single analytics event. The event must correspond to an existing analytics rule by name. pub async fn create_analytics_event( configuration: &configuration::Configuration, - params: &CreateAnalyticsEventParams, + params: &CreateAnalyticsEventParams<'_>, ) -> Result> { let uri_str = format!("{}/analytics/events", configuration.base_path); let mut req_builder = configuration @@ -199,7 +200,7 @@ pub async fn create_analytics_event( /// Create one or more analytics rules. You can send a single rule object or an array of rule objects. 
pub async fn create_analytics_rule( configuration: &configuration::Configuration, - params: &CreateAnalyticsRuleParams, + params: &CreateAnalyticsRuleParams<'_>, ) -> Result> { let uri_str = format!("{}/analytics/rules", configuration.base_path); let mut req_builder = configuration @@ -259,7 +260,7 @@ pub async fn create_analytics_rule( /// Permanently deletes an analytics rule, given it's name pub async fn delete_analytics_rule( configuration: &configuration::Configuration, - params: &DeleteAnalyticsRuleParams, + params: &DeleteAnalyticsRuleParams<'_>, ) -> Result> { let uri_str = format!( "{}/analytics/rules/{ruleName}", @@ -380,7 +381,7 @@ pub async fn flush_analytics( /// Retrieve the most recent events for a user and rule. pub async fn get_analytics_events( configuration: &configuration::Configuration, - params: &GetAnalyticsEventsParams, + params: &GetAnalyticsEventsParams<'_>, ) -> Result> { let uri_str = format!("{}/analytics/events", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); @@ -496,7 +497,7 @@ pub async fn get_analytics_status( /// Retrieve the details of an analytics rule, given it's name pub async fn retrieve_analytics_rule( configuration: &configuration::Configuration, - params: &RetrieveAnalyticsRuleParams, + params: &RetrieveAnalyticsRuleParams<'_>, ) -> Result> { let uri_str = format!( "{}/analytics/rules/{ruleName}", @@ -557,7 +558,7 @@ pub async fn retrieve_analytics_rule( /// Retrieve all analytics rules. Use the optional rule_tag filter to narrow down results. 
pub async fn retrieve_analytics_rules( configuration: &configuration::Configuration, - params: &RetrieveAnalyticsRulesParams, + params: &RetrieveAnalyticsRulesParams<'_>, ) -> Result, Error> { let uri_str = format!("{}/analytics/rules", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); @@ -617,7 +618,7 @@ pub async fn retrieve_analytics_rules( /// Upserts an analytics rule with the given name. pub async fn upsert_analytics_rule( configuration: &configuration::Configuration, - params: &UpsertAnalyticsRuleParams, + params: &UpsertAnalyticsRuleParams<'_>, ) -> Result> { let uri_str = format!( "{}/analytics/rules/{ruleName}", diff --git a/typesense_codegen/src/apis/collections_api.rs b/typesense_codegen/src/apis/collections_api.rs index b965f8c..af842a8 100644 --- a/typesense_codegen/src/apis/collections_api.rs +++ b/typesense_codegen/src/apis/collections_api.rs @@ -10,68 +10,69 @@ use super::{ContentType, Error, configuration}; use crate::{apis::ResponseContent, models}; +use ::std::borrow::Cow; use reqwest; use serde::{Deserialize, Serialize, de::Error as _}; /// struct for passing parameters to the method [`create_collection`] #[derive(Clone, Debug)] -pub struct CreateCollectionParams { +pub struct CreateCollectionParams<'p> { /// The collection object to be created - pub collection_schema: models::CollectionSchema, + pub collection_schema: models::CollectionSchema<'p>, } /// struct for passing parameters to the method [`delete_alias`] #[derive(Clone, Debug)] -pub struct DeleteAliasParams { +pub struct DeleteAliasParams<'p> { /// The name of the alias to delete - pub alias_name: String, + pub alias_name: Cow<'p, str>, } /// struct for passing parameters to the method [`delete_collection`] #[derive(Clone, Debug)] -pub struct DeleteCollectionParams { +pub struct DeleteCollectionParams<'p> { /// The name of the collection to delete - pub collection_name: String, + pub collection_name: Cow<'p, str>, } /// 
struct for passing parameters to the method [`get_alias`] #[derive(Clone, Debug)] -pub struct GetAliasParams { +pub struct GetAliasParams<'p> { /// The name of the alias to retrieve - pub alias_name: String, + pub alias_name: Cow<'p, str>, } /// struct for passing parameters to the method [`get_collection`] #[derive(Clone, Debug)] -pub struct GetCollectionParams { +pub struct GetCollectionParams<'p> { /// The name of the collection to retrieve - pub collection_name: String, + pub collection_name: Cow<'p, str>, } /// struct for passing parameters to the method [`get_collections`] #[derive(Clone, Debug)] -pub struct GetCollectionsParams { - pub exclude_fields: Option, +pub struct GetCollectionsParams<'p> { + pub exclude_fields: Option>, pub limit: Option, pub offset: Option, } /// struct for passing parameters to the method [`update_collection`] #[derive(Clone, Debug)] -pub struct UpdateCollectionParams { +pub struct UpdateCollectionParams<'p> { /// The name of the collection to update - pub collection_name: String, + pub collection_name: Cow<'p, str>, /// The document object with fields to be updated pub collection_update_schema: models::CollectionUpdateSchema, } /// struct for passing parameters to the method [`upsert_alias`] #[derive(Clone, Debug)] -pub struct UpsertAliasParams { +pub struct UpsertAliasParams<'p> { /// The name of the alias to create/update - pub alias_name: String, + pub alias_name: Cow<'p, str>, /// Collection alias to be created/updated - pub collection_alias_schema: Option, + pub collection_alias_schema: Option>, } /// struct for typed errors of method [`create_collection`] @@ -150,7 +151,7 @@ pub enum UpsertAliasError { /// When a collection is created, we give it a name and describe the fields that will be indexed from the documents added to the collection. 
pub async fn create_collection( configuration: &configuration::Configuration, - params: &CreateCollectionParams, + params: &CreateCollectionParams<'_>, ) -> Result> { let uri_str = format!("{}/collections", configuration.base_path); let mut req_builder = configuration @@ -209,7 +210,7 @@ pub async fn create_collection( pub async fn delete_alias( configuration: &configuration::Configuration, - params: &DeleteAliasParams, + params: &DeleteAliasParams<'_>, ) -> Result> { let uri_str = format!( "{}/aliases/{aliasName}", @@ -272,7 +273,7 @@ pub async fn delete_alias( /// Permanently drops a collection. This action cannot be undone. For large collections, this might have an impact on read latencies. pub async fn delete_collection( configuration: &configuration::Configuration, - params: &DeleteCollectionParams, + params: &DeleteCollectionParams<'_>, ) -> Result> { let uri_str = format!( "{}/collections/{collectionName}", @@ -335,7 +336,7 @@ pub async fn delete_collection( /// Find out which collection an alias points to by fetching it pub async fn get_alias( configuration: &configuration::Configuration, - params: &GetAliasParams, + params: &GetAliasParams<'_>, ) -> Result> { let uri_str = format!( "{}/aliases/{aliasName}", @@ -452,7 +453,7 @@ pub async fn get_aliases( /// Retrieve the details of a collection, given its name. pub async fn get_collection( configuration: &configuration::Configuration, - params: &GetCollectionParams, + params: &GetCollectionParams<'_>, ) -> Result> { let uri_str = format!( "{}/collections/{collectionName}", @@ -513,7 +514,7 @@ pub async fn get_collection( /// Returns a summary of all your collections. The collections are returned sorted by creation date, with the most recent collections appearing first. 
pub async fn get_collections( configuration: &configuration::Configuration, - params: &GetCollectionsParams, + params: &GetCollectionsParams<'_>, ) -> Result, Error> { let uri_str = format!("{}/collections", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); @@ -579,7 +580,7 @@ pub async fn get_collections( /// Update a collection's schema to modify the fields and their types. pub async fn update_collection( configuration: &configuration::Configuration, - params: &UpdateCollectionParams, + params: &UpdateCollectionParams<'_>, ) -> Result> { let uri_str = format!( "{}/collections/{collectionName}", @@ -643,7 +644,7 @@ pub async fn update_collection( /// Create or update a collection alias. An alias is a virtual collection name that points to a real collection. If you're familiar with symbolic links on Linux, it's very similar to that. Aliases are useful when you want to reindex your data in the background on a new collection and switch your application to it without any changes to your code. 
pub async fn upsert_alias( configuration: &configuration::Configuration, - params: &UpsertAliasParams, + params: &UpsertAliasParams<'_>, ) -> Result> { let uri_str = format!( "{}/aliases/{aliasName}", diff --git a/typesense_codegen/src/apis/conversations_api.rs b/typesense_codegen/src/apis/conversations_api.rs index c495f6b..3a16d76 100644 --- a/typesense_codegen/src/apis/conversations_api.rs +++ b/typesense_codegen/src/apis/conversations_api.rs @@ -10,35 +10,36 @@ use super::{ContentType, Error, configuration}; use crate::{apis::ResponseContent, models}; +use ::std::borrow::Cow; use reqwest; use serde::{Deserialize, Serialize, de::Error as _}; /// struct for passing parameters to the method [`create_conversation_model`] #[derive(Clone, Debug)] -pub struct CreateConversationModelParams { - pub conversation_model_create_schema: models::ConversationModelCreateSchema, +pub struct CreateConversationModelParams<'p> { + pub conversation_model_create_schema: models::ConversationModelCreateSchema<'p>, } /// struct for passing parameters to the method [`delete_conversation_model`] #[derive(Clone, Debug)] -pub struct DeleteConversationModelParams { +pub struct DeleteConversationModelParams<'p> { /// The id of the conversation model to delete - pub model_id: String, + pub model_id: Cow<'p, str>, } /// struct for passing parameters to the method [`retrieve_conversation_model`] #[derive(Clone, Debug)] -pub struct RetrieveConversationModelParams { +pub struct RetrieveConversationModelParams<'p> { /// The id of the conversation model to retrieve - pub model_id: String, + pub model_id: Cow<'p, str>, } /// struct for passing parameters to the method [`update_conversation_model`] #[derive(Clone, Debug)] -pub struct UpdateConversationModelParams { +pub struct UpdateConversationModelParams<'p> { /// The id of the conversation model to update - pub model_id: String, - pub conversation_model_update_schema: models::ConversationModelUpdateSchema, + pub model_id: Cow<'p, str>, + pub 
conversation_model_update_schema: models::ConversationModelUpdateSchema<'p>, } /// struct for typed errors of method [`create_conversation_model`] @@ -80,7 +81,7 @@ pub enum UpdateConversationModelError { /// Create a Conversation Model pub async fn create_conversation_model( configuration: &configuration::Configuration, - params: &CreateConversationModelParams, + params: &CreateConversationModelParams<'_>, ) -> Result> { let uri_str = format!("{}/conversations/models", configuration.base_path); let mut req_builder = configuration @@ -140,7 +141,7 @@ pub async fn create_conversation_model( /// Delete a conversation model pub async fn delete_conversation_model( configuration: &configuration::Configuration, - params: &DeleteConversationModelParams, + params: &DeleteConversationModelParams<'_>, ) -> Result> { let uri_str = format!( "{}/conversations/models/{modelId}", @@ -260,7 +261,7 @@ pub async fn retrieve_all_conversation_models( /// Retrieve a conversation model pub async fn retrieve_conversation_model( configuration: &configuration::Configuration, - params: &RetrieveConversationModelParams, + params: &RetrieveConversationModelParams<'_>, ) -> Result> { let uri_str = format!( "{}/conversations/models/{modelId}", @@ -321,7 +322,7 @@ pub async fn retrieve_conversation_model( /// Update a conversation model pub async fn update_conversation_model( configuration: &configuration::Configuration, - params: &UpdateConversationModelParams, + params: &UpdateConversationModelParams<'_>, ) -> Result> { let uri_str = format!( "{}/conversations/models/{modelId}", diff --git a/typesense_codegen/src/apis/curation_api.rs b/typesense_codegen/src/apis/curation_api.rs deleted file mode 100644 index 4d3c9c6..0000000 --- a/typesense_codegen/src/apis/curation_api.rs +++ /dev/null @@ -1,250 +0,0 @@ -/* - * Typesense API - * - * An open source search engine for building delightful search experiences. 
- * - * The version of the OpenAPI document: 30.0 - * - * Generated by: https://openapi-generator.tech - */ - -use super::{ContentType, Error, configuration}; -use crate::{apis::ResponseContent, models}; -use reqwest; -use serde::{Deserialize, Serialize, de::Error as _}; - -/// struct for passing parameters to the method [`delete_search_override`] -#[derive(Clone, Debug)] -pub struct DeleteSearchOverrideParams { - /// The name of the collection - pub collection_name: String, - /// The ID of the search override to delete - pub override_id: String, -} - -/// struct for passing parameters to the method [`get_search_overrides`] -#[derive(Clone, Debug)] -pub struct GetSearchOverridesParams { - /// The name of the collection - pub collection_name: String, -} - -/// struct for passing parameters to the method [`upsert_search_override`] -#[derive(Clone, Debug)] -pub struct UpsertSearchOverrideParams { - /// The name of the collection - pub collection_name: String, - /// The ID of the search override to create/update - pub override_id: String, - /// The search override object to be created/updated - pub search_override_schema: models::SearchOverrideSchema, -} - -/// struct for typed errors of method [`delete_search_override`] -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(untagged)] -pub enum DeleteSearchOverrideError { - Status404(models::ApiResponse), - UnknownValue(serde_json::Value), -} - -/// struct for typed errors of method [`get_search_overrides`] -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(untagged)] -pub enum GetSearchOverridesError { - UnknownValue(serde_json::Value), -} - -/// struct for typed errors of method [`upsert_search_override`] -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(untagged)] -pub enum UpsertSearchOverrideError { - Status404(models::ApiResponse), - UnknownValue(serde_json::Value), -} - -pub async fn delete_search_override( - configuration: &configuration::Configuration, - params: &DeleteSearchOverrideParams, 
-) -> Result> { - let uri_str = format!( - "{}/collections/{collectionName}/overrides/{overrideId}", - configuration.base_path, - collectionName = crate::apis::urlencode(¶ms.collection_name), - overrideId = crate::apis::urlencode(¶ms.override_id) - ); - let mut req_builder = configuration - .client - .request(reqwest::Method::DELETE, &uri_str); - - if let Some(ref user_agent) = configuration.user_agent { - req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); - } - if let Some(ref apikey) = configuration.api_key { - let key = &apikey.key; - let value = match apikey.prefix { - Some(ref prefix) => &format!("{prefix} {key}"), - None => key, - }; - req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); - }; - - let req = req_builder.build()?; - let resp = configuration.client.execute(req).await?; - - let status = resp.status(); - let content_type = resp - .headers() - .get("content-type") - .and_then(|v| v.to_str().ok()) - .unwrap_or("application/octet-stream"); - let content_type = super::ContentType::from(content_type); - - if !status.is_client_error() && !status.is_server_error() { - let content = resp.text().await?; - match content_type { - ContentType::Json => serde_json::from_str(&content).map_err(Error::from), - ContentType::Text => { - return Err(Error::from(serde_json::Error::custom( - "Received `text/plain` content type response that cannot be converted to `models::SearchOverrideDeleteResponse`", - ))); - } - ContentType::Unsupported(unknown_type) => { - return Err(Error::from(serde_json::Error::custom(format!( - "Received `{unknown_type}` content type response that cannot be converted to `models::SearchOverrideDeleteResponse`" - )))); - } - } - } else { - let content = resp.text().await?; - let entity: Option = serde_json::from_str(&content).ok(); - Err(Error::ResponseError(ResponseContent { - status, - content, - entity, - })) - } -} - -pub async fn get_search_overrides( - configuration: &configuration::Configuration, - 
params: &GetSearchOverridesParams, -) -> Result> { - let uri_str = format!( - "{}/collections/{collectionName}/overrides", - configuration.base_path, - collectionName = crate::apis::urlencode(¶ms.collection_name) - ); - let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); - - if let Some(ref user_agent) = configuration.user_agent { - req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); - } - if let Some(ref apikey) = configuration.api_key { - let key = &apikey.key; - let value = match apikey.prefix { - Some(ref prefix) => &format!("{prefix} {key}"), - None => key, - }; - req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); - }; - - let req = req_builder.build()?; - let resp = configuration.client.execute(req).await?; - - let status = resp.status(); - let content_type = resp - .headers() - .get("content-type") - .and_then(|v| v.to_str().ok()) - .unwrap_or("application/octet-stream"); - let content_type = super::ContentType::from(content_type); - - if !status.is_client_error() && !status.is_server_error() { - let content = resp.text().await?; - match content_type { - ContentType::Json => serde_json::from_str(&content).map_err(Error::from), - ContentType::Text => { - return Err(Error::from(serde_json::Error::custom( - "Received `text/plain` content type response that cannot be converted to `models::SearchOverridesResponse`", - ))); - } - ContentType::Unsupported(unknown_type) => { - return Err(Error::from(serde_json::Error::custom(format!( - "Received `{unknown_type}` content type response that cannot be converted to `models::SearchOverridesResponse`" - )))); - } - } - } else { - let content = resp.text().await?; - let entity: Option = serde_json::from_str(&content).ok(); - Err(Error::ResponseError(ResponseContent { - status, - content, - entity, - })) - } -} - -/// Create or update an override to promote certain documents over others. 
Using overrides, you can include or exclude specific documents for a given query. -pub async fn upsert_search_override( - configuration: &configuration::Configuration, - params: &UpsertSearchOverrideParams, -) -> Result> { - let uri_str = format!( - "{}/collections/{collectionName}/overrides/{overrideId}", - configuration.base_path, - collectionName = crate::apis::urlencode(¶ms.collection_name), - overrideId = crate::apis::urlencode(¶ms.override_id) - ); - let mut req_builder = configuration.client.request(reqwest::Method::PUT, &uri_str); - - if let Some(ref user_agent) = configuration.user_agent { - req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); - } - if let Some(ref apikey) = configuration.api_key { - let key = &apikey.key; - let value = match apikey.prefix { - Some(ref prefix) => &format!("{prefix} {key}"), - None => key, - }; - req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); - }; - req_builder = req_builder.json(¶ms.search_override_schema); - - let req = req_builder.build()?; - let resp = configuration.client.execute(req).await?; - - let status = resp.status(); - let content_type = resp - .headers() - .get("content-type") - .and_then(|v| v.to_str().ok()) - .unwrap_or("application/octet-stream"); - let content_type = super::ContentType::from(content_type); - - if !status.is_client_error() && !status.is_server_error() { - let content = resp.text().await?; - match content_type { - ContentType::Json => serde_json::from_str(&content).map_err(Error::from), - ContentType::Text => { - return Err(Error::from(serde_json::Error::custom( - "Received `text/plain` content type response that cannot be converted to `models::SearchOverride`", - ))); - } - ContentType::Unsupported(unknown_type) => { - return Err(Error::from(serde_json::Error::custom(format!( - "Received `{unknown_type}` content type response that cannot be converted to `models::SearchOverride`" - )))); - } - } - } else { - let content = resp.text().await?; - 
let entity: Option = serde_json::from_str(&content).ok(); - Err(Error::ResponseError(ResponseContent { - status, - content, - entity, - })) - } -} diff --git a/typesense_codegen/src/apis/curation_sets_api.rs b/typesense_codegen/src/apis/curation_sets_api.rs index 87a253f..f6a3ee2 100644 --- a/typesense_codegen/src/apis/curation_sets_api.rs +++ b/typesense_codegen/src/apis/curation_sets_api.rs @@ -10,66 +10,67 @@ use super::{ContentType, Error, configuration}; use crate::{apis::ResponseContent, models}; +use ::std::borrow::Cow; use reqwest; use serde::{Deserialize, Serialize, de::Error as _}; /// struct for passing parameters to the method [`delete_curation_set`] #[derive(Clone, Debug)] -pub struct DeleteCurationSetParams { +pub struct DeleteCurationSetParams<'p> { /// The name of the curation set to delete - pub curation_set_name: String, + pub curation_set_name: Cow<'p, str>, } /// struct for passing parameters to the method [`delete_curation_set_item`] #[derive(Clone, Debug)] -pub struct DeleteCurationSetItemParams { +pub struct DeleteCurationSetItemParams<'p> { /// The name of the curation set - pub curation_set_name: String, + pub curation_set_name: Cow<'p, str>, /// The id of the curation item to delete - pub item_id: String, + pub item_id: Cow<'p, str>, } /// struct for passing parameters to the method [`retrieve_curation_set`] #[derive(Clone, Debug)] -pub struct RetrieveCurationSetParams { +pub struct RetrieveCurationSetParams<'p> { /// The name of the curation set to retrieve - pub curation_set_name: String, + pub curation_set_name: Cow<'p, str>, } /// struct for passing parameters to the method [`retrieve_curation_set_item`] #[derive(Clone, Debug)] -pub struct RetrieveCurationSetItemParams { +pub struct RetrieveCurationSetItemParams<'p> { /// The name of the curation set - pub curation_set_name: String, + pub curation_set_name: Cow<'p, str>, /// The id of the curation item to retrieve - pub item_id: String, + pub item_id: Cow<'p, str>, } /// struct for 
passing parameters to the method [`retrieve_curation_set_items`] #[derive(Clone, Debug)] -pub struct RetrieveCurationSetItemsParams { +pub struct RetrieveCurationSetItemsParams<'p> { /// The name of the curation set to retrieve items for - pub curation_set_name: String, + pub curation_set_name: Cow<'p, str>, } /// struct for passing parameters to the method [`upsert_curation_set`] #[derive(Clone, Debug)] -pub struct UpsertCurationSetParams { +pub struct UpsertCurationSetParams<'p> { /// The name of the curation set to create/update - pub curation_set_name: String, + pub curation_set_name: Cow<'p, str>, /// The curation set to be created/updated - pub curation_set_create_schema: models::CurationSetCreateSchema, + pub curation_set_create_schema: models::CurationSetCreateSchema<'p>, } /// struct for passing parameters to the method [`upsert_curation_set_item`] #[derive(Clone, Debug)] -pub struct UpsertCurationSetItemParams { +pub struct UpsertCurationSetItemParams<'p> { /// The name of the curation set - pub curation_set_name: String, + pub curation_set_name: Cow<'p, str>, /// The id of the curation item to upsert - pub item_id: String, + pub item_id: Cow<'p, str>, /// The curation item to be created/updated - pub curation_item_create_schema: models::CurationItemCreateSchema, + pub curation_item_create_schema: models::CurationItemCreateSchema<'p>, } /// struct for typed errors of method [`delete_curation_set`] @@ -138,7 +139,7 @@ pub enum UpsertCurationSetItemError { /// Delete a specific curation set by its name pub async fn delete_curation_set( configuration: &configuration::Configuration, - params: &DeleteCurationSetParams, + params: &DeleteCurationSetParams<'_>, ) -> Result> { let uri_str = format!( "{}/curation_sets/{curationSetName}", @@ -201,7 +202,7 @@ pub async fn delete_curation_set( /// Delete a specific curation item by its id pub async fn delete_curation_set_item( configuration: &configuration::Configuration, - params: &DeleteCurationSetItemParams, + 
params: &DeleteCurationSetItemParams<'_>, ) -> Result> { let uri_str = format!( "{}/curation_sets/{curationSetName}/items/{itemId}", @@ -265,8 +266,8 @@ pub async fn delete_curation_set_item( /// Retrieve a specific curation set by its name pub async fn retrieve_curation_set( configuration: &configuration::Configuration, - params: &RetrieveCurationSetParams, -) -> Result> { + params: &RetrieveCurationSetParams<'_>, +) -> Result, Error> { let uri_str = format!( "{}/curation_sets/{curationSetName}", configuration.base_path, @@ -326,7 +327,7 @@ pub async fn retrieve_curation_set( /// Retrieve a specific curation item by its id pub async fn retrieve_curation_set_item( configuration: &configuration::Configuration, - params: &RetrieveCurationSetItemParams, + params: &RetrieveCurationSetItemParams<'_>, ) -> Result> { let uri_str = format!( "{}/curation_sets/{curationSetName}/items/{itemId}", @@ -388,7 +389,7 @@ pub async fn retrieve_curation_set_item( /// Retrieve all curation items in a set pub async fn retrieve_curation_set_items( configuration: &configuration::Configuration, - params: &RetrieveCurationSetItemsParams, + params: &RetrieveCurationSetItemsParams<'_>, ) -> Result, Error> { let uri_str = format!( "{}/curation_sets/{curationSetName}/items", @@ -505,7 +506,7 @@ pub async fn retrieve_curation_sets( /// Create or update a curation set with the given name pub async fn upsert_curation_set( configuration: &configuration::Configuration, - params: &UpsertCurationSetParams, + params: &UpsertCurationSetParams<'_>, ) -> Result> { let uri_str = format!( "{}/curation_sets/{curationSetName}", @@ -567,7 +568,7 @@ pub async fn upsert_curation_set( /// Create or update a curation set item with the given id pub async fn upsert_curation_set_item( configuration: &configuration::Configuration, - params: &UpsertCurationSetItemParams, + params: &UpsertCurationSetItemParams<'_>, ) -> Result> { let uri_str = format!( "{}/curation_sets/{curationSetName}/items/{itemId}", diff --git 
a/typesense_codegen/src/apis/debug_api.rs b/typesense_codegen/src/apis/debug_api.rs index 73a5dba..6543f64 100644 --- a/typesense_codegen/src/apis/debug_api.rs +++ b/typesense_codegen/src/apis/debug_api.rs @@ -10,6 +10,7 @@ use super::{ContentType, Error, configuration}; use crate::{apis::ResponseContent, models}; +use ::std::borrow::Cow; use reqwest; use serde::{Deserialize, Serialize, de::Error as _}; diff --git a/typesense_codegen/src/apis/documents_api.rs b/typesense_codegen/src/apis/documents_api.rs index eb146e3..d13e363 100644 --- a/typesense_codegen/src/apis/documents_api.rs +++ b/typesense_codegen/src/apis/documents_api.rs @@ -10,24 +10,25 @@ use super::{ContentType, Error, configuration}; use crate::{apis::ResponseContent, models}; +use ::std::borrow::Cow; use reqwest; use serde::{Deserialize, Serialize, de::Error as _}; /// struct for passing parameters to the method [`delete_document`] #[derive(Clone, Debug)] -pub struct DeleteDocumentParams { +pub struct DeleteDocumentParams<'p> { /// The name of the collection to search for the document under - pub collection_name: String, + pub collection_name: Cow<'p, str>, /// The Document ID - pub document_id: String, + pub document_id: Cow<'p, str>, } /// struct for passing parameters to the method [`delete_documents`] #[derive(Clone, Debug)] -pub struct DeleteDocumentsParams { +pub struct DeleteDocumentsParams<'p> { /// The name of the collection to delete documents from - pub collection_name: String, - pub filter_by: Option, + pub collection_name: Cow<'p, str>, + pub filter_by: Option>, pub batch_size: Option, pub ignore_not_found: Option, pub truncate: Option, @@ -35,30 +36,30 @@ pub struct DeleteDocumentsParams { /// struct for passing parameters to the method [`export_documents`] #[derive(Clone, Debug)] -pub struct ExportDocumentsParams { +pub struct ExportDocumentsParams<'p> { /// The name of the collection - pub collection_name: String, - pub filter_by: Option, - pub include_fields: Option, - pub 
exclude_fields: Option, + pub collection_name: Cow<'p, str>, + pub filter_by: Option>, + pub include_fields: Option>, + pub exclude_fields: Option>, } /// struct for passing parameters to the method [`get_document`] #[derive(Clone, Debug)] -pub struct GetDocumentParams { +pub struct GetDocumentParams<'p> { /// The name of the collection to search for the document under - pub collection_name: String, + pub collection_name: Cow<'p, str>, /// The Document ID - pub document_id: String, + pub document_id: Cow<'p, str>, } /// struct for passing parameters to the method [`import_documents`] #[derive(Clone, Debug)] -pub struct ImportDocumentsParams { +pub struct ImportDocumentsParams<'p> { /// The name of the collection - pub collection_name: String, + pub collection_name: Cow<'p, str>, /// The json array of documents or the JSONL file to import - pub body: String, + pub body: Cow<'p, str>, pub batch_size: Option, pub return_id: Option, pub remote_embedding_batch_size: Option, @@ -69,47 +70,47 @@ pub struct ImportDocumentsParams { /// struct for passing parameters to the method [`index_document`] #[derive(Clone, Debug)] -pub struct IndexDocumentParams { +pub struct IndexDocumentParams<'p> { /// The name of the collection to add the document to - pub collection_name: String, + pub collection_name: Cow<'p, str>, /// The document object to be indexed pub body: serde_json::Value, /// Additional action to perform - pub action: Option, + pub action: Option>, /// Dealing with Dirty Data pub dirty_values: Option, } /// struct for passing parameters to the method [`multi_search`] #[derive(Clone, Debug)] -pub struct MultiSearchParams { - pub q: Option, - pub query_by: Option, - pub query_by_weights: Option, - pub text_match_type: Option, - pub prefix: Option, - pub infix: Option, +pub struct MultiSearchParams<'p> { + pub q: Option>, + pub query_by: Option>, + pub query_by_weights: Option>, + pub text_match_type: Option>, + pub prefix: Option>, + pub infix: Option>, pub 
max_extra_prefix: Option, pub max_extra_suffix: Option, - pub filter_by: Option, - pub sort_by: Option, - pub facet_by: Option, + pub filter_by: Option>, + pub sort_by: Option>, + pub facet_by: Option>, pub max_facet_values: Option, - pub facet_query: Option, - pub num_typos: Option, + pub facet_query: Option>, + pub num_typos: Option>, pub page: Option, pub per_page: Option, pub limit: Option, pub offset: Option, - pub group_by: Option, + pub group_by: Option>, pub group_limit: Option, pub group_missing_values: Option, - pub include_fields: Option, - pub exclude_fields: Option, - pub highlight_full_fields: Option, + pub include_fields: Option>, + pub exclude_fields: Option>, + pub highlight_full_fields: Option>, pub highlight_affix_num_tokens: Option, - pub highlight_start_tag: Option, - pub highlight_end_tag: Option, + pub highlight_start_tag: Option>, + pub highlight_end_tag: Option>, pub snippet_threshold: Option, pub drop_tokens_threshold: Option, pub drop_tokens_mode: Option, @@ -120,12 +121,12 @@ pub struct MultiSearchParams { pub enable_analytics: Option, pub synonym_prefix: Option, pub synonym_num_typos: Option, - pub pinned_hits: Option, - pub hidden_hits: Option, - pub override_tags: Option, - pub highlight_fields: Option, + pub pinned_hits: Option>, + pub hidden_hits: Option>, + pub override_tags: Option>, + pub highlight_fields: Option>, pub pre_segmented_query: Option, - pub preset: Option, + pub preset: Option>, pub enable_overrides: Option, pub prioritize_exact_match: Option, pub prioritize_token_position: Option, @@ -137,58 +138,58 @@ pub struct MultiSearchParams { pub cache_ttl: Option, pub min_len_1typo: Option, pub min_len_2typo: Option, - pub vector_query: Option, + pub vector_query: Option>, pub remote_embedding_timeout_ms: Option, pub remote_embedding_num_tries: Option, - pub facet_strategy: Option, - pub stopwords: Option, - pub facet_return_parent: Option, - pub voice_query: Option, + pub facet_strategy: Option>, + pub stopwords: Option>, + 
pub facet_return_parent: Option>, + pub voice_query: Option>, pub conversation: Option, - pub conversation_model_id: Option, - pub conversation_id: Option, - pub multi_search_searches_parameter: Option, + pub conversation_model_id: Option>, + pub conversation_id: Option>, + pub multi_search_searches_parameter: Option>, } /// struct for passing parameters to the method [`search_collection`] #[derive(Clone, Debug)] -pub struct SearchCollectionParams { +pub struct SearchCollectionParams<'p> { /// The name of the collection to search for the document under - pub collection_name: String, - pub q: Option, - pub query_by: Option, + pub collection_name: Cow<'p, str>, + pub q: Option>, + pub query_by: Option>, pub nl_query: Option, - pub nl_model_id: Option, - pub query_by_weights: Option, - pub text_match_type: Option, - pub prefix: Option, - pub infix: Option, + pub nl_model_id: Option>, + pub query_by_weights: Option>, + pub text_match_type: Option>, + pub prefix: Option>, + pub infix: Option>, pub max_extra_prefix: Option, pub max_extra_suffix: Option, - pub filter_by: Option, + pub filter_by: Option>, pub max_filter_by_candidates: Option, - pub sort_by: Option, - pub facet_by: Option, + pub sort_by: Option>, + pub facet_by: Option>, pub max_facet_values: Option, - pub facet_query: Option, - pub num_typos: Option, + pub facet_query: Option>, + pub num_typos: Option>, pub page: Option, pub per_page: Option, pub limit: Option, pub offset: Option, - pub group_by: Option, + pub group_by: Option>, pub group_limit: Option, pub group_missing_values: Option, - pub include_fields: Option, - pub exclude_fields: Option, - pub highlight_full_fields: Option, + pub include_fields: Option>, + pub exclude_fields: Option>, + pub highlight_full_fields: Option>, pub highlight_affix_num_tokens: Option, - pub highlight_start_tag: Option, - pub highlight_end_tag: Option, + pub highlight_start_tag: Option>, + pub highlight_end_tag: Option>, pub enable_highlight_v1: Option, pub 
enable_analytics: Option, pub snippet_threshold: Option, - pub synonym_sets: Option, + pub synonym_sets: Option>, pub drop_tokens_threshold: Option, pub drop_tokens_mode: Option, pub typo_tokens_threshold: Option, @@ -197,13 +198,13 @@ pub struct SearchCollectionParams { pub enable_synonyms: Option, pub synonym_prefix: Option, pub synonym_num_typos: Option, - pub pinned_hits: Option, - pub hidden_hits: Option, - pub override_tags: Option, - pub highlight_fields: Option, - pub split_join_tokens: Option, + pub pinned_hits: Option>, + pub hidden_hits: Option>, + pub override_tags: Option>, + pub highlight_fields: Option>, + pub split_join_tokens: Option>, pub pre_segmented_query: Option, - pub preset: Option, + pub preset: Option>, pub enable_overrides: Option, pub prioritize_exact_match: Option, pub max_candidates: Option, @@ -216,25 +217,25 @@ pub struct SearchCollectionParams { pub cache_ttl: Option, pub min_len_1typo: Option, pub min_len_2typo: Option, - pub vector_query: Option, + pub vector_query: Option>, pub remote_embedding_timeout_ms: Option, pub remote_embedding_num_tries: Option, - pub facet_strategy: Option, - pub stopwords: Option, - pub facet_return_parent: Option, - pub voice_query: Option, + pub facet_strategy: Option>, + pub stopwords: Option>, + pub facet_return_parent: Option>, + pub voice_query: Option>, pub conversation: Option, - pub conversation_model_id: Option, - pub conversation_id: Option, + pub conversation_model_id: Option>, + pub conversation_id: Option>, } /// struct for passing parameters to the method [`update_document`] #[derive(Clone, Debug)] -pub struct UpdateDocumentParams { +pub struct UpdateDocumentParams<'p, B> { /// The name of the collection to search for the document under - pub collection_name: String, + pub collection_name: Cow<'p, str>, /// The Document ID - pub document_id: String, + pub document_id: Cow<'p, str>, /// The document object with fields to be updated pub body: B, /// Dealing with Dirty Data @@ -243,12 
+244,12 @@ pub struct UpdateDocumentParams { /// struct for passing parameters to the method [`update_documents`] #[derive(Clone, Debug)] -pub struct UpdateDocumentsParams { +pub struct UpdateDocumentsParams<'p, B> { /// The name of the collection to update documents in - pub collection_name: String, + pub collection_name: Cow<'p, str>, /// The document fields to be updated pub body: B, - pub filter_by: Option, + pub filter_by: Option>, } /// struct for typed errors of method [`delete_document`] @@ -337,7 +338,7 @@ pub enum UpdateDocumentsError { /// Delete an individual document from a collection by using its ID. pub async fn delete_document( configuration: &configuration::Configuration, - params: &DeleteDocumentParams, + params: &DeleteDocumentParams<'_>, ) -> Result> { let uri_str = format!( "{}/collections/{collectionName}/documents/{documentId}", @@ -401,7 +402,7 @@ pub async fn delete_document( /// Delete a bunch of documents that match a specific filter condition. Use the `batch_size` parameter to control the number of documents that should deleted at a time. A larger value will speed up deletions, but will impact performance of other operations running on the server. pub async fn delete_documents( configuration: &configuration::Configuration, - params: &DeleteDocumentsParams, + params: &DeleteDocumentsParams<'_>, ) -> Result> { let uri_str = format!( "{}/collections/{collectionName}/documents", @@ -476,7 +477,7 @@ pub async fn delete_documents( /// Export all documents in a collection in JSON lines format. pub async fn export_documents( configuration: &configuration::Configuration, - params: &ExportDocumentsParams, + params: &ExportDocumentsParams<'_>, ) -> Result> { let uri_str = format!( "{}/collections/{collectionName}/documents/export", @@ -541,7 +542,7 @@ pub async fn export_documents( /// Fetch an individual document from a collection by using its ID. 
pub async fn get_document( configuration: &configuration::Configuration, - params: &GetDocumentParams, + params: &GetDocumentParams<'_>, ) -> Result> { let uri_str = format!( "{}/collections/{collectionName}/documents/{documentId}", @@ -603,7 +604,7 @@ pub async fn get_document( /// The documents to be imported must be formatted in a newline delimited JSON structure. You can feed the output file from a Typesense export operation directly as import. pub async fn import_documents( configuration: &configuration::Configuration, - params: &ImportDocumentsParams, + params: &ImportDocumentsParams<'_>, ) -> Result> { let uri_str = format!( "{}/collections/{collectionName}/documents/import", @@ -646,7 +647,7 @@ pub async fn import_documents( }; req_builder = req_builder .header(reqwest::header::CONTENT_TYPE, "text/plain") - .body(params.body.to_owned()); + .body(params.body.clone().into_owned()); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; @@ -683,7 +684,7 @@ pub async fn import_documents( /// A document to be indexed in a given collection must conform to the schema of the collection. pub async fn index_document( configuration: &configuration::Configuration, - params: &IndexDocumentParams, + params: &IndexDocumentParams<'_>, ) -> Result> { let uri_str = format!( "{}/collections/{collectionName}/documents", @@ -753,7 +754,7 @@ pub async fn index_document( /// This is especially useful to avoid round-trip network latencies incurred otherwise if each of these requests are sent in separate HTTP requests. You can also use this feature to do a federated search across multiple collections in a single HTTP request. 
pub async fn multi_search( configuration: &configuration::Configuration, - params: &MultiSearchParams, + params: &MultiSearchParams<'_>, ) -> Result> { let uri_str = format!("{}/multi_search", configuration.base_path); let mut req_builder = configuration @@ -1015,7 +1016,7 @@ pub async fn multi_search( /// Search for documents in a collection that match the search criteria. pub async fn search_collection serde::Deserialize<'de> + Serialize>( configuration: &configuration::Configuration, - params: &SearchCollectionParams, + params: &SearchCollectionParams<'_>, ) -> Result, Error> { let uri_str = format!( "{}/collections/{collectionName}/documents/search", @@ -1299,7 +1300,7 @@ pub async fn search_collection serde::Deserialize<'de> + Serialize>( /// Update an individual document from a collection by using its ID. The update can be partial. pub async fn update_document( configuration: &configuration::Configuration, - params: &UpdateDocumentParams, + params: &UpdateDocumentParams<'_, B>, ) -> Result> { let uri_str = format!( "{}/collections/{collectionName}/documents/{documentId}", @@ -1367,7 +1368,7 @@ pub async fn update_document( /// The filter_by query parameter is used to filter to specify a condition against which the documents are matched. The request body contains the fields that should be updated for any documents that match the filter condition. This endpoint is only available if the Typesense server is version `0.25.0.rc12` or later. 
pub async fn update_documents( configuration: &configuration::Configuration, - params: &UpdateDocumentsParams, + params: &UpdateDocumentsParams<'_, B>, ) -> Result> { let uri_str = format!( "{}/collections/{collectionName}/documents", diff --git a/typesense_codegen/src/apis/health_api.rs b/typesense_codegen/src/apis/health_api.rs index 8cc1524..e667693 100644 --- a/typesense_codegen/src/apis/health_api.rs +++ b/typesense_codegen/src/apis/health_api.rs @@ -10,6 +10,7 @@ use super::{ContentType, Error, configuration}; use crate::{apis::ResponseContent, models}; +use ::std::borrow::Cow; use reqwest; use serde::{Deserialize, Serialize, de::Error as _}; diff --git a/typesense_codegen/src/apis/keys_api.rs b/typesense_codegen/src/apis/keys_api.rs index c702492..d60ff76 100644 --- a/typesense_codegen/src/apis/keys_api.rs +++ b/typesense_codegen/src/apis/keys_api.rs @@ -10,14 +10,15 @@ use super::{ContentType, Error, configuration}; use crate::{apis::ResponseContent, models}; +use ::std::borrow::Cow; use reqwest; use serde::{Deserialize, Serialize, de::Error as _}; /// struct for passing parameters to the method [`create_key`] #[derive(Clone, Debug)] -pub struct CreateKeyParams { +pub struct CreateKeyParams<'p> { /// The object that describes API key scope - pub api_key_schema: Option, + pub api_key_schema: Option>, } /// struct for passing parameters to the method [`delete_key`] @@ -70,7 +71,7 @@ pub enum GetKeysError { /// Create an API Key with fine-grain access control. You can restrict access on both a per-collection and per-action level. The generated key is returned only during creation. You want to store this key carefully in a secure place. 
pub async fn create_key( configuration: &configuration::Configuration, - params: &CreateKeyParams, + params: &CreateKeyParams<'_>, ) -> Result> { let uri_str = format!("{}/keys", configuration.base_path); let mut req_builder = configuration diff --git a/typesense_codegen/src/apis/mod.rs b/typesense_codegen/src/apis/mod.rs index b66b0b3..6a7d0ac 100644 --- a/typesense_codegen/src/apis/mod.rs +++ b/typesense_codegen/src/apis/mod.rs @@ -1,4 +1,4 @@ -use std::{error, fmt}; +use ::std::{error, fmt}; #[derive(Debug, Clone)] pub struct ResponseContent { diff --git a/typesense_codegen/src/apis/nl_search_models_api.rs b/typesense_codegen/src/apis/nl_search_models_api.rs index 5458cbb..eef9fa0 100644 --- a/typesense_codegen/src/apis/nl_search_models_api.rs +++ b/typesense_codegen/src/apis/nl_search_models_api.rs @@ -10,37 +10,38 @@ use super::{ContentType, Error, configuration}; use crate::{apis::ResponseContent, models}; +use ::std::borrow::Cow; use reqwest; use serde::{Deserialize, Serialize, de::Error as _}; /// struct for passing parameters to the method [`create_nl_search_model`] #[derive(Clone, Debug)] -pub struct CreateNlSearchModelParams { +pub struct CreateNlSearchModelParams<'p> { /// The NL search model to be created - pub nl_search_model_create_schema: models::NlSearchModelCreateSchema, + pub nl_search_model_create_schema: models::NlSearchModelCreateSchema<'p>, } /// struct for passing parameters to the method [`delete_nl_search_model`] #[derive(Clone, Debug)] -pub struct DeleteNlSearchModelParams { +pub struct DeleteNlSearchModelParams<'p> { /// The ID of the NL search model to delete - pub model_id: String, + pub model_id: Cow<'p, str>, } /// struct for passing parameters to the method [`retrieve_nl_search_model`] #[derive(Clone, Debug)] -pub struct RetrieveNlSearchModelParams { +pub struct RetrieveNlSearchModelParams<'p> { /// The ID of the NL search model to retrieve - pub model_id: String, + pub model_id: Cow<'p, str>, } /// struct for passing parameters to 
the method [`update_nl_search_model`] #[derive(Clone, Debug)] -pub struct UpdateNlSearchModelParams { +pub struct UpdateNlSearchModelParams<'p> { /// The ID of the NL search model to update - pub model_id: String, + pub model_id: Cow<'p, str>, /// The NL search model fields to update - pub body: models::NlSearchModelCreateSchema, + pub body: models::NlSearchModelCreateSchema<'p>, } /// struct for typed errors of method [`create_nl_search_model`] @@ -86,7 +87,7 @@ pub enum UpdateNlSearchModelError { /// Create a new NL search model. pub async fn create_nl_search_model( configuration: &configuration::Configuration, - params: &CreateNlSearchModelParams, + params: &CreateNlSearchModelParams<'_>, ) -> Result> { let uri_str = format!("{}/nl_search_models", configuration.base_path); let mut req_builder = configuration @@ -146,7 +147,7 @@ pub async fn create_nl_search_model( /// Delete a specific NL search model by its ID. pub async fn delete_nl_search_model( configuration: &configuration::Configuration, - params: &DeleteNlSearchModelParams, + params: &DeleteNlSearchModelParams<'_>, ) -> Result> { let uri_str = format!( "{}/nl_search_models/{modelId}", @@ -265,7 +266,7 @@ pub async fn retrieve_all_nl_search_models( /// Retrieve a specific NL search model by its ID. pub async fn retrieve_nl_search_model( configuration: &configuration::Configuration, - params: &RetrieveNlSearchModelParams, + params: &RetrieveNlSearchModelParams<'_>, ) -> Result> { let uri_str = format!( "{}/nl_search_models/{modelId}", @@ -326,7 +327,7 @@ pub async fn retrieve_nl_search_model( /// Update an existing NL search model. 
pub async fn update_nl_search_model( configuration: &configuration::Configuration, - params: &UpdateNlSearchModelParams, + params: &UpdateNlSearchModelParams<'_>, ) -> Result> { let uri_str = format!( "{}/nl_search_models/{modelId}", diff --git a/typesense_codegen/src/apis/operations_api.rs b/typesense_codegen/src/apis/operations_api.rs index defe8cb..54b123e 100644 --- a/typesense_codegen/src/apis/operations_api.rs +++ b/typesense_codegen/src/apis/operations_api.rs @@ -10,14 +10,15 @@ use super::{ContentType, Error, configuration}; use crate::{apis::ResponseContent, models}; +use ::std::borrow::Cow; use reqwest; use serde::{Deserialize, Serialize, de::Error as _}; /// struct for passing parameters to the method [`take_snapshot`] #[derive(Clone, Debug)] -pub struct TakeSnapshotParams { +pub struct TakeSnapshotParams<'p> { /// The directory on the server where the snapshot should be saved. - pub snapshot_path: String, + pub snapshot_path: Cow<'p, str>, } /// struct for passing parameters to the method [`toggle_slow_request_log`] @@ -369,7 +370,7 @@ pub async fn retrieve_metrics( /// Creates a point-in-time snapshot of a Typesense node's state and data in the specified directory. You can then backup the snapshot directory that gets created and later restore it as a data directory, as needed. pub async fn take_snapshot( configuration: &configuration::Configuration, - params: &TakeSnapshotParams, + params: &TakeSnapshotParams<'_>, ) -> Result> { let uri_str = format!("{}/operations/snapshot", configuration.base_path); let mut req_builder = configuration diff --git a/typesense_codegen/src/apis/override_api.rs b/typesense_codegen/src/apis/override_api.rs deleted file mode 100644 index 9eadf5a..0000000 --- a/typesense_codegen/src/apis/override_api.rs +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Typesense API - * - * An open source search engine for building delightful search experiences. 
- * - * The version of the OpenAPI document: 30.0 - * - * Generated by: https://openapi-generator.tech - */ - -use super::{ContentType, Error, configuration}; -use crate::{apis::ResponseContent, models}; -use reqwest; -use serde::{Deserialize, Serialize, de::Error as _}; - -/// struct for passing parameters to the method [`get_search_override`] -#[derive(Clone, Debug)] -pub struct GetSearchOverrideParams { - /// The name of the collection - pub collection_name: String, - /// The id of the search override - pub override_id: String, -} - -/// struct for typed errors of method [`get_search_override`] -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(untagged)] -pub enum GetSearchOverrideError { - UnknownValue(serde_json::Value), -} - -/// Retrieve the details of a search override, given its id. -pub async fn get_search_override( - configuration: &configuration::Configuration, - params: &GetSearchOverrideParams, -) -> Result> { - let uri_str = format!( - "{}/collections/{collectionName}/overrides/{overrideId}", - configuration.base_path, - collectionName = crate::apis::urlencode(¶ms.collection_name), - overrideId = crate::apis::urlencode(¶ms.override_id) - ); - let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); - - if let Some(ref user_agent) = configuration.user_agent { - req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); - } - if let Some(ref apikey) = configuration.api_key { - let key = &apikey.key; - let value = match apikey.prefix { - Some(ref prefix) => &format!("{prefix} {key}"), - None => key, - }; - req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); - }; - - let req = req_builder.build()?; - let resp = configuration.client.execute(req).await?; - - let status = resp.status(); - let content_type = resp - .headers() - .get("content-type") - .and_then(|v| v.to_str().ok()) - .unwrap_or("application/octet-stream"); - let content_type = super::ContentType::from(content_type); - - if 
!status.is_client_error() && !status.is_server_error() { - let content = resp.text().await?; - match content_type { - ContentType::Json => serde_json::from_str(&content).map_err(Error::from), - ContentType::Text => { - return Err(Error::from(serde_json::Error::custom( - "Received `text/plain` content type response that cannot be converted to `models::SearchOverride`", - ))); - } - ContentType::Unsupported(unknown_type) => { - return Err(Error::from(serde_json::Error::custom(format!( - "Received `{unknown_type}` content type response that cannot be converted to `models::SearchOverride`" - )))); - } - } - } else { - let content = resp.text().await?; - let entity: Option = serde_json::from_str(&content).ok(); - Err(Error::ResponseError(ResponseContent { - status, - content, - entity, - })) - } -} diff --git a/typesense_codegen/src/apis/presets_api.rs b/typesense_codegen/src/apis/presets_api.rs index 8b296b2..280bebd 100644 --- a/typesense_codegen/src/apis/presets_api.rs +++ b/typesense_codegen/src/apis/presets_api.rs @@ -10,30 +10,31 @@ use super::{ContentType, Error, configuration}; use crate::{apis::ResponseContent, models}; +use ::std::borrow::Cow; use reqwest; use serde::{Deserialize, Serialize, de::Error as _}; /// struct for passing parameters to the method [`delete_preset`] #[derive(Clone, Debug)] -pub struct DeletePresetParams { +pub struct DeletePresetParams<'p> { /// The ID of the preset to delete. - pub preset_id: String, + pub preset_id: Cow<'p, str>, } /// struct for passing parameters to the method [`retrieve_preset`] #[derive(Clone, Debug)] -pub struct RetrievePresetParams { +pub struct RetrievePresetParams<'p> { /// The ID of the preset to retrieve. - pub preset_id: String, + pub preset_id: Cow<'p, str>, } /// struct for passing parameters to the method [`upsert_preset`] #[derive(Clone, Debug)] -pub struct UpsertPresetParams { +pub struct UpsertPresetParams<'p> { /// The name of the preset set to upsert. 
- pub preset_id: String, + pub preset_id: Cow<'p, str>, /// The stopwords set to upsert. - pub preset_upsert_schema: models::PresetUpsertSchema, + pub preset_upsert_schema: models::PresetUpsertSchema<'p>, } /// struct for typed errors of method [`delete_preset`] @@ -70,7 +71,7 @@ pub enum UpsertPresetError { /// Permanently deletes a preset, given it's name. pub async fn delete_preset( configuration: &configuration::Configuration, - params: &DeletePresetParams, + params: &DeletePresetParams<'_>, ) -> Result> { let uri_str = format!( "{}/presets/{presetId}", @@ -189,7 +190,7 @@ pub async fn retrieve_all_presets( /// Retrieve the details of a preset, given it's name. pub async fn retrieve_preset( configuration: &configuration::Configuration, - params: &RetrievePresetParams, + params: &RetrievePresetParams<'_>, ) -> Result> { let uri_str = format!( "{}/presets/{presetId}", @@ -250,7 +251,7 @@ pub async fn retrieve_preset( /// Create or update an existing preset. pub async fn upsert_preset( configuration: &configuration::Configuration, - params: &UpsertPresetParams, + params: &UpsertPresetParams<'_>, ) -> Result> { let uri_str = format!( "{}/presets/{presetId}", diff --git a/typesense_codegen/src/apis/stemming_api.rs b/typesense_codegen/src/apis/stemming_api.rs index de8470f..e685c62 100644 --- a/typesense_codegen/src/apis/stemming_api.rs +++ b/typesense_codegen/src/apis/stemming_api.rs @@ -10,23 +10,24 @@ use super::{ContentType, Error, configuration}; use crate::{apis::ResponseContent, models}; +use ::std::borrow::Cow; use reqwest; use serde::{Deserialize, Serialize, de::Error as _}; /// struct for passing parameters to the method [`get_stemming_dictionary`] #[derive(Clone, Debug)] -pub struct GetStemmingDictionaryParams { +pub struct GetStemmingDictionaryParams<'p> { /// The ID of the dictionary to retrieve - pub dictionary_id: String, + pub dictionary_id: Cow<'p, str>, } /// struct for passing parameters to the method [`import_stemming_dictionary`] #[derive(Clone, 
Debug)] -pub struct ImportStemmingDictionaryParams { +pub struct ImportStemmingDictionaryParams<'p> { /// The ID to assign to the dictionary - pub id: String, + pub id: Cow<'p, str>, /// The JSONL file containing word mappings - pub body: String, + pub body: Cow<'p, str>, } /// struct for typed errors of method [`get_stemming_dictionary`] @@ -55,7 +56,7 @@ pub enum ListStemmingDictionariesError { /// Fetch details of a specific stemming dictionary. pub async fn get_stemming_dictionary( configuration: &configuration::Configuration, - params: &GetStemmingDictionaryParams, + params: &GetStemmingDictionaryParams<'_>, ) -> Result> { let uri_str = format!( "{}/stemming/dictionaries/{dictionaryId}", @@ -116,7 +117,7 @@ pub async fn get_stemming_dictionary( /// Upload a JSONL file containing word mappings to create or update a stemming dictionary. pub async fn import_stemming_dictionary( configuration: &configuration::Configuration, - params: &ImportStemmingDictionaryParams, + params: &ImportStemmingDictionaryParams<'_>, ) -> Result> { let uri_str = format!("{}/stemming/dictionaries/import", configuration.base_path); let mut req_builder = configuration @@ -137,7 +138,7 @@ pub async fn import_stemming_dictionary( }; req_builder = req_builder .header(reqwest::header::CONTENT_TYPE, "text/plain") - .body(params.body.to_owned()); + .body(params.body.clone().into_owned()); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; diff --git a/typesense_codegen/src/apis/stopwords_api.rs b/typesense_codegen/src/apis/stopwords_api.rs index 76c120b..00cdc58 100644 --- a/typesense_codegen/src/apis/stopwords_api.rs +++ b/typesense_codegen/src/apis/stopwords_api.rs @@ -10,30 +10,31 @@ use super::{ContentType, Error, configuration}; use crate::{apis::ResponseContent, models}; +use ::std::borrow::Cow; use reqwest; use serde::{Deserialize, Serialize, de::Error as _}; /// struct for passing parameters to the method [`delete_stopwords_set`] #[derive(Clone, Debug)] 
-pub struct DeleteStopwordsSetParams { +pub struct DeleteStopwordsSetParams<'p> { /// The ID of the stopwords set to delete. - pub set_id: String, + pub set_id: Cow<'p, str>, } /// struct for passing parameters to the method [`retrieve_stopwords_set`] #[derive(Clone, Debug)] -pub struct RetrieveStopwordsSetParams { +pub struct RetrieveStopwordsSetParams<'p> { /// The ID of the stopwords set to retrieve. - pub set_id: String, + pub set_id: Cow<'p, str>, } /// struct for passing parameters to the method [`upsert_stopwords_set`] #[derive(Clone, Debug)] -pub struct UpsertStopwordsSetParams { +pub struct UpsertStopwordsSetParams<'p> { /// The ID of the stopwords set to upsert. - pub set_id: String, + pub set_id: Cow<'p, str>, /// The stopwords set to upsert. - pub stopwords_set_upsert_schema: models::StopwordsSetUpsertSchema, + pub stopwords_set_upsert_schema: models::StopwordsSetUpsertSchema<'p>, } /// struct for typed errors of method [`delete_stopwords_set`] @@ -70,7 +71,7 @@ pub enum UpsertStopwordsSetError { /// Permanently deletes a stopwords set, given it's name. pub async fn delete_stopwords_set( configuration: &configuration::Configuration, - params: &DeleteStopwordsSetParams, + params: &DeleteStopwordsSetParams<'_>, ) -> Result> { let uri_str = format!( "{}/stopwords/{setId}", @@ -133,7 +134,7 @@ pub async fn delete_stopwords_set( /// Retrieve the details of a stopwords set, given it's name. pub async fn retrieve_stopwords_set( configuration: &configuration::Configuration, - params: &RetrieveStopwordsSetParams, + params: &RetrieveStopwordsSetParams<'_>, ) -> Result> { let uri_str = format!( "{}/stopwords/{setId}", @@ -250,7 +251,7 @@ pub async fn retrieve_stopwords_sets( /// When an analytics rule is created, we give it a name and describe the type, the source collections and the destination collection. 
pub async fn upsert_stopwords_set( configuration: &configuration::Configuration, - params: &UpsertStopwordsSetParams, + params: &UpsertStopwordsSetParams<'_>, ) -> Result> { let uri_str = format!( "{}/stopwords/{setId}", diff --git a/typesense_codegen/src/apis/synonyms_api.rs b/typesense_codegen/src/apis/synonyms_api.rs index 53d5cbb..2aff93a 100644 --- a/typesense_codegen/src/apis/synonyms_api.rs +++ b/typesense_codegen/src/apis/synonyms_api.rs @@ -10,66 +10,67 @@ use super::{ContentType, Error, configuration}; use crate::{apis::ResponseContent, models}; +use ::std::borrow::Cow; use reqwest; use serde::{Deserialize, Serialize, de::Error as _}; /// struct for passing parameters to the method [`delete_synonym_set`] #[derive(Clone, Debug)] -pub struct DeleteSynonymSetParams { +pub struct DeleteSynonymSetParams<'p> { /// The name of the synonym set to delete - pub synonym_set_name: String, + pub synonym_set_name: Cow<'p, str>, } /// struct for passing parameters to the method [`delete_synonym_set_item`] #[derive(Clone, Debug)] -pub struct DeleteSynonymSetItemParams { +pub struct DeleteSynonymSetItemParams<'p> { /// The name of the synonym set - pub synonym_set_name: String, + pub synonym_set_name: Cow<'p, str>, /// The id of the synonym item to delete - pub item_id: String, + pub item_id: Cow<'p, str>, } /// struct for passing parameters to the method [`retrieve_synonym_set`] #[derive(Clone, Debug)] -pub struct RetrieveSynonymSetParams { +pub struct RetrieveSynonymSetParams<'p> { /// The name of the synonym set to retrieve - pub synonym_set_name: String, + pub synonym_set_name: Cow<'p, str>, } /// struct for passing parameters to the method [`retrieve_synonym_set_item`] #[derive(Clone, Debug)] -pub struct RetrieveSynonymSetItemParams { +pub struct RetrieveSynonymSetItemParams<'p> { /// The name of the synonym set - pub synonym_set_name: String, + pub synonym_set_name: Cow<'p, str>, /// The id of the synonym item to retrieve - pub item_id: String, + pub item_id: 
Cow<'p, str>, } /// struct for passing parameters to the method [`retrieve_synonym_set_items`] #[derive(Clone, Debug)] -pub struct RetrieveSynonymSetItemsParams { +pub struct RetrieveSynonymSetItemsParams<'p> { /// The name of the synonym set to retrieve items for - pub synonym_set_name: String, + pub synonym_set_name: Cow<'p, str>, } /// struct for passing parameters to the method [`upsert_synonym_set`] #[derive(Clone, Debug)] -pub struct UpsertSynonymSetParams { +pub struct UpsertSynonymSetParams<'p> { /// The name of the synonym set to create/update - pub synonym_set_name: String, + pub synonym_set_name: Cow<'p, str>, /// The synonym set to be created/updated - pub synonym_set_create_schema: models::SynonymSetCreateSchema, + pub synonym_set_create_schema: models::SynonymSetCreateSchema<'p>, } /// struct for passing parameters to the method [`upsert_synonym_set_item`] #[derive(Clone, Debug)] -pub struct UpsertSynonymSetItemParams { +pub struct UpsertSynonymSetItemParams<'p> { /// The name of the synonym set - pub synonym_set_name: String, + pub synonym_set_name: Cow<'p, str>, /// The id of the synonym item to upsert - pub item_id: String, + pub item_id: Cow<'p, str>, /// The synonym item to be created/updated - pub synonym_item_schema: models::SynonymItemSchema, + pub synonym_item_schema: models::SynonymItemSchema<'p>, } /// struct for typed errors of method [`delete_synonym_set`] @@ -138,7 +139,7 @@ pub enum UpsertSynonymSetItemError { /// Delete a specific synonym set by its name pub async fn delete_synonym_set( configuration: &configuration::Configuration, - params: &DeleteSynonymSetParams, + params: &DeleteSynonymSetParams<'_>, ) -> Result> { let uri_str = format!( "{}/synonym_sets/{synonymSetName}", @@ -201,7 +202,7 @@ pub async fn delete_synonym_set( /// Delete a specific synonym item by its id pub async fn delete_synonym_set_item( configuration: &configuration::Configuration, - params: &DeleteSynonymSetItemParams, + params: &DeleteSynonymSetItemParams<'_>, 
) -> Result> { let uri_str = format!( "{}/synonym_sets/{synonymSetName}/items/{itemId}", @@ -265,8 +266,8 @@ pub async fn delete_synonym_set_item( /// Retrieve a specific synonym set by its name pub async fn retrieve_synonym_set( configuration: &configuration::Configuration, - params: &RetrieveSynonymSetParams, -) -> Result> { + params: &RetrieveSynonymSetParams<'_>, +) -> Result, Error> { let uri_str = format!( "{}/synonym_sets/{synonymSetName}", configuration.base_path, @@ -326,8 +327,8 @@ pub async fn retrieve_synonym_set( /// Retrieve a specific synonym item by its id pub async fn retrieve_synonym_set_item( configuration: &configuration::Configuration, - params: &RetrieveSynonymSetItemParams, -) -> Result> { + params: &RetrieveSynonymSetItemParams<'_>, +) -> Result, Error> { let uri_str = format!( "{}/synonym_sets/{synonymSetName}/items/{itemId}", configuration.base_path, @@ -388,8 +389,8 @@ pub async fn retrieve_synonym_set_item( /// Retrieve all synonym items in a set pub async fn retrieve_synonym_set_items( configuration: &configuration::Configuration, - params: &RetrieveSynonymSetItemsParams, -) -> Result, Error> { + params: &RetrieveSynonymSetItemsParams<'_>, +) -> Result>, Error> { let uri_str = format!( "{}/synonym_sets/{synonymSetName}/items", configuration.base_path, @@ -505,7 +506,7 @@ pub async fn retrieve_synonym_sets( /// Create or update a synonym set with the given name pub async fn upsert_synonym_set( configuration: &configuration::Configuration, - params: &UpsertSynonymSetParams, + params: &UpsertSynonymSetParams<'_>, ) -> Result> { let uri_str = format!( "{}/synonym_sets/{synonymSetName}", @@ -567,8 +568,8 @@ pub async fn upsert_synonym_set( /// Create or update a synonym set item with the given id pub async fn upsert_synonym_set_item( configuration: &configuration::Configuration, - params: &UpsertSynonymSetItemParams, -) -> Result> { + params: &UpsertSynonymSetItemParams<'_>, +) -> Result, Error> { let uri_str = format!( 
"{}/synonym_sets/{synonymSetName}/items/{itemId}", configuration.base_path, diff --git a/typesense_codegen/src/models/analytics_event.rs b/typesense_codegen/src/models/analytics_event.rs index 7ab05e0..ebfed1e 100644 --- a/typesense_codegen/src/models/analytics_event.rs +++ b/typesense_codegen/src/models/analytics_event.rs @@ -9,27 +9,28 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct AnalyticsEvent { +pub struct AnalyticsEvent<'a> { /// Name of the analytics rule this event corresponds to #[serde(rename = "name")] - pub name: String, + pub name: Cow<'a, str>, /// Type of event (e.g., click, conversion, query, visit) #[serde(rename = "event_type")] - pub event_type: String, + pub event_type: Cow<'a, str>, #[serde(rename = "data")] - pub data: Box, + pub data: Box>, } -impl AnalyticsEvent { +impl<'a> AnalyticsEvent<'a> { pub fn new( - name: String, - event_type: String, - data: models::AnalyticsEventData, - ) -> AnalyticsEvent { - AnalyticsEvent { + name: Cow<'a, str>, + event_type: Cow<'a, str>, + data: models::AnalyticsEventData<'a>, + ) -> Self { + Self { name, event_type, data: Box::new(data), diff --git a/typesense_codegen/src/models/analytics_event_create_response.rs b/typesense_codegen/src/models/analytics_event_create_response.rs index 2018290..9a85e91 100644 --- a/typesense_codegen/src/models/analytics_event_create_response.rs +++ b/typesense_codegen/src/models/analytics_event_create_response.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -18,7 +19,7 @@ pub struct AnalyticsEventCreateResponse { } impl AnalyticsEventCreateResponse { - pub fn new(ok: bool) -> AnalyticsEventCreateResponse { - AnalyticsEventCreateResponse { ok } + pub fn new(ok: bool) -> Self { + Self { ok } } } diff --git 
a/typesense_codegen/src/models/analytics_event_data.rs b/typesense_codegen/src/models/analytics_event_data.rs index 7c28241..93bf4d5 100644 --- a/typesense_codegen/src/models/analytics_event_data.rs +++ b/typesense_codegen/src/models/analytics_event_data.rs @@ -9,27 +9,28 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; /// AnalyticsEventData : Event payload #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct AnalyticsEventData { +pub struct AnalyticsEventData<'a> { #[serde(rename = "user_id", skip_serializing_if = "Option::is_none")] - pub user_id: Option, + pub user_id: Option>, #[serde(rename = "doc_id", skip_serializing_if = "Option::is_none")] - pub doc_id: Option, + pub doc_id: Option>, #[serde(rename = "doc_ids", skip_serializing_if = "Option::is_none")] pub doc_ids: Option>, #[serde(rename = "q", skip_serializing_if = "Option::is_none")] - pub q: Option, + pub q: Option>, #[serde(rename = "analytics_tag", skip_serializing_if = "Option::is_none")] - pub analytics_tag: Option, + pub analytics_tag: Option>, } -impl AnalyticsEventData { +impl<'a> AnalyticsEventData<'a> { /// Event payload - pub fn new() -> AnalyticsEventData { - AnalyticsEventData { + pub fn new() -> Self { + Self { user_id: None, doc_id: None, doc_ids: None, diff --git a/typesense_codegen/src/models/analytics_events_response.rs b/typesense_codegen/src/models/analytics_events_response.rs index a0de4e0..0181489 100644 --- a/typesense_codegen/src/models/analytics_events_response.rs +++ b/typesense_codegen/src/models/analytics_events_response.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -18,7 +19,7 @@ pub struct AnalyticsEventsResponse { } impl AnalyticsEventsResponse { - pub fn new(events: Vec) -> AnalyticsEventsResponse { - AnalyticsEventsResponse { events } + pub fn new(events: Vec) -> Self { + Self { 
events } } } diff --git a/typesense_codegen/src/models/analytics_events_response_events_inner.rs b/typesense_codegen/src/models/analytics_events_response_events_inner.rs index 9744a20..8ba6e05 100644 --- a/typesense_codegen/src/models/analytics_events_response_events_inner.rs +++ b/typesense_codegen/src/models/analytics_events_response_events_inner.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -32,8 +33,8 @@ pub struct AnalyticsEventsResponseEventsInner { } impl AnalyticsEventsResponseEventsInner { - pub fn new() -> AnalyticsEventsResponseEventsInner { - AnalyticsEventsResponseEventsInner { + pub fn new() -> Self { + Self { name: None, event_type: None, collection: None, diff --git a/typesense_codegen/src/models/analytics_rule.rs b/typesense_codegen/src/models/analytics_rule.rs index 4c01197..d644500 100644 --- a/typesense_codegen/src/models/analytics_rule.rs +++ b/typesense_codegen/src/models/analytics_rule.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -24,17 +25,12 @@ pub struct AnalyticsRule { #[serde(rename = "rule_tag", skip_serializing_if = "Option::is_none")] pub rule_tag: Option, #[serde(rename = "params", skip_serializing_if = "Option::is_none")] - pub params: Option>, + pub params: Option>>, } impl AnalyticsRule { - pub fn new( - name: String, - r#type: Type, - collection: String, - event_type: String, - ) -> AnalyticsRule { - AnalyticsRule { + pub fn new(name: String, r#type: Type, collection: String, event_type: String) -> Self { + Self { name, r#type, collection, diff --git a/typesense_codegen/src/models/analytics_rule_create.rs b/typesense_codegen/src/models/analytics_rule_create.rs index b9f8993..3d289e9 100644 --- a/typesense_codegen/src/models/analytics_rule_create.rs +++ 
b/typesense_codegen/src/models/analytics_rule_create.rs @@ -9,32 +9,33 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct AnalyticsRuleCreate { +pub struct AnalyticsRuleCreate<'a> { #[serde(rename = "name")] - pub name: String, + pub name: Cow<'a, str>, #[serde(rename = "type")] pub r#type: Type, #[serde(rename = "collection")] - pub collection: String, + pub collection: Cow<'a, str>, #[serde(rename = "event_type")] - pub event_type: String, + pub event_type: Cow<'a, str>, #[serde(rename = "rule_tag", skip_serializing_if = "Option::is_none")] - pub rule_tag: Option, + pub rule_tag: Option>, #[serde(rename = "params", skip_serializing_if = "Option::is_none")] - pub params: Option>, + pub params: Option>>, } -impl AnalyticsRuleCreate { +impl<'a> AnalyticsRuleCreate<'a> { pub fn new( - name: String, + name: Cow<'a, str>, r#type: Type, - collection: String, - event_type: String, - ) -> AnalyticsRuleCreate { - AnalyticsRuleCreate { + collection: Cow<'a, str>, + event_type: Cow<'a, str>, + ) -> Self { + Self { name, r#type, collection, diff --git a/typesense_codegen/src/models/analytics_rule_create_params.rs b/typesense_codegen/src/models/analytics_rule_create_params.rs index 6127dd3..c1bc721 100644 --- a/typesense_codegen/src/models/analytics_rule_create_params.rs +++ b/typesense_codegen/src/models/analytics_rule_create_params.rs @@ -9,15 +9,16 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct AnalyticsRuleCreateParams { +pub struct AnalyticsRuleCreateParams<'a> { #[serde( rename = "destination_collection", skip_serializing_if = "Option::is_none" )] - pub destination_collection: Option, + pub destination_collection: Option>, #[serde(rename = "limit", skip_serializing_if = "Option::is_none")] pub limit: Option, #[serde( @@ -30,14 +31,14 @@ 
pub struct AnalyticsRuleCreateParams { #[serde(rename = "expand_query", skip_serializing_if = "Option::is_none")] pub expand_query: Option, #[serde(rename = "counter_field", skip_serializing_if = "Option::is_none")] - pub counter_field: Option, + pub counter_field: Option>, #[serde(rename = "weight", skip_serializing_if = "Option::is_none")] pub weight: Option, } -impl AnalyticsRuleCreateParams { - pub fn new() -> AnalyticsRuleCreateParams { - AnalyticsRuleCreateParams { +impl<'a> AnalyticsRuleCreateParams<'a> { + pub fn new() -> Self { + Self { destination_collection: None, limit: None, capture_search_requests: None, diff --git a/typesense_codegen/src/models/analytics_rule_update.rs b/typesense_codegen/src/models/analytics_rule_update.rs index 3a42cc4..9fa0ac5 100644 --- a/typesense_codegen/src/models/analytics_rule_update.rs +++ b/typesense_codegen/src/models/analytics_rule_update.rs @@ -9,23 +9,24 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; /// AnalyticsRuleUpdate : Fields allowed to update on an analytics rule #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct AnalyticsRuleUpdate { +pub struct AnalyticsRuleUpdate<'a> { #[serde(rename = "name", skip_serializing_if = "Option::is_none")] - pub name: Option, + pub name: Option>, #[serde(rename = "rule_tag", skip_serializing_if = "Option::is_none")] - pub rule_tag: Option, + pub rule_tag: Option>, #[serde(rename = "params", skip_serializing_if = "Option::is_none")] - pub params: Option>, + pub params: Option>>, } -impl AnalyticsRuleUpdate { +impl<'a> AnalyticsRuleUpdate<'a> { /// Fields allowed to update on an analytics rule - pub fn new() -> AnalyticsRuleUpdate { - AnalyticsRuleUpdate { + pub fn new() -> Self { + Self { name: None, rule_tag: None, params: None, diff --git a/typesense_codegen/src/models/analytics_status.rs b/typesense_codegen/src/models/analytics_status.rs index ee3f151..f3ccc73 100644 --- 
a/typesense_codegen/src/models/analytics_status.rs +++ b/typesense_codegen/src/models/analytics_status.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -39,8 +40,8 @@ pub struct AnalyticsStatus { } impl AnalyticsStatus { - pub fn new() -> AnalyticsStatus { - AnalyticsStatus { + pub fn new() -> Self { + Self { popular_prefix_queries: None, nohits_prefix_queries: None, log_prefix_queries: None, diff --git a/typesense_codegen/src/models/api_key.rs b/typesense_codegen/src/models/api_key.rs index 511f8b9..1fc9239 100644 --- a/typesense_codegen/src/models/api_key.rs +++ b/typesense_codegen/src/models/api_key.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -30,8 +31,8 @@ pub struct ApiKey { } impl ApiKey { - pub fn new(description: String, actions: Vec, collections: Vec) -> ApiKey { - ApiKey { + pub fn new(description: String, actions: Vec, collections: Vec) -> Self { + Self { value: None, description, actions, diff --git a/typesense_codegen/src/models/api_key_delete_response.rs b/typesense_codegen/src/models/api_key_delete_response.rs index b9b453e..2368fa1 100644 --- a/typesense_codegen/src/models/api_key_delete_response.rs +++ b/typesense_codegen/src/models/api_key_delete_response.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -19,7 +20,7 @@ pub struct ApiKeyDeleteResponse { } impl ApiKeyDeleteResponse { - pub fn new(id: i64) -> ApiKeyDeleteResponse { - ApiKeyDeleteResponse { id } + pub fn new(id: i64) -> Self { + Self { id } } } diff --git a/typesense_codegen/src/models/api_key_schema.rs b/typesense_codegen/src/models/api_key_schema.rs index 095e5ec..5a7b7c3 100644 --- 
a/typesense_codegen/src/models/api_key_schema.rs +++ b/typesense_codegen/src/models/api_key_schema.rs @@ -9,14 +9,15 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct ApiKeySchema { +pub struct ApiKeySchema<'a> { #[serde(rename = "value", skip_serializing_if = "Option::is_none")] - pub value: Option, + pub value: Option>, #[serde(rename = "description")] - pub description: String, + pub description: Cow<'a, str>, #[serde(rename = "actions")] pub actions: Vec, #[serde(rename = "collections")] @@ -25,13 +26,9 @@ pub struct ApiKeySchema { pub expires_at: Option, } -impl ApiKeySchema { - pub fn new( - description: String, - actions: Vec, - collections: Vec, - ) -> ApiKeySchema { - ApiKeySchema { +impl<'a> ApiKeySchema<'a> { + pub fn new(description: Cow<'a, str>, actions: Vec, collections: Vec) -> Self { + Self { value: None, description, actions, diff --git a/typesense_codegen/src/models/api_keys_response.rs b/typesense_codegen/src/models/api_keys_response.rs index 784a6bf..1ccdd55 100644 --- a/typesense_codegen/src/models/api_keys_response.rs +++ b/typesense_codegen/src/models/api_keys_response.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -18,7 +19,7 @@ pub struct ApiKeysResponse { } impl ApiKeysResponse { - pub fn new(keys: Vec) -> ApiKeysResponse { - ApiKeysResponse { keys } + pub fn new(keys: Vec) -> Self { + Self { keys } } } diff --git a/typesense_codegen/src/models/api_response.rs b/typesense_codegen/src/models/api_response.rs index 0a27c4f..260faed 100644 --- a/typesense_codegen/src/models/api_response.rs +++ b/typesense_codegen/src/models/api_response.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, 
Deserialize)] @@ -18,7 +19,7 @@ pub struct ApiResponse { } impl ApiResponse { - pub fn new(message: String) -> ApiResponse { - ApiResponse { message } + pub fn new(message: String) -> Self { + Self { message } } } diff --git a/typesense_codegen/src/models/api_stats_response.rs b/typesense_codegen/src/models/api_stats_response.rs index c35c77a..95c104b 100644 --- a/typesense_codegen/src/models/api_stats_response.rs +++ b/typesense_codegen/src/models/api_stats_response.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -66,8 +67,8 @@ pub struct ApiStatsResponse { } impl ApiStatsResponse { - pub fn new() -> ApiStatsResponse { - ApiStatsResponse { + pub fn new() -> Self { + Self { delete_latency_ms: None, delete_requests_per_second: None, import_latency_ms: None, diff --git a/typesense_codegen/src/models/collection_alias.rs b/typesense_codegen/src/models/collection_alias.rs index b5a61fe..a8e2fb6 100644 --- a/typesense_codegen/src/models/collection_alias.rs +++ b/typesense_codegen/src/models/collection_alias.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -22,8 +23,8 @@ pub struct CollectionAlias { } impl CollectionAlias { - pub fn new(name: String, collection_name: String) -> CollectionAlias { - CollectionAlias { + pub fn new(name: String, collection_name: String) -> Self { + Self { name, collection_name, } diff --git a/typesense_codegen/src/models/collection_alias_schema.rs b/typesense_codegen/src/models/collection_alias_schema.rs index b35d641..332356d 100644 --- a/typesense_codegen/src/models/collection_alias_schema.rs +++ b/typesense_codegen/src/models/collection_alias_schema.rs @@ -9,17 +9,18 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, 
PartialEq, Serialize, Deserialize)] -pub struct CollectionAliasSchema { +pub struct CollectionAliasSchema<'a> { /// Name of the collection you wish to map the alias to #[serde(rename = "collection_name")] - pub collection_name: String, + pub collection_name: Cow<'a, str>, } -impl CollectionAliasSchema { - pub fn new(collection_name: String) -> CollectionAliasSchema { - CollectionAliasSchema { collection_name } +impl<'a> CollectionAliasSchema<'a> { + pub fn new(collection_name: Cow<'a, str>) -> Self { + Self { collection_name } } } diff --git a/typesense_codegen/src/models/collection_aliases_response.rs b/typesense_codegen/src/models/collection_aliases_response.rs index 3cf2d25..14a05f1 100644 --- a/typesense_codegen/src/models/collection_aliases_response.rs +++ b/typesense_codegen/src/models/collection_aliases_response.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -18,7 +19,7 @@ pub struct CollectionAliasesResponse { } impl CollectionAliasesResponse { - pub fn new(aliases: Vec) -> CollectionAliasesResponse { - CollectionAliasesResponse { aliases } + pub fn new(aliases: Vec) -> Self { + Self { aliases } } } diff --git a/typesense_codegen/src/models/collection_response.rs b/typesense_codegen/src/models/collection_response.rs index 1039909..422373c 100644 --- a/typesense_codegen/src/models/collection_response.rs +++ b/typesense_codegen/src/models/collection_response.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -59,8 +60,8 @@ impl CollectionResponse { fields: Vec, num_documents: i64, created_at: i64, - ) -> CollectionResponse { - CollectionResponse { + ) -> Self { + Self { name, fields, default_sorting_field: None, diff --git a/typesense_codegen/src/models/collection_schema.rs 
b/typesense_codegen/src/models/collection_schema.rs index b861bee..7269b82 100644 --- a/typesense_codegen/src/models/collection_schema.rs +++ b/typesense_codegen/src/models/collection_schema.rs @@ -9,15 +9,17 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(bon::Builder)] +#[builder(on(Cow<'_, str>, into))] #[builder(on(String, into))] #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct CollectionSchema { +pub struct CollectionSchema<'a> { /// Name of the collection #[serde(rename = "name")] - pub name: String, + pub name: Cow<'a, str>, /// A list of fields for querying, filtering and faceting #[serde(rename = "fields")] pub fields: Vec, @@ -26,7 +28,7 @@ pub struct CollectionSchema { rename = "default_sorting_field", skip_serializing_if = "Option::is_none" )] - pub default_sorting_field: Option, + pub default_sorting_field: Option>, /// List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. 
#[serde(rename = "token_separators", skip_serializing_if = "Option::is_none")] pub token_separators: Option>, @@ -49,9 +51,9 @@ pub struct CollectionSchema { pub metadata: Option, } -impl CollectionSchema { - pub fn new(name: String, fields: Vec) -> CollectionSchema { - CollectionSchema { +impl<'a> CollectionSchema<'a> { + pub fn new(name: Cow<'a, str>, fields: Vec) -> Self { + Self { name, fields, default_sorting_field: None, diff --git a/typesense_codegen/src/models/collection_update_schema.rs b/typesense_codegen/src/models/collection_update_schema.rs index 27a4d09..c539a7a 100644 --- a/typesense_codegen/src/models/collection_update_schema.rs +++ b/typesense_codegen/src/models/collection_update_schema.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -25,8 +26,8 @@ pub struct CollectionUpdateSchema { } impl CollectionUpdateSchema { - pub fn new(fields: Vec) -> CollectionUpdateSchema { - CollectionUpdateSchema { + pub fn new(fields: Vec) -> Self { + Self { fields, synonym_sets: None, metadata: None, diff --git a/typesense_codegen/src/models/conversation_model_create_schema.rs b/typesense_codegen/src/models/conversation_model_create_schema.rs index fc587c3..24f91fd 100644 --- a/typesense_codegen/src/models/conversation_model_create_schema.rs +++ b/typesense_codegen/src/models/conversation_model_create_schema.rs @@ -9,28 +9,29 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct ConversationModelCreateSchema { +pub struct ConversationModelCreateSchema<'a> { /// An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. 
#[serde(rename = "id", skip_serializing_if = "Option::is_none")] - pub id: Option, + pub id: Option>, /// Name of the LLM model offered by OpenAI, Cloudflare or vLLM #[serde(rename = "model_name")] - pub model_name: String, + pub model_name: Cow<'a, str>, /// The LLM service's API Key #[serde(rename = "api_key", skip_serializing_if = "Option::is_none")] - pub api_key: Option, + pub api_key: Option>, /// Typesense collection that stores the historical conversations #[serde(rename = "history_collection")] - pub history_collection: String, + pub history_collection: Cow<'a, str>, /// LLM service's account ID (only applicable for Cloudflare) #[serde(rename = "account_id", skip_serializing_if = "Option::is_none")] - pub account_id: Option, + pub account_id: Option>, /// The system prompt that contains special instructions to the LLM #[serde(rename = "system_prompt", skip_serializing_if = "Option::is_none")] - pub system_prompt: Option, + pub system_prompt: Option>, /// Time interval in seconds after which the messages would be deleted. 
Default: 86400 (24 hours) #[serde(rename = "ttl", skip_serializing_if = "Option::is_none")] pub ttl: Option, @@ -39,16 +40,12 @@ pub struct ConversationModelCreateSchema { pub max_bytes: i32, /// URL of vLLM service #[serde(rename = "vllm_url", skip_serializing_if = "Option::is_none")] - pub vllm_url: Option, + pub vllm_url: Option>, } -impl ConversationModelCreateSchema { - pub fn new( - model_name: String, - history_collection: String, - max_bytes: i32, - ) -> ConversationModelCreateSchema { - ConversationModelCreateSchema { +impl<'a> ConversationModelCreateSchema<'a> { + pub fn new(model_name: Cow<'a, str>, history_collection: Cow<'a, str>, max_bytes: i32) -> Self { + Self { id: None, model_name, api_key: None, diff --git a/typesense_codegen/src/models/conversation_model_schema.rs b/typesense_codegen/src/models/conversation_model_schema.rs index 1738f77..f98e4af 100644 --- a/typesense_codegen/src/models/conversation_model_schema.rs +++ b/typesense_codegen/src/models/conversation_model_schema.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -43,13 +44,8 @@ pub struct ConversationModelSchema { } impl ConversationModelSchema { - pub fn new( - id: String, - model_name: String, - history_collection: String, - max_bytes: i32, - ) -> ConversationModelSchema { - ConversationModelSchema { + pub fn new(id: String, model_name: String, history_collection: String, max_bytes: i32) -> Self { + Self { id, model_name, api_key: None, diff --git a/typesense_codegen/src/models/conversation_model_update_schema.rs b/typesense_codegen/src/models/conversation_model_update_schema.rs index 1335019..ef2b55e 100644 --- a/typesense_codegen/src/models/conversation_model_update_schema.rs +++ b/typesense_codegen/src/models/conversation_model_update_schema.rs @@ -9,28 +9,29 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; 
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct ConversationModelUpdateSchema { +pub struct ConversationModelUpdateSchema<'a> { /// An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. #[serde(rename = "id", skip_serializing_if = "Option::is_none")] - pub id: Option, + pub id: Option>, /// Name of the LLM model offered by OpenAI, Cloudflare or vLLM #[serde(rename = "model_name", skip_serializing_if = "Option::is_none")] - pub model_name: Option, + pub model_name: Option>, /// The LLM service's API Key #[serde(rename = "api_key", skip_serializing_if = "Option::is_none")] - pub api_key: Option, + pub api_key: Option>, /// Typesense collection that stores the historical conversations #[serde(rename = "history_collection", skip_serializing_if = "Option::is_none")] - pub history_collection: Option, + pub history_collection: Option>, /// LLM service's account ID (only applicable for Cloudflare) #[serde(rename = "account_id", skip_serializing_if = "Option::is_none")] - pub account_id: Option, + pub account_id: Option>, /// The system prompt that contains special instructions to the LLM #[serde(rename = "system_prompt", skip_serializing_if = "Option::is_none")] - pub system_prompt: Option, + pub system_prompt: Option>, /// Time interval in seconds after which the messages would be deleted. 
Default: 86400 (24 hours) #[serde(rename = "ttl", skip_serializing_if = "Option::is_none")] pub ttl: Option, @@ -39,12 +40,12 @@ pub struct ConversationModelUpdateSchema { pub max_bytes: Option, /// URL of vLLM service #[serde(rename = "vllm_url", skip_serializing_if = "Option::is_none")] - pub vllm_url: Option, + pub vllm_url: Option>, } -impl ConversationModelUpdateSchema { - pub fn new() -> ConversationModelUpdateSchema { - ConversationModelUpdateSchema { +impl<'a> ConversationModelUpdateSchema<'a> { + pub fn new() -> Self { + Self { id: None, model_name: None, api_key: None, diff --git a/typesense_codegen/src/models/create_analytics_rule_200_response.rs b/typesense_codegen/src/models/create_analytics_rule_200_response.rs index 80053ca..4ccc84b 100644 --- a/typesense_codegen/src/models/create_analytics_rule_200_response.rs +++ b/typesense_codegen/src/models/create_analytics_rule_200_response.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] diff --git a/typesense_codegen/src/models/create_analytics_rule_200_response_one_of_inner.rs b/typesense_codegen/src/models/create_analytics_rule_200_response_one_of_inner.rs index bb9f001..db47734 100644 --- a/typesense_codegen/src/models/create_analytics_rule_200_response_one_of_inner.rs +++ b/typesense_codegen/src/models/create_analytics_rule_200_response_one_of_inner.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -24,19 +25,14 @@ pub struct CreateAnalyticsRule200ResponseOneOfInner { #[serde(rename = "rule_tag", skip_serializing_if = "Option::is_none")] pub rule_tag: Option, #[serde(rename = "params", skip_serializing_if = "Option::is_none")] - pub params: Option>, + pub params: Option>>, #[serde(rename = "error", skip_serializing_if = "Option::is_none")] pub error: Option, } impl 
CreateAnalyticsRule200ResponseOneOfInner { - pub fn new( - name: String, - r#type: Type, - collection: String, - event_type: String, - ) -> CreateAnalyticsRule200ResponseOneOfInner { - CreateAnalyticsRule200ResponseOneOfInner { + pub fn new(name: String, r#type: Type, collection: String, event_type: String) -> Self { + Self { name, r#type, collection, diff --git a/typesense_codegen/src/models/create_analytics_rule_200_response_one_of_inner_any_of.rs b/typesense_codegen/src/models/create_analytics_rule_200_response_one_of_inner_any_of.rs index 08500ef..222a16c 100644 --- a/typesense_codegen/src/models/create_analytics_rule_200_response_one_of_inner_any_of.rs +++ b/typesense_codegen/src/models/create_analytics_rule_200_response_one_of_inner_any_of.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -18,7 +19,7 @@ pub struct CreateAnalyticsRule200ResponseOneOfInnerAnyOf { } impl CreateAnalyticsRule200ResponseOneOfInnerAnyOf { - pub fn new() -> CreateAnalyticsRule200ResponseOneOfInnerAnyOf { - CreateAnalyticsRule200ResponseOneOfInnerAnyOf { error: None } + pub fn new() -> Self { + Self { error: None } } } diff --git a/typesense_codegen/src/models/create_analytics_rule_request.rs b/typesense_codegen/src/models/create_analytics_rule_request.rs index 262570e..e1da263 100644 --- a/typesense_codegen/src/models/create_analytics_rule_request.rs +++ b/typesense_codegen/src/models/create_analytics_rule_request.rs @@ -9,16 +9,17 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] #[serde(untagged)] -pub enum CreateAnalyticsRuleRequest { - AnalyticsRuleCreate(Box), - Array(Vec), +pub enum CreateAnalyticsRuleRequest<'a> { + AnalyticsRuleCreate(Box>), + Array(Vec>), } -impl Default for CreateAnalyticsRuleRequest { +impl Default for CreateAnalyticsRuleRequest<'_> { fn 
default() -> Self { Self::AnalyticsRuleCreate(Default::default()) } diff --git a/typesense_codegen/src/models/curation_exclude.rs b/typesense_codegen/src/models/curation_exclude.rs index af70fa3..e8b5170 100644 --- a/typesense_codegen/src/models/curation_exclude.rs +++ b/typesense_codegen/src/models/curation_exclude.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -19,7 +20,7 @@ pub struct CurationExclude { } impl CurationExclude { - pub fn new(id: String) -> CurationExclude { - CurationExclude { id } + pub fn new(id: String) -> Self { + Self { id } } } diff --git a/typesense_codegen/src/models/curation_include.rs b/typesense_codegen/src/models/curation_include.rs index 5f9c2e5..daded9d 100644 --- a/typesense_codegen/src/models/curation_include.rs +++ b/typesense_codegen/src/models/curation_include.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -22,7 +23,7 @@ pub struct CurationInclude { } impl CurationInclude { - pub fn new(id: String, position: i32) -> CurationInclude { - CurationInclude { id, position } + pub fn new(id: String, position: i32) -> Self { + Self { id, position } } } diff --git a/typesense_codegen/src/models/curation_item_create_schema.rs b/typesense_codegen/src/models/curation_item_create_schema.rs index 7f4f975..430d10f 100644 --- a/typesense_codegen/src/models/curation_item_create_schema.rs +++ b/typesense_codegen/src/models/curation_item_create_schema.rs @@ -9,10 +9,11 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct CurationItemCreateSchema { +pub struct CurationItemCreateSchema<'a> { #[serde(rename = "rule")] pub rule: Box, /// List of document `id`s that should be included in the 
search results with their corresponding `position`s. @@ -23,7 +24,7 @@ pub struct CurationItemCreateSchema { pub excludes: Option>, /// A filter by clause that is applied to any search query that matches the curation rule. #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] - pub filter_by: Option, + pub filter_by: Option>, /// Indicates whether search query tokens that exist in the curation's rule should be removed from the search query. #[serde( rename = "remove_matched_tokens", @@ -35,10 +36,10 @@ pub struct CurationItemCreateSchema { pub metadata: Option, /// A sort by clause that is applied to any search query that matches the curation rule. #[serde(rename = "sort_by", skip_serializing_if = "Option::is_none")] - pub sort_by: Option, + pub sort_by: Option>, /// Replaces the current search query with this value, when the search query matches the curation rule. #[serde(rename = "replace_query", skip_serializing_if = "Option::is_none")] - pub replace_query: Option, + pub replace_query: Option>, /// When set to true, the filter conditions of the query is applied to the curated records as well. Default: false. 
#[serde( rename = "filter_curated_hits", @@ -56,12 +57,12 @@ pub struct CurationItemCreateSchema { pub stop_processing: Option, /// ID of the curation item #[serde(rename = "id", skip_serializing_if = "Option::is_none")] - pub id: Option, + pub id: Option>, } -impl CurationItemCreateSchema { - pub fn new(rule: models::CurationRule) -> CurationItemCreateSchema { - CurationItemCreateSchema { +impl<'a> CurationItemCreateSchema<'a> { + pub fn new(rule: models::CurationRule) -> Self { + Self { rule: Box::new(rule), includes: None, excludes: None, diff --git a/typesense_codegen/src/models/curation_item_delete_schema.rs b/typesense_codegen/src/models/curation_item_delete_schema.rs index 9bdf0c5..aab7c17 100644 --- a/typesense_codegen/src/models/curation_item_delete_schema.rs +++ b/typesense_codegen/src/models/curation_item_delete_schema.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -19,7 +20,7 @@ pub struct CurationItemDeleteSchema { } impl CurationItemDeleteSchema { - pub fn new(id: String) -> CurationItemDeleteSchema { - CurationItemDeleteSchema { id } + pub fn new(id: String) -> Self { + Self { id } } } diff --git a/typesense_codegen/src/models/curation_item_schema.rs b/typesense_codegen/src/models/curation_item_schema.rs index 74c3d4e..a65b2e2 100644 --- a/typesense_codegen/src/models/curation_item_schema.rs +++ b/typesense_codegen/src/models/curation_item_schema.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -59,8 +60,8 @@ pub struct CurationItemSchema { } impl CurationItemSchema { - pub fn new(rule: models::CurationRule, id: String) -> CurationItemSchema { - CurationItemSchema { + pub fn new(rule: models::CurationRule, id: String) -> Self { + Self { rule: Box::new(rule), includes: None, excludes: None, diff --git 
a/typesense_codegen/src/models/curation_rule.rs b/typesense_codegen/src/models/curation_rule.rs index 45db3a0..c1b12eb 100644 --- a/typesense_codegen/src/models/curation_rule.rs +++ b/typesense_codegen/src/models/curation_rule.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -28,8 +29,8 @@ pub struct CurationRule { } impl CurationRule { - pub fn new() -> CurationRule { - CurationRule { + pub fn new() -> Self { + Self { tags: None, query: None, r#match: None, diff --git a/typesense_codegen/src/models/curation_set_create_schema.rs b/typesense_codegen/src/models/curation_set_create_schema.rs index 8676aaa..c8b336c 100644 --- a/typesense_codegen/src/models/curation_set_create_schema.rs +++ b/typesense_codegen/src/models/curation_set_create_schema.rs @@ -9,21 +9,22 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct CurationSetCreateSchema { +pub struct CurationSetCreateSchema<'a> { /// Array of curation items #[serde(rename = "items")] - pub items: Vec, + pub items: Vec>, /// Optional description for the curation set #[serde(rename = "description", skip_serializing_if = "Option::is_none")] - pub description: Option, + pub description: Option>, } -impl CurationSetCreateSchema { - pub fn new(items: Vec) -> CurationSetCreateSchema { - CurationSetCreateSchema { +impl<'a> CurationSetCreateSchema<'a> { + pub fn new(items: Vec>) -> Self { + Self { items, description: None, } diff --git a/typesense_codegen/src/models/curation_set_delete_schema.rs b/typesense_codegen/src/models/curation_set_delete_schema.rs index d57d683..46173d2 100644 --- a/typesense_codegen/src/models/curation_set_delete_schema.rs +++ b/typesense_codegen/src/models/curation_set_delete_schema.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use 
serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -19,7 +20,7 @@ pub struct CurationSetDeleteSchema { } impl CurationSetDeleteSchema { - pub fn new(name: String) -> CurationSetDeleteSchema { - CurationSetDeleteSchema { name } + pub fn new(name: String) -> Self { + Self { name } } } diff --git a/typesense_codegen/src/models/curation_set_schema.rs b/typesense_codegen/src/models/curation_set_schema.rs index 70f5e54..6f4d2c8 100644 --- a/typesense_codegen/src/models/curation_set_schema.rs +++ b/typesense_codegen/src/models/curation_set_schema.rs @@ -9,13 +9,14 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct CurationSetSchema { /// Array of curation items #[serde(rename = "items")] - pub items: Vec, + pub items: Vec>, /// Optional description for the curation set #[serde(rename = "description", skip_serializing_if = "Option::is_none")] pub description: Option, @@ -24,8 +25,8 @@ pub struct CurationSetSchema { } impl CurationSetSchema { - pub fn new(items: Vec, name: String) -> CurationSetSchema { - CurationSetSchema { + pub fn new(items: Vec>, name: String) -> Self { + Self { items, description: None, name, diff --git a/typesense_codegen/src/models/debug_200_response.rs b/typesense_codegen/src/models/debug_200_response.rs index 946d84e..98f5e97 100644 --- a/typesense_codegen/src/models/debug_200_response.rs +++ b/typesense_codegen/src/models/debug_200_response.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -18,7 +19,7 @@ pub struct Debug200Response { } impl Debug200Response { - pub fn new() -> Debug200Response { - Debug200Response { version: None } + pub fn new() -> Self { + Self { version: None } } } diff --git a/typesense_codegen/src/models/delete_documents_200_response.rs 
b/typesense_codegen/src/models/delete_documents_200_response.rs index 169020c..910dd92 100644 --- a/typesense_codegen/src/models/delete_documents_200_response.rs +++ b/typesense_codegen/src/models/delete_documents_200_response.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -18,7 +19,7 @@ pub struct DeleteDocuments200Response { } impl DeleteDocuments200Response { - pub fn new(num_deleted: i32) -> DeleteDocuments200Response { - DeleteDocuments200Response { num_deleted } + pub fn new(num_deleted: i32) -> Self { + Self { num_deleted } } } diff --git a/typesense_codegen/src/models/delete_documents_parameters.rs b/typesense_codegen/src/models/delete_documents_parameters.rs index 55e8241..0a5f3bb 100644 --- a/typesense_codegen/src/models/delete_documents_parameters.rs +++ b/typesense_codegen/src/models/delete_documents_parameters.rs @@ -9,12 +9,13 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct DeleteDocumentsParameters { +pub struct DeleteDocumentsParameters<'a> { #[serde(rename = "filter_by")] - pub filter_by: String, + pub filter_by: Cow<'a, str>, /// Batch size parameter controls the number of documents that should be deleted at a time. A larger value will speed up deletions, but will impact performance of other operations running on the server. 
#[serde(rename = "batch_size", skip_serializing_if = "Option::is_none")] pub batch_size: Option, @@ -25,9 +26,9 @@ pub struct DeleteDocumentsParameters { pub truncate: Option, } -impl DeleteDocumentsParameters { - pub fn new(filter_by: String) -> DeleteDocumentsParameters { - DeleteDocumentsParameters { +impl<'a> DeleteDocumentsParameters<'a> { + pub fn new(filter_by: Cow<'a, str>) -> Self { + Self { filter_by, batch_size: None, ignore_not_found: None, diff --git a/typesense_codegen/src/models/delete_stopwords_set_200_response.rs b/typesense_codegen/src/models/delete_stopwords_set_200_response.rs index 03b93ef..5e6a405 100644 --- a/typesense_codegen/src/models/delete_stopwords_set_200_response.rs +++ b/typesense_codegen/src/models/delete_stopwords_set_200_response.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -18,7 +19,7 @@ pub struct DeleteStopwordsSet200Response { } impl DeleteStopwordsSet200Response { - pub fn new(id: String) -> DeleteStopwordsSet200Response { - DeleteStopwordsSet200Response { id } + pub fn new(id: String) -> Self { + Self { id } } } diff --git a/typesense_codegen/src/models/dirty_values.rs b/typesense_codegen/src/models/dirty_values.rs index 8caeab9..e52f06f 100644 --- a/typesense_codegen/src/models/dirty_values.rs +++ b/typesense_codegen/src/models/dirty_values.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; /// diff --git a/typesense_codegen/src/models/drop_tokens_mode.rs b/typesense_codegen/src/models/drop_tokens_mode.rs index 5b8ba01..a336ed9 100644 --- a/typesense_codegen/src/models/drop_tokens_mode.rs +++ b/typesense_codegen/src/models/drop_tokens_mode.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; /// DropTokensMode : Dictates the direction in which the words in the query must be dropped when the 
original words in the query do not appear in any document. Values: right_to_left (default), left_to_right, both_sides:3 A note on both_sides:3 - for queries up to 3 tokens (words) in length, this mode will drop tokens from both sides and exhaustively rank all matching results. If query length is greater than 3 words, Typesense will just fallback to default behavior of right_to_left diff --git a/typesense_codegen/src/models/export_documents_parameters.rs b/typesense_codegen/src/models/export_documents_parameters.rs index 36984f8..3dfca40 100644 --- a/typesense_codegen/src/models/export_documents_parameters.rs +++ b/typesense_codegen/src/models/export_documents_parameters.rs @@ -9,24 +9,25 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct ExportDocumentsParameters { +pub struct ExportDocumentsParameters<'a> { /// Filter conditions for refining your search results. Separate multiple conditions with &&. 
#[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] - pub filter_by: Option, + pub filter_by: Option>, /// List of fields from the document to include in the search result #[serde(rename = "include_fields", skip_serializing_if = "Option::is_none")] - pub include_fields: Option, + pub include_fields: Option>, /// List of fields from the document to exclude in the search result #[serde(rename = "exclude_fields", skip_serializing_if = "Option::is_none")] - pub exclude_fields: Option, + pub exclude_fields: Option>, } -impl ExportDocumentsParameters { - pub fn new() -> ExportDocumentsParameters { - ExportDocumentsParameters { +impl<'a> ExportDocumentsParameters<'a> { + pub fn new() -> Self { + Self { filter_by: None, include_fields: None, exclude_fields: None, diff --git a/typesense_codegen/src/models/facet_counts.rs b/typesense_codegen/src/models/facet_counts.rs index d9cbdf3..fd3eca8 100644 --- a/typesense_codegen/src/models/facet_counts.rs +++ b/typesense_codegen/src/models/facet_counts.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -22,8 +23,8 @@ pub struct FacetCounts { } impl FacetCounts { - pub fn new() -> FacetCounts { - FacetCounts { + pub fn new() -> Self { + Self { counts: None, field_name: None, stats: None, diff --git a/typesense_codegen/src/models/facet_counts_counts_inner.rs b/typesense_codegen/src/models/facet_counts_counts_inner.rs index bc6ede2..7f26722 100644 --- a/typesense_codegen/src/models/facet_counts_counts_inner.rs +++ b/typesense_codegen/src/models/facet_counts_counts_inner.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -24,8 +25,8 @@ pub struct FacetCountsCountsInner { } impl FacetCountsCountsInner { - pub fn new() -> FacetCountsCountsInner { - FacetCountsCountsInner { + 
pub fn new() -> Self { + Self { count: None, highlighted: None, value: None, diff --git a/typesense_codegen/src/models/facet_counts_stats.rs b/typesense_codegen/src/models/facet_counts_stats.rs index 7f0d9dd..10b7146 100644 --- a/typesense_codegen/src/models/facet_counts_stats.rs +++ b/typesense_codegen/src/models/facet_counts_stats.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -26,8 +27,8 @@ pub struct FacetCountsStats { } impl FacetCountsStats { - pub fn new() -> FacetCountsStats { - FacetCountsStats { + pub fn new() -> Self { + Self { max: None, min: None, sum: None, diff --git a/typesense_codegen/src/models/field.rs b/typesense_codegen/src/models/field.rs index 39bacbb..b923c39 100644 --- a/typesense_codegen/src/models/field.rs +++ b/typesense_codegen/src/models/field.rs @@ -9,9 +9,11 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(bon::Builder)] +#[builder(on(Cow<'_, str>, into))] #[builder(on(String, into))] #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct Field { @@ -64,8 +66,8 @@ pub struct Field { } impl Field { - pub fn new(name: String, r#type: String) -> Field { - Field { + pub fn new(name: String, r#type: String) -> Self { + Self { name, r#type, optional: None, diff --git a/typesense_codegen/src/models/field_embed.rs b/typesense_codegen/src/models/field_embed.rs index 0e84fd9..bc0cb26 100644 --- a/typesense_codegen/src/models/field_embed.rs +++ b/typesense_codegen/src/models/field_embed.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -20,8 +21,8 @@ pub struct FieldEmbed { } impl FieldEmbed { - pub fn new(from: Vec, model_config: models::FieldEmbedModelConfig) -> FieldEmbed { - FieldEmbed { + pub fn new(from: Vec, model_config: 
models::FieldEmbedModelConfig) -> Self { + Self { from, model_config: Box::new(model_config), } diff --git a/typesense_codegen/src/models/field_embed_model_config.rs b/typesense_codegen/src/models/field_embed_model_config.rs index ee40bea..bbe8c0a 100644 --- a/typesense_codegen/src/models/field_embed_model_config.rs +++ b/typesense_codegen/src/models/field_embed_model_config.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -36,8 +37,8 @@ pub struct FieldEmbedModelConfig { } impl FieldEmbedModelConfig { - pub fn new(model_name: String) -> FieldEmbedModelConfig { - FieldEmbedModelConfig { + pub fn new(model_name: String) -> Self { + Self { model_name, api_key: None, url: None, diff --git a/typesense_codegen/src/models/get_collections_parameters.rs b/typesense_codegen/src/models/get_collections_parameters.rs index 02497f6..fbd6418 100644 --- a/typesense_codegen/src/models/get_collections_parameters.rs +++ b/typesense_codegen/src/models/get_collections_parameters.rs @@ -9,13 +9,14 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct GetCollectionsParameters { +pub struct GetCollectionsParameters<'a> { /// Comma-separated list of fields from the collection to exclude from the response #[serde(rename = "exclude_fields", skip_serializing_if = "Option::is_none")] - pub exclude_fields: Option, + pub exclude_fields: Option>, /// Number of collections to fetch. Default: returns all collections. 
#[serde(rename = "limit", skip_serializing_if = "Option::is_none")] pub limit: Option, @@ -24,9 +25,9 @@ pub struct GetCollectionsParameters { pub offset: Option, } -impl GetCollectionsParameters { - pub fn new() -> GetCollectionsParameters { - GetCollectionsParameters { +impl<'a> GetCollectionsParameters<'a> { + pub fn new() -> Self { + Self { exclude_fields: None, limit: None, offset: None, diff --git a/typesense_codegen/src/models/health_status.rs b/typesense_codegen/src/models/health_status.rs index 66cdf6b..f574fe6 100644 --- a/typesense_codegen/src/models/health_status.rs +++ b/typesense_codegen/src/models/health_status.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -18,7 +19,7 @@ pub struct HealthStatus { } impl HealthStatus { - pub fn new(ok: bool) -> HealthStatus { - HealthStatus { ok } + pub fn new(ok: bool) -> Self { + Self { ok } } } diff --git a/typesense_codegen/src/models/import_documents_parameters.rs b/typesense_codegen/src/models/import_documents_parameters.rs index e17494a..b325451 100644 --- a/typesense_codegen/src/models/import_documents_parameters.rs +++ b/typesense_codegen/src/models/import_documents_parameters.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -32,8 +33,8 @@ pub struct ImportDocumentsParameters { } impl ImportDocumentsParameters { - pub fn new() -> ImportDocumentsParameters { - ImportDocumentsParameters { + pub fn new() -> Self { + Self { batch_size: None, return_id: None, remote_embedding_batch_size: None, diff --git a/typesense_codegen/src/models/index_action.rs b/typesense_codegen/src/models/index_action.rs index eb52174..0666c87 100644 --- a/typesense_codegen/src/models/index_action.rs +++ b/typesense_codegen/src/models/index_action.rs @@ -9,6 +9,7 @@ */ use crate::models; 
+use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; /// diff --git a/typesense_codegen/src/models/list_stemming_dictionaries_200_response.rs b/typesense_codegen/src/models/list_stemming_dictionaries_200_response.rs index 9985e0d..46df5f8 100644 --- a/typesense_codegen/src/models/list_stemming_dictionaries_200_response.rs +++ b/typesense_codegen/src/models/list_stemming_dictionaries_200_response.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -18,7 +19,7 @@ pub struct ListStemmingDictionaries200Response { } impl ListStemmingDictionaries200Response { - pub fn new() -> ListStemmingDictionaries200Response { - ListStemmingDictionaries200Response { dictionaries: None } + pub fn new() -> Self { + Self { dictionaries: None } } } diff --git a/typesense_codegen/src/models/multi_search_collection_parameters.rs b/typesense_codegen/src/models/multi_search_collection_parameters.rs index 76f99e0..01ad2d4 100644 --- a/typesense_codegen/src/models/multi_search_collection_parameters.rs +++ b/typesense_codegen/src/models/multi_search_collection_parameters.rs @@ -9,30 +9,32 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(bon::Builder)] +#[builder(on(Cow<'_, str>, into))] #[builder(on(String, into))] #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct MultiSearchCollectionParameters { +pub struct MultiSearchCollectionParameters<'a> { /// The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. #[serde(rename = "q", skip_serializing_if = "Option::is_none")] - pub q: Option, + pub q: Option>, /// A list of `string` fields that should be queried against. Multiple fields are separated with a comma. 
#[serde(rename = "query_by", skip_serializing_if = "Option::is_none")] - pub query_by: Option, + pub query_by: Option>, /// The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. #[serde(rename = "query_by_weights", skip_serializing_if = "Option::is_none")] - pub query_by_weights: Option, + pub query_by_weights: Option>, /// In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. #[serde(rename = "text_match_type", skip_serializing_if = "Option::is_none")] - pub text_match_type: Option, + pub text_match_type: Option>, /// Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. #[serde(rename = "prefix", skip_serializing_if = "Option::is_none")] - pub prefix: Option, + pub prefix: Option>, /// If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results #[serde(rename = "infix", skip_serializing_if = "Option::is_none")] - pub infix: Option, + pub infix: Option>, /// There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. 
#[serde(rename = "max_extra_prefix", skip_serializing_if = "Option::is_none")] pub max_extra_prefix: Option, @@ -41,22 +43,22 @@ pub struct MultiSearchCollectionParameters { pub max_extra_suffix: Option, /// Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] - pub filter_by: Option, + pub filter_by: Option>, /// A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` #[serde(rename = "sort_by", skip_serializing_if = "Option::is_none")] - pub sort_by: Option, + pub sort_by: Option>, /// A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. #[serde(rename = "facet_by", skip_serializing_if = "Option::is_none")] - pub facet_by: Option, + pub facet_by: Option>, /// Maximum number of facet values to be returned. #[serde(rename = "max_facet_values", skip_serializing_if = "Option::is_none")] pub max_facet_values: Option, /// Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". #[serde(rename = "facet_query", skip_serializing_if = "Option::is_none")] - pub facet_query: Option, + pub facet_query: Option>, /// The number of typographical errors (1 or 2) that would be tolerated. Default: 2 #[serde(rename = "num_typos", skip_serializing_if = "Option::is_none")] - pub num_typos: Option, + pub num_typos: Option>, /// Results from this specific page number would be fetched. 
#[serde(rename = "page", skip_serializing_if = "Option::is_none")] pub page: Option, @@ -71,7 +73,7 @@ pub struct MultiSearchCollectionParameters { pub offset: Option, /// You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. #[serde(rename = "group_by", skip_serializing_if = "Option::is_none")] - pub group_by: Option, + pub group_by: Option>, /// Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 #[serde(rename = "group_limit", skip_serializing_if = "Option::is_none")] pub group_limit: Option, @@ -83,16 +85,16 @@ pub struct MultiSearchCollectionParameters { pub group_missing_values: Option, /// List of fields from the document to include in the search result #[serde(rename = "include_fields", skip_serializing_if = "Option::is_none")] - pub include_fields: Option, + pub include_fields: Option>, /// List of fields from the document to exclude in the search result #[serde(rename = "exclude_fields", skip_serializing_if = "Option::is_none")] - pub exclude_fields: Option, + pub exclude_fields: Option>, /// List of fields which should be highlighted fully without snippeting #[serde( rename = "highlight_full_fields", skip_serializing_if = "Option::is_none" )] - pub highlight_full_fields: Option, + pub highlight_full_fields: Option>, /// The number of tokens that should surround the highlighted text on each side. Default: 4 #[serde( rename = "highlight_affix_num_tokens", @@ -104,10 +106,10 @@ pub struct MultiSearchCollectionParameters { rename = "highlight_start_tag", skip_serializing_if = "Option::is_none" )] - pub highlight_start_tag: Option, + pub highlight_start_tag: Option>, /// The end tag used for the highlighted snippets. 
Default: `` #[serde(rename = "highlight_end_tag", skip_serializing_if = "Option::is_none")] - pub highlight_end_tag: Option, + pub highlight_end_tag: Option>, /// Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 #[serde(rename = "snippet_threshold", skip_serializing_if = "Option::is_none")] pub snippet_threshold: Option, @@ -151,16 +153,16 @@ pub struct MultiSearchCollectionParameters { pub synonym_num_typos: Option, /// A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. #[serde(rename = "pinned_hits", skip_serializing_if = "Option::is_none")] - pub pinned_hits: Option, + pub pinned_hits: Option>, /// A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. #[serde(rename = "hidden_hits", skip_serializing_if = "Option::is_none")] - pub hidden_hits: Option, + pub hidden_hits: Option>, /// Comma separated list of tags to trigger the curations rules that match the tags. 
#[serde(rename = "override_tags", skip_serializing_if = "Option::is_none")] - pub override_tags: Option, + pub override_tags: Option>, /// A list of custom fields that must be highlighted even if you don't query for them #[serde(rename = "highlight_fields", skip_serializing_if = "Option::is_none")] - pub highlight_fields: Option, + pub highlight_fields: Option>, /// You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same #[serde( rename = "pre_segmented_query", @@ -169,7 +171,7 @@ pub struct MultiSearchCollectionParameters { pub pre_segmented_query: Option, /// Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. #[serde(rename = "preset", skip_serializing_if = "Option::is_none")] - pub preset: Option, + pub preset: Option>, /// If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false #[serde(rename = "enable_overrides", skip_serializing_if = "Option::is_none")] pub enable_overrides: Option, @@ -217,7 +219,7 @@ pub struct MultiSearchCollectionParameters { pub min_len_2typo: Option, /// Vector query expression for fetching documents \"closest\" to a given query/document vector. #[serde(rename = "vector_query", skip_serializing_if = "Option::is_none")] - pub vector_query: Option, + pub vector_query: Option>, /// Timeout (in milliseconds) for fetching remote embeddings. #[serde( rename = "remote_embedding_timeout_ms", @@ -232,19 +234,19 @@ pub struct MultiSearchCollectionParameters { pub remote_embedding_num_tries: Option, /// Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). 
#[serde(rename = "facet_strategy", skip_serializing_if = "Option::is_none")] - pub facet_strategy: Option, + pub facet_strategy: Option>, /// Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. #[serde(rename = "stopwords", skip_serializing_if = "Option::is_none")] - pub stopwords: Option, + pub stopwords: Option>, /// Comma separated string of nested facet fields whose parent object should be returned in facet response. #[serde( rename = "facet_return_parent", skip_serializing_if = "Option::is_none" )] - pub facet_return_parent: Option, + pub facet_return_parent: Option>, /// The base64 encoded audio file in 16 khz 16-bit WAV format. #[serde(rename = "voice_query", skip_serializing_if = "Option::is_none")] - pub voice_query: Option, + pub voice_query: Option>, /// Enable conversational search. #[serde(rename = "conversation", skip_serializing_if = "Option::is_none")] pub conversation: Option, @@ -253,19 +255,19 @@ pub struct MultiSearchCollectionParameters { rename = "conversation_model_id", skip_serializing_if = "Option::is_none" )] - pub conversation_model_id: Option, + pub conversation_model_id: Option>, /// The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. #[serde(rename = "conversation_id", skip_serializing_if = "Option::is_none")] - pub conversation_id: Option, + pub conversation_id: Option>, /// The collection to search in. #[serde(rename = "collection", skip_serializing_if = "Option::is_none")] - pub collection: Option, + pub collection: Option>, /// A separate search API key for each search within a multi_search request #[serde( rename = "x-typesense-api-key", skip_serializing_if = "Option::is_none" )] - pub x_typesense_api_key: Option, + pub x_typesense_api_key: Option>, /// When true, computes both text match and vector distance scores for all matches in hybrid search. 
Documents found only through keyword search will get a vector distance score, and documents found only through vector search will get a text match score. #[serde( rename = "rerank_hybrid_matches", @@ -274,9 +276,9 @@ pub struct MultiSearchCollectionParameters { pub rerank_hybrid_matches: Option, } -impl MultiSearchCollectionParameters { - pub fn new() -> MultiSearchCollectionParameters { - MultiSearchCollectionParameters { +impl<'a> MultiSearchCollectionParameters<'a> { + pub fn new() -> Self { + Self { q: None, query_by: None, query_by_weights: None, diff --git a/typesense_codegen/src/models/multi_search_parameters.rs b/typesense_codegen/src/models/multi_search_parameters.rs index 21b2db0..fba11b2 100644 --- a/typesense_codegen/src/models/multi_search_parameters.rs +++ b/typesense_codegen/src/models/multi_search_parameters.rs @@ -9,31 +9,33 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; /// MultiSearchParameters : Parameters for the multi search API. #[derive(bon::Builder)] +#[builder(on(Cow<'_, str>, into))] #[builder(on(String, into))] #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct MultiSearchParameters { +pub struct MultiSearchParameters<'a> { /// The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. #[serde(rename = "q", skip_serializing_if = "Option::is_none")] - pub q: Option, + pub q: Option>, /// A list of `string` fields that should be queried against. Multiple fields are separated with a comma. #[serde(rename = "query_by", skip_serializing_if = "Option::is_none")] - pub query_by: Option, + pub query_by: Option>, /// The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. 
#[serde(rename = "query_by_weights", skip_serializing_if = "Option::is_none")] - pub query_by_weights: Option, + pub query_by_weights: Option>, /// In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. #[serde(rename = "text_match_type", skip_serializing_if = "Option::is_none")] - pub text_match_type: Option, + pub text_match_type: Option>, /// Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. #[serde(rename = "prefix", skip_serializing_if = "Option::is_none")] - pub prefix: Option, + pub prefix: Option>, /// If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results #[serde(rename = "infix", skip_serializing_if = "Option::is_none")] - pub infix: Option, + pub infix: Option>, /// There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. #[serde(rename = "max_extra_prefix", skip_serializing_if = "Option::is_none")] pub max_extra_prefix: Option, @@ -42,22 +44,22 @@ pub struct MultiSearchParameters { pub max_extra_suffix: Option, /// Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. 
#[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] - pub filter_by: Option, + pub filter_by: Option>, /// A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` #[serde(rename = "sort_by", skip_serializing_if = "Option::is_none")] - pub sort_by: Option, + pub sort_by: Option>, /// A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. #[serde(rename = "facet_by", skip_serializing_if = "Option::is_none")] - pub facet_by: Option, + pub facet_by: Option>, /// Maximum number of facet values to be returned. #[serde(rename = "max_facet_values", skip_serializing_if = "Option::is_none")] pub max_facet_values: Option, /// Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". #[serde(rename = "facet_query", skip_serializing_if = "Option::is_none")] - pub facet_query: Option, + pub facet_query: Option>, /// The number of typographical errors (1 or 2) that would be tolerated. Default: 2 #[serde(rename = "num_typos", skip_serializing_if = "Option::is_none")] - pub num_typos: Option, + pub num_typos: Option>, /// Results from this specific page number would be fetched. #[serde(rename = "page", skip_serializing_if = "Option::is_none")] pub page: Option, @@ -72,7 +74,7 @@ pub struct MultiSearchParameters { pub offset: Option, /// You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. 
To group on a particular field, it must be a faceted field. #[serde(rename = "group_by", skip_serializing_if = "Option::is_none")] - pub group_by: Option, + pub group_by: Option>, /// Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 #[serde(rename = "group_limit", skip_serializing_if = "Option::is_none")] pub group_limit: Option, @@ -84,16 +86,16 @@ pub struct MultiSearchParameters { pub group_missing_values: Option, /// List of fields from the document to include in the search result #[serde(rename = "include_fields", skip_serializing_if = "Option::is_none")] - pub include_fields: Option, + pub include_fields: Option>, /// List of fields from the document to exclude in the search result #[serde(rename = "exclude_fields", skip_serializing_if = "Option::is_none")] - pub exclude_fields: Option, + pub exclude_fields: Option>, /// List of fields which should be highlighted fully without snippeting #[serde( rename = "highlight_full_fields", skip_serializing_if = "Option::is_none" )] - pub highlight_full_fields: Option, + pub highlight_full_fields: Option>, /// The number of tokens that should surround the highlighted text on each side. Default: 4 #[serde( rename = "highlight_affix_num_tokens", @@ -105,10 +107,10 @@ pub struct MultiSearchParameters { rename = "highlight_start_tag", skip_serializing_if = "Option::is_none" )] - pub highlight_start_tag: Option, + pub highlight_start_tag: Option>, /// The end tag used for the highlighted snippets. Default: `` #[serde(rename = "highlight_end_tag", skip_serializing_if = "Option::is_none")] - pub highlight_end_tag: Option, + pub highlight_end_tag: Option>, /// Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. 
Default: 30 #[serde(rename = "snippet_threshold", skip_serializing_if = "Option::is_none")] pub snippet_threshold: Option, @@ -152,16 +154,16 @@ pub struct MultiSearchParameters { pub synonym_num_typos: Option, /// A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. #[serde(rename = "pinned_hits", skip_serializing_if = "Option::is_none")] - pub pinned_hits: Option, + pub pinned_hits: Option>, /// A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. #[serde(rename = "hidden_hits", skip_serializing_if = "Option::is_none")] - pub hidden_hits: Option, + pub hidden_hits: Option>, /// Comma separated list of tags to trigger the curations rules that match the tags. #[serde(rename = "override_tags", skip_serializing_if = "Option::is_none")] - pub override_tags: Option, + pub override_tags: Option>, /// A list of custom fields that must be highlighted even if you don't query for them #[serde(rename = "highlight_fields", skip_serializing_if = "Option::is_none")] - pub highlight_fields: Option, + pub highlight_fields: Option>, /// You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. 
Set this parameter to true to do the same #[serde( rename = "pre_segmented_query", @@ -170,7 +172,7 @@ pub struct MultiSearchParameters { pub pre_segmented_query: Option, /// Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. #[serde(rename = "preset", skip_serializing_if = "Option::is_none")] - pub preset: Option, + pub preset: Option>, /// If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false #[serde(rename = "enable_overrides", skip_serializing_if = "Option::is_none")] pub enable_overrides: Option, @@ -218,7 +220,7 @@ pub struct MultiSearchParameters { pub min_len_2typo: Option, /// Vector query expression for fetching documents \"closest\" to a given query/document vector. #[serde(rename = "vector_query", skip_serializing_if = "Option::is_none")] - pub vector_query: Option, + pub vector_query: Option>, /// Timeout (in milliseconds) for fetching remote embeddings. #[serde( rename = "remote_embedding_timeout_ms", @@ -233,19 +235,19 @@ pub struct MultiSearchParameters { pub remote_embedding_num_tries: Option, /// Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). #[serde(rename = "facet_strategy", skip_serializing_if = "Option::is_none")] - pub facet_strategy: Option, + pub facet_strategy: Option>, /// Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. #[serde(rename = "stopwords", skip_serializing_if = "Option::is_none")] - pub stopwords: Option, + pub stopwords: Option>, /// Comma separated string of nested facet fields whose parent object should be returned in facet response. 
#[serde( rename = "facet_return_parent", skip_serializing_if = "Option::is_none" )] - pub facet_return_parent: Option, + pub facet_return_parent: Option>, /// The base64 encoded audio file in 16 khz 16-bit WAV format. #[serde(rename = "voice_query", skip_serializing_if = "Option::is_none")] - pub voice_query: Option, + pub voice_query: Option>, /// Enable conversational search. #[serde(rename = "conversation", skip_serializing_if = "Option::is_none")] pub conversation: Option, @@ -254,16 +256,16 @@ pub struct MultiSearchParameters { rename = "conversation_model_id", skip_serializing_if = "Option::is_none" )] - pub conversation_model_id: Option, + pub conversation_model_id: Option>, /// The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. #[serde(rename = "conversation_id", skip_serializing_if = "Option::is_none")] - pub conversation_id: Option, + pub conversation_id: Option>, } -impl MultiSearchParameters { +impl<'a> MultiSearchParameters<'a> { /// Parameters for the multi search API. 
- pub fn new() -> MultiSearchParameters { - MultiSearchParameters { + pub fn new() -> Self { + Self { q: None, query_by: None, query_by_weights: None, diff --git a/typesense_codegen/src/models/multi_search_result.rs b/typesense_codegen/src/models/multi_search_result.rs index bef6780..bf38dc2 100644 --- a/typesense_codegen/src/models/multi_search_result.rs +++ b/typesense_codegen/src/models/multi_search_result.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -20,8 +21,8 @@ pub struct MultiSearchResult { } impl MultiSearchResult { - pub fn new(results: Vec>) -> MultiSearchResult { - MultiSearchResult { + pub fn new(results: Vec>) -> Self { + Self { results, conversation: None, } diff --git a/typesense_codegen/src/models/multi_search_result_item.rs b/typesense_codegen/src/models/multi_search_result_item.rs index daa3024..b737833 100644 --- a/typesense_codegen/src/models/multi_search_result_item.rs +++ b/typesense_codegen/src/models/multi_search_result_item.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -59,8 +60,8 @@ pub struct MultiSearchResultItem { } impl MultiSearchResultItem { - pub fn new() -> MultiSearchResultItem { - MultiSearchResultItem { + pub fn new() -> Self { + Self { facet_counts: None, found: None, found_docs: None, diff --git a/typesense_codegen/src/models/multi_search_searches_parameter.rs b/typesense_codegen/src/models/multi_search_searches_parameter.rs index 36cd062..fa55c22 100644 --- a/typesense_codegen/src/models/multi_search_searches_parameter.rs +++ b/typesense_codegen/src/models/multi_search_searches_parameter.rs @@ -9,22 +9,21 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct 
MultiSearchSearchesParameter { +pub struct MultiSearchSearchesParameter<'a> { /// When true, merges the search results from each search query into a single ordered set of hits. #[serde(rename = "union", skip_serializing_if = "Option::is_none")] pub union: Option, #[serde(rename = "searches")] - pub searches: Vec, + pub searches: Vec>, } -impl MultiSearchSearchesParameter { - pub fn new( - searches: Vec, - ) -> MultiSearchSearchesParameter { - MultiSearchSearchesParameter { +impl<'a> MultiSearchSearchesParameter<'a> { + pub fn new(searches: Vec>) -> Self { + Self { union: None, searches, } diff --git a/typesense_codegen/src/models/nl_search_model_base.rs b/typesense_codegen/src/models/nl_search_model_base.rs index 5bb066f..a9b4d93 100644 --- a/typesense_codegen/src/models/nl_search_model_base.rs +++ b/typesense_codegen/src/models/nl_search_model_base.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -70,8 +71,8 @@ pub struct NlSearchModelBase { } impl NlSearchModelBase { - pub fn new() -> NlSearchModelBase { - NlSearchModelBase { + pub fn new() -> Self { + Self { model_name: None, api_key: None, api_url: None, diff --git a/typesense_codegen/src/models/nl_search_model_create_schema.rs b/typesense_codegen/src/models/nl_search_model_create_schema.rs index 34cb2d4..4c8aff0 100644 --- a/typesense_codegen/src/models/nl_search_model_create_schema.rs +++ b/typesense_codegen/src/models/nl_search_model_create_schema.rs @@ -9,19 +9,20 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct NlSearchModelCreateSchema { +pub struct NlSearchModelCreateSchema<'a> { /// Name of the NL model to use #[serde(rename = "model_name", skip_serializing_if = "Option::is_none")] - pub model_name: Option, + pub model_name: Option>, /// API key for the NL model 
service #[serde(rename = "api_key", skip_serializing_if = "Option::is_none")] - pub api_key: Option, + pub api_key: Option>, /// Custom API URL for the NL model service #[serde(rename = "api_url", skip_serializing_if = "Option::is_none")] - pub api_url: Option, + pub api_url: Option>, /// Maximum number of bytes to process #[serde(rename = "max_bytes", skip_serializing_if = "Option::is_none")] pub max_bytes: Option, @@ -30,7 +31,7 @@ pub struct NlSearchModelCreateSchema { pub temperature: Option, /// System prompt for the NL model #[serde(rename = "system_prompt", skip_serializing_if = "Option::is_none")] - pub system_prompt: Option, + pub system_prompt: Option>, /// Top-p parameter for the NL model (Google-specific) #[serde(rename = "top_p", skip_serializing_if = "Option::is_none")] pub top_p: Option, @@ -42,39 +43,39 @@ pub struct NlSearchModelCreateSchema { pub stop_sequences: Option>, /// API version for the NL model service #[serde(rename = "api_version", skip_serializing_if = "Option::is_none")] - pub api_version: Option, + pub api_version: Option>, /// Project ID for GCP Vertex AI #[serde(rename = "project_id", skip_serializing_if = "Option::is_none")] - pub project_id: Option, + pub project_id: Option>, /// Access token for GCP Vertex AI #[serde(rename = "access_token", skip_serializing_if = "Option::is_none")] - pub access_token: Option, + pub access_token: Option>, /// Refresh token for GCP Vertex AI #[serde(rename = "refresh_token", skip_serializing_if = "Option::is_none")] - pub refresh_token: Option, + pub refresh_token: Option>, /// Client ID for GCP Vertex AI #[serde(rename = "client_id", skip_serializing_if = "Option::is_none")] - pub client_id: Option, + pub client_id: Option>, /// Client secret for GCP Vertex AI #[serde(rename = "client_secret", skip_serializing_if = "Option::is_none")] - pub client_secret: Option, + pub client_secret: Option>, /// Region for GCP Vertex AI #[serde(rename = "region", skip_serializing_if = "Option::is_none")] - pub 
region: Option, + pub region: Option>, /// Maximum output tokens for GCP Vertex AI #[serde(rename = "max_output_tokens", skip_serializing_if = "Option::is_none")] pub max_output_tokens: Option, /// Account ID for Cloudflare-specific models #[serde(rename = "account_id", skip_serializing_if = "Option::is_none")] - pub account_id: Option, + pub account_id: Option>, /// Optional ID for the NL search model #[serde(rename = "id", skip_serializing_if = "Option::is_none")] - pub id: Option, + pub id: Option>, } -impl NlSearchModelCreateSchema { - pub fn new() -> NlSearchModelCreateSchema { - NlSearchModelCreateSchema { +impl<'a> NlSearchModelCreateSchema<'a> { + pub fn new() -> Self { + Self { model_name: None, api_key: None, api_url: None, diff --git a/typesense_codegen/src/models/nl_search_model_delete_schema.rs b/typesense_codegen/src/models/nl_search_model_delete_schema.rs index f31df7d..f42e5b9 100644 --- a/typesense_codegen/src/models/nl_search_model_delete_schema.rs +++ b/typesense_codegen/src/models/nl_search_model_delete_schema.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -19,7 +20,7 @@ pub struct NlSearchModelDeleteSchema { } impl NlSearchModelDeleteSchema { - pub fn new(id: String) -> NlSearchModelDeleteSchema { - NlSearchModelDeleteSchema { id } + pub fn new(id: String) -> Self { + Self { id } } } diff --git a/typesense_codegen/src/models/nl_search_model_schema.rs b/typesense_codegen/src/models/nl_search_model_schema.rs index b2066e0..37e6bee 100644 --- a/typesense_codegen/src/models/nl_search_model_schema.rs +++ b/typesense_codegen/src/models/nl_search_model_schema.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -73,8 +74,8 @@ pub struct NlSearchModelSchema { } impl NlSearchModelSchema { - pub fn new(id: 
String) -> NlSearchModelSchema { - NlSearchModelSchema { + pub fn new(id: String) -> Self { + Self { model_name: None, api_key: None, api_url: None, diff --git a/typesense_codegen/src/models/preset_delete_schema.rs b/typesense_codegen/src/models/preset_delete_schema.rs index 5203796..3c42b6c 100644 --- a/typesense_codegen/src/models/preset_delete_schema.rs +++ b/typesense_codegen/src/models/preset_delete_schema.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -18,7 +19,7 @@ pub struct PresetDeleteSchema { } impl PresetDeleteSchema { - pub fn new(name: String) -> PresetDeleteSchema { - PresetDeleteSchema { name } + pub fn new(name: String) -> Self { + Self { name } } } diff --git a/typesense_codegen/src/models/preset_schema.rs b/typesense_codegen/src/models/preset_schema.rs index c12caca..6aa6ac6 100644 --- a/typesense_codegen/src/models/preset_schema.rs +++ b/typesense_codegen/src/models/preset_schema.rs @@ -9,19 +9,20 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct PresetSchema { #[serde(rename = "value")] - pub value: Box, + pub value: Box>, #[serde(rename = "name")] pub name: String, } impl PresetSchema { - pub fn new(value: models::PresetUpsertSchemaValue, name: String) -> PresetSchema { - PresetSchema { + pub fn new(value: models::PresetUpsertSchemaValue<'static>, name: String) -> Self { + Self { value: Box::new(value), name, } diff --git a/typesense_codegen/src/models/preset_upsert_schema.rs b/typesense_codegen/src/models/preset_upsert_schema.rs index 92eff0d..94bb67c 100644 --- a/typesense_codegen/src/models/preset_upsert_schema.rs +++ b/typesense_codegen/src/models/preset_upsert_schema.rs @@ -9,17 +9,18 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, 
Debug, PartialEq, Serialize, Deserialize)] -pub struct PresetUpsertSchema { +pub struct PresetUpsertSchema<'a> { #[serde(rename = "value")] - pub value: Box, + pub value: Box>, } -impl PresetUpsertSchema { - pub fn new(value: models::PresetUpsertSchemaValue) -> PresetUpsertSchema { - PresetUpsertSchema { +impl<'a> PresetUpsertSchema<'a> { + pub fn new(value: models::PresetUpsertSchemaValue<'a>) -> Self { + Self { value: Box::new(value), } } diff --git a/typesense_codegen/src/models/preset_upsert_schema_value.rs b/typesense_codegen/src/models/preset_upsert_schema_value.rs index 43335aa..e2aa5cd 100644 --- a/typesense_codegen/src/models/preset_upsert_schema_value.rs +++ b/typesense_codegen/src/models/preset_upsert_schema_value.rs @@ -9,16 +9,17 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] #[serde(untagged)] -pub enum PresetUpsertSchemaValue { - SearchParameters(Box), - MultiSearchSearchesParameter(Box), +pub enum PresetUpsertSchemaValue<'a> { + SearchParameters(Box>), + MultiSearchSearchesParameter(Box>), } -impl Default for PresetUpsertSchemaValue { +impl Default for PresetUpsertSchemaValue<'_> { fn default() -> Self { Self::SearchParameters(Default::default()) } diff --git a/typesense_codegen/src/models/presets_retrieve_schema.rs b/typesense_codegen/src/models/presets_retrieve_schema.rs index e64ecf1..c09f177 100644 --- a/typesense_codegen/src/models/presets_retrieve_schema.rs +++ b/typesense_codegen/src/models/presets_retrieve_schema.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -18,7 +19,7 @@ pub struct PresetsRetrieveSchema { } impl PresetsRetrieveSchema { - pub fn new(presets: Vec) -> PresetsRetrieveSchema { - PresetsRetrieveSchema { presets } + pub fn new(presets: Vec) -> Self { + Self { presets } } } diff --git 
a/typesense_codegen/src/models/schema_change_status.rs b/typesense_codegen/src/models/schema_change_status.rs index 22bb410..316e3c0 100644 --- a/typesense_codegen/src/models/schema_change_status.rs +++ b/typesense_codegen/src/models/schema_change_status.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -25,8 +26,8 @@ pub struct SchemaChangeStatus { } impl SchemaChangeStatus { - pub fn new() -> SchemaChangeStatus { - SchemaChangeStatus { + pub fn new() -> Self { + Self { collection: None, validated_docs: None, altered_docs: None, diff --git a/typesense_codegen/src/models/search_grouped_hit.rs b/typesense_codegen/src/models/search_grouped_hit.rs index 434b8fc..841b83b 100644 --- a/typesense_codegen/src/models/search_grouped_hit.rs +++ b/typesense_codegen/src/models/search_grouped_hit.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -23,11 +24,8 @@ pub struct SearchGroupedHit { } impl SearchGroupedHit { - pub fn new( - group_key: Vec, - hits: Vec>, - ) -> SearchGroupedHit { - SearchGroupedHit { + pub fn new(group_key: Vec, hits: Vec>) -> Self { + Self { found: None, group_key, hits, diff --git a/typesense_codegen/src/models/search_highlight.rs b/typesense_codegen/src/models/search_highlight.rs index c45afcc..4f2cef9 100644 --- a/typesense_codegen/src/models/search_highlight.rs +++ b/typesense_codegen/src/models/search_highlight.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -35,8 +36,8 @@ pub struct SearchHighlight { } impl SearchHighlight { - pub fn new() -> SearchHighlight { - SearchHighlight { + pub fn new() -> Self { + Self { field: None, snippet: None, snippets: None, diff --git 
a/typesense_codegen/src/models/search_override.rs b/typesense_codegen/src/models/search_override.rs deleted file mode 100644 index b1f8353..0000000 --- a/typesense_codegen/src/models/search_override.rs +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Typesense API - * - * An open source search engine for building delightful search experiences. - * - * The version of the OpenAPI document: 30.0 - * - * Generated by: https://openapi-generator.tech - */ - -use crate::models; -use serde::{Deserialize, Serialize}; - -#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct SearchOverride { - #[serde(rename = "rule")] - pub rule: Box, - /// List of document `id`s that should be included in the search results with their corresponding `position`s. - #[serde(rename = "includes", skip_serializing_if = "Option::is_none")] - pub includes: Option>, - /// List of document `id`s that should be excluded from the search results. - #[serde(rename = "excludes", skip_serializing_if = "Option::is_none")] - pub excludes: Option>, - /// A filter by clause that is applied to any search query that matches the override rule. - #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] - pub filter_by: Option, - /// Indicates whether search query tokens that exist in the override's rule should be removed from the search query. - #[serde( - rename = "remove_matched_tokens", - skip_serializing_if = "Option::is_none" - )] - pub remove_matched_tokens: Option, - /// Return a custom JSON object in the Search API response, when this rule is triggered. This can can be used to display a pre-defined message (eg: a promotion banner) on the front-end when a particular rule is triggered. - #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] - pub metadata: Option, - /// A sort by clause that is applied to any search query that matches the override rule. 
- #[serde(rename = "sort_by", skip_serializing_if = "Option::is_none")] - pub sort_by: Option, - /// Replaces the current search query with this value, when the search query matches the override rule. - #[serde(rename = "replace_query", skip_serializing_if = "Option::is_none")] - pub replace_query: Option, - /// When set to true, the filter conditions of the query is applied to the curated records as well. Default: false. - #[serde( - rename = "filter_curated_hits", - skip_serializing_if = "Option::is_none" - )] - pub filter_curated_hits: Option, - /// A Unix timestamp that indicates the date/time from which the override will be active. You can use this to create override rules that start applying from a future point in time. - #[serde(rename = "effective_from_ts", skip_serializing_if = "Option::is_none")] - pub effective_from_ts: Option, - /// A Unix timestamp that indicates the date/time until which the override will be active. You can use this to create override rules that stop applying after a period of time. - #[serde(rename = "effective_to_ts", skip_serializing_if = "Option::is_none")] - pub effective_to_ts: Option, - /// When set to true, override processing will stop at the first matching rule. When set to false override processing will continue and multiple override actions will be triggered in sequence. Overrides are processed in the lexical sort order of their id field. Default: true. 
- #[serde(rename = "stop_processing", skip_serializing_if = "Option::is_none")] - pub stop_processing: Option, - #[serde(rename = "id")] - pub id: String, -} - -impl SearchOverride { - pub fn new(rule: models::SearchOverrideRule, id: String) -> SearchOverride { - SearchOverride { - rule: Box::new(rule), - includes: None, - excludes: None, - filter_by: None, - remove_matched_tokens: None, - metadata: None, - sort_by: None, - replace_query: None, - filter_curated_hits: None, - effective_from_ts: None, - effective_to_ts: None, - stop_processing: None, - id, - } - } -} diff --git a/typesense_codegen/src/models/search_override_delete_response.rs b/typesense_codegen/src/models/search_override_delete_response.rs deleted file mode 100644 index 3949320..0000000 --- a/typesense_codegen/src/models/search_override_delete_response.rs +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Typesense API - * - * An open source search engine for building delightful search experiences. - * - * The version of the OpenAPI document: 30.0 - * - * Generated by: https://openapi-generator.tech - */ - -use crate::models; -use serde::{Deserialize, Serialize}; - -#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct SearchOverrideDeleteResponse { - /// The id of the override that was deleted - #[serde(rename = "id")] - pub id: String, -} - -impl SearchOverrideDeleteResponse { - pub fn new(id: String) -> SearchOverrideDeleteResponse { - SearchOverrideDeleteResponse { id } - } -} diff --git a/typesense_codegen/src/models/search_override_exclude.rs b/typesense_codegen/src/models/search_override_exclude.rs deleted file mode 100644 index 58b01b9..0000000 --- a/typesense_codegen/src/models/search_override_exclude.rs +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Typesense API - * - * An open source search engine for building delightful search experiences. 
- * - * The version of the OpenAPI document: 30.0 - * - * Generated by: https://openapi-generator.tech - */ - -use crate::models; -use serde::{Deserialize, Serialize}; - -#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct SearchOverrideExclude { - /// document id that should be excluded from the search results. - #[serde(rename = "id")] - pub id: String, -} - -impl SearchOverrideExclude { - pub fn new(id: String) -> SearchOverrideExclude { - SearchOverrideExclude { id } - } -} diff --git a/typesense_codegen/src/models/search_override_include.rs b/typesense_codegen/src/models/search_override_include.rs deleted file mode 100644 index 7756358..0000000 --- a/typesense_codegen/src/models/search_override_include.rs +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Typesense API - * - * An open source search engine for building delightful search experiences. - * - * The version of the OpenAPI document: 30.0 - * - * Generated by: https://openapi-generator.tech - */ - -use crate::models; -use serde::{Deserialize, Serialize}; - -#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct SearchOverrideInclude { - /// document id that should be included - #[serde(rename = "id")] - pub id: String, - /// position number where document should be included in the search results - #[serde(rename = "position")] - pub position: i32, -} - -impl SearchOverrideInclude { - pub fn new(id: String, position: i32) -> SearchOverrideInclude { - SearchOverrideInclude { id, position } - } -} diff --git a/typesense_codegen/src/models/search_override_rule.rs b/typesense_codegen/src/models/search_override_rule.rs deleted file mode 100644 index de9ebf5..0000000 --- a/typesense_codegen/src/models/search_override_rule.rs +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Typesense API - * - * An open source search engine for building delightful search experiences. 
- * - * The version of the OpenAPI document: 30.0 - * - * Generated by: https://openapi-generator.tech - */ - -use crate::models; -use serde::{Deserialize, Serialize}; - -#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct SearchOverrideRule { - /// List of tag values to associate with this override rule. - #[serde(rename = "tags", skip_serializing_if = "Option::is_none")] - pub tags: Option>, - /// Indicates what search queries should be overridden - #[serde(rename = "query", skip_serializing_if = "Option::is_none")] - pub query: Option, - /// Indicates whether the match on the query term should be `exact` or `contains`. If we want to match all queries that contained the word `apple`, we will use the `contains` match instead. - #[serde(rename = "match", skip_serializing_if = "Option::is_none")] - pub r#match: Option, - /// Indicates that the override should apply when the filter_by parameter in a search query exactly matches the string specified here (including backticks, spaces, brackets, etc). - #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] - pub filter_by: Option, -} - -impl SearchOverrideRule { - pub fn new() -> SearchOverrideRule { - SearchOverrideRule { - tags: None, - query: None, - r#match: None, - filter_by: None, - } - } -} -/// Indicates whether the match on the query term should be `exact` or `contains`. If we want to match all queries that contained the word `apple`, we will use the `contains` match instead. 
-#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] -pub enum Match { - #[serde(rename = "exact")] - Exact, - #[serde(rename = "contains")] - Contains, -} - -impl Default for Match { - fn default() -> Match { - Self::Exact - } -} diff --git a/typesense_codegen/src/models/search_override_schema.rs b/typesense_codegen/src/models/search_override_schema.rs deleted file mode 100644 index 1b0eede..0000000 --- a/typesense_codegen/src/models/search_override_schema.rs +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Typesense API - * - * An open source search engine for building delightful search experiences. - * - * The version of the OpenAPI document: 30.0 - * - * Generated by: https://openapi-generator.tech - */ - -use crate::models; -use serde::{Deserialize, Serialize}; - -#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct SearchOverrideSchema { - #[serde(rename = "rule")] - pub rule: Box, - /// List of document `id`s that should be included in the search results with their corresponding `position`s. - #[serde(rename = "includes", skip_serializing_if = "Option::is_none")] - pub includes: Option>, - /// List of document `id`s that should be excluded from the search results. - #[serde(rename = "excludes", skip_serializing_if = "Option::is_none")] - pub excludes: Option>, - /// A filter by clause that is applied to any search query that matches the override rule. - #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] - pub filter_by: Option, - /// Indicates whether search query tokens that exist in the override's rule should be removed from the search query. - #[serde( - rename = "remove_matched_tokens", - skip_serializing_if = "Option::is_none" - )] - pub remove_matched_tokens: Option, - /// Return a custom JSON object in the Search API response, when this rule is triggered. 
This can can be used to display a pre-defined message (eg: a promotion banner) on the front-end when a particular rule is triggered. - #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] - pub metadata: Option, - /// A sort by clause that is applied to any search query that matches the override rule. - #[serde(rename = "sort_by", skip_serializing_if = "Option::is_none")] - pub sort_by: Option, - /// Replaces the current search query with this value, when the search query matches the override rule. - #[serde(rename = "replace_query", skip_serializing_if = "Option::is_none")] - pub replace_query: Option, - /// When set to true, the filter conditions of the query is applied to the curated records as well. Default: false. - #[serde( - rename = "filter_curated_hits", - skip_serializing_if = "Option::is_none" - )] - pub filter_curated_hits: Option, - /// A Unix timestamp that indicates the date/time from which the override will be active. You can use this to create override rules that start applying from a future point in time. - #[serde(rename = "effective_from_ts", skip_serializing_if = "Option::is_none")] - pub effective_from_ts: Option, - /// A Unix timestamp that indicates the date/time until which the override will be active. You can use this to create override rules that stop applying after a period of time. - #[serde(rename = "effective_to_ts", skip_serializing_if = "Option::is_none")] - pub effective_to_ts: Option, - /// When set to true, override processing will stop at the first matching rule. When set to false override processing will continue and multiple override actions will be triggered in sequence. Overrides are processed in the lexical sort order of their id field. Default: true. 
- #[serde(rename = "stop_processing", skip_serializing_if = "Option::is_none")] - pub stop_processing: Option, -} - -impl SearchOverrideSchema { - pub fn new(rule: models::SearchOverrideRule) -> SearchOverrideSchema { - SearchOverrideSchema { - rule: Box::new(rule), - includes: None, - excludes: None, - filter_by: None, - remove_matched_tokens: None, - metadata: None, - sort_by: None, - replace_query: None, - filter_curated_hits: None, - effective_from_ts: None, - effective_to_ts: None, - stop_processing: None, - } - } -} diff --git a/typesense_codegen/src/models/search_overrides_response.rs b/typesense_codegen/src/models/search_overrides_response.rs deleted file mode 100644 index fd8aba3..0000000 --- a/typesense_codegen/src/models/search_overrides_response.rs +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Typesense API - * - * An open source search engine for building delightful search experiences. - * - * The version of the OpenAPI document: 30.0 - * - * Generated by: https://openapi-generator.tech - */ - -use crate::models; -use serde::{Deserialize, Serialize}; - -#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct SearchOverridesResponse { - #[serde(rename = "overrides")] - pub overrides: Vec, -} - -impl SearchOverridesResponse { - pub fn new(overrides: Vec) -> SearchOverridesResponse { - SearchOverridesResponse { overrides } - } -} diff --git a/typesense_codegen/src/models/search_parameters.rs b/typesense_codegen/src/models/search_parameters.rs index 860beff..59e82c3 100644 --- a/typesense_codegen/src/models/search_parameters.rs +++ b/typesense_codegen/src/models/search_parameters.rs @@ -9,36 +9,38 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(bon::Builder)] +#[builder(on(Cow<'_, str>, into))] #[builder(on(String, into))] #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct SearchParameters { +pub struct SearchParameters<'a> { /// The query text to search for in 
the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. #[serde(rename = "q", skip_serializing_if = "Option::is_none")] - pub q: Option, + pub q: Option>, /// A list of `string` fields that should be queried against. Multiple fields are separated with a comma. #[serde(rename = "query_by", skip_serializing_if = "Option::is_none")] - pub query_by: Option, + pub query_by: Option>, /// Whether to use natural language processing to parse the query. #[serde(rename = "nl_query", skip_serializing_if = "Option::is_none")] pub nl_query: Option, /// The ID of the natural language model to use. #[serde(rename = "nl_model_id", skip_serializing_if = "Option::is_none")] - pub nl_model_id: Option, + pub nl_model_id: Option>, /// The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. #[serde(rename = "query_by_weights", skip_serializing_if = "Option::is_none")] - pub query_by_weights: Option, + pub query_by_weights: Option>, /// In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. #[serde(rename = "text_match_type", skip_serializing_if = "Option::is_none")] - pub text_match_type: Option, + pub text_match_type: Option>, /// Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. #[serde(rename = "prefix", skip_serializing_if = "Option::is_none")] - pub prefix: Option, + pub prefix: Option>, /// If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. 
This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results #[serde(rename = "infix", skip_serializing_if = "Option::is_none")] - pub infix: Option, + pub infix: Option>, /// There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. #[serde(rename = "max_extra_prefix", skip_serializing_if = "Option::is_none")] pub max_extra_prefix: Option, @@ -47,7 +49,7 @@ pub struct SearchParameters { pub max_extra_suffix: Option, /// Filter conditions for refining your open api validator search results. Separate multiple conditions with &&. #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] - pub filter_by: Option, + pub filter_by: Option>, /// Controls the number of similar words that Typesense considers during fuzzy search on filter_by values. Useful for controlling prefix matches like company_name:Acm*. #[serde( rename = "max_filter_by_candidates", @@ -56,19 +58,19 @@ pub struct SearchParameters { pub max_filter_by_candidates: Option, /// A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` #[serde(rename = "sort_by", skip_serializing_if = "Option::is_none")] - pub sort_by: Option, + pub sort_by: Option>, /// A list of fields that will be used for faceting your results on. 
Separate multiple fields with a comma. #[serde(rename = "facet_by", skip_serializing_if = "Option::is_none")] - pub facet_by: Option, + pub facet_by: Option>, /// Maximum number of facet values to be returned. #[serde(rename = "max_facet_values", skip_serializing_if = "Option::is_none")] pub max_facet_values: Option, /// Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". #[serde(rename = "facet_query", skip_serializing_if = "Option::is_none")] - pub facet_query: Option, + pub facet_query: Option>, /// The number of typographical errors (1 or 2) that would be tolerated. Default: 2 #[serde(rename = "num_typos", skip_serializing_if = "Option::is_none")] - pub num_typos: Option, + pub num_typos: Option>, /// Results from this specific page number would be fetched. #[serde(rename = "page", skip_serializing_if = "Option::is_none")] pub page: Option, @@ -83,7 +85,7 @@ pub struct SearchParameters { pub offset: Option, /// You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. #[serde(rename = "group_by", skip_serializing_if = "Option::is_none")] - pub group_by: Option, + pub group_by: Option>, /// Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. 
Default: 3 #[serde(rename = "group_limit", skip_serializing_if = "Option::is_none")] pub group_limit: Option, @@ -95,16 +97,16 @@ pub struct SearchParameters { pub group_missing_values: Option, /// List of fields from the document to include in the search result #[serde(rename = "include_fields", skip_serializing_if = "Option::is_none")] - pub include_fields: Option, + pub include_fields: Option>, /// List of fields from the document to exclude in the search result #[serde(rename = "exclude_fields", skip_serializing_if = "Option::is_none")] - pub exclude_fields: Option, + pub exclude_fields: Option>, /// List of fields which should be highlighted fully without snippeting #[serde( rename = "highlight_full_fields", skip_serializing_if = "Option::is_none" )] - pub highlight_full_fields: Option, + pub highlight_full_fields: Option>, /// The number of tokens that should surround the highlighted text on each side. Default: 4 #[serde( rename = "highlight_affix_num_tokens", @@ -116,10 +118,10 @@ pub struct SearchParameters { rename = "highlight_start_tag", skip_serializing_if = "Option::is_none" )] - pub highlight_start_tag: Option, + pub highlight_start_tag: Option>, /// The end tag used for the highlighted snippets. Default: `` #[serde(rename = "highlight_end_tag", skip_serializing_if = "Option::is_none")] - pub highlight_end_tag: Option, + pub highlight_end_tag: Option>, /// Flag for enabling/disabling the deprecated, old highlight structure in the response. Default: true #[serde( rename = "enable_highlight_v1", @@ -134,7 +136,7 @@ pub struct SearchParameters { pub snippet_threshold: Option, /// List of synonym set names to associate with this search query #[serde(rename = "synonym_sets", skip_serializing_if = "Option::is_none")] - pub synonym_sets: Option, + pub synonym_sets: Option>, /// If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. 
Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 #[serde( rename = "drop_tokens_threshold", @@ -172,19 +174,19 @@ pub struct SearchParameters { pub synonym_num_typos: Option, /// A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. #[serde(rename = "pinned_hits", skip_serializing_if = "Option::is_none")] - pub pinned_hits: Option, + pub pinned_hits: Option>, /// A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. #[serde(rename = "hidden_hits", skip_serializing_if = "Option::is_none")] - pub hidden_hits: Option, + pub hidden_hits: Option>, /// Comma separated list of tags to trigger the curations rules that match the tags. #[serde(rename = "override_tags", skip_serializing_if = "Option::is_none")] - pub override_tags: Option, + pub override_tags: Option>, /// A list of custom fields that must be highlighted even if you don't query for them #[serde(rename = "highlight_fields", skip_serializing_if = "Option::is_none")] - pub highlight_fields: Option, + pub highlight_fields: Option>, /// Treat space as typo: search for q=basket ball if q=basketball is not found or vice-versa. Splitting/joining of tokens will only be attempted if the original query produces no results. 
To always trigger this behavior, set value to `always``. To disable, set value to `off`. Default is `fallback`. #[serde(rename = "split_join_tokens", skip_serializing_if = "Option::is_none")] - pub split_join_tokens: Option, + pub split_join_tokens: Option>, /// You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same #[serde( rename = "pre_segmented_query", @@ -193,7 +195,7 @@ pub struct SearchParameters { pub pre_segmented_query: Option, /// Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. #[serde(rename = "preset", skip_serializing_if = "Option::is_none")] - pub preset: Option, + pub preset: Option>, /// If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false #[serde(rename = "enable_overrides", skip_serializing_if = "Option::is_none")] pub enable_overrides: Option, @@ -244,7 +246,7 @@ pub struct SearchParameters { pub min_len_2typo: Option, /// Vector query expression for fetching documents \"closest\" to a given query/document vector. #[serde(rename = "vector_query", skip_serializing_if = "Option::is_none")] - pub vector_query: Option, + pub vector_query: Option>, /// Timeout (in milliseconds) for fetching remote embeddings. #[serde( rename = "remote_embedding_timeout_ms", @@ -259,19 +261,19 @@ pub struct SearchParameters { pub remote_embedding_num_tries: Option, /// Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). #[serde(rename = "facet_strategy", skip_serializing_if = "Option::is_none")] - pub facet_strategy: Option, + pub facet_strategy: Option>, /// Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. 
#[serde(rename = "stopwords", skip_serializing_if = "Option::is_none")] - pub stopwords: Option, + pub stopwords: Option>, /// Comma separated string of nested facet fields whose parent object should be returned in facet response. #[serde( rename = "facet_return_parent", skip_serializing_if = "Option::is_none" )] - pub facet_return_parent: Option, + pub facet_return_parent: Option>, /// The base64 encoded audio file in 16 khz 16-bit WAV format. #[serde(rename = "voice_query", skip_serializing_if = "Option::is_none")] - pub voice_query: Option, + pub voice_query: Option>, /// Enable conversational search. #[serde(rename = "conversation", skip_serializing_if = "Option::is_none")] pub conversation: Option, @@ -280,15 +282,15 @@ pub struct SearchParameters { rename = "conversation_model_id", skip_serializing_if = "Option::is_none" )] - pub conversation_model_id: Option, + pub conversation_model_id: Option>, /// The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. 
#[serde(rename = "conversation_id", skip_serializing_if = "Option::is_none")] - pub conversation_id: Option, + pub conversation_id: Option>, } -impl SearchParameters { - pub fn new() -> SearchParameters { - SearchParameters { +impl<'a> SearchParameters<'a> { + pub fn new() -> Self { + Self { q: None, query_by: None, nl_query: None, diff --git a/typesense_codegen/src/models/search_request_params.rs b/typesense_codegen/src/models/search_request_params.rs index 7e67048..4fa0b55 100644 --- a/typesense_codegen/src/models/search_request_params.rs +++ b/typesense_codegen/src/models/search_request_params.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -24,8 +25,8 @@ pub struct SearchRequestParams { } impl SearchRequestParams { - pub fn new(collection_name: String, q: String, per_page: i32) -> SearchRequestParams { - SearchRequestParams { + pub fn new(collection_name: String, q: String, per_page: i32) -> Self { + Self { collection_name, q, per_page, diff --git a/typesense_codegen/src/models/search_request_params_voice_query.rs b/typesense_codegen/src/models/search_request_params_voice_query.rs index 97e78ef..fdbd32c 100644 --- a/typesense_codegen/src/models/search_request_params_voice_query.rs +++ b/typesense_codegen/src/models/search_request_params_voice_query.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -18,8 +19,8 @@ pub struct SearchRequestParamsVoiceQuery { } impl SearchRequestParamsVoiceQuery { - pub fn new() -> SearchRequestParamsVoiceQuery { - SearchRequestParamsVoiceQuery { + pub fn new() -> Self { + Self { transcribed_query: None, } } diff --git a/typesense_codegen/src/models/search_result.rs b/typesense_codegen/src/models/search_result.rs index 1805210..c28ca02 100644 --- 
a/typesense_codegen/src/models/search_result.rs +++ b/typesense_codegen/src/models/search_result.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -53,8 +54,8 @@ pub struct SearchResult { } impl SearchResult { - pub fn new() -> SearchResult { - SearchResult { + pub fn new() -> Self { + Self { facet_counts: None, found: None, found_docs: None, diff --git a/typesense_codegen/src/models/search_result_conversation.rs b/typesense_codegen/src/models/search_result_conversation.rs index 380be44..0de0d6d 100644 --- a/typesense_codegen/src/models/search_result_conversation.rs +++ b/typesense_codegen/src/models/search_result_conversation.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -29,8 +30,8 @@ impl SearchResultConversation { conversation_history: Vec, conversation_id: String, query: String, - ) -> SearchResultConversation { - SearchResultConversation { + ) -> Self { + Self { answer, conversation_history, conversation_id, diff --git a/typesense_codegen/src/models/search_result_hit.rs b/typesense_codegen/src/models/search_result_hit.rs index abe92e4..1cffcd4 100644 --- a/typesense_codegen/src/models/search_result_hit.rs +++ b/typesense_codegen/src/models/search_result_hit.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -43,8 +44,8 @@ pub struct SearchResultHit { } impl SearchResultHit { - pub fn new() -> SearchResultHit { - SearchResultHit { + pub fn new() -> Self { + Self { highlights: None, highlight: None, document: None, diff --git a/typesense_codegen/src/models/search_result_hit_hybrid_search_info.rs b/typesense_codegen/src/models/search_result_hit_hybrid_search_info.rs index ed0bea0..15752b7 
100644 --- a/typesense_codegen/src/models/search_result_hit_hybrid_search_info.rs +++ b/typesense_codegen/src/models/search_result_hit_hybrid_search_info.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; /// SearchResultHitHybridSearchInfo : Information about hybrid search scoring @@ -21,8 +22,8 @@ pub struct SearchResultHitHybridSearchInfo { impl SearchResultHitHybridSearchInfo { /// Information about hybrid search scoring - pub fn new() -> SearchResultHitHybridSearchInfo { - SearchResultHitHybridSearchInfo { + pub fn new() -> Self { + Self { rank_fusion_score: None, } } diff --git a/typesense_codegen/src/models/search_result_hit_text_match_info.rs b/typesense_codegen/src/models/search_result_hit_text_match_info.rs index e77c40f..3a72de2 100644 --- a/typesense_codegen/src/models/search_result_hit_text_match_info.rs +++ b/typesense_codegen/src/models/search_result_hit_text_match_info.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -30,8 +31,8 @@ pub struct SearchResultHitTextMatchInfo { } impl SearchResultHitTextMatchInfo { - pub fn new() -> SearchResultHitTextMatchInfo { - SearchResultHitTextMatchInfo { + pub fn new() -> Self { + Self { best_field_score: None, best_field_weight: None, fields_matched: None, diff --git a/typesense_codegen/src/models/search_synonym.rs b/typesense_codegen/src/models/search_synonym.rs index 3d81eb7..1e91a15 100644 --- a/typesense_codegen/src/models/search_synonym.rs +++ b/typesense_codegen/src/models/search_synonym.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -30,8 +31,8 @@ pub struct SearchSynonym { } impl SearchSynonym { - pub fn new(synonyms: Vec, id: String) -> SearchSynonym { - SearchSynonym { + pub fn new(synonyms: Vec, id: 
String) -> Self { + Self { root: None, synonyms, locale: None, diff --git a/typesense_codegen/src/models/search_synonym_delete_response.rs b/typesense_codegen/src/models/search_synonym_delete_response.rs index c69e47b..7a9db0a 100644 --- a/typesense_codegen/src/models/search_synonym_delete_response.rs +++ b/typesense_codegen/src/models/search_synonym_delete_response.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -19,7 +20,7 @@ pub struct SearchSynonymDeleteResponse { } impl SearchSynonymDeleteResponse { - pub fn new(id: String) -> SearchSynonymDeleteResponse { - SearchSynonymDeleteResponse { id } + pub fn new(id: String) -> Self { + Self { id } } } diff --git a/typesense_codegen/src/models/search_synonym_schema.rs b/typesense_codegen/src/models/search_synonym_schema.rs index 192829f..95c8580 100644 --- a/typesense_codegen/src/models/search_synonym_schema.rs +++ b/typesense_codegen/src/models/search_synonym_schema.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -28,8 +29,8 @@ pub struct SearchSynonymSchema { } impl SearchSynonymSchema { - pub fn new(synonyms: Vec) -> SearchSynonymSchema { - SearchSynonymSchema { + pub fn new(synonyms: Vec) -> Self { + Self { root: None, synonyms, locale: None, diff --git a/typesense_codegen/src/models/search_synonyms_response.rs b/typesense_codegen/src/models/search_synonyms_response.rs index 40434ca..cc877a3 100644 --- a/typesense_codegen/src/models/search_synonyms_response.rs +++ b/typesense_codegen/src/models/search_synonyms_response.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -18,7 +19,7 @@ pub struct SearchSynonymsResponse { } impl 
SearchSynonymsResponse { - pub fn new(synonyms: Vec) -> SearchSynonymsResponse { - SearchSynonymsResponse { synonyms } + pub fn new(synonyms: Vec) -> Self { + Self { synonyms } } } diff --git a/typesense_codegen/src/models/stemming_dictionary.rs b/typesense_codegen/src/models/stemming_dictionary.rs index e90942c..abde86c 100644 --- a/typesense_codegen/src/models/stemming_dictionary.rs +++ b/typesense_codegen/src/models/stemming_dictionary.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -22,7 +23,7 @@ pub struct StemmingDictionary { } impl StemmingDictionary { - pub fn new(id: String, words: Vec) -> StemmingDictionary { - StemmingDictionary { id, words } + pub fn new(id: String, words: Vec) -> Self { + Self { id, words } } } diff --git a/typesense_codegen/src/models/stemming_dictionary_words_inner.rs b/typesense_codegen/src/models/stemming_dictionary_words_inner.rs index 30cbd40..b0fb605 100644 --- a/typesense_codegen/src/models/stemming_dictionary_words_inner.rs +++ b/typesense_codegen/src/models/stemming_dictionary_words_inner.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -22,7 +23,7 @@ pub struct StemmingDictionaryWordsInner { } impl StemmingDictionaryWordsInner { - pub fn new(word: String, root: String) -> StemmingDictionaryWordsInner { - StemmingDictionaryWordsInner { word, root } + pub fn new(word: String, root: String) -> Self { + Self { word, root } } } diff --git a/typesense_codegen/src/models/stopwords_set_retrieve_schema.rs b/typesense_codegen/src/models/stopwords_set_retrieve_schema.rs index b1b6913..cb1defc 100644 --- a/typesense_codegen/src/models/stopwords_set_retrieve_schema.rs +++ b/typesense_codegen/src/models/stopwords_set_retrieve_schema.rs @@ -9,6 +9,7 @@ */ use crate::models; +use 
::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -18,8 +19,8 @@ pub struct StopwordsSetRetrieveSchema { } impl StopwordsSetRetrieveSchema { - pub fn new(stopwords: models::StopwordsSetSchema) -> StopwordsSetRetrieveSchema { - StopwordsSetRetrieveSchema { + pub fn new(stopwords: models::StopwordsSetSchema) -> Self { + Self { stopwords: Box::new(stopwords), } } diff --git a/typesense_codegen/src/models/stopwords_set_schema.rs b/typesense_codegen/src/models/stopwords_set_schema.rs index 8f13dee..dc7b25d 100644 --- a/typesense_codegen/src/models/stopwords_set_schema.rs +++ b/typesense_codegen/src/models/stopwords_set_schema.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -22,8 +23,8 @@ pub struct StopwordsSetSchema { } impl StopwordsSetSchema { - pub fn new(id: String, stopwords: Vec) -> StopwordsSetSchema { - StopwordsSetSchema { + pub fn new(id: String, stopwords: Vec) -> Self { + Self { id, stopwords, locale: None, diff --git a/typesense_codegen/src/models/stopwords_set_upsert_schema.rs b/typesense_codegen/src/models/stopwords_set_upsert_schema.rs index 1facab5..9e58790 100644 --- a/typesense_codegen/src/models/stopwords_set_upsert_schema.rs +++ b/typesense_codegen/src/models/stopwords_set_upsert_schema.rs @@ -9,19 +9,20 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct StopwordsSetUpsertSchema { +pub struct StopwordsSetUpsertSchema<'a> { #[serde(rename = "stopwords")] pub stopwords: Vec, #[serde(rename = "locale", skip_serializing_if = "Option::is_none")] - pub locale: Option, + pub locale: Option>, } -impl StopwordsSetUpsertSchema { - pub fn new(stopwords: Vec) -> StopwordsSetUpsertSchema { - StopwordsSetUpsertSchema { +impl<'a> 
StopwordsSetUpsertSchema<'a> { + pub fn new(stopwords: Vec) -> Self { + Self { stopwords, locale: None, } diff --git a/typesense_codegen/src/models/stopwords_sets_retrieve_all_schema.rs b/typesense_codegen/src/models/stopwords_sets_retrieve_all_schema.rs index 84703ac..9feaf43 100644 --- a/typesense_codegen/src/models/stopwords_sets_retrieve_all_schema.rs +++ b/typesense_codegen/src/models/stopwords_sets_retrieve_all_schema.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -18,7 +19,7 @@ pub struct StopwordsSetsRetrieveAllSchema { } impl StopwordsSetsRetrieveAllSchema { - pub fn new(stopwords: Vec) -> StopwordsSetsRetrieveAllSchema { - StopwordsSetsRetrieveAllSchema { stopwords } + pub fn new(stopwords: Vec) -> Self { + Self { stopwords } } } diff --git a/typesense_codegen/src/models/success_status.rs b/typesense_codegen/src/models/success_status.rs index c88ecb9..1472e0e 100644 --- a/typesense_codegen/src/models/success_status.rs +++ b/typesense_codegen/src/models/success_status.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -18,7 +19,7 @@ pub struct SuccessStatus { } impl SuccessStatus { - pub fn new(success: bool) -> SuccessStatus { - SuccessStatus { success } + pub fn new(success: bool) -> Self { + Self { success } } } diff --git a/typesense_codegen/src/models/synonym_item_delete_schema.rs b/typesense_codegen/src/models/synonym_item_delete_schema.rs index 91c3e1d..70f534a 100644 --- a/typesense_codegen/src/models/synonym_item_delete_schema.rs +++ b/typesense_codegen/src/models/synonym_item_delete_schema.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -19,7 +20,7 @@ pub struct 
SynonymItemDeleteSchema { } impl SynonymItemDeleteSchema { - pub fn new(id: String) -> SynonymItemDeleteSchema { - SynonymItemDeleteSchema { id } + pub fn new(id: String) -> Self { + Self { id } } } diff --git a/typesense_codegen/src/models/synonym_item_schema.rs b/typesense_codegen/src/models/synonym_item_schema.rs index bb03da3..e09ac8b 100644 --- a/typesense_codegen/src/models/synonym_item_schema.rs +++ b/typesense_codegen/src/models/synonym_item_schema.rs @@ -9,30 +9,31 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct SynonymItemSchema { +pub struct SynonymItemSchema<'a> { /// Unique identifier for the synonym item #[serde(rename = "id")] - pub id: String, + pub id: Cow<'a, str>, /// Array of words that should be considered as synonyms #[serde(rename = "synonyms")] pub synonyms: Vec, /// For 1-way synonyms, indicates the root word that words in the synonyms parameter map to #[serde(rename = "root", skip_serializing_if = "Option::is_none")] - pub root: Option, + pub root: Option>, /// Locale for the synonym, leave blank to use the standard tokenizer #[serde(rename = "locale", skip_serializing_if = "Option::is_none")] - pub locale: Option, + pub locale: Option>, /// By default, special characters are dropped from synonyms. 
Use this attribute to specify which special characters should be indexed as is #[serde(rename = "symbols_to_index", skip_serializing_if = "Option::is_none")] pub symbols_to_index: Option>, } -impl SynonymItemSchema { - pub fn new(id: String, synonyms: Vec) -> SynonymItemSchema { - SynonymItemSchema { +impl<'a> SynonymItemSchema<'a> { + pub fn new(id: Cow<'a, str>, synonyms: Vec) -> Self { + Self { id, synonyms, root: None, diff --git a/typesense_codegen/src/models/synonym_set_create_schema.rs b/typesense_codegen/src/models/synonym_set_create_schema.rs index 818ae98..7a2b537 100644 --- a/typesense_codegen/src/models/synonym_set_create_schema.rs +++ b/typesense_codegen/src/models/synonym_set_create_schema.rs @@ -9,17 +9,18 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct SynonymSetCreateSchema { +pub struct SynonymSetCreateSchema<'a> { /// Array of synonym items #[serde(rename = "items")] - pub items: Vec, + pub items: Vec>, } -impl SynonymSetCreateSchema { - pub fn new(items: Vec) -> SynonymSetCreateSchema { - SynonymSetCreateSchema { items } +impl<'a> SynonymSetCreateSchema<'a> { + pub fn new(items: Vec>) -> Self { + Self { items } } } diff --git a/typesense_codegen/src/models/synonym_set_delete_schema.rs b/typesense_codegen/src/models/synonym_set_delete_schema.rs index e127040..dc234d2 100644 --- a/typesense_codegen/src/models/synonym_set_delete_schema.rs +++ b/typesense_codegen/src/models/synonym_set_delete_schema.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -19,7 +20,7 @@ pub struct SynonymSetDeleteSchema { } impl SynonymSetDeleteSchema { - pub fn new(name: String) -> SynonymSetDeleteSchema { - SynonymSetDeleteSchema { name } + pub fn new(name: String) -> Self { + Self { name } } } diff --git 
a/typesense_codegen/src/models/synonym_set_schema.rs b/typesense_codegen/src/models/synonym_set_schema.rs index f9e171c..b7616d7 100644 --- a/typesense_codegen/src/models/synonym_set_schema.rs +++ b/typesense_codegen/src/models/synonym_set_schema.rs @@ -9,20 +9,21 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SynonymSetSchema { /// Array of synonym items #[serde(rename = "items")] - pub items: Vec, + pub items: Vec>, /// Name of the synonym set #[serde(rename = "name")] pub name: String, } impl SynonymSetSchema { - pub fn new(items: Vec, name: String) -> SynonymSetSchema { - SynonymSetSchema { items, name } + pub fn new(items: Vec>, name: String) -> Self { + Self { items, name } } } diff --git a/typesense_codegen/src/models/synonym_sets_retrieve_schema.rs b/typesense_codegen/src/models/synonym_sets_retrieve_schema.rs index 612c1a5..1ca6e04 100644 --- a/typesense_codegen/src/models/synonym_sets_retrieve_schema.rs +++ b/typesense_codegen/src/models/synonym_sets_retrieve_schema.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -19,7 +20,7 @@ pub struct SynonymSetsRetrieveSchema { } impl SynonymSetsRetrieveSchema { - pub fn new(synonym_sets: Vec) -> SynonymSetsRetrieveSchema { - SynonymSetsRetrieveSchema { synonym_sets } + pub fn new(synonym_sets: Vec) -> Self { + Self { synonym_sets } } } diff --git a/typesense_codegen/src/models/toggle_slow_request_log_request.rs b/typesense_codegen/src/models/toggle_slow_request_log_request.rs index 2957298..1507ee4 100644 --- a/typesense_codegen/src/models/toggle_slow_request_log_request.rs +++ b/typesense_codegen/src/models/toggle_slow_request_log_request.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, 
PartialEq, Serialize, Deserialize)] @@ -18,8 +19,8 @@ pub struct ToggleSlowRequestLogRequest { } impl ToggleSlowRequestLogRequest { - pub fn new(log_slow_requests_time_ms: i32) -> ToggleSlowRequestLogRequest { - ToggleSlowRequestLogRequest { + pub fn new(log_slow_requests_time_ms: i32) -> Self { + Self { log_slow_requests_time_ms, } } diff --git a/typesense_codegen/src/models/update_documents_200_response.rs b/typesense_codegen/src/models/update_documents_200_response.rs index 88cc7b4..0adeb08 100644 --- a/typesense_codegen/src/models/update_documents_200_response.rs +++ b/typesense_codegen/src/models/update_documents_200_response.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] @@ -19,7 +20,7 @@ pub struct UpdateDocuments200Response { } impl UpdateDocuments200Response { - pub fn new(num_updated: i32) -> UpdateDocuments200Response { - UpdateDocuments200Response { num_updated } + pub fn new(num_updated: i32) -> Self { + Self { num_updated } } } diff --git a/typesense_codegen/src/models/update_documents_parameters.rs b/typesense_codegen/src/models/update_documents_parameters.rs index cf49cbb..f7f7b90 100644 --- a/typesense_codegen/src/models/update_documents_parameters.rs +++ b/typesense_codegen/src/models/update_documents_parameters.rs @@ -9,16 +9,17 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct UpdateDocumentsParameters { +pub struct UpdateDocumentsParameters<'a> { #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] - pub filter_by: Option, + pub filter_by: Option>, } -impl UpdateDocumentsParameters { - pub fn new() -> UpdateDocumentsParameters { - UpdateDocumentsParameters { filter_by: None } +impl<'a> UpdateDocumentsParameters<'a> { + pub fn new() -> Self { + Self { filter_by: None } } } diff --git 
a/typesense_codegen/src/models/voice_query_model_collection_config.rs b/typesense_codegen/src/models/voice_query_model_collection_config.rs index 38044c9..4dca0ba 100644 --- a/typesense_codegen/src/models/voice_query_model_collection_config.rs +++ b/typesense_codegen/src/models/voice_query_model_collection_config.rs @@ -9,6 +9,7 @@ */ use crate::models; +use ::std::borrow::Cow; use serde::{Deserialize, Serialize}; /// VoiceQueryModelCollectionConfig : Configuration for the voice query model @@ -20,7 +21,7 @@ pub struct VoiceQueryModelCollectionConfig { impl VoiceQueryModelCollectionConfig { /// Configuration for the voice query model - pub fn new() -> VoiceQueryModelCollectionConfig { - VoiceQueryModelCollectionConfig { model_name: None } + pub fn new() -> Self { + Self { model_name: None } } } diff --git a/typesense_derive/src/field_attributes.rs b/typesense_derive/src/field_attributes.rs index 8cdfcdb..fe4b7d1 100644 --- a/typesense_derive/src/field_attributes.rs +++ b/typesense_derive/src/field_attributes.rs @@ -257,17 +257,17 @@ fn build_regular_field(field: &Field, field_attrs: &FieldAttributes) -> proc_mac (&field.ty, false) }; - let name_ident = &field.ident; let field_name = if let Some(rename) = &field_attrs.rename { - quote! { #rename.to_string() } + quote! { #rename } } else { - quote! { std::string::String::from(stringify!(#name_ident)) } + let name_ident = field.ident.as_ref().unwrap().to_string(); + quote! { #name_ident } }; let typesense_field_type = if let Some(override_str) = &field_attrs.type_override { - quote! { #override_str.to_owned() } + quote! { #override_str } } else { - quote! { <#ty as typesense::prelude::ToTypesenseField>::to_typesense_type().to_owned() } + quote! { <#ty as ::typesense::prelude::ToTypesenseField>::to_typesense_type() } }; let optional = field_attrs @@ -286,17 +286,20 @@ fn build_regular_field(field: &Field, field_attrs: &FieldAttributes) -> proc_mac let num_dim = field_attrs.num_dim.map(|v| quote!(.num_dim(#v))); quote! 
{ - vec![ - typesense::models::Field::builder().name(#field_name).r#type(#typesense_field_type) - #optional #facet #index #store #sort #infix #stem #range_index #locale #vec_dist #num_dim - .build() - ] + ::typesense::models::Field::builder().name(#field_name).r#type(#typesense_field_type) + #optional #facet #index #store #sort #infix #stem #range_index #locale #vec_dist #num_dim + .build() } } /// Processes a single struct field. /// Returns a TokenStream which evaluates to a `Vec`. -pub(crate) fn process_field(field: &Field) -> syn::Result { +pub(crate) fn process_field( + field: &Field, +) -> syn::Result<( + Option, + Option, +)> { let field_attrs = extract_field_attrs(field)?; if field_attrs.flatten { @@ -304,8 +307,8 @@ pub(crate) fn process_field(field: &Field) -> syn::Result syn::Result::collection_schema().fields + <#inner_type as ::typesense::prelude::Document>::collection_schema().fields .into_iter() .map(|mut f| { // Use the dynamically determined prefix here @@ -324,35 +327,25 @@ pub(crate) fn process_field(field: &Field) -> syn::Result>() }; if field_attrs.skip { // `#[typesense(flatten, skip)]` -> Only flattened fields - return Ok(quote! { - { - #flattened_fields - } - }); + return Ok((None, Some(quote! { #flattened_fields }))); } // `#[typesense(flatten)]` -> Flattened fields + object field let regular_field = build_regular_field(field, &field_attrs); - Ok(quote! { - { - let mut fields = #regular_field; - fields.extend(#flattened_fields); - fields - } - }) + Ok(( + Some(quote! { #regular_field }), + Some(quote! { #flattened_fields }), + )) } else { // --- REGULAR FIELD LOGIC --- if field_attrs.skip { - return Ok(quote! 
{ - vec![] - }); + return Ok((None, None)); } - Ok(build_regular_field(field, &field_attrs)) + Ok((Some(build_regular_field(field, &field_attrs)), None)) } } diff --git a/typesense_derive/src/lib.rs b/typesense_derive/src/lib.rs index e62a65f..0e1b658 100644 --- a/typesense_derive/src/lib.rs +++ b/typesense_derive/src/lib.rs @@ -76,10 +76,17 @@ fn impl_typesense_collection(item: ItemStruct) -> syn::Result { } } - let typesense_fields = fields - .iter() - .map(process_field) - .collect::>>()?; + let mut regular_fields = Vec::new(); + let mut flattened_fields = Vec::new(); + for field in &fields { + let (regular, flattened) = process_field(field)?; + if let Some(f) = regular { + regular_fields.push(f); + } + if let Some(f) = flattened { + flattened_fields.push(f); + } + } let default_sorting_field = if let Some(v) = default_sorting_field { quote! { @@ -139,15 +146,15 @@ fn impl_typesense_collection(item: ItemStruct) -> syn::Result { impl #impl_generics ::typesense::prelude::Document for #ident #ty_generics #where_clause { const COLLECTION_NAME: &str = #collection_name; - type Partial = #name_partial; - fn collection_schema() -> ::typesense::models::CollectionSchema { - let name = Self::COLLECTION_NAME.to_owned(); + type Partial = #name_partial; - let mut fields = Vec::new(); - #(fields.extend(#typesense_fields);)* + fn collection_schema() -> ::typesense::models::CollectionSchema<'static> { + let fields = [#(#regular_fields,)*].into_iter() + #(.chain(#flattened_fields))* + .collect::>(); - let builder = ::typesense::models::CollectionSchema::builder().name(name).fields(fields); + let builder = ::typesense::models::CollectionSchema::builder().name(Self::COLLECTION_NAME).fields(fields); #default_sorting_field #enable_nested_fields diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml index a37b55b..645e345 100644 --- a/xtask/Cargo.toml +++ b/xtask/Cargo.toml @@ -7,8 +7,13 @@ edition.workspace = true [dependencies] anyhow = { workspace = true } clap = { workspace = true } 
+indexmap = { workspace = true } reqwest = { version = "0.12", features = ["blocking"] } # "blocking" is simpler for scripts serde = { workspace = true } serde_yaml = { workspace = true } -typesense = { path = "../typesense"} tokio = { workspace = true} +typesense = { path = "../typesense", optional = true} + +[features] +default = [] +typesense = ["dep:typesense"] diff --git a/xtask/src/add_vendor_attributes.rs b/xtask/src/add_vendor_attributes.rs index 8fe1fe1..7a76054 100644 --- a/xtask/src/add_vendor_attributes.rs +++ b/xtask/src/add_vendor_attributes.rs @@ -1,9 +1,8 @@ -use crate::vendor_attributes::VendorAttributes; -use serde_yaml::Mapping; +use crate::{preprocess_openapi::OpenAPI, vendor_attributes::VendorAttributes}; -pub fn add_vendor_attributes(doc_root: &mut Mapping) -> Result<(), String> { +pub fn add_vendor_attributes(doc: &mut OpenAPI) -> Result<(), String> { println!("Adding custom x-* vendor attributes..."); - let mut attrs = VendorAttributes::new(doc_root); + let mut attrs = VendorAttributes::new(doc); // Schemas attrs.schema_builder([ @@ -15,11 +14,11 @@ pub fn add_vendor_attributes(doc_root: &mut Mapping) -> Result<(), String> { ])?; attrs.schema_generic_parameter([ - ("SearchResult", ""), - ("SearchGroupedHit", ""), - ("SearchResultHit", ""), - ("MultiSearchResult", ""), - ("MultiSearchResultItem", ""), + ("SearchResult", "D"), + ("SearchGroupedHit", "D"), + ("SearchResultHit", "D"), + ("MultiSearchResult", "D"), + ("MultiSearchResultItem", "D"), ])?; attrs.schema_field_type_overrides( @@ -42,56 +41,48 @@ pub fn add_vendor_attributes(doc_root: &mut Mapping) -> Result<(), String> { // Operations attrs .operation("/collections/{collectionName}/documents/search", "get") - .generic_parameter(" serde::Deserialize<'de> + Serialize>") - .return_type("models::SearchResult") - .done()?; + .generic_parameter("D: for<'de> serde::Deserialize<'de> + Serialize")? 
+ .return_type("models::SearchResult")?; attrs .operation("/multi_search", "post") - .return_type("serde_json::Value") - .done()?; + .return_type("serde_json::Value")?; // The endpoint return `null` if no schema changes are in progress attrs .operation("/operations/schema_changes", "get") - .return_type("Option>") - .done()?; + .return_type("Option>")?; // The documents /import endpoint expects a text/plain body and response attrs .operation("/collections/{collectionName}/documents/import", "post") - .body_is_raw_text() - .supports_plain_text() - .done()?; + .body_is_raw_text()? + .supports_plain_text()?; // The stemming /import endpoint also expects a text/plain body and response attrs .operation("/stemming/dictionaries/import", "post") - .body_is_raw_text() - .supports_plain_text() - .done()?; + .body_is_raw_text()? + .supports_plain_text()?; attrs .operation("/collections/{collectionName}/documents/export", "get") - .supports_plain_text() - .done()?; + .supports_plain_text()?; attrs .operation("/collections/{collectionName}/documents", "patch") - .generic_parameter("") - .params_generic_parameter("") - .request_type("B") - .done()?; + .generic_parameter("B: Serialize")? + .params_generic_parameter("B")? + .request_type("B")?; attrs .operation( "/collections/{collectionName}/documents/{documentId}", "patch", ) - .generic_parameter("") - .params_generic_parameter("") - .request_type("B") - .done()?; + .generic_parameter("B: Serialize")? + .params_generic_parameter("B")? 
+ .request_type("B")?; Ok(()) } diff --git a/xtask/src/main.rs b/xtask/src/main.rs index 29f84c9..bb60432 100644 --- a/xtask/src/main.rs +++ b/xtask/src/main.rs @@ -3,10 +3,12 @@ use clap::{Parser, ValueEnum}; use std::{env, fs, process::Command}; mod add_vendor_attributes; mod preprocess_openapi; +#[cfg(feature = "typesense")] mod test_clean; mod vendor_attributes; use preprocess_openapi::preprocess_openapi_file; +#[cfg(feature = "typesense")] use test_clean::test_clean; const SPEC_URL: &str = @@ -50,12 +52,14 @@ enum Task { /// Preprocesses fetched OpenAPI spec file into a new one Preprocess, /// Clean up test artifacts, e.g., collections + #[cfg(feature = "typesense")] TestClean, } fn main() -> Result<()> { let cli = Cli::parse(); + #[cfg(feature = "typesense")] let rt = tokio::runtime::Runtime::new().unwrap(); for task in cli.tasks { @@ -65,6 +69,7 @@ fn main() -> Result<()> { Task::Fetch => task_fetch_api_spec()?, Task::Preprocess => preprocess_openapi_file(INPUT_SPEC_FILE, OUTPUT_PREPROCESSED_FILE) .expect("Preprocess failed, aborting!"), + #[cfg(feature = "typesense")] Task::TestClean => { let test_args = cli.test_args.clone(); let is_wasm = cli.wasm; diff --git a/xtask/src/preprocess_openapi.rs b/xtask/src/preprocess_openapi.rs index d2a16ba..38be830 100644 --- a/xtask/src/preprocess_openapi.rs +++ b/xtask/src/preprocess_openapi.rs @@ -1,8 +1,142 @@ -use serde_yaml::{Mapping, Value}; -use std::fs; +use ::std::{collections::HashSet, fs}; +use indexmap::IndexMap; +use serde::{Deserialize, Serialize}; +use serde_yaml::Value; use crate::add_vendor_attributes::add_vendor_attributes; +#[derive(Clone, Debug, Default, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub(crate) struct OpenAPI { + #[serde(skip_serializing_if = "Option::is_none")] + openapi: Option, + #[serde(skip_serializing_if = "Option::is_none")] + info: Option, + #[serde(skip_serializing_if = "Option::is_none")] + servers: Option, + #[serde(skip_serializing_if = "Option::is_none")] + 
external_docs: Option, + #[serde(skip_serializing_if = "Option::is_none")] + security: Option, + #[serde(skip_serializing_if = "Option::is_none")] + tags: Option, + pub(crate) paths: IndexMap, + pub(crate) components: OpenAPIComponents, + + // Everything else not explicitly listed above + #[serde(flatten)] + pub(crate) extra: IndexMap, +} + +pub(crate) type OpenAPIPath = IndexMap; + +#[derive(Clone, Debug, Default, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub(crate) struct OpenAPIMethod { + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) tags: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) summary: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) description: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) operation_id: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) parameters: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) request_body: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) responses: Option>, + + // Everything else not explicitly listed above + #[serde(flatten)] + pub(crate) extra: IndexMap, +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub(crate) struct OpenAPIBody { + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) required: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) description: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) content: Option>, + + // Everything else not explicitly listed above + #[serde(flatten)] + pub(crate) extra: IndexMap, +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub(crate) struct OpenAPIBodyContent { + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) schema: Option, + + // Everything else not explicitly listed above + #[serde(flatten)] + pub(crate) 
extra: IndexMap, +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub(crate) struct OpenAPIParameter { + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) name: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) required: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) r#in: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) description: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) schema: Option, + + // Everything else not explicitly listed above + #[serde(flatten)] + pub(crate) extra: IndexMap, +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub(crate) struct OpenAPIComponents { + pub(crate) schemas: IndexMap, + + // Everything else not explicitly listed above + #[serde(flatten)] + pub(crate) extra: IndexMap, +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub(crate) struct OpenAPIProperty { + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) r#type: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) default: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) required: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) description: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) example: Option, + #[serde(rename = "$ref", skip_serializing_if = "Option::is_none")] + pub(crate) r#ref: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) properties: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) items: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) one_of: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) any_of: Option>, + + // Everything else not explicitly listed above + #[serde(flatten)] + pub(crate) 
extra: IndexMap, +} + // --- Main function to orchestrate the file reading, processing, and writing --- pub fn preprocess_openapi_file( input_path: &str, @@ -13,57 +147,48 @@ pub fn preprocess_openapi_file( println!("Reading OpenAPI spec from {}...", input_path); let input_content = fs::read_to_string(input_path) .map_err(|e| format!("Failed to read {}: {}", input_path, e))?; - let mut doc: Value = serde_yaml::from_str(&input_content)?; - - // Ensure the root is a mutable mapping - let doc_root = doc - .as_mapping_mut() - .ok_or("OpenAPI spec root is not a YAML map")?; + let mut doc: OpenAPI = serde_yaml::from_str(&input_content)?; // --- Step 2: Apply all the required transformations --- println!("Preprocessing the spec..."); println!("Adding custom x-* vendor attributes..."); - add_vendor_attributes(doc_root)?; + add_vendor_attributes(&mut doc)?; println!("Unwrapping parameters..."); - unwrap_search_parameters(doc_root)?; - unwrap_multi_search_parameters(doc_root)?; - unwrap_parameters_by_path( - doc_root, + doc.unwrap_search_parameters()?; + doc.unwrap_multi_search_parameters()?; + doc.unwrap_parameters_by_path( "/collections/{collectionName}/documents/import", "post", "importDocumentsParameters", Some("ImportDocumentsParameters"), // Copy schema to components )?; - unwrap_parameters_by_path( - doc_root, + doc.unwrap_parameters_by_path( "/collections/{collectionName}/documents/export", "get", "exportDocumentsParameters", Some("ExportDocumentsParameters"), )?; - unwrap_parameters_by_path( - doc_root, + doc.unwrap_parameters_by_path( "/collections/{collectionName}/documents", "patch", "updateDocumentsParameters", Some("UpdateDocumentsParameters"), )?; - unwrap_parameters_by_path( - doc_root, + doc.unwrap_parameters_by_path( "/collections/{collectionName}/documents", "delete", "deleteDocumentsParameters", Some("DeleteDocumentsParameters"), )?; - unwrap_parameters_by_path( - doc_root, + doc.unwrap_parameters_by_path( "/collections", "get", 
"getCollectionsParameters", Some("GetCollectionsParameters"), )?; + doc.mark_borrowed_data(); println!("Preprocessing complete."); // --- Step 3: Serialize the modified spec and write to the output file --- @@ -76,176 +201,267 @@ pub fn preprocess_openapi_file( Ok(()) } -/// A generic function to: -/// 1. (Optional) Copy an inline parameter schema to `components/schemas`. -/// 2. Unwrap that parameter object into individual query parameters within the `paths` definition. -fn unwrap_parameters_by_path( - doc: &mut Mapping, - path: &str, - method: &str, - param_name_to_unwrap: &str, - new_component_name: Option<&str>, -) -> Result<(), String> { - // --- Step 1 (Optional): Copy the inline schema to components --- - if let Some(component_name) = new_component_name { +impl OpenAPIProperty { + fn collect_properties(&self) -> Vec { + let mut data = Vec::new(); + if let Some(schema) = &self.r#ref { + data.push( + schema + .trim_start_matches("#/components/schemas/") + .to_owned(), + ); + } + if let Some(p) = &self.items { + data.extend(p.collect_properties()); + } + if let Some(v) = &self.any_of { + v.iter().for_each(|p| data.extend(p.collect_properties())); + } + if let Some(v) = &self.one_of { + v.iter().for_each(|p| data.extend(p.collect_properties())); + } + data + } + + fn mark_borrowed_property(&mut self) { + if self.r#type.as_deref() == Some("object") || self.one_of.is_some() { + self.extra + .insert("x-rust-has-borrowed-data".to_owned(), Value::Bool(true)); + } + } +} + +impl OpenAPI { + fn mark_borrowed_data(&mut self) { + println!("Marking borrowed data..."); + + let mut request_schemas = HashSet::new(); + self.paths.iter_mut().for_each(|(_, pms)| { + pms.iter_mut().for_each(|(_, pm)| { + if let Some(ps) = &mut pm.parameters { + ps.iter_mut().for_each(|p| { + if let Some(s) = &mut p.schema { + s.mark_borrowed_property(); + request_schemas.extend(s.collect_properties()); + } + }) + } + + if let Some(reqb) = &mut pm.request_body + && let Some(cs) = &mut 
reqb.content + { + cs.iter_mut().for_each(|(_, c)| { + if let Some(s) = &mut c.schema { + s.mark_borrowed_property(); + request_schemas.extend(s.collect_properties()); + } + }) + } + }) + }); + + let schemas = self + .components + .schemas + .iter() + .filter(|(n, _)| n.ends_with("Parameters") || request_schemas.contains(n.as_str())) + .map(|(n, _)| n.to_owned()) + .collect::>(); + drop(request_schemas); + + for schema_name in schemas { + let Some(schema) = self.components.schemas.get_mut(&schema_name) else { + continue; + }; + + schema + .extra + .insert("x-rust-has-borrowed-data".to_owned(), Value::Bool(true)); + + for (_, prop) in schema.properties.iter_mut().flat_map(|v| v.iter_mut()) { + for inner in prop.one_of.iter_mut().flat_map(|v| v.iter_mut()) { + inner.mark_borrowed_property(); + } + for inner in prop.any_of.iter_mut().flat_map(|v| v.iter_mut()) { + inner.mark_borrowed_property(); + } + if let Some(inner) = &mut prop.items { + inner.mark_borrowed_property(); + } + + prop.mark_borrowed_property(); + } + } + } + + /// A generic function to: + /// 1. (Optional) Copy an inline parameter schema to `components/schemas`. + /// 2. Unwrap that parameter object into individual query parameters within the `paths` definition. 
+ fn unwrap_parameters_by_path( + &mut self, + path: &str, + method: &str, + param_name_to_unwrap: &str, + new_component_name: Option<&str>, + ) -> Result<(), String> { + // --- Step 1 (Optional): Copy the inline schema to components --- + if let Some(component_name) = new_component_name { + println!( + "- Copying inline schema for '{}' to components.schemas.{}...", + param_name_to_unwrap, component_name + ); + + // Find the parameter with the inline schema to copy using a read-only borrow + let params_for_copy = self + .paths + .get(path) + .and_then(|p| p.get(method)) + .and_then(|op| op.parameters.as_ref()) + .ok_or_else(|| format!("Could not find parameters for {} {}", method, path))?; + + let param_to_copy = params_for_copy + .iter() + .find(|p| p.name.as_deref() == Some(param_name_to_unwrap)) + .ok_or_else(|| { + format!("Parameter '{}' not found for copying", param_name_to_unwrap) + })?; + + let inline_schema = param_to_copy + .schema + .clone() + .ok_or_else(|| format!("No schema found for '{}'", param_name_to_unwrap))?; + + // Get a mutable borrow to insert the cloned schema into components + self.components + .schemas + .insert(component_name.into(), inline_schema); + } + + // --- Step 2: Unwrap the parameter object into individual parameters --- println!( - "- Copying inline schema for '{}' to components.schemas.{}...", - param_name_to_unwrap, component_name + "- Unwrapping parameter object '{}'...", + param_name_to_unwrap ); - // Find the parameter with the inline schema to copy using a read-only borrow - let params_for_copy = doc - .get("paths") - .and_then(|p| p.get(path)) - .and_then(|p| p.get(method)) - .and_then(|op| op.get("parameters")) - .and_then(|params| params.as_sequence()) + // Navigate down to the operation's parameters list (mutable) + let params_for_unwrap = self + .paths + .get_mut(path) + .and_then(|p| p.get_mut(method)) + .and_then(|op| op.parameters.as_mut()) .ok_or_else(|| format!("Could not find parameters for {} {}", method, 
path))?; - let param_to_copy = params_for_copy + let param_index = params_for_unwrap .iter() - .find(|p| p.get("name").and_then(|n| n.as_str()) == Some(param_name_to_unwrap)) - .ok_or_else(|| format!("Parameter '{}' not found for copying", param_name_to_unwrap))?; - - let inline_schema = param_to_copy - .get("schema") - .cloned() // Clone the schema to avoid borrowing issues - .ok_or_else(|| format!("No schema found for '{}'", param_name_to_unwrap))?; - - // Get a mutable borrow to insert the cloned schema into components - let schemas = doc - .get_mut("components") - .and_then(|c| c.get_mut("schemas")) - .and_then(|s| s.as_mapping_mut()) - .ok_or_else(|| "Could not find components/schemas section".to_string())?; - - schemas.insert(component_name.into(), inline_schema); - } + .position(|p| p.name.as_deref() == Some(param_name_to_unwrap)) + .ok_or_else(|| format!("Parameter '{}' not found in {}", param_name_to_unwrap, path))?; - // --- Step 2: Unwrap the parameter object into individual parameters --- - println!( - "- Unwrapping parameter object '{}'...", - param_name_to_unwrap - ); - - // Navigate down to the operation's parameters list (mutable) - let params_for_unwrap = doc - .get_mut("paths") - .and_then(|p| p.get_mut(path)) - .and_then(|p| p.get_mut(method)) - .and_then(|op| op.get_mut("parameters")) - .and_then(|params| params.as_sequence_mut()) - .ok_or_else(|| format!("Could not find parameters for {} {}", method, path))?; - - let param_index = params_for_unwrap - .iter() - .position(|p| p.get("name").and_then(|n| n.as_str()) == Some(param_name_to_unwrap)) - .ok_or_else(|| format!("Parameter '{}' not found in {}", param_name_to_unwrap, path))?; - - let param_object = params_for_unwrap.remove(param_index); - let properties = param_object - .get("schema") - .and_then(|s| s.get("properties")) - .and_then(|p| p.as_mapping()) - .ok_or_else(|| { - format!( - "Could not extract properties from '{}'", - param_name_to_unwrap - ) - })?; - - for (key, value) in 
properties { - let mut new_param = Mapping::new(); - new_param.insert("name".into(), key.clone()); - new_param.insert("in".into(), "query".into()); - new_param.insert("schema".into(), value.clone()); - params_for_unwrap.push(new_param.into()); - } + let param_object = params_for_unwrap.remove(param_index); + let properties = param_object + .schema + .and_then(|s| s.properties) + .ok_or_else(|| { + format!( + "Could not extract properties from '{}'", + param_name_to_unwrap + ) + })?; - Ok(()) -} + for (key, value) in properties { + let new_param = OpenAPIParameter { + name: Some(key), + r#in: Some("query".to_owned()), + schema: Some(value), + ..Default::default() + }; + params_for_unwrap.push(new_param); + } -/// Special handler for unwrapping search parameters from `components/schemas`. -fn unwrap_search_parameters(doc: &mut Mapping) -> Result<(), String> { - println!("- Unwrapping searchParameters..."); - // Get the definition of SearchParameters from components - let search_params_props = doc - .get("components") - .and_then(|c| c.get("schemas")) - .and_then(|s| s.get("SearchParameters")) - .and_then(|sp| sp.get("properties")) - .and_then(|p| p.as_mapping()) - .cloned() // Clone to avoid borrowing issues - .ok_or_else(|| "Could not find schema for SearchParameters".to_string())?; - - // Navigate to the operation's parameters list - let params = doc - .get_mut("paths") - .and_then(|p| p.get_mut("/collections/{collectionName}/documents/search")) - .and_then(|p| p.get_mut("get")) - .and_then(|op| op.get_mut("parameters")) - .and_then(|params| params.as_sequence_mut()) - .ok_or_else(|| { - "Could not find parameters for /collections/{collectionName}/documents/search" - .to_string() - })?; - - // Find and remove the old parameter object. 
- let param_index = params - .iter() - .position(|p| p.get("name").and_then(|n| n.as_str()) == Some("searchParameters")) - .ok_or_else(|| "searchParameters object not found".to_string())?; - params.remove(param_index); - - // Add the new individual parameters. - for (key, value) in search_params_props { - let mut new_param = Mapping::new(); - new_param.insert("name".into(), key.clone()); - new_param.insert("in".into(), "query".into()); - new_param.insert("schema".into(), value.clone()); - params.push(new_param.into()); + Ok(()) } - Ok(()) -} + /// Special handler for unwrapping search parameters from `components/schemas`. + fn unwrap_search_parameters(&mut self) -> Result<(), String> { + println!("- Unwrapping searchParameters..."); + // Get the definition of SearchParameters from components + let search_params_props = self + .components + .schemas + .get("SearchParameters") + .and_then(|sp| sp.properties.as_ref()) + .cloned() // Clone to avoid borrowing issues + .ok_or_else(|| "Could not find schema for SearchParameters".to_string())?; + + // Navigate to the operation's parameters list + let params = self + .paths + .get_mut("/collections/{collectionName}/documents/search") + .and_then(|p| p.get_mut("get")) + .and_then(|op| op.parameters.as_mut()) + .ok_or_else(|| { + "Could not find parameters for /collections/{collectionName}/documents/search" + .to_string() + })?; + + // Find and remove the old parameter object. + let param_index = params + .iter() + .position(|p| p.name.as_deref() == Some("searchParameters")) + .ok_or_else(|| "searchParameters object not found".to_string())?; + params.remove(param_index); + + // Add the new individual parameters. + for (key, value) in search_params_props { + let new_param = OpenAPIParameter { + name: Some(key), + r#in: Some("query".to_owned()), + schema: Some(value), + ..Default::default() + }; + params.push(new_param); + } -/// Special handler for unwrapping multi-search parameters from `components/schemas`. 
-fn unwrap_multi_search_parameters(doc: &mut Mapping) -> Result<(), String> { - println!("- Unwrapping multiSearchParameters..."); - // Get the definition of MultiSearchParameters from components - let search_params_props: Mapping = doc - .get("components") - .and_then(|c| c.get("schemas")) - .and_then(|s| s.get("MultiSearchParameters")) - .and_then(|sp| sp.get("properties")) - .and_then(|p| p.as_mapping()) - .cloned() - .ok_or_else(|| "Could not find schema for MultiSearchParameters".to_string())?; - - // Navigate to the operation's parameters list - let params = doc - .get_mut("paths") - .and_then(|p| p.get_mut("/multi_search")) - .and_then(|p| p.get_mut("post")) - .and_then(|op| op.get_mut("parameters")) - .and_then(|params| params.as_sequence_mut()) - .ok_or_else(|| "Could not find parameters for /multi_search".to_string())?; - - // Find and remove the old parameter object. - let param_index = params - .iter() - .position(|p| p.get("name").and_then(|n| n.as_str()) == Some("multiSearchParameters")) - .ok_or_else(|| "multiSearchParameters object not found".to_string())?; - params.remove(param_index); - - // Add the new individual parameters. - for (key, value) in search_params_props { - let mut new_param = Mapping::new(); - new_param.insert("name".into(), key.clone()); - new_param.insert("in".into(), "query".into()); - new_param.insert("schema".into(), value.clone()); - params.push(new_param.into()); + Ok(()) } - Ok(()) + /// Special handler for unwrapping multi-search parameters from `components/schemas`. 
+ fn unwrap_multi_search_parameters(&mut self) -> Result<(), String> { + println!("- Unwrapping multiSearchParameters..."); + // Get the definition of MultiSearchParameters from components + let search_params_props = self + .components + .schemas + .get("MultiSearchParameters") + .and_then(|sp| sp.properties.as_ref()) + .cloned() + .ok_or_else(|| "Could not find schema for MultiSearchParameters".to_string())?; + + // Navigate to the operation's parameters list + let params = self + .paths + .get_mut("/multi_search") + .and_then(|p| p.get_mut("post")) + .and_then(|op| op.parameters.as_mut()) + .ok_or_else(|| "Could not find parameters for /multi_search".to_string())?; + + // Find and remove the old parameter object. + let param_index = params + .iter() + .position(|p| p.name.as_deref() == Some("multiSearchParameters")) + .ok_or_else(|| "multiSearchParameters object not found".to_string())?; + params.remove(param_index); + + // Add the new individual parameters. + for (key, value) in search_params_props { + let new_param = OpenAPIParameter { + name: Some(key), + r#in: Some("query".to_owned()), + schema: Some(value), + ..Default::default() + }; + params.push(new_param); + } + + Ok(()) + } } diff --git a/xtask/src/test_clean.rs b/xtask/src/test_clean.rs index 9acabf1..a527b92 100644 --- a/xtask/src/test_clean.rs +++ b/xtask/src/test_clean.rs @@ -27,7 +27,7 @@ async fn clean_test_artifacts() { } if let Err(err) = client - .collection_schemaless(&collection.name) + .collection_schemaless(collection.name.as_str()) .delete() .await { diff --git a/xtask/src/vendor_attributes.rs b/xtask/src/vendor_attributes.rs index 39baceb..3c90f81 100644 --- a/xtask/src/vendor_attributes.rs +++ b/xtask/src/vendor_attributes.rs @@ -1,128 +1,33 @@ -use serde_yaml::{Mapping, Value}; - -/// Where to apply a vendor (x-*) attribute. 
-pub enum VendorLocation<'a> { - Schema(&'a str), - SchemaField { - schema: &'a str, - field: &'a str, - }, - Operation { - path: &'a str, - method: &'a str, - }, - OperationField { - path: &'a str, - method: &'a str, - field: &'a str, - }, -} +use crate::preprocess_openapi::{OpenAPI, OpenAPIProperty}; +use indexmap::IndexMap; +use serde_yaml::Value; /// Main helper struct that holds a mutable borrow of the OpenAPI root mapping. pub struct VendorAttributes<'a> { - doc: &'a mut Mapping, + pub doc: &'a mut OpenAPI, } impl<'a> VendorAttributes<'a> { - pub fn new(doc: &'a mut Mapping) -> Self { + pub fn new(doc: &'a mut OpenAPI) -> Self { Self { doc } } - // internal helpers - - fn traverse_value_mut(&mut self, keys: &[&str]) -> Option<&mut Value> { - if keys.is_empty() { - return None; - } - let mut cur: Option<&mut Value> = self.doc.get_mut(keys[0]); - for k in &keys[1..] { - cur = cur.and_then(|v| v.get_mut(k)); - } - cur - } - - fn get_map_mut(&mut self, keys: &[&str]) -> Result<&mut Mapping, String> { - self.traverse_value_mut(keys) - .and_then(|v| v.as_mapping_mut()) - .ok_or_else(|| format!("expected mapping at path: {}", keys.join("."))) - } - - #[inline] - fn insert_into_map(map: &mut Mapping, attr: &str, val: Value) { - map.insert(Value::String(attr.to_string()), val); - } - - fn set_attr( - &mut self, - location: VendorLocation<'_>, - attr: &str, - val: Value, - ) -> Result<&mut Self, String> { - match location { - VendorLocation::Schema(schema_name) => { - let map = self.get_map_mut(&["components", "schemas", schema_name])?; - Self::insert_into_map(map, attr, val); - Ok(self) - } - VendorLocation::SchemaField { schema, field } => { - let props_map = self - .get_map_mut(&["components", "schemas", schema, "properties"]) - .map_err(|_| format!("schema '{}' has no properties mapping", schema))?; - - let prop_key = Value::String(field.to_string()); - match props_map.get_mut(&prop_key) { - Some(existing_val) => { - if let Some(field_map) = 
existing_val.as_mapping_mut() { - Self::insert_into_map(field_map, attr, val); - Ok(self) - } else { - Err(format!( - "property '{}' in schema '{}' exists but is not a mapping; cannot set '{}'", - field, schema, attr - )) - } - } - None => { - let mut new_field_map = Mapping::new(); - new_field_map.insert(Value::String(attr.to_string()), val); - props_map.insert(prop_key, Value::Mapping(new_field_map)); - Ok(self) - } - } - } - VendorLocation::Operation { path, method } => { - let op_map = self - .get_map_mut(&["paths", path, method]) - .map_err(|_| format!("operation not found: {} {}", method, path))?; - Self::insert_into_map(op_map, attr, val); - Ok(self) - } - VendorLocation::OperationField { - path, - method, - field, - } => { - let field_map = - self.get_map_mut(&["paths", path, method, field]) - .map_err(|_| { - format!("operation field not found: {} {} {}", method, path, field) - })?; - Self::insert_into_map(field_map, attr, val); - Ok(self) - } - } - } - pub fn schema_generic_parameter( &mut self, items: [(&str, &str); N], ) -> Result<&mut Self, String> { - for (schema, generic) in items { - self.set_attr( - VendorLocation::Schema(schema), - "x-rust-generic-parameter", + for (schema_name, generic) in items { + let map = self + .doc + .components + .schemas + .get_mut(schema_name) + .ok_or_else(|| format!("schema not found: {schema_name}"))?; + + map.extra.insert( + "x-rust-generic-parameter".to_owned(), Value::String(generic.into()), - )?; + ); } Ok(self) } @@ -131,12 +36,16 @@ impl<'a> VendorAttributes<'a> { &mut self, schemas: [&str; N], ) -> Result<&mut Self, String> { - for schema in schemas { - self.set_attr( - VendorLocation::Schema(schema), - "x-rust-builder", - Value::Bool(true), - )?; + for schema_name in schemas { + let map = self + .doc + .components + .schemas + .get_mut(schema_name) + .ok_or_else(|| format!("schema not found: {schema_name}"))?; + + map.extra + .insert("x-rust-builder".to_owned(), Value::Bool(true)); } Ok(self) } @@ -147,11 
+56,32 @@ impl<'a> VendorAttributes<'a> { overrides: [(&str, &str); N], ) -> Result<&mut Self, String> { for (field, rust_type) in overrides { - self.set_attr( - VendorLocation::SchemaField { schema, field }, - "x-rust-type", - Value::String(rust_type.into()), - )?; + let props_map = self + .doc + .components + .schemas + .get_mut(schema) + .ok_or_else(|| format!("schema not found: {schema}"))? + .properties + .as_mut() + .ok_or_else(|| format!("No properties in schema: {schema}"))?; + match props_map.get_mut(field) { + Some(existing_val) => { + existing_val + .extra + .insert("x-rust-type".to_owned(), Value::String(rust_type.into())); + } + None => { + let new_field_map = OpenAPIProperty { + extra: IndexMap::from([( + "x-rust-type".to_owned(), + Value::String(rust_type.into()), + )]), + ..Default::default() + }; + props_map.insert(field.to_owned(), new_field_map); + } + } } Ok(self) } @@ -161,7 +91,6 @@ impl<'a> VendorAttributes<'a> { vendor: self, path, method, - error: None, } } } @@ -170,83 +99,68 @@ pub struct OperationContext<'a, 'b> { vendor: &'b mut VendorAttributes<'a>, path: &'b str, method: &'b str, - error: Option, } impl<'a, 'b> OperationContext<'a, 'b> { - fn try_set(&mut self, attr: &str, val: Value) { - if self.error.is_some() { - return; - } - if let Err(e) = self.vendor.set_attr( - VendorLocation::Operation { - path: self.path, - method: self.method, - }, - attr, - val, - ) { - self.error = Some(e); - } + fn try_set(&mut self, attr: &str, val: Value) -> Result<&mut Self, String> { + let method = self + .vendor + .doc + .paths + .get_mut(self.path) + .ok_or_else(|| format!("operation path not found: {}", self.path))? 
+ .get_mut(self.method) + .ok_or_else(|| format!("operation method not found: {}.{}", self.path, self.method))?; + method.extra.insert(attr.to_owned(), val); + Ok(self) } - fn try_set_field(&mut self, field: &str, attr: &str, val: Value) { - if self.error.is_some() { - return; - } - if let Err(e) = self.vendor.set_attr( - VendorLocation::OperationField { - path: self.path, - method: self.method, - field, - }, - attr, - val, - ) { - self.error = Some(e); - } + fn try_set_request_body(&mut self, attr: &str, val: Value) -> Result<&mut Self, String> { + let req = &mut self + .vendor + .doc + .paths + .get_mut(self.path) + .ok_or_else(|| format!("operation path not found: {}", self.path))? + .get_mut(self.method) + .ok_or_else(|| format!("operation method not found: {}.{}", self.path, self.method))? + .request_body; + let req = match req { + Some(v) => v, + None => { + *req = Some(Default::default()); + req.as_mut().unwrap() + } + }; + req.extra.insert(attr.to_owned(), val); + Ok(self) } - pub fn generic_parameter(mut self, generic: &str) -> Self { - self.try_set("x-rust-generic-parameter", Value::String(generic.into())); - self + pub fn generic_parameter(&mut self, generic: &str) -> Result<&mut Self, String> { + self.try_set("x-rust-generic-parameter", Value::String(generic.into())) } - pub fn params_generic_parameter(mut self, generic: &str) -> Self { - self.try_set_field( - "requestBody", + pub fn params_generic_parameter(&mut self, generic: &str) -> Result<&mut Self, String> { + self.try_set_request_body( "x-rust-params-generic-parameter", Value::String(generic.into()), - ); - self + ) } - pub fn return_type(mut self, typ: &str) -> Self { - self.try_set("x-rust-return-type", Value::String(typ.into())); - self + pub fn return_type(&mut self, typ: &str) -> Result<&mut Self, String> { + self.try_set("x-rust-return-type", Value::String(typ.into())) } - pub fn request_type(mut self, typ: &str) -> Self { - self.try_set_field("requestBody", "x-rust-type", 
Value::String(typ.into())); - self + pub fn request_type(&mut self, typ: &str) -> Result<&mut Self, String> { + self.try_set_request_body("x-rust-type", Value::String(typ.into())) } - pub fn body_is_raw_text(mut self) -> Self { - self.try_set("x-rust-body-is-raw-text", Value::Bool(true)); - self + pub fn body_is_raw_text(&mut self) -> Result<&mut Self, String> { + self.try_set("x-rust-body-is-raw-text", Value::Bool(true)) } /// Indicate that the response supports plain text besides JSON - pub fn supports_plain_text(mut self) -> Self { - self.try_set("x-supports-plain-text", Value::Bool(true)); - self - } - - /// Return to VendorAttributes if no errors, or propagate the first error - pub fn done(self) -> Result<&'b mut VendorAttributes<'a>, String> { - match self.error { - Some(err) => Err(err), - None => Ok(self.vendor), - } + pub fn supports_plain_text(&mut self) -> Result<&mut Self, String> { + self.try_set("x-supports-plain-text", Value::Bool(true)) } }