Skip to content

List Available Models

client.Models.List(ctx, query) (*ModelListResponse, error)
GET /v2/gen-ai/models

To list all models, send a GET request to /v2/gen-ai/models.

Parameters
query ModelListParams
Page param.Field[int64]optional

Page number.

PerPage param.Field[int64]optional

Items per page.

PublicOnly param.Field[bool]optional

Only include models that are publicly available.

Usecases param.Field[[]ModelListParamsUsecase]optional

Include only models defined for the listed usecases.

  • MODEL_USECASE_UNKNOWN: The use case of the model is unknown
  • MODEL_USECASE_AGENT: The model may be used in an agent
  • MODEL_USECASE_FINETUNED: The model may be used for fine-tuning
  • MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases (embedding models)
  • MODEL_USECASE_GUARDRAIL: The model may be used for guardrails
  • MODEL_USECASE_REASONING: The model use case is reasoning
  • MODEL_USECASE_SERVERLESS: The model use case is serverless inference
const ModelListParamsUsecaseModelUsecaseUnknown ModelListParamsUsecase = "MODEL_USECASE_UNKNOWN"
const ModelListParamsUsecaseModelUsecaseAgent ModelListParamsUsecase = "MODEL_USECASE_AGENT"
const ModelListParamsUsecaseModelUsecaseFinetuned ModelListParamsUsecase = "MODEL_USECASE_FINETUNED"
const ModelListParamsUsecaseModelUsecaseKnowledgebase ModelListParamsUsecase = "MODEL_USECASE_KNOWLEDGEBASE"
const ModelListParamsUsecaseModelUsecaseGuardrail ModelListParamsUsecase = "MODEL_USECASE_GUARDRAIL"
const ModelListParamsUsecaseModelUsecaseReasoning ModelListParamsUsecase = "MODEL_USECASE_REASONING"
const ModelListParamsUsecaseModelUsecaseServerless ModelListParamsUsecase = "MODEL_USECASE_SERVERLESS"
Returns
type ModelListResponse struct{…}

A list of models

Meta APIMetaoptional

Meta information about the data set

Page int64optional

The current page

format: int64
Pages int64optional

Total number of pages

format: int64
Total int64optional

Total amount of items over all pages

format: int64
Models []APIModeloptional

The models

ID stringoptional

Human-readable model identifier

Agreement APIAgreementoptional

Agreement Description

Description stringoptional
Name stringoptional
URL stringoptional
Uuid stringoptional
CreatedAt Timeoptional

Creation date / time

format: date-time
IsFoundational booloptional

True if it is a foundational model provided by DigitalOcean

KBDefaultChunkSize int64optional

Default chunking size limit to show in UI

format: int64
KBMaxChunkSize int64optional

Maximum chunk size limit of model

format: int64
KBMinChunkSize int64optional

Minimum chunk size (in tokens) if the model supports the KNOWLEDGEBASE usecase

format: int64
Name stringoptional

Display name of the model

ParentUuid stringoptional

Unique id of the model that this model is based on

UpdatedAt Timeoptional

Last modified

format: date-time
UploadComplete booloptional

Model has been fully uploaded

URL stringoptional

Download url

Uuid stringoptional

Unique id

Version APIModelVersionoptional

Version Information about a Model

Major int64optional

Major version number

format: int64
Minor int64optional

Minor version number

format: int64
Patch int64optional

Patch version number

format: int64
List Available Models
// Example: list all available models via GET /v2/gen-ai/models using the
// Gradient Go SDK.
package main

import (
  "context"
  "fmt"

  // NOTE(review): these import paths look garbled — "-go" has no module
  // name before the hyphen, and the path does not match the "gradient"
  // package identifier used below. Confirm the real module path (and
  // whether a named import is needed) before running this example.
  "github.com/stainless-sdks/-go"
  "github.com/stainless-sdks/-go/option"
)

func main() {
  // Build a client authenticated with a bearer access token.
  client := gradient.NewClient(
    option.WithAccessToken("My Access Token"),
  )
  // List models with no filters; every ModelListParams field
  // (Page, PerPage, PublicOnly, Usecases) is optional.
  models, err := client.Models.List(context.TODO(), gradient.ModelListParams{

  })
  if err != nil {
    panic(err.Error())
  }
  // NOTE(review): this prints only the pagination links; the model data
  // lives in the Meta and Models fields of the response. Confirm the
  // Links field exists on ModelListResponse (the struct reference above
  // documents Meta and Models, while the JSON example shows "links").
  fmt.Printf("%+v\n", models.Links)
}
{
  "links": {
    "pages": {
      "first": "example string",
      "last": "example string",
      "next": "example string",
      "previous": "example string"
    }
  },
  "meta": {
    "page": 123,
    "pages": 123,
    "total": 123
  },
  "models": [
    {
      "id": "llama3.3-70b-instruct",
      "agreement": {
        "description": "example string",
        "name": "example name",
        "url": "example string",
        "uuid": "123e4567-e89b-12d3-a456-426614174000"
      },
      "created_at": "2021-01-01T00:00:00Z",
      "is_foundational": true,
      "kb_default_chunk_size": 123,
      "kb_max_chunk_size": 123,
      "kb_min_chunk_size": 123,
      "name": "Llama 3.3 Instruct (70B)",
      "parent_uuid": "\"12345678-1234-1234-1234-123456789012\"",
      "updated_at": "2021-01-01T00:00:00Z",
      "upload_complete": true,
      "url": "https://example.com/model.zip",
      "uuid": "\"12345678-1234-1234-1234-123456789012\"",
      "version": {
        "major": 123,
        "minor": 123,
        "patch": 123
      }
    }
  ]
}
Returns Examples
{
  "links": {
    "pages": {
      "first": "example string",
      "last": "example string",
      "next": "example string",
      "previous": "example string"
    }
  },
  "meta": {
    "page": 123,
    "pages": 123,
    "total": 123
  },
  "models": [
    {
      "id": "llama3.3-70b-instruct",
      "agreement": {
        "description": "example string",
        "name": "example name",
        "url": "example string",
        "uuid": "123e4567-e89b-12d3-a456-426614174000"
      },
      "created_at": "2021-01-01T00:00:00Z",
      "is_foundational": true,
      "kb_default_chunk_size": 123,
      "kb_max_chunk_size": 123,
      "kb_min_chunk_size": 123,
      "name": "Llama 3.3 Instruct (70B)",
      "parent_uuid": "\"12345678-1234-1234-1234-123456789012\"",
      "updated_at": "2021-01-01T00:00:00Z",
      "upload_complete": true,
      "url": "https://example.com/model.zip",
      "uuid": "\"12345678-1234-1234-1234-123456789012\"",
      "version": {
        "major": 123,
        "minor": 123,
        "patch": 123
      }
    }
  ]
}