ai

package
v1.0.10 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Sep 24, 2025 License: MulanPSL-2.0 Imports: 11 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

func NewOllamaProvider

func NewOllamaProvider(log *core.AppLog, baseURL string) core.AIProvider

NewOllamaProvider creates an OllamaProvider instance.

Types

type ChatCompletion

// ChatCompletion mirrors the structure of a non-streaming OpenAI chat
// completion response (POST /v1/chat/completions with stream=false).
type ChatCompletion struct {
	ID      string   `json:"id"`      // server-assigned completion identifier
	Object  string   `json:"object"`  // object type discriminator, e.g. "chat.completion"
	Created int64    `json:"created"` // Unix timestamp of creation
	Model   string   `json:"model"`
	Choices []Choice `json:"choices"` // one full message per choice
	Usage   Usage    `json:"usage"`   // token accounting for the request
}

ChatCompletion mirrors the structure of a non-streaming OpenAI chat completion response.

type ChatCompletionChunk

// ChatCompletionChunk mirrors the structure of a streaming OpenAI chat
// completion chunk (one SSE event when stream=true). Note it carries
// ChunkChoice (deltas) rather than Choice (full messages), and no Usage.
type ChatCompletionChunk struct {
	ID      string        `json:"id"`
	Object  string        `json:"object"`  // e.g. "chat.completion.chunk"
	Created int64         `json:"created"` // Unix timestamp of creation
	Model   string        `json:"model"`
	Choices []ChunkChoice `json:"choices"` // incremental deltas per choice
}

ChatCompletionChunk mirrors the structure of a streaming OpenAI chat completion chunk.

type ChatCompletionRequest

// ChatCompletionRequest mirrors the structure of an OpenAI chat completion
// request body (POST /v1/chat/completions).
type ChatCompletionRequest struct {
	Model    string        `json:"model"`
	Messages []ChatMessage `json:"messages"`
	Stream   bool          `json:"stream,omitempty"` // omitted when false, i.e. non-streaming by default
}

ChatCompletionRequest mirrors the structure of an OpenAI chat completion request.

type ChatMessage

// ChatMessage mirrors the structure of a single message in an OpenAI
// request ("role" + "content" pair).
type ChatMessage struct {
	Role    string `json:"role"` // e.g. "system", "user", "assistant" — TODO confirm accepted values
	Content string `json:"content"`
}

ChatMessage mirrors the structure of a message in an OpenAI request.

type ChatRequest

// ChatRequest is the request body for the Ollama POST /api/chat endpoint.
type ChatRequest struct {
	Model     string    `json:"model"`
	Messages  []Message `json:"messages"` // uses this package's Message, not the OpenAI ChatMessage
	KeepAlive *int      `json:"keep_alive,omitempty"` // pointer so an explicit 0 survives omitempty
	Stream    bool      `json:"stream,omitempty"` // request a streaming response when true
}

ChatRequest for POST /api/chat

type ChatResponse

// ChatResponse is the non-streaming response body from Ollama's
// POST /api/chat endpoint.
type ChatResponse struct {
	Model      string  `json:"model"`
	CreatedAt  string  `json:"created_at"` // kept as a raw string, not parsed into time.Time
	Message    Message `json:"message"`    // uses this package's Message type
	DoneReason string  `json:"done_reason"`
	Done       bool    `json:"done"` // true once generation has finished
}

ChatResponse for POST /api/chat (non-streaming)

type Choice

// Choice mirrors the structure of a single choice in a non-streaming
// OpenAI response: a complete message plus its finish reason.
type Choice struct {
	Index        int         `json:"index"`
	Message      ChatMessage `json:"message"`
	FinishReason string      `json:"finish_reason"`
}

Choice mirrors the structure of a choice in an OpenAI response.

type ChunkChoice

// ChunkChoice mirrors the structure of a choice in a streaming chunk.
// Unlike Choice it carries an incremental Delta, and FinishReason is
// only present on the terminating chunk (omitempty).
type ChunkChoice struct {
	Index        int    `json:"index"`
	Delta        Delta  `json:"delta"`
	FinishReason string `json:"finish_reason,omitempty"`
}

ChunkChoice mirrors the structure of a choice in a streaming chunk.

type CopyRequest

// CopyRequest is the request body for the Ollama POST /api/copy endpoint,
// duplicating the Source model under the Destination name.
type CopyRequest struct {
	Source      string `json:"source"`      // existing model name
	Destination string `json:"destination"` // new model name
}

CopyRequest for POST /api/copy

type CreateRequest

// CreateRequest is the request body for the Ollama POST /api/create endpoint.
type CreateRequest struct {
	Model string            `json:"model"`
	Files map[string]string `json:"files"` // map of filename to SHA256 digest
}

CreateRequest for POST /api/create

type CreateResponse

// CreateResponse is one status line of the streaming POST /api/create
// response; the endpoint emits a sequence of these as creation progresses.
type CreateResponse struct {
	Status string `json:"status"` // human-readable progress message
}

CreateResponse for streaming POST /api/create

type DeleteRequest

// DeleteRequest is the request body for the Ollama DELETE /api/delete
// endpoint, naming the model to remove.
type DeleteRequest struct {
	Model string `json:"model"`
}

DeleteRequest for DELETE /api/delete

type Delta

// Delta mirrors the structure of the "delta" field in a streaming choice:
// the incremental fragment added by this chunk. Both fields are omitempty
// because a chunk may update only one of them.
type Delta struct {
	Role    string `json:"role,omitempty"`    // typically set only on the first chunk — TODO confirm
	Content string `json:"content,omitempty"` // next fragment of generated text
}

Delta mirrors the structure of the delta field in a streaming choice.

type EmbedRequest

// EmbedRequest is the request body for the Ollama POST /api/embed endpoint,
// which embeds multiple inputs in one call (cf. SingleEmbedRequest).
type EmbedRequest struct {
	Model string   `json:"model"`
	Input []string `json:"input"` // one embedding is produced per element
}

EmbedRequest for POST /api/embed (multiple inputs)

type EmbedResponse

// EmbedResponse is the response body from the Ollama POST /api/embed
// endpoint: one embedding vector per input, in request order.
type EmbedResponse struct {
	Model      string      `json:"model"`
	Embeddings [][]float64 `json:"embeddings"` // outer index parallels EmbedRequest.Input — TODO confirm ordering
}

EmbedResponse for POST /api/embed

type ErrorDetail

// ErrorDetail provides the details of an OpenAI-style error payload,
// nested inside ErrorResponse.
type ErrorDetail struct {
	Message string `json:"message"` // human-readable description
	Type    string `json:"type"`    // machine-readable error category
	Code    string `json:"code,omitempty"`
}

ErrorDetail provides details about the error.

type ErrorResponse

// ErrorResponse mirrors the structure of an OpenAI error response:
// a single "error" object wrapping the detail.
type ErrorResponse struct {
	Error ErrorDetail `json:"error"`
}

ErrorResponse mirrors the structure of an OpenAI error response.

type GenerateRequest

// GenerateRequest is the request body for the Ollama POST /api/generate
// endpoint. It serves double duty: with a Prompt it generates text; with
// only Model and KeepAlive it can load/unload a model.
type GenerateRequest struct {
	Model     string `json:"model"`
	KeepAlive *int   `json:"keep_alive,omitempty"` // pointer so an explicit 0 (unload now) survives omitempty
	Prompt    string `json:"prompt,omitempty"`     // for actual generation, not just unload
}

GenerateRequest for POST /api/generate

type GenerateResponse

// GenerateResponse is the response body from the Ollama POST /api/generate
// endpoint (non-streaming form).
type GenerateResponse struct {
	Model      string `json:"model"`
	CreatedAt  string `json:"created_at"` // kept as a raw string, not parsed into time.Time
	Response   string `json:"response"`   // the generated text
	Done       bool   `json:"done"`
	DoneReason string `json:"done_reason"`
}

GenerateResponse for POST /api/generate

type ListModelsResponse

// ListModelsResponse is the response body from the Ollama GET /api/tags
// endpoint: the list of locally available models.
type ListModelsResponse struct {
	Models []Model `json:"models"`
}

ListModelsResponse for GET /api/tags

type ListRunningModelsResponse

// ListRunningModelsResponse is the response body from the Ollama /api/ps
// endpoint: models currently loaded into memory.
// NOTE(review): this page labels /api/ps as POST, but the official Ollama
// API documents it as GET — confirm against the client implementation.
type ListRunningModelsResponse struct {
	Models []RunningModel `json:"models"`
}

ListRunningModelsResponse for POST /api/ps

type LogEntry

// LogEntry represents a single log message sent to the frontend.
type LogEntry struct {
	Timestamp time.Time `json:"timestamp"`
	Level     string    `json:"level"` // severity label, e.g. "info"/"error" — TODO confirm values
	Message   string    `json:"message"`
}

LogEntry represents a single log message sent to the frontend.

type Message

// Message represents a message in a conversation. It is the native
// Ollama shape used by ChatRequest/ChatResponse, structurally identical
// to the OpenAI ChatMessage but kept as a distinct type.
type Message struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

Message represents a message in a conversation.

type Model

// Model describes one locally available model, as returned by the Ollama
// GET /api/tags endpoint (see ListModelsResponse).
type Model struct {
	Name       string       `json:"name"`
	ModifiedAt string       `json:"modified_at"` // kept as a raw string, not parsed into time.Time
	Size       int64        `json:"size"`        // on-disk size in bytes — TODO confirm units
	Digest     string       `json:"digest"`      // content digest identifying this model build
	Details    ModelDetails `json:"details"`
}

Model for GET /api/tags

type ModelDetails

// ModelDetails describes the format, family, and quantization metadata
// of a model, embedded in Model, RunningModel, and ShowResponse.
type ModelDetails struct {
	Format            string   `json:"format"` // e.g. model file format such as "gguf" — TODO confirm
	Family            string   `json:"family"`
	Families          []string `json:"families"`
	ParameterSize     string   `json:"parameter_size"`     // human-readable, e.g. "7B" — TODO confirm
	QuantizationLevel string   `json:"quantization_level"`
}

ModelDetails represents the details of a model.

type OllamaClient

// OllamaClient encapsulates the Ollama API interactions against a
// configurable base URL. Construct it with NewOllamaClient; the zero
// value is not usable because its fields are unexported.
type OllamaClient struct {
	// contains filtered or unexported fields
}

OllamaClient encapsulates the Ollama API interactions.

func NewOllamaClient

func NewOllamaClient(log *core.AppLog, baseURL string) *OllamaClient

NewOllamaClient creates a new OllamaClient instance with a configurable base URL.

func (*OllamaClient) Chat

func (oc *OllamaClient) Chat(req ChatRequest, stream bool) (*ChatResponse, *http.Response, error)

Chat sends a request to the /api/chat endpoint. If stream is true, it returns a raw http.Response for streaming. If stream is false, it returns a parsed ChatResponse.

func (*OllamaClient) CopyModel

func (oc *OllamaClient) CopyModel(req CopyRequest) error

CopyModel sends a request to the /api/copy endpoint.

func (*OllamaClient) CreateModel

func (oc *OllamaClient) CreateModel(req CreateRequest) (*http.Response, error)

CreateModel sends a request to the /api/create endpoint. This is a streaming API, so it returns a raw http.Response.

func (*OllamaClient) DeleteModel

func (oc *OllamaClient) DeleteModel(req DeleteRequest) error

DeleteModel sends a request to the /api/delete endpoint.

func (*OllamaClient) GenerateCompletion

func (oc *OllamaClient) GenerateCompletion(req GenerateRequest) (*GenerateResponse, error)

GenerateCompletion sends a request to the /api/generate endpoint. It can be used for text generation or to unload a model.

func (*OllamaClient) GenerateEmbeddings

func (oc *OllamaClient) GenerateEmbeddings(req EmbedRequest) (*EmbedResponse, error)

GenerateEmbeddings sends a request to the /api/embed endpoint for multiple inputs.

func (*OllamaClient) GenerateSingleEmbedding

func (oc *OllamaClient) GenerateSingleEmbedding(req SingleEmbedRequest) (*SingleEmbedResponse, error)

GenerateSingleEmbedding sends a request to the /api/embeddings endpoint for a single input.

func (*OllamaClient) HttpClient

func (oc *OllamaClient) HttpClient() *core.HttpCli

HttpClient returns the underlying core.HttpCli instance.

func (*OllamaClient) ListModels

func (oc *OllamaClient) ListModels() (*ListModelsResponse, error)

ListModels sends a request to the /api/tags endpoint.

func (*OllamaClient) ListRunningModels

func (oc *OllamaClient) ListRunningModels() (*ListRunningModelsResponse, error)

ListRunningModels sends a request to the /api/ps endpoint.

func (*OllamaClient) PullModel

func (oc *OllamaClient) PullModel(req PullRequest) (*http.Response, error)

PullModel sends a request to the /api/pull endpoint. This is a streaming API, so it returns a raw http.Response.

func (*OllamaClient) ShowModelDetails

func (oc *OllamaClient) ShowModelDetails(req ShowRequest) (*ShowResponse, error)

ShowModelDetails sends a request to the /api/show endpoint.

type OllamaProvider

// OllamaProvider implements core.AIProvider for Ollama. Construct it with
// NewOllamaProvider; the zero value is not usable because its fields are
// unexported.
type OllamaProvider struct {
	// contains filtered or unexported fields
}

OllamaProvider implements core.AIProvider for Ollama.

func (*OllamaProvider) Chat

func (o *OllamaProvider) Chat(model string, messages []core.Message) (string, error)

Chat sends a chat message to Ollama.

func (*OllamaProvider) ChatStream

func (o *OllamaProvider) ChatStream(model string, messages []core.Message, callback func(string)) error

ChatStream sends a chat message to Ollama and streams the response.

func (*OllamaProvider) ListModels

func (o *OllamaProvider) ListModels() ([]string, error)

ListModels gets the list of models from Ollama.

func (*OllamaProvider) Validate

func (o *OllamaProvider) Validate() error

Validate validates the connection and configuration with Ollama.

type OpenAIAdapter

// OpenAIAdapter holds the dependencies for the adapter service that
// exposes an OpenAI-compatible /v1/chat/completions endpoint backed by
// an OllamaClient. Construct it with NewOpenAIAdapter; it implements
// http.Handler via ServeHTTP.
type OpenAIAdapter struct {
	// contains filtered or unexported fields
}

OpenAIAdapter holds the dependencies for the adapter service.

func NewOpenAIAdapter

func NewOpenAIAdapter(log *core.AppLog, ollamaClient *OllamaClient, ctx context.Context) *OpenAIAdapter

NewOpenAIAdapter creates a new adapter instance.

func (*OpenAIAdapter) ServeHTTP

func (a *OpenAIAdapter) ServeHTTP(w http.ResponseWriter, r *http.Request)

ServeHTTP is the main entry point for handling requests to /v1/chat/completions.

type PullRequest

// PullRequest is the request body for the Ollama POST /api/pull endpoint,
// naming the model to download (a streaming API — see PullModel).
type PullRequest struct {
	Model string `json:"model"`
}

PullRequest for POST /api/pull

type RunningModel

// RunningModel describes one model currently loaded into memory, as
// returned by the Ollama /api/ps endpoint (see ListRunningModelsResponse).
type RunningModel struct {
	Name      string       `json:"name"`
	Model     string       `json:"model"`
	Size      int64        `json:"size"` // memory footprint in bytes — TODO confirm units
	Digest    string       `json:"digest"`
	Details   ModelDetails `json:"details"`
	ExpiresAt string       `json:"expires_at"` // when the model will be unloaded; kept as a raw string
	SizeVRAM  int64        `json:"size_vram"`  // portion resident in VRAM — TODO confirm semantics
}

RunningModel for POST /api/ps

type ShowRequest

// ShowRequest is the request body for the Ollama POST /api/show endpoint,
// naming the model whose details are requested.
type ShowRequest struct {
	Model string `json:"model"`
}

ShowRequest for POST /api/show

type ShowResponse

// ShowResponse is the response body from the Ollama POST /api/show
// endpoint: the model's Modelfile, parameters, template, and metadata.
type ShowResponse struct {
	Modelfile  string                 `json:"modelfile"`
	Parameters string                 `json:"parameters"`
	Template   string                 `json:"template"`
	Details    ModelDetails           `json:"details"`
	ModelInfo  map[string]interface{} `json:"model_info"` // schema varies by model, so left as a generic map
}

ShowResponse for POST /api/show

type SingleEmbedRequest

// SingleEmbedRequest is the request body for the Ollama POST /api/embeddings
// endpoint, which embeds a single prompt (cf. EmbedRequest for batches).
type SingleEmbedRequest struct {
	Model  string `json:"model"`
	Prompt string `json:"prompt"` // the single text to embed
}

SingleEmbedRequest for POST /api/embeddings (single input)

type SingleEmbedResponse

// SingleEmbedResponse is the response body from the Ollama
// POST /api/embeddings endpoint: one embedding vector for the prompt.
type SingleEmbedResponse struct {
	Embedding []float64 `json:"embedding"`
}

SingleEmbedResponse for POST /api/embeddings

type Usage

// Usage mirrors the structure of the "usage" field in an OpenAI response:
// token counts for billing/accounting.
type Usage struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"` // prompt + completion — TODO confirm the server computes it this way
}

Usage mirrors the structure of the usage field in an OpenAI response.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL