Class: Ollama
This class implements both the LLM and embedding interfaces.
Extends
• BaseEmbedding
Implements
• LLM
• Omit<OllamaBase, "chat">
Constructors
new Ollama()
new Ollama(params): Ollama
Parameters
• params: OllamaParams
Returns
Ollama
Overrides
BaseEmbedding.constructor
Source
packages/llamaindex/src/llm/ollama.ts:75
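Example
A minimal instantiation sketch. The import path follows the llamaindex package, and "llama3" is an illustrative model tag that must already be available to the local Ollama server:
```ts
import { Ollama } from "llamaindex";

// Assumes a local Ollama server on the default port and that the
// "llama3" model tag (illustrative) has already been pulled.
const llm = new Ollama({ model: "llama3" });
```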
Properties
embedBatchSize
embedBatchSize: number = DEFAULT_EMBED_BATCH_SIZE
Inherited from
BaseEmbedding.embedBatchSize
Source
packages/llamaindex/src/embeddings/types.ts:21
embedInfo?
optional embedInfo: EmbeddingInfo
Inherited from
BaseEmbedding.embedInfo
Source
packages/llamaindex/src/embeddings/types.ts:22
hasStreaming
readonly hasStreaming: true = true
Source
packages/llamaindex/src/llm/ollama.ts:61
model
model: string
Source
packages/llamaindex/src/llm/ollama.ts:66
ollama
ollama: Ollama
Source
packages/llamaindex/src/llm/ollama.ts:63
options
options: Partial<Omit<Options, "temperature" | "top_p" | "num_ctx">> & Pick<Options, "temperature" | "top_p" | "num_ctx">
Source
packages/llamaindex/src/llm/ollama.ts:68
Accessors
metadata
get metadata(): LLMMetadata
Returns
LLMMetadata
Source
packages/llamaindex/src/llm/ollama.ts:87
Methods
abort()
abort(): void
Returns
void
Implementation of
Omit.abort
Source
packages/llamaindex/src/llm/ollama.ts:205
chat()
chat(params)
chat(params): Promise<AsyncIterable<ChatResponseChunk>>
Get a chat response from the LLM
Parameters
• params: LLMChatParamsStreaming<object, object>
Returns
Promise<AsyncIterable<ChatResponseChunk>>
Implementation of
LLM.chat
Source
packages/llamaindex/src/llm/ollama.ts:99
chat(params)
chat(params): Promise<ChatResponse<object>>
Parameters
• params: LLMChatParamsNonStreaming<object, object>
Returns
Promise<ChatResponse<object>>
Implementation of
LLM.chat
Source
packages/llamaindex/src/llm/ollama.ts:102
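Example
A sketch of both overloads. The model tag is illustrative; passing stream: true selects the streaming overload, whose chunks expose incremental text via delta:
```ts
import { Ollama } from "llamaindex";

const llm = new Ollama({ model: "llama3" }); // illustrative model tag

// Non-streaming overload: resolves to a single ChatResponse.
const response = await llm.chat({
  messages: [{ role: "user", content: "Why is the sky blue?" }],
});
console.log(response.message.content);

// Streaming overload: stream: true yields an AsyncIterable of chunks.
const stream = await llm.chat({
  messages: [{ role: "user", content: "Why is the sky blue?" }],
  stream: true,
});
for await (const chunk of stream) {
  process.stdout.write(chunk.delta);
}
```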
complete()
complete(params)
complete(params): Promise<AsyncIterable<CompletionResponse>>
Get a prompt completion from the LLM
Parameters
• params: LLMCompletionParamsStreaming
Returns
Promise<AsyncIterable<CompletionResponse>>
Implementation of
LLM.complete
Source
packages/llamaindex/src/llm/ollama.ts:140
complete(params)
complete(params): Promise<CompletionResponse>
Parameters
• params: LLMCompletionParamsNonStreaming
Returns
Promise<CompletionResponse>
Implementation of
LLM.complete
Source
packages/llamaindex/src/llm/ollama.ts:143
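Example
A sketch of both completion overloads (prompt text and model tag are illustrative); each result carries the generated text in its text field:
```ts
import { Ollama } from "llamaindex";

const llm = new Ollama({ model: "llama3" }); // illustrative model tag

// Non-streaming overload: resolves to a single CompletionResponse.
const completion = await llm.complete({ prompt: "Once upon a time" });
console.log(completion.text);

// Streaming overload: each chunk carries newly generated text.
const stream = await llm.complete({ prompt: "Once upon a time", stream: true });
for await (const chunk of stream) {
  process.stdout.write(chunk.text);
}
```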
copy()
copy(request): Promise<StatusResponse>
Parameters
• request: CopyRequest
Returns
Promise<StatusResponse>
Implementation of
Omit.copy
Source
packages/llamaindex/src/llm/ollama.ts:241
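Example
A sketch of the client passthrough, assuming the ollama package's CopyRequest shape (source/destination) and illustrative model tags:
```ts
import { Ollama } from "llamaindex";

const llm = new Ollama({ model: "llama3" });

// Duplicate a local model under a new tag; field names follow the
// ollama client's CopyRequest and are an assumption here.
await llm.copy({ source: "llama3", destination: "llama3-backup" });
```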
create()
create(request)
create(request): Promise<AsyncGenerator<ProgressResponse, any, unknown>>
Parameters
• request: CreateRequest & object
Returns
Promise<AsyncGenerator<ProgressResponse, any, unknown>>
Implementation of
Omit.create
Source
packages/llamaindex/src/llm/ollama.ts:220
create(request)
create(request): Promise<ProgressResponse>
Parameters
• request: CreateRequest & object
Returns
Promise<ProgressResponse>
Implementation of
Omit.create
Source
packages/llamaindex/src/llm/ollama.ts:223
delete()
delete(request): Promise<StatusResponse>
Parameters
• request: DeleteRequest
Returns
Promise<StatusResponse>
Implementation of
Omit.delete
Source
packages/llamaindex/src/llm/ollama.ts:238
embeddings()
embeddings(request): Promise<EmbeddingsResponse>
Parameters
• request: EmbeddingsRequest
Returns
Promise<EmbeddingsResponse>
Implementation of
Omit.embeddings
Source
packages/llamaindex/src/llm/ollama.ts:250
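Example
A sketch of the raw passthrough, assuming the ollama client's EmbeddingsRequest shape (model/prompt) and an illustrative embedding-capable model tag:
```ts
import { Ollama } from "llamaindex";

const llm = new Ollama({ model: "nomic-embed-text" }); // illustrative

// Raw passthrough to the Ollama client; the response's `embedding`
// field is a single vector for the prompt.
const { embedding } = await llm.embeddings({
  model: "nomic-embed-text",
  prompt: "hello world",
});
console.log(embedding.length);
```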
encodeImage()
encodeImage(image): Promise<string>
Parameters
• image: string | Uint8Array
Returns
Promise<string>
Implementation of
Omit.encodeImage
Source
packages/llamaindex/src/llm/ollama.ts:208
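Example
A sketch: the method accepts a string or raw Uint8Array bytes and resolves to a base64 string for image-bearing requests (file name and model tag are illustrative):
```ts
import { readFile } from "node:fs/promises";
import { Ollama } from "llamaindex";

const llm = new Ollama({ model: "llava" }); // illustrative multimodal tag

// Read raw image bytes and encode them for use in multimodal requests.
const bytes = new Uint8Array(await readFile("photo.png"));
const encoded = await llm.encodeImage(bytes);
```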
generate()
generate(request)
generate(request): Promise<AsyncGenerator<GenerateResponse, any, unknown>>
Parameters
• request: GenerateRequest & object
Returns
Promise<AsyncGenerator<GenerateResponse, any, unknown>>
Implementation of
Omit.generate
Source
packages/llamaindex/src/llm/ollama.ts:211
generate(request)
generate(request): Promise<GenerateResponse>
Parameters
• request: GenerateRequest & object
Returns
Promise<GenerateResponse>
Implementation of
Omit.generate
Source
packages/llamaindex/src/llm/ollama.ts:214
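Example
A sketch of the raw passthrough, assuming the ollama client's GenerateRequest shape (model/prompt) and its response field on the result:
```ts
import { Ollama } from "llamaindex";

const llm = new Ollama({ model: "llama3" }); // illustrative model tag

// stream: false selects the Promise<GenerateResponse> overload.
const result = await llm.generate({
  model: "llama3",
  prompt: "Why is the sky blue?",
  stream: false,
});
console.log(result.response);
```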
getEmbedding()
private getEmbedding(prompt): Promise<number[]>
Parameters
• prompt: string
Returns
Promise<number[]>
Source
packages/llamaindex/src/llm/ollama.ts:176
getQueryEmbedding()
getQueryEmbedding(query): Promise<null | number[]>
Parameters
• query: MessageContentDetail
Returns
Promise<null | number[]>
Inherited from
BaseEmbedding.getQueryEmbedding
Source
packages/llamaindex/src/embeddings/types.ts:34
getTextEmbedding()
getTextEmbedding(text): Promise<number[]>
Parameters
• text: string
Returns
Promise<number[]>
Overrides
BaseEmbedding.getTextEmbedding
Source
packages/llamaindex/src/llm/ollama.ts:190
getTextEmbeddings()
getTextEmbeddings(texts): Promise<number[][]>
Optionally override this method to retrieve multiple embeddings in a single request.
Parameters
• texts: string[]
Returns
Promise<number[][]>
Inherited from
BaseEmbedding.getTextEmbeddings
Source
packages/llamaindex/src/embeddings/types.ts:48
getTextEmbeddingsBatch()
getTextEmbeddingsBatch(texts, options?): Promise<number[][]>
Get embeddings for a batch of texts.
Parameters
• texts: string[]
• options?
• options.logProgress?: boolean
Returns
Promise<number[][]>
Inherited from
BaseEmbedding.getTextEmbeddingsBatch
Source
packages/llamaindex/src/embeddings/types.ts:64
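Example
A sketch of single and batched embedding calls through the inherited BaseEmbedding surface (texts and model tag are illustrative); the batch variant chunks inputs internally by embedBatchSize:
```ts
import { Ollama } from "llamaindex";

const llm = new Ollama({ model: "nomic-embed-text" }); // illustrative

// One vector for one text.
const vector = await llm.getTextEmbedding("hello world");

// One vector per input text.
const vectors = await llm.getTextEmbeddingsBatch(
  ["first document", "second document"],
  { logProgress: true },
);
console.log(vector.length, vectors.length);
```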
list()
list(): Promise<ListResponse>
Returns
Promise<ListResponse>
Implementation of
Omit.list
Source
packages/llamaindex/src/llm/ollama.ts:244
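Example
A sketch, assuming the ollama client's ListResponse shape (a models array whose entries carry a name field):
```ts
import { Ollama } from "llamaindex";

const llm = new Ollama({ model: "llama3" });

// Enumerate models available to the local Ollama server.
const { models } = await llm.list();
for (const m of models) {
  console.log(m.name);
}
```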
pull()
pull(request)
pull(request): Promise<AsyncGenerator<ProgressResponse, any, unknown>>
Parameters
• request: PullRequest & object
Returns
Promise<AsyncGenerator<ProgressResponse, any, unknown>>
Implementation of
Omit.pull
Source
packages/llamaindex/src/llm/ollama.ts:229
pull(request)
pull(request): Promise<ProgressResponse>
Parameters
• request: PullRequest & object
Returns
Promise<ProgressResponse>