README.md (6 changes: 3 additions & 3 deletions)
@@ -116,7 +116,7 @@ try {
},
],
},
- Provider.OpenAI
+ Provider.openai
); // Provider is optional

console.log('Response:', response.choices[0].message.content);
@@ -159,7 +159,7 @@ try {
onFinish: () => console.log('\nStream completed'),
onError: (error) => console.error('Stream error:', error),
},
- Provider.Groq // Provider is optional
+ Provider.groq // Provider is optional
);
} catch (error) {
console.error('Error:', error);
@@ -241,7 +241,7 @@ const client = new InferenceGatewayClient({
});

try {
- const response = await client.proxy(Provider.OpenAI, 'embeddings', {
+ const response = await client.proxy(Provider.openai, 'embeddings', {
method: 'POST',
body: JSON.stringify({
model: 'text-embedding-ada-002',
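For context, the three README changes in this file switch the `Provider` enum members from PascalCase (`Provider.OpenAI`, `Provider.Groq`) to lowercase (`Provider.openai`, `Provider.groq`). A minimal sketch of the proxy call with the updated casing; only the `proxy(Provider.openai, 'embeddings', …)` shape comes from the diff, while the package name, client options, and `input` payload are illustrative assumptions:

```typescript
import { InferenceGatewayClient, Provider } from '@inference-gateway/sdk'; // package name assumed

const client = new InferenceGatewayClient({
  baseURL: 'http://localhost:8080/v1', // hypothetical gateway URL and option name
});

try {
  // Note the lowercase enum member introduced by this change.
  const response = await client.proxy(Provider.openai, 'embeddings', {
    method: 'POST',
    body: JSON.stringify({
      model: 'text-embedding-ada-002',
      input: 'Hello, world!', // illustrative payload
    }),
  });
  console.log(response);
} catch (error) {
  console.error('Error:', error);
}
```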
Taskfile.yml (2 changes: 1 addition & 1 deletion)
@@ -5,7 +5,7 @@ tasks:
oas-download:
desc: Download OpenAPI specification
cmds:
-   - curl -o openapi.yaml https://github.com/raw/inference-gateway/inference-gateway/refs/heads/main/openapi.yaml
+   - curl -o openapi.yaml https://github.com/raw/inference-gateway/schemas/refs/heads/main/openapi.yaml

lint:
desc: Lint the SDK
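The Taskfile change repoints the OpenAPI download at the `inference-gateway/schemas` repository instead of the main gateway repository. Purely for illustration, an equivalent download in TypeScript, assuming Node 18+ with the global `fetch` API; the SDK itself keeps using the `curl` command in the task above:

```typescript
import { writeFile } from 'node:fs/promises';

// Fetch the OpenAPI spec from its new home in the schemas repository,
// mirroring what `task oas-download` does with curl.
const url =
  'https://github.com/raw/inference-gateway/schemas/refs/heads/main/openapi.yaml';

const res = await fetch(url);
if (!res.ok) {
  throw new Error(`Failed to download spec: ${res.status} ${res.statusText}`);
}
await writeFile('openapi.yaml', await res.text());
```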
examples/.env.example (46 changes: 39 additions & 7 deletions)
@@ -1,8 +1,10 @@

# General settings
- ENVIRONMENT=development
- ENABLE_TELEMETRY=false
- ENABLE_AUTH=false
+ ENVIRONMENT=production
+ ALLOWED_MODELS=
+ # Telemetry
+ TELEMETRY_ENABLE=false
+ TELEMETRY_METRICS_PORT=9464
# Model Context Protocol (MCP)
MCP_ENABLE=false
MCP_EXPOSE=false
@@ -13,10 +15,35 @@ MCP_TLS_HANDSHAKE_TIMEOUT=3s
MCP_RESPONSE_HEADER_TIMEOUT=3s
MCP_EXPECT_CONTINUE_TIMEOUT=1s
MCP_REQUEST_TIMEOUT=5s
- # OpenID Connect
- OIDC_ISSUER_URL=http://keycloak:8080/realms/inference-gateway-realm
- OIDC_CLIENT_ID=inference-gateway-client
- OIDC_CLIENT_SECRET=
+ MCP_MAX_RETRIES=3
+ MCP_RETRY_INTERVAL=5s
+ MCP_INITIAL_BACKOFF=1s
+ MCP_ENABLE_RECONNECT=true
+ MCP_RECONNECT_INTERVAL=30s
+ MCP_POLLING_ENABLE=true
+ MCP_POLLING_INTERVAL=30s
+ MCP_POLLING_TIMEOUT=5s
+ MCP_DISABLE_HEALTHCHECK_LOGS=true
+ # Agent-to-Agent (A2A) Protocol
+ A2A_ENABLE=false
+ A2A_EXPOSE=false
+ A2A_AGENTS=
+ A2A_CLIENT_TIMEOUT=30s
+ A2A_POLLING_ENABLE=true
+ A2A_POLLING_INTERVAL=1s
+ A2A_POLLING_TIMEOUT=30s
+ A2A_MAX_POLL_ATTEMPTS=30
+ A2A_MAX_RETRIES=3
+ A2A_RETRY_INTERVAL=5s
+ A2A_INITIAL_BACKOFF=1s
+ A2A_ENABLE_RECONNECT=true
+ A2A_RECONNECT_INTERVAL=30s
+ A2A_DISABLE_HEALTHCHECK_LOGS=true
+ # Authentication
+ AUTH_ENABLE=false
+ AUTH_OIDC_ISSUER=http://keycloak:8080/realms/inference-gateway-realm
+ AUTH_OIDC_CLIENT_ID=inference-gateway-client
+ AUTH_OIDC_CLIENT_SECRET=
# Server settings
SERVER_HOST=0.0.0.0
SERVER_PORT=8080
@@ -31,6 +58,9 @@ CLIENT_MAX_IDLE_CONNS=20
CLIENT_MAX_IDLE_CONNS_PER_HOST=20
CLIENT_IDLE_CONN_TIMEOUT=30s
CLIENT_TLS_MIN_VERSION=TLS12
+ CLIENT_DISABLE_COMPRESSION=true
+ CLIENT_RESPONSE_HEADER_TIMEOUT=10s
+ CLIENT_EXPECT_CONTINUE_TIMEOUT=1s
# Providers
ANTHROPIC_API_URL=https://api.anthropic.com/v1
ANTHROPIC_API_KEY=
@@ -46,3 +76,5 @@ OPENAI_API_URL=https://api.openai.com/v1
OPENAI_API_KEY=
DEEPSEEK_API_URL=https://api.deepseek.com
DEEPSEEK_API_KEY=
+ GOOGLE_API_URL=https://generativelanguage.googleapis.com/v1beta/openai
+ GOOGLE_API_KEY=
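The expanded `examples/.env.example` configures the gateway process itself (telemetry, MCP, A2A, authentication, client timeouts, and provider credentials) rather than the SDK. A minimal sketch of how an example script might point the SDK at a gateway started with this file; the variable names come from the file above, but reading them via `process.env` in the example script, the package name, and the `baseURL` option are assumptions:

```typescript
import { InferenceGatewayClient } from '@inference-gateway/sdk'; // package name assumed

// SERVER_HOST and SERVER_PORT mirror the .env.example entries above.
const host = process.env.SERVER_HOST ?? 'localhost';
const port = process.env.SERVER_PORT ?? '8080';

// Provider keys (OPENAI_API_KEY, GOOGLE_API_KEY, ...) are read by the gateway
// process, not by the SDK client, so they are not needed here.
const client = new InferenceGatewayClient({
  baseURL: `http://${host}:${port}/v1`, // option name assumed
});
```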