Skip to main content
This integration method is maintained but no longer actively developed. For the best experience and latest features, use our new AI Gateway with unified API access to 100+ models.
This integration is used to log usage with the Nvidia NIM API. For other Nvidia inference providers that are OpenAI-compatible, such as Dynamo, see the OpenAI-compatible providers integration guide.
Steps 1–2: Create a Helicone account, generate an API key, and set your Helicone and Nvidia API keys as environment variables:
HELICONE_API_KEY=<your-helicone-api-key>
NVIDIA_API_KEY=<your-nvidia-api-key>
Step 3: Send a request through Helicone using the OpenAI SDK:
"""Example: log Nvidia NIM API usage by routing requests through Helicone."""
import os

from dotenv import load_dotenv
from openai import OpenAI

# Pull HELICONE_API_KEY / NVIDIA_API_KEY from a local .env into the process env.
load_dotenv()

helicone_key = os.getenv("HELICONE_API_KEY")
nvidia_key = os.getenv("NVIDIA_API_KEY")

# Point the OpenAI client at Helicone's Nvidia gateway: the Nvidia key
# authenticates the model request, and the extra header authenticates
# to Helicone so the call is logged.
client = OpenAI(
    api_key=nvidia_key,
    base_url="https://nvidia.helicone.ai/v1",
    default_headers={"Helicone-Auth": f"Bearer {helicone_key}"},
)

# Issue a single chat completion against a Nemotron model and dump the
# raw response object.
response = client.chat.completions.create(
    model="nvidia/llama-3.1-nemotron-70b-instruct",
    messages=[{"role": "user", "content": "Hello, how are you?"}],
    max_tokens=1024,
    temperature=0.7,
)

print(response)
Step 4: Verify the request appears in your Helicone dashboard.