Python Integration
Ovexa is fully compatible with the OpenAI Python SDK. You can also use httpx, requests, or LangChain.
OpenAI SDK
The simplest approach. Install the SDK and change just two lines — the base URL and the API key:
pip install openai
Basic Usage
from openai import OpenAI

# Point the standard OpenAI client at the Ovexa gateway; everything else
# about the SDK works unchanged.
client = OpenAI(
    base_url="https://api.ovexa.ai/v1",
    api_key="vpx_live_YOUR_API_KEY",
)

conversation = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is the capital of Poland?"},
]

completion = client.chat.completions.create(
    model="gpt-4o",
    messages=conversation,
    temperature=0.7,
    max_tokens=500,
)

print(completion.choices[0].message.content)
Async Usage
import asyncio
from openai import AsyncOpenAI

# Async variant of the client; the constructor arguments are identical.
client = AsyncOpenAI(
    base_url="https://api.ovexa.ai/v1",
    api_key="vpx_live_YOUR_API_KEY",
)


async def main() -> None:
    """Send one chat completion request and print the reply."""
    completion = await client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Explain RODO in simple terms."}],
    )
    print(completion.choices[0].message.content)


asyncio.run(main())
Streaming
from openai import OpenAI

client = OpenAI(
    base_url="https://api.ovexa.ai/v1",
    api_key="vpx_live_YOUR_API_KEY",
)

# stream=True switches the API to incremental delivery: the call returns an
# iterator of chunks instead of a single response object.
stream = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Write a short poem about Krakow."}],
    stream=True,
)

for chunk in stream:
    delta = chunk.choices[0].delta.content
    # Some chunks carry no text (delta is None); skip those.
    if delta is not None:
        print(delta, end="")
Using Different Providers
# The same OpenAI-style call reaches different upstream providers by
# prefixing the model name with the provider slug.
provider_examples = [
    ("anthropic/claude-4.6-sonnet", "Hello from Claude!"),          # Anthropic
    ("google/gemini-2.5-pro", "Hello from Gemini!"),                # Google
    ("mistral/mistral-large-3", "Opowiedz mi o historii Gdańska."),  # Mistral
]

for model_name, prompt in provider_examples:
    response = client.chat.completions.create(
        model=model_name,
        messages=[{"role": "user", "content": prompt}],
    )
httpx
For full control over HTTP requests:
import httpx

OVEXA_URL = "https://api.ovexa.ai/v1/chat/completions"
API_KEY = "vpx_live_YOUR_API_KEY"

response = httpx.post(
    OVEXA_URL,
    headers={
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json",
    },
    json={
        "model": "gpt-4o",
        "messages": [
            {"role": "user", "content": "Summarize: Jan Kowalski, PESEL 02271409862"}
        ],
    },
    timeout=60.0,
)

# Fail loudly on a 4xx/5xx status instead of trying to index into an error
# body as if it were a completion. (The async example already does this.)
response.raise_for_status()

data = response.json()
print(data["choices"][0]["message"]["content"])
Async with httpx
import httpx
import asyncio


async def query_ovexa(prompt: str) -> str:
    """POST *prompt* to Ovexa and return the assistant's reply text.

    Raises ``httpx.HTTPStatusError`` on any non-2xx response.
    """
    headers = {
        "Authorization": "Bearer vpx_live_YOUR_API_KEY",
        "Content-Type": "application/json",
    }
    payload = {
        "model": "gpt-4o",
        "messages": [{"role": "user", "content": prompt}],
    }
    async with httpx.AsyncClient() as http:
        reply = await http.post(
            "https://api.ovexa.ai/v1/chat/completions",
            headers=headers,
            json=payload,
            timeout=60.0,
        )
        reply.raise_for_status()
        return reply.json()["choices"][0]["message"]["content"]


result = asyncio.run(query_ovexa("What is Ovexa?"))
print(result)
LangChain
Ovexa works with LangChain's ChatOpenAI class:
pip install langchain-openai
from langchain_openai import ChatOpenAI

# ChatOpenAI speaks the OpenAI wire protocol, so pointing base_url at Ovexa
# is all that is needed — no custom integration code.
llm = ChatOpenAI(
    base_url="https://api.ovexa.ai/v1",
    api_key="vpx_live_YOUR_API_KEY",
    model="gpt-4o",
    temperature=0.7,
)

answer = llm.invoke("What are the main features of Ovexa?")
print(answer.content)
LangChain with Chains
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate

llm = ChatOpenAI(
    base_url="https://api.ovexa.ai/v1",
    api_key="vpx_live_YOUR_API_KEY",
    model="gpt-4o",
)

# The system message fixes the persona; {question} is filled in at invoke time.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a legal assistant specializing in Polish law."),
        ("human", "{question}"),
    ]
)

# LCEL pipe: the rendered prompt feeds straight into the model.
chain = prompt | llm

response = chain.invoke({"question": "What is RODO and how does it affect businesses?"})
print(response.content)
Error Handling
from openai import OpenAI, APIError, AuthenticationError, RateLimitError

client = OpenAI(
    base_url="https://api.ovexa.ai/v1",
    api_key="vpx_live_YOUR_API_KEY",
)

try:
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)
except AuthenticationError:
    print("Invalid API key. Check your Ovexa API key.")
except RateLimitError:
    print("Rate limit exceeded. Wait and retry.")
except APIError as e:
    # Not every APIError carries an HTTP status — connection-level failures
    # have no response — so read status_code defensively instead of letting
    # the error handler itself raise AttributeError.
    status = getattr(e, "status_code", "n/a")
    print(f"API error: {status} - {e.message}")
Inspecting PII Anonymization
Send the X-Show-Raw-Prompt header to see what Ovexa actually forwarded to the AI provider. When the header is set, the anonymization details are returned in the response body's anonymization field.
OpenAI SDK
from openai import OpenAI

client = OpenAI(
    base_url="https://api.ovexa.ai/v1",
    api_key="vpx_live_YOUR_API_KEY",
)

# extra_headers lets the SDK attach the Ovexa-specific header to this call.
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {"role": "user", "content": "Contact Jan Kowalski at [email protected], PESEL 02271409862"}
    ],
    extra_headers={"X-Show-Raw-Prompt": "true"},
)

# De-anonymized response (original names/data restored)
print("Response:", response.choices[0].message.content)

# The SDK does not model the extra `anonymization` field, so pull it off the
# response object dynamically (present because of the header above).
anon = getattr(response, 'anonymization', None)
if anon:
    for label, key in [
        ("PII entities found", "entities_found"),
        ("Entity types", "entity_types"),
        ("What AI saw", "anonymized_prompt"),
        ("Raw AI response", "raw_llm_response"),
    ]:
        print(f"{label}: {anon[key]}")
httpx (raw HTTP)
import httpx

request_headers = {
    "Authorization": "Bearer vpx_live_YOUR_API_KEY",
    "Content-Type": "application/json",
    # Ask Ovexa to include anonymization details in the response body.
    "X-Show-Raw-Prompt": "true",
}
request_body = {
    "model": "gpt-4o",
    "messages": [
        {"role": "user", "content": "Contact Jan Kowalski at [email protected], PESEL 02271409862"}
    ],
}

response = httpx.post(
    "https://api.ovexa.ai/v1/chat/completions",
    headers=request_headers,
    json=request_body,
)
data = response.json()
print("Response:", data["choices"][0]["message"]["content"])

# Anonymization info in response body
anon = data.get("anonymization")
if anon:
    print(f"Entities found: {anon['entities_found']}")
    print(f"Types: {anon['entity_types']}")
    print(f"Anonymized prompt: {anon['anonymized_prompt']}")
PydanticAI
If you use PydanticAI with the OpenAI provider, create a subclass to capture the anonymization data:
import asyncio

import httpx
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIChatModel
# NOTE(review): recent PydanticAI releases export OpenAIProvider from the
# providers package, not pydantic_ai.models.openai — verify against the
# installed version.
from pydantic_ai.providers.openai import OpenAIProvider


class AnonymizationAwareModel(OpenAIChatModel):
    """OpenAIChatModel subclass that surfaces Ovexa's anonymization payload."""

    def _process_provider_details(self, response):
        # Keep whatever details the base class extracted, then attach the
        # extra `anonymization` field Ovexa adds to the response object.
        details = super()._process_provider_details(response) or {}
        anon = getattr(response, 'anonymization', None)
        if anon:
            details['anonymization'] = anon
        return details or None


provider = OpenAIProvider(
    base_url="https://api.ovexa.ai/v1",
    api_key="vpx_live_YOUR_API_KEY",
    # Send X-Show-Raw-Prompt on every request so the anonymization data is
    # present for _process_provider_details to pick up.
    http_client=httpx.AsyncClient(headers={"X-Show-Raw-Prompt": "true"}),
)

model = AnonymizationAwareModel("gpt-4o", provider=provider)
agent = Agent(model, system_prompt="You are a helpful assistant.")


async def main() -> None:
    # agent.run is a coroutine: it must be awaited inside an async function,
    # not at module top level.
    result = await agent.run("Summarize: Jan Kowalski, PESEL 85031501234")
    print(result.data)

    # Access anonymization info
    anon = result.response.provider_details.get('anonymization')
    if anon:
        print(f"PII found: {anon['entities_found']}")


asyncio.run(main())