Python Integration
VaultProxy is fully compatible with the OpenAI Python SDK. You can also use httpx, requests, or LangChain.
OpenAI SDK
The simplest approach. Install the SDK and change two lines:
pip install openai
Basic Usage
from openai import OpenAI

# Point the official OpenAI client at the VaultProxy gateway; everything
# else about the SDK works unchanged.
client = OpenAI(
    api_key="vpx_live_YOUR_API_KEY",
    base_url="https://api.vaultproxy.ai/v1",
)

conversation = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is the capital of Poland?"},
]

response = client.chat.completions.create(
    model="gpt-4o",
    messages=conversation,
    temperature=0.7,
    max_tokens=500,
)

print(response.choices[0].message.content)
Async Usage
import asyncio

from openai import AsyncOpenAI

# Async variant: the same base_url/api_key swap, but with AsyncOpenAI.
client = AsyncOpenAI(
    api_key="vpx_live_YOUR_API_KEY",
    base_url="https://api.vaultproxy.ai/v1",
)


async def main() -> None:
    # `create` is awaitable on the async client; the response shape is
    # identical to the sync SDK.
    completion = await client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Explain RODO in simple terms."}],
    )
    print(completion.choices[0].message.content)


asyncio.run(main())
Streaming
from openai import OpenAI

client = OpenAI(
    base_url="https://api.vaultproxy.ai/v1",
    api_key="vpx_live_YOUR_API_KEY",
)

# Request a streamed response: the server yields chunks as tokens are
# generated instead of one final payload.
stream = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Write a short poem about Krakow."}],
    stream=True,
)

for chunk in stream:
    # The final chunk carries no content delta, so guard against None.
    if chunk.choices[0].delta.content is not None:
        # flush=True so each token appears immediately; with end="" and no
        # newline, stdout would otherwise buffer the whole poem.
        print(chunk.choices[0].delta.content, end="", flush=True)
Using Different Providers
# Anthropic — select a provider by prefixing the model name; the client
# (configured above) is unchanged.
response = client.chat.completions.create(
    model="anthropic/claude-4.6-sonnet",
    messages=[{"role": "user", "content": "Hello from Claude!"}],
)
# Google — same pattern with the google/ prefix.
response = client.chat.completions.create(
    model="google/gemini-2.5-pro",
    messages=[{"role": "user", "content": "Hello from Gemini!"}],
)
# Bielik (Polish model) — the prompt here is Polish ("Tell me about the
# history of Gdansk").
response = client.chat.completions.create(
    model="bielik/bielik-13b-pro",
    messages=[{"role": "user", "content": "Opowiedz mi o historii Gdańska."}],
)
httpx
For full control over HTTP requests:
import httpx

VAULTPROXY_URL = "https://api.vaultproxy.ai/v1/chat/completions"
API_KEY = "vpx_live_YOUR_API_KEY"

response = httpx.post(
    VAULTPROXY_URL,
    headers={
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json",
    },
    json={
        "model": "gpt-4o",
        "messages": [
            {"role": "user", "content": "Summarize: Jan Kowalski, PESEL 02271409862"}
        ],
    },
    timeout=60.0,
)

# Fail fast on HTTP errors instead of trying to index into an error body as
# if it were a successful completion (the async example already does this).
response.raise_for_status()

data = response.json()
print(data["choices"][0]["message"]["content"])
Async with httpx
import asyncio

import httpx


async def query_vaultproxy(prompt: str) -> str:
    """Send a single chat prompt to VaultProxy and return the reply text."""
    headers = {
        "Authorization": "Bearer vpx_live_YOUR_API_KEY",
        "Content-Type": "application/json",
    }
    payload = {
        "model": "gpt-4o",
        "messages": [{"role": "user", "content": prompt}],
    }
    # AsyncClient as a context manager guarantees the connection pool is
    # closed even if the request raises.
    async with httpx.AsyncClient() as client:
        response = await client.post(
            "https://api.vaultproxy.ai/v1/chat/completions",
            headers=headers,
            json=payload,
            timeout=60.0,
        )
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]


result = asyncio.run(query_vaultproxy("What is VaultProxy?"))
print(result)
LangChain
VaultProxy works with LangChain's ChatOpenAI class:
pip install langchain-openai
from langchain_openai import ChatOpenAI

# ChatOpenAI only needs the VaultProxy endpoint and key; the rest of the
# LangChain API is unaffected.
llm = ChatOpenAI(
    model="gpt-4o",
    temperature=0.7,
    api_key="vpx_live_YOUR_API_KEY",
    base_url="https://api.vaultproxy.ai/v1",
)

answer = llm.invoke("What are the main features of VaultProxy?")
print(answer.content)
LangChain with Chains
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model="gpt-4o",
    api_key="vpx_live_YOUR_API_KEY",
    base_url="https://api.vaultproxy.ai/v1",
)

# Prompt template piped into the model via the LCEL `|` operator.
legal_prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a legal assistant specializing in Polish law."),
    ("human", "{question}"),
])

pipeline = legal_prompt | llm
result = pipeline.invoke({"question": "What is RODO and how does it affect businesses?"})
print(result.content)
Error Handling
from openai import OpenAI, APIError, AuthenticationError, RateLimitError

client = OpenAI(
    base_url="https://api.vaultproxy.ai/v1",
    api_key="vpx_live_YOUR_API_KEY",
)

try:
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)
except AuthenticationError:
    # 401: the key is missing, malformed, or revoked.
    print("Invalid API key. Check your VaultProxy API key.")
except RateLimitError:
    # 429: back off and retry.
    print("Rate limit exceeded. Wait and retry.")
except APIError as e:
    # In the openai v1 SDK only APIStatusError subclasses carry .status_code;
    # connection-level failures (APIConnectionError) do not, so accessing it
    # unconditionally would raise AttributeError inside the handler.
    status = getattr(e, "status_code", "unknown")
    print(f"API error: {status} - {e.message}")
Debugging PII Detection
Set the X-Show-Raw-Prompt header to "true" to inspect the sanitized prompt that VaultProxy actually sent to the AI provider; it is returned in the response's _raw_prompt field:
import httpx

debug_headers = {
    "Authorization": "Bearer vpx_live_YOUR_API_KEY",
    "Content-Type": "application/json",
    # Ask VaultProxy to echo back the sanitized prompt it forwarded.
    "X-Show-Raw-Prompt": "true",
}
debug_payload = {
    "model": "gpt-4o",
    "messages": [
        {"role": "user", "content": "Contact Jan Kowalski at [email protected], PESEL 02271409862"}
    ],
}

response = httpx.post(
    "https://api.vaultproxy.ai/v1/chat/completions",
    headers=debug_headers,
    json=debug_payload,
)

data = response.json()
print("Response:", data["choices"][0]["message"]["content"])
# The echoed prompt rides along in the response body under "_raw_prompt".
print("Sanitized prompt:", data.get("_raw_prompt"))