Loading...
Loading...
Official client libraries for Python and Node.js. Both SDKs provide type-safe interfaces, automatic retries, and streaming support.
Requires Python 3.8 or later.
pip install vexnode

import vexnode
client = vexnode.Client(api_key="vx-your-api-key")
# Submit a compute job
job = client.compute.create(
model="meta-llama/Llama-3-70B",
gpu_type="A100-80GB",
input={"prompt": "Hello, world!", "max_tokens": 256},
)
print(job.status)
# Stream inference responses
for chunk in client.inference.stream(
model="meta-llama/Llama-3-70B",
messages=[{"role": "user", "content": "What is VexNode?"}],
):
print(chunk.text, end="")
# Check usage
usage = client.usage.get()
print(f"Spend this month: ${usage.total_cost}")

Requires Node.js 18 or later. Written in TypeScript with full type definitions included.
npm install @vexnode/sdk

import VexNode from "@vexnode/sdk";
const client = new VexNode({ apiKey: "vx-your-api-key" });
// Submit a compute job
const job = await client.compute.create({
model: "meta-llama/Llama-3-70B",
gpuType: "A100-80GB",
input: { prompt: "Hello, world!", maxTokens: 256 },
});
console.log(job.status);
// Stream inference responses
const stream = await client.inference.stream({
model: "meta-llama/Llama-3-70B",
messages: [{ role: "user", content: "What is VexNode?" }],
});
for await (const chunk of stream) {
process.stdout.write(chunk.text);
}
// Check usage
const usage = await client.usage.get();
console.log(`Spend this month: $${usage.totalCost}`);