Skip to main content

📚 openstackai API Reference

Table of Contents


Configuration

configure()

Configure openstackai globally. Call once at application startup.

import openstackai

openstackai.configure(
api_key="sk-...", # API key (or use OPENAI_API_KEY env var)
provider="openai", # "openai" | "anthropic" | "azure"
model="gpt-4o-mini", # Default model
azure_endpoint="...", # Azure OpenAI endpoint (if using Azure)
azure_deployment="...", # Azure deployment name
temperature=0.7, # Default temperature
max_tokens=2048, # Default max tokens
)

Environment Variables

| Variable | Description |
|---|---|
| OPENAI_API_KEY | OpenAI API key |
| ANTHROPIC_API_KEY | Anthropic API key |
| AZURE_OPENAI_API_KEY | Azure OpenAI API key |
| AZURE_OPENAI_ENDPOINT | Azure OpenAI endpoint URL |
| AZURE_OPENAI_DEPLOYMENT | Azure OpenAI deployment name |

One-Liner Functions

ask()

Ask any question, get an intelligent answer.

from openstackai import ask

# Basic
answer = ask("What is the capital of France?")

# With options
answer = ask("Explain quantum computing",
detailed=True, # Comprehensive answer
concise=True, # Brief answer (mutually exclusive with detailed)
format="bullet", # "bullet" | "numbered" | "markdown"
creative=True, # More creative response
as_json=True, # Return as dict
model="gpt-4" # Specific model
)

Parameters:

| Parameter | Type | Default | Description |
|---|---|---|---|
| question | str | required | The question to ask |
| detailed | bool | False | Get comprehensive answer |
| concise | bool | False | Get brief answer |
| format | str | None | Output format |
| creative | bool | False | Creative/varied response |
| as_json | bool | False | Return as dict |
| model | str | None | Override model |

Returns: str or dict (if as_json=True)


research()

Deep research on any topic.

from openstackai import research

# Full research
result = research("quantum computing applications")
print(result.summary) # Executive summary
print(result.key_points) # List of key points
print(result.insights) # Derived insights
print(result.sources) # Referenced sources
print(result.confidence) # Confidence score

# Quick summary only
summary = research("meditation benefits", quick=True)

# Insights only
insights = research("remote work future", as_insights=True)

# Focused research
result = research("climate change", focus="economic impact")

Parameters:

| Parameter | Type | Default | Description |
|---|---|---|---|
| topic | str | required | Topic to research |
| quick | bool | False | Return summary only |
| as_insights | bool | False | Return insights list only |
| focus | str | None | Focus area |
| depth | str | "medium" | "shallow" \| "medium" \| "deep" |

Returns: ResearchResult, str, or List[str]


summarize()

Summarize text, files, or URLs.

from openstackai import summarize

# Text
summary = summarize("Long article text here...")

# File
summary = summarize("./report.pdf")
summary = summarize("./document.docx")

# URL
summary = summarize("https://example.com/article")

# With options
summary = summarize(content,
length="short", # "short" | "medium" | "long"
focus="key findings",
as_bullets=True
)

Parameters:

| Parameter | Type | Default | Description |
|---|---|---|---|
| content | str | required | Text, file path, or URL |
| length | str | "medium" | Summary length |
| focus | str | None | Focus area |
| as_bullets | bool | False | Return as bullet points |

Returns: str


extract()

Extract structured data from text.

from openstackai import extract
from pydantic import BaseModel

# With Pydantic schema
class Person(BaseModel):
    name: str
    age: int
    email: str

person = extract(
"John Doe is 30 years old. Contact: john@email.com",
Person
)
print(person.name) # "John Doe"
print(person.age) # 30

# With dict schema
data = extract(text, {"name": str, "skills": list})

Parameters:

| Parameter | Type | Default | Description |
|---|---|---|---|
| content | str | required | Text to extract from |
| schema | type | required | Pydantic model or dict |
| strict | bool | False | Strict validation |

Returns: Instance of schema type


generate()

Generate content of various types.

from openstackai import generate

# Text content
blog = generate("blog post about AI trends", type="blog")

# Code
code = generate("fibonacci function", type="code", language="python")

# Email
email = generate("follow-up email after meeting", type="email")

# Documentation
docs = generate("API documentation for user service", type="docs")

Parameters:

| Parameter | Type | Default | Description |
|---|---|---|---|
| prompt | str | required | What to generate |
| type | str | "text" | "text" \| "code" \| "email" \| "blog" \| "docs" |
| length | str | "medium" | "short" \| "medium" \| "long" |
| style | str | None | Style guidance |
| language | str | None | Programming language (for code) |

Returns: str


translate()

Translate text between languages.

from openstackai import translate

# Basic
spanish = translate("Hello, how are you?", to="es")

# With options
formal = translate(text,
to="de",
from_lang="en", # Auto-detected if omitted
formal=True, # Formal register
preserve_formatting=True
)

Parameters:

| Parameter | Type | Default | Description |
|---|---|---|---|
| text | str | required | Text to translate |
| to | str | required | Target language code |
| from_lang | str | None | Source language (auto-detected) |
| formal | bool | False | Use formal register |
| preserve_formatting | bool | True | Keep formatting |

Returns: str


chat()

Create an interactive chat session with memory.

from openstackai import chat

# Basic session
session = chat("You are a helpful assistant")
response1 = session.say("What is Python?")
response2 = session.say("How do I learn it?") # Remembers context!

# With persona
session = chat(persona="teacher")
session("Explain machine learning") # Shorthand for .say()

# Prebuilt personas
# "teacher", "advisor", "coder", "researcher", "writer",
# "analyst", "critic", "creative", "editor"

Parameters:

| Parameter | Type | Default | Description |
|---|---|---|---|
| system_message | str | None | Custom system prompt |
| persona | str | None | Prebuilt persona |
| model | str | None | Override model |

Returns: ChatSession

ChatSession Methods:

| Method | Description |
|---|---|
| .say(message) | Send message, get response |
| (message) | Shorthand for .say() |
| .reset() | Clear conversation history |
| .history | Get conversation history |

agent()

Create a custom AI agent.

from openstackai import agent

# Custom agent
coder = agent("You are an expert Python developer")
result = coder("Write a function to parse JSON")

# Prebuilt persona
researcher = agent(persona="researcher")
result = researcher("Research quantum computing")

# Named agent with memory
assistant = agent(
"You are a helpful assistant",
name="Alex",
memory=True
)
assistant("My name is John")
assistant("What's my name?") # Returns "John"

# Available personas:
# "coder", "researcher", "writer", "analyst", "teacher",
# "advisor", "critic", "creative", "editor", "python_expert"

Parameters:

| Parameter | Type | Default | Description |
|---|---|---|---|
| system_message | str | None | Custom system prompt |
| persona | str | None | Prebuilt persona |
| name | str | None | Agent name |
| model | str | None | Override model |
| memory | bool | True | Enable memory |

Returns: Agent


Modules

rag Module

RAG (Retrieval-Augmented Generation) operations.

from openstackai import rag

# One-shot RAG
answer = rag.ask("./docs/paper.pdf", "What is the conclusion?")

# Index documents for multiple queries
docs = rag.index(["doc1.txt", "doc2.pdf", "./folder"])
answer1 = docs.ask("What is the main finding?")
answer2 = docs.ask("What methodology was used?")

# From URL
answer = rag.from_url("https://example.com", "Summarize this")

# From raw text
answer = rag.from_text(long_text, "What are the key points?")

# Index options
docs = rag.index(sources,
chunk_size=500, # Characters per chunk
overlap=50 # Overlap between chunks
)

Functions:

| Function | Description |
|---|---|
| rag.index(sources, ...) | Index documents |
| rag.ask(source, question) | One-shot RAG query |
| rag.from_url(url, question) | RAG from URL |
| rag.from_text(text, question) | RAG from text |

fetch Module

Real-time data fetching.

from openstackai import fetch

# Weather
weather = fetch.weather("Tokyo")
print(weather.temperature) # 22.5
print(weather.conditions) # "Partly Cloudy"
print(weather.humidity) # 65
print(weather.wind_speed) # 12.3

# News
articles = fetch.news("artificial intelligence", limit=5)
for article in articles:
    print(article.title)
    print(article.source)
    print(article.url)

# Stocks
stock = fetch.stock("AAPL")
print(stock.price) # 175.50
print(stock.change) # +2.30
print(stock.change_percent) # +1.33
print(stock.volume) # 52000000

# Crypto
btc = fetch.crypto("BTC")
print(btc.price) # 45000.00
print(btc.change_24h) # +3.5%
print(btc.market_cap) # 850000000000

# Facts
facts = fetch.facts("black holes", count=3)
for fact in facts:
    print(f"- {fact}")

analyze Module

Data and text analysis.

from openstackai import analyze
import pandas as pd

# Data analysis
df = pd.DataFrame(...)
result = analyze.data(df, goal="find anomalies")
print(result.summary)
print(result.insights)
print(result.statistics)
print(result.recommendations)

# Text analysis
analysis = analyze.text(article, aspects=["tone", "complexity"])

# Sentiment
sentiment = analyze.sentiment("I love this product!")
print(sentiment.sentiment) # "positive"
print(sentiment.score) # 0.95
print(sentiment.aspects) # {"product": "positive"}

# Compare items
comparison = analyze.compare(
"Python", "JavaScript", "Rust",
criteria=["performance", "ease of use", "ecosystem"]
)

code Module

Code generation and analysis.

from openstackai import code

# Write code
python_code = code.write("function to calculate fibonacci")
js_code = code.write("react component for login form", language="javascript")

# Review code
review = code.review(my_code)
print(review.score) # 8/10
print(review.issues) # ["unused variable", ...]
print(review.suggestions) # ["consider using list comprehension", ...]
print(review.security_concerns)

# Debug errors
fix = code.debug("TypeError: cannot unpack...", code=buggy_code)

# Explain code
explanation = code.explain(complex_function, level="beginner")

# Refactor
improved = code.refactor(old_code, goal="performance")
improved = code.refactor(old_code, goal="readability")
improved = code.refactor(old_code, goal="type-safety")

# Convert between languages
js_code = code.convert(python_code, from_lang="python", to_lang="javascript")

Advanced Components

For advanced use cases, access the underlying components:

from openstackai.core import Agent, Memory, ConversationMemory, VectorMemory
from openstackai.core import LLMProvider, OpenAIProvider, AnthropicProvider
from openstackai.instructions import Instruction, SystemPrompt, Persona
from openstackai.skills import Skill, ToolSkill, ActionSkill, SkillRegistry
from openstackai.blueprint import Blueprint, Workflow, Pipeline

See ARCHITECTURE.md for advanced usage patterns.


Error Handling

from openstackai import ask
from openstackai.exceptions import (
openstackaiError, # Base exception
ConfigError, # Configuration issues
LLMError, # LLM provider errors
RateLimitError, # Rate limiting
TokenLimitError, # Token limit exceeded
)

try:
    answer = ask("...", model="nonexistent-model")
except ConfigError as e:
    print(f"Configuration issue: {e}")
except LLMError as e:
    print(f"LLM error: {e}")
except openstackaiError as e:
    print(f"General error: {e}")

Type Safety

openstackai is fully typed; type stubs are included:

# Full IDE support for:
from openstackai import ask, agent, rag, fetch, code, chat

# Hover documentation works
# Autocomplete works
# Type checking works

For architectural details, see ARCHITECTURE.md