🎯 Easy Module
The easy/ module provides one-liner APIs for common AI tasks. Zero configuration, instant results.
Module Overview
File Structure
src/openstackai/easy/
├── __init__.py
├── ask.py # Universal Q&A
├── research.py # Deep research
├── summarize.py # Summarization
├── extract.py # Data extraction
├── generate.py # Content generation
├── translate.py # Translation
├── fetch.py # Real-time data
├── analyze.py # Data analysis
├── rag.py # RAG system
├── code.py # Code operations
├── chat.py # Interactive chat
├── handoff.py # Agent handoffs
├── guardrails.py # Safety guards
├── trace.py # Tracing/logging
├── mcp.py # MCP protocol
├── agent_factory.py # Quick agent creation
├── config.py # Auto-configuration
└── llm_interface.py # LLM abstraction
Core Functions
ask() — Universal Question Answering
from openstackai import ask
# Simple question
answer = ask("What is Python?")
# With options
answer = ask(
"Explain quantum computing",
detailed=True, # Longer response
format="bullet", # Bullet points
model="gpt-4" # Specific model
)
Parameters:
| Parameter | Type | Default | Description |
|---|---|---|---|
| question | str | required | The question |
| detailed | bool | False | Longer response |
| format | str | None | "bullet", "numbered", "json" |
| model | str | auto | Model override |
research() — Deep Topic Research
from openstackai import research
result = research("AI trends in enterprise software")
print(result.summary) # Executive summary
print(result.key_points) # List of key findings
print(result.insights) # Strategic insights
print(result.sources) # Referenced sources
Returns: ResearchResult
| Field | Type | Description |
|---|---|---|
| summary | str | Executive summary |
| key_points | list | Key findings |
| insights | list | Strategic insights |
| sources | list | Source references |
summarize() — Summarization
from openstackai import summarize
# Summarize text
summary = summarize(long_text)
# Summarize a file
summary = summarize("./report.pdf")
summary = summarize("./document.docx")
# Summarize a URL
summary = summarize("https://example.com/article")
# With length control
summary = summarize(text, length="brief") # ~50 words
summary = summarize(text, length="detailed") # ~300 words
extract() — Structured Data Extraction
from openstackai import extract
# Extract specific fields
data = extract(
invoice_text,
fields=["invoice_number", "date", "total", "items"]
)
# Returns structured data
print(data["invoice_number"]) # "INV-2024-001"
print(data["total"]) # "1,234.56"
print(data["items"]) # [{"name": "Widget", "qty": 5}]
# With schema
from pydantic import BaseModel
class Person(BaseModel):
name: str
age: int
city: str
person = extract(text, schema=Person)
generate() — Content Generation
from openstackai import generate
# Generate different content types
blog = generate("Python async programming", type="article")
email = generate("thank you letter", type="email")
code = generate("fibonacci function", type="code")
sql = generate("users table with auth", type="sql")
# With customization
content = generate(
"API documentation",
type="markdown",
style="technical",
length="detailed"
)
translate() — Translation
from openstackai import translate
# Simple translation
spanish = translate("Hello, how are you?", to="spanish")
japanese = translate("Hello", to="japanese")
# Auto-detect source language
result = translate("Bonjour le monde", to="english")
Data Functions
fetch — Real-time Data
from openstackai import fetch
# Weather
weather = fetch.weather("New York")
print(weather.temperature)
print(weather.conditions)
# News
news = fetch.news("artificial intelligence")
for article in news.articles:
print(article.title)
print(article.summary)
# Stock data
stock = fetch.stock("AAPL")
print(stock.price)
print(stock.change)
# Any URL
content = fetch.url("https://api.example.com/data")
analyze — Data Analysis
from openstackai import analyze
# Sentiment analysis
sentiment = analyze.sentiment("I love this product!")
print(sentiment.score) # 0.9
print(sentiment.label) # "positive"
# Entity extraction
entities = analyze.entities(text)
for entity in entities:
print(entity.text, entity.type)
# Topic classification
topics = analyze.topics(document)
rag — RAG System
from openstackai import rag
# Index documents
docs = rag.index("./documents/")
docs = rag.index(["doc1.pdf", "doc2.md"])
# Ask questions
answer = docs.ask("What is the main conclusion?")
answer = docs.ask("Summarize the findings")
# With sources
result = docs.ask("What are the recommendations?", return_sources=True)
print(result.answer)
print(result.sources)
RAG Architecture: *(diagram not rendered on this page — see the [[Core-Module]] page; TODO: restore the architecture diagram here)*
Code Functions
code — Code Operations
from openstackai import code
# Write new code
api = code.write("REST API with CRUD operations")
function = code.write("async file downloader")
# Review existing code
review = code.review(my_code)
print(review.issues) # List of issues
print(review.suggestions) # Improvement suggestions
print(review.score) # Quality score
# Debug errors
fix = code.debug("""
TypeError: 'NoneType' object is not subscriptable
At line 42: result = data['key']
""")
print(fix.explanation)
print(fix.solution)
# Refactor code
improved = code.refactor(
old_code,
goal="convert to async"
)
# Generate tests
tests = code.test(my_function)
Advanced Functions
handoff — Agent Handoffs
from openstackai import handoff, Agent
agent_a = Agent(name="Researcher")
agent_b = Agent(name="Writer")
# Transfer control with context
result = handoff(
from_agent=agent_a,
to_agent=agent_b,
context={"research": research_data}
)
guardrails — Safety Guards
from openstackai import guardrails
# Create guarded function
safe_ask = guardrails.wrap(
ask,
block_pii=True, # Block PII
block_harmful=True, # Block harmful content
max_tokens=1000, # Limit response
allowed_topics=["tech"] # Topic filter
)
answer = safe_ask("Tell me about Python")
trace — Tracing
from openstackai import trace
# Enable tracing
trace.enable()
# Run operations (automatically traced)
answer = ask("What is AI?")
# View traces
trace.show()
# Export traces
trace.export("traces.json")
chat — Interactive Sessions
from openstackai import chat
# Start a chat session
session = chat.start()
# Conversation with memory
response1 = session.send("My name is Alice")
response2 = session.send("What's my name?") # "Alice"
# End session
session.end()
Flow Diagram *(diagram not rendered on this page — TODO: restore the flow diagram here)*
➡️ [[Core-Module]] | [[Home]] | [[Quick-Start]]