🧠 Core Module

The core/ module provides the fundamental building blocks: the Agent class, memory systems, LLM providers, base components, and response caching.


Module Overview


File Structure

src/openstackai/core/
├── __init__.py
├── agent.py    # Core Agent class
├── memory.py   # Memory implementations
├── llm.py      # LLM provider abstraction
├── base.py     # Base classes
└── cache.py    # Response caching

Agent

The central orchestrator for AI agent behavior.

Agent Class

from openstackai import Agent
from openstackai.core import AgentConfig

# Basic agent
agent = Agent(
    name="Assistant",
    instructions="You are a helpful assistant."
)

# Full configuration
agent = Agent(
    name="ResearchAgent",
    instructions="You find and analyze information.",
    skills=[search_skill, summarize_skill],
    llm=azure_provider,
    memory=ConversationMemory(),
    config=AgentConfig(
        max_iterations=10,
        timeout_seconds=300.0,
        verbose=True,
        enable_memory=True
    )
)

AgentConfig

| Parameter | Type | Default | Description |
|---|---|---|---|
| max_iterations | int | 10 | Max reasoning loops |
| timeout_seconds | float | 300.0 | Execution timeout |
| verbose | bool | False | Debug output |
| enable_memory | bool | True | Use memory |
| enable_logging | bool | True | Log operations |
| retry_on_failure | bool | True | Auto-retry |
| max_retries | int | 3 | Retry count |
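
For example, a batch-style agent that should fail fast can combine the timeout and retry settings. A minimal sketch, using only the parameters listed above:

from openstackai import Agent
from openstackai.core import AgentConfig

# Fail fast: few iterations, short timeout, limited retries
config = AgentConfig(
    max_iterations=5,
    timeout_seconds=60.0,
    retry_on_failure=True,
    max_retries=2,
    verbose=False
)

agent = Agent(
    name="BatchAgent",
    instructions="Process records quickly and concisely.",
    config=config
)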

Agent Methods

# Register skills
agent.register_skill(my_skill)

# Get skill
skill = agent.get_skill("search")

# List skills
skills = agent.available_skills

# Run agent (use Runner for full control)
from openstackai import Runner
result = Runner.run_sync(agent, "Find information about AI")

Memory

Memory systems for conversation history and semantic retrieval.

ConversationMemory

from openstackai import ConversationMemory

memory = ConversationMemory(max_messages=100)

# Add messages
memory.add_user_message("Hello!")
memory.add_assistant_message("Hi there!")

# Get history
history = memory.get_messages()

# Clear
memory.clear()
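
Attaching the memory to an agent (as in the full configuration example above) lets history accumulate across turns. A minimal sketch, assuming the runner records each turn into the attached memory when enable_memory is on (the default):

from openstackai import Agent, ConversationMemory, Runner

memory = ConversationMemory(max_messages=100)
agent = Agent(
    name="Assistant",
    instructions="You are a helpful assistant.",
    memory=memory
)

Runner.run_sync(agent, "My name is Ada.")
Runner.run_sync(agent, "What is my name?")

# The accumulated turns are available for inspection
print(len(memory.get_messages()))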

VectorMemory

from openstackai import VectorMemory

memory = VectorMemory(
    embedding_model="text-embedding-3-small",
    similarity_threshold=0.7
)

# Store information
memory.store("Python is a programming language")
memory.store("openstackai is an intelligence engine")

# Retrieve relevant
results = memory.retrieve("What is openstackai?", top_k=5)
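
retrieve() returns the stored entries most similar to the query, with top_k capping the count and, presumably, similarity_threshold filtering out weak matches. A sketch, assuming the results are the stored strings:

for entry in memory.retrieve("What is openstackai?", top_k=5):
    print(entry)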

LLM Providers

Multi-provider LLM abstraction layer.
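
Because all providers sit behind one abstraction, calling code can be written against the interface instead of a concrete vendor. A sketch, assuming every provider exposes the generate() coroutine shown below for OpenAIProvider:

from openstackai.core.llm import OllamaProvider, OpenAIProvider

async def ask(provider, prompt: str) -> str:
    # Any provider with a generate() coroutine works here
    return await provider.generate(prompt)

# Swap providers without touching call sites:
# await ask(OpenAIProvider(api_key="sk-...", model="gpt-4"), "Hello!")
# await ask(OllamaProvider(host="http://localhost:11434", model="llama2"), "Hello!")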

OpenAI Provider

from openstackai.core.llm import OpenAIProvider

provider = OpenAIProvider(
    api_key="sk-...",
    model="gpt-4",
    temperature=0.7,
    max_tokens=1000
)

# Generate (await requires an async context; see the runnable sketch below)
response = await provider.generate("Hello!")

# Stream
async for chunk in provider.stream("Tell me a story"):
    print(chunk, end="")
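
generate() and stream() are coroutines, so they need an event loop. A runnable sketch using only the calls above:

import asyncio

from openstackai.core.llm import OpenAIProvider

async def main() -> None:
    provider = OpenAIProvider(api_key="sk-...", model="gpt-4")

    response = await provider.generate("Hello!")
    print(response)

    async for chunk in provider.stream("Tell me a story"):
        print(chunk, end="", flush=True)

asyncio.run(main())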

Azure OpenAI Provider

from openstackai.core.llm import OpenAIProvider
from azure.identity import DefaultAzureCredential

# Azure AD authentication (recommended)
provider = OpenAIProvider(
    endpoint="https://your-resource.openai.azure.com/",
    deployment="gpt-4o-mini",
    credential=DefaultAzureCredential()
)

# Or with API key
provider = OpenAIProvider(
    endpoint="https://your-resource.openai.azure.com/",
    deployment="gpt-4o-mini",
    api_key="your-key"
)
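
This is the azure_provider referenced in the full Agent configuration earlier; wiring it into an agent is a one-liner. A sketch reusing only calls shown on this page:

from azure.identity import DefaultAzureCredential
from openstackai import Agent, Runner
from openstackai.core.llm import OpenAIProvider

azure_provider = OpenAIProvider(
    endpoint="https://your-resource.openai.azure.com/",
    deployment="gpt-4o-mini",
    credential=DefaultAzureCredential()
)

agent = Agent(
    name="AzureAgent",
    instructions="You are a helpful assistant.",
    llm=azure_provider
)

result = Runner.run_sync(agent, "Find information about AI")
print(result)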

Anthropic Provider

from openstackai.core.llm import AnthropicProvider

provider = AnthropicProvider(
    api_key="sk-ant-...",
    model="claude-3-opus-20240229"
)

Ollama Provider (Local)

from openstackai.core.llm import OllamaProvider

provider = OllamaProvider(
    host="http://localhost:11434",
    model="llama2"
)

Base Components

BaseComponent

from openstackai.core.base import BaseComponent

class MyComponent(BaseComponent):
    def __init__(self, name: str):
        super().__init__(name=name)

    def process(self, data):
        # Implementation
        pass
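
A concrete (hypothetical) component to make the skeleton tangible; it assumes BaseComponent only needs the name passed to super().__init__():

from openstackai.core.base import BaseComponent

class UppercaseComponent(BaseComponent):
    def __init__(self):
        super().__init__(name="uppercase")

    def process(self, data: str) -> str:
        # Illustrative transform; real components do real work here
        return data.upper()

print(UppercaseComponent().process("hello"))  # HELLO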

Executable Interface

from typing import Any

from openstackai.core.base import Executable

class MyExecutable(Executable):
    async def execute(self, input_data: Any) -> Any:
        # Async execution; compute and return the result
        result = ...  # implementation goes here
        return result
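
Executables are awaited, so driving one from synchronous code takes an event loop. A runnable sketch with an illustrative echo implementation (assumes Executable needs no constructor arguments):

import asyncio
from typing import Any

from openstackai.core.base import Executable

class EchoExecutable(Executable):
    async def execute(self, input_data: Any) -> Any:
        # Trivial example: return the input unchanged
        return input_data

print(asyncio.run(EchoExecutable().execute("ping")))  # ping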

Cache

Caching of LLM responses, so repeated requests can be served without another model call.

from openstackai.core.cache import ResponseCache

cache = ResponseCache(
    backend="redis",  # or "memory", "sqlite"
    ttl=3600          # 1 hour TTL
)

# Cache operations
cache.set("key", response)
cached = cache.get("key")
cache.invalidate("key")
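
A common pattern is to consult the cache before calling the model, so identical prompts skip the round trip. A sketch combining the cache with the provider API shown earlier (the key scheme is an assumption, for illustration only):

from openstackai.core.cache import ResponseCache
from openstackai.core.llm import OpenAIProvider

cache = ResponseCache(backend="memory", ttl=3600)
provider = OpenAIProvider(api_key="sk-...", model="gpt-4")

async def cached_generate(prompt: str) -> str:
    key = f"gen:{prompt}"  # naive cache key
    cached = cache.get(key)
    if cached is not None:
        return cached  # cache hit: skip the LLM call
    response = await provider.generate(prompt)
    cache.set(key, response)
    return response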

➡️ [[Runner-Module]] | [[Easy-Module]] | [[Home]]