import unify
import asyncio
# OpenAI endpoints, grouped so they all share a system message tuned for OpenAI models
openai_endpoints = ("gpt-4o@openai",)
openai_client = unify.MultiLLMAsync(
    endpoints=openai_endpoints,
    system_message="This is a system message specifically optimized for OpenAI models."
)
# Anthropic endpoints, grouped so they all share a system message tuned for Anthropic models
anthropic_endpoints = ("claude-3.5-sonnet@anthropic",)
anthropic_client = unify.MultiLLMAsync(
    endpoints=anthropic_endpoints,
    system_message="This is a system message specifically optimized for Anthropic models."
)
async def generate_responses(user_message: str):
    # Await the async generate call on each provider's client,
    # using the matching client for each provider's responses
    openai_responses = await openai_client.generate(user_message)
    anthropic_responses = await anthropic_client.generate(user_message)
    return {"openai": openai_responses, "anthropic": anthropic_responses}
all_responses = asyncio.run(generate_responses("Hello, how's it going?"))
for provider, responses in all_responses.items():
    print("provider: {}\n".format(provider))
    for endpoint, response in responses.items():
        print("endpoint: {}".format(endpoint))
        print("response: {}\n".format(response))