AutoGen GPT Assistant with OSS Insight API
import logging
import os
import requests
from autogen import UserProxyAgent, config_list_from_json
from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent
# Configure logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
# API schema for OSS Insight
ossinsight_api_schema = {
    "name": "ossinsight_data_api",
    "parameters": {
        "type": "object",
        "properties": {
            "question": {
                "type": "string",
                "description": (
                    "Enter your GitHub data question in the form of a clear and specific question "
                    "to ensure the returned data is accurate and valuable. "
                    "For optimal results, specify the desired format for the data table in your request."
                ),
            }
        },
        "required": ["question"],
    },
    "description": "API endpoint for querying GitHub data in text format.",
}
# Function to get data from the OSS Insight API
def get_ossinsight(question):
    url = "https://api.ossinsight.io/explorer/answer"
    headers = {"Content-Type": "application/json"}
    data = {"question": question, "ignoreCache": True}
    response = requests.post(url, headers=headers, json=data)
    if response.status_code != 200:
        return f"Request to {url} failed with status code: {response.status_code}"
    answer = response.json()
    # Assemble a plain-text report from the question, generated SQL, result rows, and any error
    report_components = [
        f"Question: {answer['question']['title']}",
        f"querySQL: {answer['query']['sql']}" if answer["query"]["sql"] else "",
        "Result:\n " + "\n ".join([str(row) for row in answer["result"]["rows"]]) if answer.get("result") else "Result: N/A",
        f"Error: {answer['error']}" if answer.get("error") else "",
    ]
    return "\n\n".join(report_components) + "\n\n"
# Configure and create OSS Analyst Assistant Agent
assistant_id = os.environ.get("ASSISTANT_ID")
config_list = config_list_from_json("OAI_CONFIG_LIST")
llm_config = {
    "config_list": config_list,
    "assistant_id": assistant_id,
    "tools": [{"type": "function", "function": ossinsight_api_schema}],
}
oss_analyst = GPTAssistantAgent(
    name="OSS Analyst",
    code_execution_config={
        "work_dir": "coding",
        "use_docker": False,
    },
    instructions=(
        "Hello, Open Source Project Analyst. Conduct evaluations of GitHub projects, analyzing trajectories, "
        "contributions, and trends. Address analysis questions or problems carefully."
    ),
    llm_config=llm_config,
    verbose=True,
)
oss_analyst.register_function(function_map={"ossinsight_data_api": get_ossinsight})
# Configure and create User Proxy Agent
user_proxy = UserProxyAgent(
    name="user_proxy",
    code_execution_config={
        "work_dir": "coding",
        "use_docker": False,
    },
    is_termination_msg=lambda msg: "TERMINATE" in msg["content"],
    human_input_mode="NEVER",
    max_consecutive_auto_reply=1,
)
# Initiate chat with OSS Analyst
user_proxy.initiate_chat(oss_analyst, message="Top 10 developers with the most followers and plot a chart")
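Before running the full agent flow, the OSS Insight helper can be exercised on its own. A minimal sanity check, assuming the api.ossinsight.io endpoint is reachable; the question below is only illustrative:
# Hypothetical standalone check of the helper defined above (run separately from the agent script)
sample_question = "How many stars does the microsoft/autogen repository have?"
print(get_ossinsight(sample_question))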
OAI_CONFIG_LIST
[
    {
        "model": "gpt-4-turbo-preview"
    }
]
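If OAI_CONFIG_LIST (the file above, or an environment variable of the same name) lists several models, the entries passed to the assistant can be narrowed with filter_dict. A small sketch, assuming the file name shown above:
from autogen import config_list_from_json

# Keep only the gpt-4-turbo-preview entries from OAI_CONFIG_LIST
config_list = config_list_from_json(
    "OAI_CONFIG_LIST",
    filter_dict={"model": ["gpt-4-turbo-preview"]},
)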
Ollama Python Library
pip install ollama
ollama run mistral
import ollama
stream = ollama.chat(
    model='mistral',
    messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
    stream=True,
)

for chunk in stream:
    print(chunk['message']['content'], end='', flush=True)
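The same streaming call works against an Ollama server on another machine through a Client instance. A minimal sketch; the host below is an assumption and should point at wherever the server is running:
from ollama import Client

client = Client(host='http://localhost:11434')  # assumed host and default port
stream = client.chat(
    model='mistral',
    messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
    stream=True,
)
for chunk in stream:
    print(chunk['message']['content'], end='', flush=True)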
Multimodal
import ollama
with open('image.jpeg', 'rb') as file:
    response = ollama.chat(
        model='llava',
        messages=[
            {
                'role': 'user',
                'content': 'What is in this image?',
                'images': [file.read()],
            },
        ],
    )
print(response['message']['content'])
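The images field also accepts a file path instead of raw bytes, which avoids the explicit open call. A small sketch, assuming image.jpeg sits in the working directory:
import ollama

# Pass the image path directly; the client reads and encodes the file
response = ollama.chat(
    model='llava',
    messages=[{'role': 'user', 'content': 'What is in this image?', 'images': ['image.jpeg']}],
)
print(response['message']['content'])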
All commands
import ollama
Chat function
response = ollama.chat(model='mistral', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
print("Chat response:", response['message']['content'])
Generate function
generate_response = ollama.generate(model='mistral', prompt='Why is the sky blue?')
print("Generate response:", generate_response['response'])
List function
models_list = ollama.list()
print("List of models:", models_list)
Show function
show_response = ollama.show('mistral')
print("Show model response:", show_response)
Create function
modelfile = '''
FROM mistral
SYSTEM You are Mario from Super Mario Bros.
'''
create_response = ollama.create(model='example', modelfile=modelfile)
print("Create model response:", create_response)
Copy function
copy_response = ollama.copy('mistral', 'user/mistral')
print("Copy model response:", copy_response)
Delete function
delete_response = ollama.delete('example')
print("Delete model response:", delete_response)
Pull function
pull_response = ollama.pull('mistral')
print("Pull model response:", pull_response)
Push function
push_response = ollama.push('user/mistral')
print("Push model response:", push_response)
Embeddings function
embeddings_response = ollama.embeddings(model='mistral', prompt='The sky is blue because of Rayleigh scattering')
print("Embeddings response:", embeddings_response)