import re

from transformers import AutoTokenizer, AutoModelForCausalLM
def get_delivery_date(order_id=None):
    """Implementation backing the `get_delivery_date` entry in Tools.

    Placeholder: replace the print below with a real order lookup and
    return the result.

    Args:
        order_id: The customer's order ID; None when the model omitted it.

    Returns:
        An error string when `order_id` is missing; otherwise None until a
        real query is implemented.
    """
    # Define the implementation of each function in Tools
    # Guard clause: nothing can be looked up without an order number.
    if order_id is None:
        return "Unable to query without order number"
    # NOTE(review): stub — swap this print for the actual query and return its result.
    print("get_delivery_date: This should be replaced with the actual query method, and the result should be returned using return")
    return None
def get_response_call(tool_call_str):
    """Extract the Python tool-call code from a model response.

    The model emits its function call inside a ```python ... ``` fenced
    block; this returns the code between the fences.

    Args:
        tool_call_str: Full text of the model's response.

    Returns:
        The code inside the first ```python fence, or None when the
        response contains no such block (instead of raising AttributeError
        on a failed match).
    """
    pattern = r'(?<=```python\n)(.*?)(?=\n```\n)'
    # Use regular expression to match; DOTALL lets the call span multiple lines.
    match = re.search(pattern, tool_call_str, re.DOTALL)
    if match is None:
        return None
    return match.group(1)
"name": "get_delivery_date", # Function name, needs to define a matching Python function
"description": "Get the delivery date for a customer's order. Call this whenever you need to know the delivery date, for example when a customer asks 'Where is my package'",
"order_id": { # Parameter name
"type": "string", # Parameter type
"description": "The customer's order ID.", # Parameter description
"required": ["order_id"], # Which parameters are required
"additionalProperties": False,
"content": "You are a helpful customer support assistant. Use the supplied tools to assist the user.",
query = "Hi, can you tell me the delivery date for my order, my order id is 123456."
tokenizer = AutoTokenizer.from_pretrained(
"/root/ld/ld_model_pretrained/minicpm3", trust_remote_code=True
prompt = tokenizer.apply_chat_template(
messages, tools=tools, tokenize=False, add_generation_prompt=True
model = AutoModelForCausalLM.from_pretrained("/root/ld/ld_model_pretrained/minicpm3", trust_remote_code=True).cuda()
response, history = model.chat(tokenizer, query=query, history=messages, do_sample=False) # For precise function calls, set do_sample to False
call_str = get_response_call(response)