utils.py

import json
from functools import partial

import requests

from ast_eval_hf import ast_eval_hf, ast_parse
from ast_eval_tf import ast_eval_tf
from ast_eval_th import ast_eval_th


# This function is modified from Gorilla's APIBench implementations
# (https://github.com/ShishirPatil/gorilla/blob/main/eval/get_llm_responses.py).
def encode_question(question, api_name):
    """Encode multiple prompt instructions into a single string."""
    if api_name == 'torch':
        api_name = 'torchhub'
        domains = '1. $DOMAIN is inferred from the task description and should include one of {Classification, Semantic Segmentation, Object Detection, Audio Separation, Video Classification, Text-to-Speech}.'
    elif api_name == 'hf':
        api_name = 'huggingface'
        domains = '1. $DOMAIN should include one of {Multimodal Feature Extraction, Multimodal Text-to-Image, Multimodal Image-to-Text, Multimodal Text-to-Video, \
Multimodal Visual Question Answering, Multimodal Document Question Answer, Multimodal Graph Machine Learning, Computer Vision Depth Estimation,\
Computer Vision Image Classification, Computer Vision Object Detection, Computer Vision Image Segmentation, Computer Vision Image-to-Image, \
Computer Vision Unconditional Image Generation, Computer Vision Video Classification, Computer Vision Zero-Shor Image Classification, \
Natural Language Processing Text Classification, Natural Language Processing Token Classification, Natural Language Processing Table Question Answering, \
Natural Language Processing Question Answering, Natural Language Processing Zero-Shot Classification, Natural Language Processing Translation, \
Natural Language Processing Summarization, Natural Language Processing Conversational, Natural Language Processing Text Generation, Natural Language Processing Fill-Mask,\
Natural Language Processing Text2Text Generation, Natural Language Processing Sentence Similarity, Audio Text-to-Speech, Audio Automatic Speech Recognition, \
Audio Audio-to-Audio, Audio Audio Classification, Audio Voice Activity Detection, Tabular Tabular Classification, Tabular Tabular Regression, \
Reinforcement Learning Reinforcement Learning, Reinforcement Learning Robotics }'
    elif api_name == 'tf':
        api_name = 'tensorhub'
        domains = '1. $DOMAIN is inferred from the task description and should include one of {text-sequence-alignment, text-embedding, text-language-model, text-preprocessing, text-classification, text-generation, text-question-answering, text-retrieval-question-answering, text-segmentation, text-to-mel, image-classification, image-feature-vector, image-object-detection, image-segmentation, image-generator, image-pose-detection, image-rnn-agent, image-augmentation, image-classifier, image-style-transfer, image-aesthetic-quality, image-depth-estimation, image-super-resolution, image-deblurring, image-extrapolation, image-text-recognition, image-dehazing, image-deraining, image-enhancemenmt, image-classification-logits, image-frame-interpolation, image-text-detection, image-denoising, image-others, video-classification, video-feature-extraction, video-generation, video-audio-text, video-text, audio-embedding, audio-event-classification, audio-command-detection, audio-paralinguists-classification, audio-speech-to-text, audio-speech-synthesis, audio-synthesis, audio-pitch-extraction}'
    else:
        raise ValueError(f'API name {api_name!r} is not supported.')
    prompt = (
        question
        + '\nWrite a python program in 1 to 2 lines to call API in '
        + api_name
        + '.\n\nThe answer should follow the format: <<<domain>>> $DOMAIN, <<<api_call>>>: $API_CALL, <<<api_provider>>>: $API_PROVIDER, <<<explanation>>>: $EXPLANATION, <<<code>>>: $CODE}. Here are the requirements:\n'
        + domains
        + '\n2. The $API_CALL should have only 1 line of code that calls api.\n3. The $API_PROVIDER should be the programming framework used.\n4. $EXPLANATION should be a step-by-step explanation.\n5. The $CODE is the python code.\n6. Do not repeat the format in your answer.'
    )
    prompts = (
        'You are a helpful API writer who can write APIs based on requirements.\n'
        + prompt
    )
    return prompts
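

# Illustrative example (not part of the original module): encode_question returns a
# single prompt string, so a call might look like the commented sketch below; the
# question text is a made-up placeholder.
#
#     prompt = encode_question('I want an API to classify images of dogs.', 'hf')
#     # prompt begins with the "helpful API writer" instruction and ends with the
#     # formatting requirements listed above.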


def get_data(hub):
    if hub == 'hf':
        question_data = 'https://raw.githubusercontent.com/ShishirPatil/gorilla/main/eval/eval-data/questions/huggingface/questions_huggingface_0_shot.jsonl'
        api_dataset = 'https://raw.githubusercontent.com/ShishirPatil/gorilla/main/data/api/huggingface_api.jsonl'
        apibench = 'https://raw.githubusercontent.com/ShishirPatil/gorilla/main/data/apibench/huggingface_eval.json'
        ast_eval = ast_eval_hf
    elif hub == 'torch':
        question_data = 'https://raw.githubusercontent.com/ShishirPatil/gorilla/main/eval/eval-data/questions/torchhub/questions_torchhub_0_shot.jsonl'
        api_dataset = 'https://raw.githubusercontent.com/ShishirPatil/gorilla/main/data/api/torchhub_api.jsonl'
        apibench = 'https://raw.githubusercontent.com/ShishirPatil/gorilla/main/data/apibench/torchhub_eval.json'
        ast_eval = ast_eval_th
    elif hub == 'tf':
        question_data = 'https://raw.githubusercontent.com/ShishirPatil/gorilla/main/eval/eval-data/questions/tensorflowhub/questions_tensorflowhub_0_shot.jsonl'
        api_dataset = 'https://raw.githubusercontent.com/ShishirPatil/gorilla/main/data/api/tensorflowhub_api.jsonl'
        apibench = 'https://raw.githubusercontent.com/ShishirPatil/gorilla/main/data/apibench/tensorflow_eval.json'
        ast_eval = ast_eval_tf
    else:
        raise ValueError(f'Hub {hub!r} is not supported; expected one of: hf, torch, tf.')

    # Get questions and question_ids.
    questions = []
    question_ids = []
    question_data = requests.get(question_data)
    if question_data.status_code == 200:
        lines = question_data.text.splitlines()
        for line in lines:
            entry = json.loads(line)
            questions.append(entry['text'])
            question_ids.append(entry['question_id'])

    # Get the API dataset.
    api_database = []
    api_dataset = requests.get(api_dataset)
    if api_dataset.status_code == 200:
        lines = api_dataset.text.splitlines()
        for line in lines:
            api_database.append(json.loads(line))

    # Get the question-answer pair dataset.
    qa_pairs = []
    apibench = requests.get(apibench)
    if apibench.status_code == 200:
        lines = apibench.text.splitlines()
        for line in lines:
            qa_pairs.append(json.loads(line)['api_data'])

    # Parse all APIs into AST trees.
    ast_database = []
    for data in api_database:
        ast_tree = ast_parse(data['api_call'])
        ast_database.append(ast_tree)

    # Bind the reference data to the hub-specific AST evaluator.
    ast_eval = partial(ast_eval, api_database, qa_pairs, ast_database)
    return questions, question_ids, ast_eval
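

# Minimal usage sketch (not part of the original file): it assumes network access to
# the raw.githubusercontent.com URLs above and that the ast_eval_* modules are
# importable; 'hf' is used purely as an example hub name.
if __name__ == '__main__':
    questions, question_ids, ast_eval = get_data('hf')
    print(f'Loaded {len(questions)} questions.')
    if questions:
        print(encode_question(questions[0], 'hf'))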