{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import json\n", "\n", "with open('../data/task1.json', 'r') as f:\n", " test = json.load(f)" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "draft_ids = []\n", "authors = []\n", "coauthors = []\n", "choices_2 = []\n", "choices_3 = []\n", "choices_4 = []\n", "choices_5 = []\n", "\n", "for instance in test:\n", " draft_ids.append(instance['folder_id'])\n", " authors.append(instance['author'])\n", " coauthors.append(instance['coauthor'])\n", " choices_2.append(instance['choices_2'])\n", " choices_3.append(instance['choices_3'])\n", " choices_4.append(instance['choices_4'])\n", " choices_5.append(instance['choices_5'])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import os\n", "\n", "path = '../data/task1'\n", "drafts = []\n", "for i in draft_ids:\n", " folder_path = os.path.join(path, str(i))\n", " files = os.listdir(folder_path)\n", " json_file = [file for file in files if file.endswith('EN.json')][0]\n", " with open(os.path.join(folder_path, json_file)) as f:\n", " draft = json.load(f)\n", " drafts.append(draft['Content'])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# If use Together API\n", "from together import Together\n", "\n", "your_model_name = 'xxxxxxxxxxxxxxxxxxxxxxxx'\n", "your_api_key = 'xxxxxxxxxxxxxxxxxxxxxxxx'\n", "client = Together(api_key=your_api_key)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import random\n", "from tqdm import tqdm\n", "\n", "def run_task3_k_choices(client, drafts, authors, choices_k):\n", " results = []\n", " invalid_responses = []\n", " for i, (draft, author, choice_k) in tqdm(enumerate(zip(drafts, authors, choices_k))):\n", " system_prompt = f\"\"\"You are representing {author}, a country drafting a resolution for submission to the United Nations Security Council. \n", " Your task is to review the draft resolution and select a coauthor from the following list: {', '.join(choice_k)}. \n", " Respond only with the name of the chosen coauthor and provide no additional explanation.\"\"\"\n", " user_prompt = f\"\"\"{author} is drafting a resolution to submit to the United Nations Security Council and is seeking a coauthor. \n", "\n", " Review the following draft resolution and choose a coauthor from the list below. 
\n", " Respond only with the name of the chosen country and provide no additional explanation.\n", "\n", " Available coauthors: {', '.join(choice_k)}\n", "\n", " Draft Resolution:\n", " {draft}\n", "\n", " Answer:\n", " \"\"\"\n", " \n", " response = client.chat.completions.create(\n", " model=your_model_name,\n", " messages=[\n", " {\"role\": \"system\", \"content\": system_prompt},\n", " {\"role\": \"user\", \"content\": user_prompt}\n", " ],\n", " max_tokens=20, \n", " temperature=0.0\n", " )\n", " result = response.choices[0].message.content.strip()\n", " valid_response = choice_k\n", " if result not in valid_response:\n", " print(f'Invalid response: {result}')\n", " result = random.choice(valid_response)\n", " invalid_responses.append(i)\n", " results.append(result)\n", " return results, invalid_responses" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "results_2, invalid_responses_2 = run_task3_k_choices(client, drafts, authors, choices_2)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "results_3, invalid_responses_3 = run_task3_k_choices(client, drafts, authors, choices_3)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "results_4, invalid_responses_4 = run_task3_k_choices(client, drafts, authors, choices_4)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "results_5, invalid_responses_5 = run_task3_k_choices(client, drafts, authors, choices_5)" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "def calculate_accuracy(results, ground_truth):\n", " correct = 0\n", " for i in range(len(results)):\n", " if results[i] == ground_truth[i]:\n", " correct += 1\n", " return correct/len(results)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "accuracy_2 = calculate_accuracy(results_2, coauthors)\n", "accuracy_3 = calculate_accuracy(results_3, coauthors)\n", "accuracy_4 = calculate_accuracy(results_4, coauthors)\n", "accuracy_5 = calculate_accuracy(results_5, coauthors)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "print(f'Accuracy for 2 choices: {accuracy_2}')\n", "print(f'Accuracy for 3 choices: {accuracy_3}')\n", "print(f'Accuracy for 4 choices: {accuracy_4}')\n", "print(f'Accuracy for 5 choices: {accuracy_5}')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "llm", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.19" } }, "nbformat": 4, "nbformat_minor": 2 }