From fc30b45d2a12b85a2fd84a67b0d4a4a5d206a288 Mon Sep 17 00:00:00 2001 From: richwardle Date: Thu, 2 Jan 2025 22:08:22 +0700 Subject: [PATCH 01/10] remove summarization --- README.md | 13 +++----- docs/SN1_validation.md | 1 - prompting/tasks/summarization.py | 57 -------------------------------- prompting/tasks/task_registry.py | 6 ---- validator_api/API_docs.md | 2 +- 5 files changed, 6 insertions(+), 73 deletions(-) delete mode 100644 prompting/tasks/summarization.py diff --git a/README.md b/README.md index 33cd8cee..72207252 100644 --- a/README.md +++ b/README.md @@ -49,22 +49,19 @@ Subnet one utilizes the concept of "Tasks" to control the behavior of miners. Va ### 1. **QA (Question Answering)** The miner receives a question about a specific section from a Wikipedia page. The miner must then find the original context in the specified section and use it to return an accurate answer. References are generated using the validator's privileged knowledge of the context, and miner completions are scored based on similarity metrics. -### 2. **Summarization** -Similar to QA, but the miner uses the entire Wikipedia page instead of a specific section. The miner reads the whole page, summarizes it, and provides a concise answer. - -### 3. **DateQA** +### 2. **DateQA** The miner receives a question about an event from Wikipedia. The miner must search through Wikipedia for the relevant event and return the correct answer based on the findings. References are again generated with validator's knowledge of the context, and similarity metrics are used to score miner completions. -### 4. **Inference** +### 3. **Inference** A question is given with some pre-seeded information and a random seed. The miner must perform an inference based on this information to provide the correct answer. Completions are scored based on similarity metrics. -### 5. **MultiChoice** +### 4. **MultiChoice** The miner is presented with a question from Wikipedia along with four possible answers (A, B, C, or D). The miner must search Wikipedia and return the correct answer by selecting one of the given options. Miner completions are scored by Regex matching. -### 6. **Programming** +### 5. **Programming** The miner receives a code snippet that is incomplete. The task is to complete the code snippet to perform its intended function. The validator generates a reference using its internal LLM, and the miner is scored based on its similarity to this reference. -### 7. **Web Retrieval** +### 6. **Web Retrieval** The miner is given a question based on a random web page and must return a scraped website that contains the answer. This requires searching the web to locate the most accurate and reliable source to provide the answer. The miner is scored based on the embedding similarity between the answer it returns and the original website that the validator generated the reference from. # API Documentation diff --git a/docs/SN1_validation.md b/docs/SN1_validation.md index d6cdda8c..8e04f142 100644 --- a/docs/SN1_validation.md +++ b/docs/SN1_validation.md @@ -31,7 +31,6 @@ More tooling will be included in future releases. # Tasks The validation process supports an ever-growing number of tasks. Tasks drive agent behaviour based on specific goals, such as: - Question answering -- Summarization - Code debugging - Mathematics and more.
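The similarity metrics mentioned in the task descriptions above amount to comparing a miner's completion against the validator's reference, typically via embedding similarity. The sketch below illustrates the general idea only; the embedding model name (`all-MiniLM-L6-v2`) and the helper functions are illustrative assumptions, not the subnet's actual reward models.

```python
# Minimal sketch of embedding-similarity scoring; the model choice is an assumption.
import numpy as np
from sentence_transformers import SentenceTransformer

_model = SentenceTransformer("all-MiniLM-L6-v2")  # assumed general-purpose embedding model


def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    # Cosine similarity between two embedding vectors, in [-1, 1].
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))


def similarity_score(reference: str, completion: str) -> float:
    # Score a miner completion by how close its embedding is to the validator's reference.
    reference_vec, completion_vec = _model.encode([reference, completion])
    return cosine_similarity(reference_vec, completion_vec)
```

Regex-scored tasks such as MultiChoice instead simply pattern-match the selected option (A, B, C, or D) in the completion.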
diff --git a/prompting/tasks/summarization.py b/prompting/tasks/summarization.py deleted file mode 100644 index cf42177c..00000000 --- a/prompting/tasks/summarization.py +++ /dev/null @@ -1,57 +0,0 @@ -from typing import ClassVar - -from prompting.rewards.relevance import RelevanceRewardModel -from prompting.rewards.reward import BaseRewardConfig, BaseRewardModel -from prompting.rewards.rouge import RougeRewardModel -from prompting.tasks.base_task import BaseTextTask -from shared.base import Context - -QUERY_SYSTEM_PROMPT = """\ -You are a request-generating expert. When asked to generate a request, you ask for a detailed summary of a topic. Your request should be specificly about the topic. -""" - -REFERENCE_SYSTEM_PROMPT = """\ -You are an expert question-answering LLM. You will receive context and a question, and you will generate a detailed and accurate answer to the question. Your answer should be based on the context provided. -""" - -QUERY_PROMPT_TEMPLATE = """\ -Request an exhaustive summary about the topic: {title}""" - -# Used to obtain reference answer -REFERENCE_PROMPT_TEMPLATE = """\ -Summarize the following context in a concise and accurate manner: - -## Context -{context} -""" - - -def make_query_prompt(context: Context) -> str: - return "Creatively ask for a summary of the following context:\n\n" + context.title - - -class SummarizationRewardConfig(BaseRewardConfig): - reward_definitions: ClassVar[list[BaseRewardModel]] = [ - RougeRewardModel(weight=0.5), - RelevanceRewardModel(weight=0.5), - ] - penalty_definition: ClassVar[list[BaseRewardModel]] = [RougeRewardModel(weight=0.5)] - - -class SummarizationTask(BaseTextTask): - name: ClassVar[str] = "summarization" - query_system_prompt: ClassVar[str] = QUERY_SYSTEM_PROMPT - reference_system_prompt: ClassVar[str] = REFERENCE_SYSTEM_PROMPT - augmentation_system_prompt: ClassVar[str] = "" - query: str | None = None - reference: str | None = None - - def make_query(self, dataset_entry: Context): - query_prompt = QUERY_PROMPT_TEMPLATE.format(title=dataset_entry.title) - self.query = self.generate_query(messages=[query_prompt]) - return self.query - - def make_reference(self, dataset_entry: Context): - reference_prompt = REFERENCE_PROMPT_TEMPLATE.format(context=dataset_entry.content, question=self.query) - self.reference = self.generate_reference(messages=[{"role": "user", "content": reference_prompt}]) - return self.reference diff --git a/prompting/tasks/task_registry.py b/prompting/tasks/task_registry.py index 0bdf0324..174d0d41 100644 --- a/prompting/tasks/task_registry.py +++ b/prompting/tasks/task_registry.py @@ -17,12 +17,9 @@ from prompting.tasks.multi_step_reasoning import MultiStepReasoningRewardConfig, MultiStepReasoningTask from prompting.tasks.programming_task import ProgrammingRewardConfig, ProgrammingTask from prompting.tasks.qa import QARewardConfig, QuestionAnsweringTask -from prompting.tasks.summarization import SummarizationRewardConfig, SummarizationTask from prompting.tasks.web_retrieval import WebRetrievalRewardConfig, WebRetrievalTask from shared.base import BaseDataset -# from prompting.tasks. 
- class TaskConfig(BaseModel): task: type[BaseTextTask] @@ -39,9 +36,6 @@ def __hash__(self): class TaskRegistry(BaseModel): task_configs: ClassVar[list[TaskConfig]] = [ TaskConfig(task=QuestionAnsweringTask, probability=0.15, datasets=[WikiDataset], reward_model=QARewardConfig), - TaskConfig( - task=SummarizationTask, probability=0.1, datasets=[WikiDataset], reward_model=SummarizationRewardConfig - ), TaskConfig( task=DateQuestionAnsweringTask, probability=0.1, diff --git a/validator_api/API_docs.md b/validator_api/API_docs.md index bf609989..46d27fdc 100644 --- a/validator_api/API_docs.md +++ b/validator_api/API_docs.md @@ -103,7 +103,7 @@ bash run_api.sh **Parameters:** -- **task** (query, optional): The type of task (e.g., `QuestionAnsweringTask`, `SummarizationTask`, etc.). +- **task** (query, optional): The type of task (e.g., `QuestionAnsweringTask`, `Programming`, etc.). - **model** (query, optional): The specific model (string). - **k** (query, optional): The maximum number of results to return (integer, default: 10). From d470f44c6727080f6a2f04e2517d0876e377ce50 Mon Sep 17 00:00:00 2001 From: richwardle Date: Thu, 2 Jan 2025 22:19:57 +0700 Subject: [PATCH 02/10] remove date qa task --- README.md | 7 ++--- prompting/tasks/date_qa.py | 46 -------------------------------- prompting/tasks/task_registry.py | 7 ----- 3 files changed, 2 insertions(+), 58 deletions(-) delete mode 100644 prompting/tasks/date_qa.py diff --git a/README.md b/README.md index 72207252..b3914803 100644 --- a/README.md +++ b/README.md @@ -49,13 +49,10 @@ Subnet one utilizes the concept of "Tasks" to control the behavior of miners. Va ### 1. **QA (Question Answering)** The miner receives a question about a specific section from a Wikipedia page. The miner must then find the original context in the specified section and use it to return an accurate answer. References are generated using the validator's privileged knowledge of the context, and miner completions are scored based on similarity metrics. -### 2. **DateQA** -The miner receives a question about an event from Wikipedia. The miner must search through Wikipedia for the relevant event and return the correct answer based on the findings. References are again generated with validator's knowledge of the context, and similarity metrics are used to score miner completions. - -### 3. **Inference** +### 2. **Inference** A question is given with some pre-seeded information and a random seed. The miner must perform an inference based on this information to provide the correct answer. Completions are scored based on similarity metrics. -### 4. **MultiChoice** +### 3. **MultiChoice** The miner is presented with a question from Wikipedia along with four possible answers (A, B, C, or D). The miner must search Wikipedia and return the correct answer by selecting one of the given options. Miner completions are scored by Regex matching. ### 5. **Programming** diff --git a/prompting/tasks/date_qa.py b/prompting/tasks/date_qa.py deleted file mode 100644 index 4db0cc18..00000000 --- a/prompting/tasks/date_qa.py +++ /dev/null @@ -1,46 +0,0 @@ -from typing import ClassVar - -from prompting.datasets.wiki import DateContext -from prompting.rewards.date import DateRewardModel -from prompting.rewards.reward import BaseRewardConfig, BaseRewardModel -from prompting.rewards.rouge import RougeRewardModel -from prompting.tasks.base_task import BaseTextTask - -QUERY_SYSTEM_PROMPT = """You are a question creation expert. 
When asked to create a question, you use the context to make a specific question that would have the answer . Your question should contain the topic.""" -QUERY_PROMPT_TEMPLATE = """\ -Create a question about {topic} that would have as the answer using the following context: -context: {content} -""" -REFERENCE_PROMPT_TEMPLATE = """\ -Your answer must include the following date: {date}. -Answer the following question using the provided context. -Question: {query} -Context: {content} -""" - - -class DateQARewardConfig(BaseRewardConfig): - reward_definitions: ClassVar[list[BaseRewardModel]] = [ - DateRewardModel(weight=0.7), - RougeRewardModel(weight=0.3), - ] - - -class DateQuestionAnsweringTask(BaseTextTask): - name: ClassVar[str] = "date_qa" - query_system_prompt: ClassVar[str] = QUERY_SYSTEM_PROMPT - augmentation_system_prompt: ClassVar[str] = "" - query: str | None = None - reference: str | None = None - - def make_query(self, dataset_entry: DateContext, **kwargs) -> str: - query_prompt = QUERY_PROMPT_TEMPLATE.format(content=dataset_entry.date, topic=dataset_entry.topic) - self.query = self.generate_query(messages=[query_prompt]) - return self.query - - def make_reference(self, dataset_entry: DateContext) -> str: - reference_prompt = REFERENCE_PROMPT_TEMPLATE.format( - date=dataset_entry.content, query=self.query, content=dataset_entry.subtopic - ) - self.reference = self.generate_reference(messages=[{"role": "user", "content": reference_prompt}]) - return self.reference diff --git a/prompting/tasks/task_registry.py b/prompting/tasks/task_registry.py index 174d0d41..cf6fb9d2 100644 --- a/prompting/tasks/task_registry.py +++ b/prompting/tasks/task_registry.py @@ -11,7 +11,6 @@ from prompting.datasets.wiki import WikiDataset, WikiDateDataset from prompting.rewards.reward import BaseRewardConfig from prompting.tasks.base_task import BaseTextTask -from prompting.tasks.date_qa import DateQARewardConfig, DateQuestionAnsweringTask from prompting.tasks.inference import InferenceRewardConfig, InferenceTask from prompting.tasks.multi_choice import MultiChoiceRewardConfig, MultiChoiceTask from prompting.tasks.multi_step_reasoning import MultiStepReasoningRewardConfig, MultiStepReasoningTask @@ -36,12 +35,6 @@ def __hash__(self): class TaskRegistry(BaseModel): task_configs: ClassVar[list[TaskConfig]] = [ TaskConfig(task=QuestionAnsweringTask, probability=0.15, datasets=[WikiDataset], reward_model=QARewardConfig), - TaskConfig( - task=DateQuestionAnsweringTask, - probability=0.1, - datasets=[WikiDateDataset], - reward_model=DateQARewardConfig, - ), TaskConfig( task=InferenceTask, probability=0.16, From 966b70f84769c63d108910acfb2e92058d4e9a79 Mon Sep 17 00:00:00 2001 From: richwardle Date: Thu, 2 Jan 2025 22:25:13 +0700 Subject: [PATCH 03/10] alter task registry ratios --- prompting/tasks/task_registry.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/prompting/tasks/task_registry.py b/prompting/tasks/task_registry.py index cf6fb9d2..ac885ae7 100644 --- a/prompting/tasks/task_registry.py +++ b/prompting/tasks/task_registry.py @@ -8,7 +8,7 @@ from prompting.datasets.huggingface_github import HuggingFaceGithubDataset from prompting.datasets.random_website import DDGDataset from prompting.datasets.sn13 import SN13Dataset -from prompting.datasets.wiki import WikiDataset, WikiDateDataset +from prompting.datasets.wiki import WikiDataset from prompting.rewards.reward import BaseRewardConfig from prompting.tasks.base_task import BaseTextTask from prompting.tasks.inference 
import InferenceRewardConfig, InferenceTask @@ -37,34 +37,34 @@ class TaskRegistry(BaseModel): TaskConfig(task=QuestionAnsweringTask, probability=0.15, datasets=[WikiDataset], reward_model=QARewardConfig), TaskConfig( task=InferenceTask, - probability=0.16, + probability=0.1, datasets=[SN13Dataset], reward_model=InferenceRewardConfig, ), TaskConfig( task=MultiChoiceTask, - probability=0.26, + probability=0.2, datasets=[WikiDataset], reward_model=MultiChoiceRewardConfig, ), TaskConfig( task=ProgrammingTask, - probability=0.1, + probability=0.2, datasets=[HuggingFaceGithubDataset], reward_model=ProgrammingRewardConfig, ), TaskConfig( task=WebRetrievalTask, - probability=0.03, + probability=0.1, datasets=[DDGDataset], reward_model=WebRetrievalRewardConfig, ), TaskConfig( task=MultiStepReasoningTask, - probability=0.1, + probability=0.2, datasets=[WikiDataset], reward_model=MultiStepReasoningRewardConfig, - ), + ) ] @classmethod From bfaa50974c9f28507ae7f049b361b6a3c51044fa Mon Sep 17 00:00:00 2001 From: bkb2135 Date: Thu, 2 Jan 2025 11:28:30 -0500 Subject: [PATCH 04/10] Adjust Task weighting --- prompting/tasks/task_registry.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/prompting/tasks/task_registry.py b/prompting/tasks/task_registry.py index ac885ae7..6474371b 100644 --- a/prompting/tasks/task_registry.py +++ b/prompting/tasks/task_registry.py @@ -34,7 +34,7 @@ def __hash__(self): class TaskRegistry(BaseModel): task_configs: ClassVar[list[TaskConfig]] = [ - TaskConfig(task=QuestionAnsweringTask, probability=0.15, datasets=[WikiDataset], reward_model=QARewardConfig), + TaskConfig(task=QuestionAnsweringTask, probability=0.25, datasets=[WikiDataset], reward_model=QARewardConfig), TaskConfig( task=InferenceTask, probability=0.1, @@ -61,7 +61,7 @@ class TaskRegistry(BaseModel): ), TaskConfig( task=MultiStepReasoningTask, - probability=0.2, + probability=0.1, datasets=[WikiDataset], reward_model=MultiStepReasoningRewardConfig, ) From a7f8f39c8a871541bdc134daeff6e4f9b2762abe Mon Sep 17 00:00:00 2001 From: bkb2135 Date: Thu, 2 Jan 2025 12:34:06 -0500 Subject: [PATCH 05/10] Initial Upload --- prompting/tasks/qa.py | 25 +++++++++++++++++++++++-- prompting/tasks/task_registry.py | 5 +++-- 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/prompting/tasks/qa.py b/prompting/tasks/qa.py index 9a608204..dd1e9f44 100644 --- a/prompting/tasks/qa.py +++ b/prompting/tasks/qa.py @@ -47,11 +47,11 @@ class QARewardConfig(BaseRewardConfig): penalty_definition: ClassVar[list[BaseRewardModel]] = [RougeRewardModel(weight=0.5)] -class QuestionAnsweringTask(BaseTextTask): +class WikiQuestionAnsweringTask(BaseTextTask): """QuestionAnsweringTasks must be initialised with an LLM pipeline to generate query and reference plus context from a dataset to base the query on""" - name: ClassVar[str] = "qa" + name: ClassVar[str] = "wiki_qa" query_system_prompt: ClassVar[str] = QUERY_SYSTEM_PROMPT reference_system_prompt: ClassVar[str] = REFERENCE_SYSTEM_PROMPT augmentation_system_prompt: ClassVar[str] = "" @@ -67,3 +67,24 @@ def make_reference(self, dataset_entry: Context): reference_prompt = REFERENCE_PROMPT_TEMPLATE.format(context=dataset_entry.content, question=self.query) self.reference = self.generate_reference(messages=[{"role": "user", "content": reference_prompt}]) return self.reference + +class WebQuestionAnsweringTask(BaseTextTask): + """QuestionAnsweringTasks must be initialised with an LLM pipeline to generate query and reference plus + context from a dataset to base the query 
on""" + + name: ClassVar[str] = "web_qa" + query_system_prompt: ClassVar[str] = QUERY_SYSTEM_PROMPT + reference_system_prompt: ClassVar[str] = REFERENCE_SYSTEM_PROMPT + augmentation_system_prompt: ClassVar[str] = "" + query: str | None = None + reference: str | None = None + + def make_query(self, dataset_entry: Context): + query_prompt = QUERY_PROMPT_TEMPLATE.format(context=dataset_entry.content) + self.query = self.generate_query(messages=[query_prompt]) + return self.query + + def make_reference(self, dataset_entry: Context): + reference_prompt = REFERENCE_PROMPT_TEMPLATE.format(context=dataset_entry.content, question=self.query) + self.reference = self.generate_reference(messages=[{"role": "user", "content": reference_prompt}]) + return self.reference \ No newline at end of file diff --git a/prompting/tasks/task_registry.py b/prompting/tasks/task_registry.py index 6474371b..0eca37b7 100644 --- a/prompting/tasks/task_registry.py +++ b/prompting/tasks/task_registry.py @@ -15,7 +15,7 @@ from prompting.tasks.multi_choice import MultiChoiceRewardConfig, MultiChoiceTask from prompting.tasks.multi_step_reasoning import MultiStepReasoningRewardConfig, MultiStepReasoningTask from prompting.tasks.programming_task import ProgrammingRewardConfig, ProgrammingTask -from prompting.tasks.qa import QARewardConfig, QuestionAnsweringTask +from prompting.tasks.qa import QARewardConfig, WikiQuestionAnsweringTask, WebQuestionAnsweringTask from prompting.tasks.web_retrieval import WebRetrievalRewardConfig, WebRetrievalTask from shared.base import BaseDataset @@ -34,7 +34,8 @@ def __hash__(self): class TaskRegistry(BaseModel): task_configs: ClassVar[list[TaskConfig]] = [ - TaskConfig(task=QuestionAnsweringTask, probability=0.25, datasets=[WikiDataset], reward_model=QARewardConfig), + TaskConfig(task=WikiQuestionAnsweringTask, probability=0.15, datasets=[WikiDataset], reward_model=QARewardConfig), + TaskConfig(task=WebQuestionAnsweringTask, probability=0.1, datasets=[DDGDataset], reward_model=QARewardConfig), TaskConfig( task=InferenceTask, probability=0.1, From b988759bfae5e62b8e60ebf333e9ca5a69d3c050 Mon Sep 17 00:00:00 2001 From: richwardle Date: Thu, 2 Jan 2025 18:36:39 +0000 Subject: [PATCH 06/10] Fix Imports --- prompting/tasks/multi_step_reasoning.py | 4 ++-- prompting/tasks/task_registry.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/prompting/tasks/multi_step_reasoning.py b/prompting/tasks/multi_step_reasoning.py index 010c1bcf..5bd718c7 100644 --- a/prompting/tasks/multi_step_reasoning.py +++ b/prompting/tasks/multi_step_reasoning.py @@ -9,7 +9,7 @@ from prompting.llms.apis.llm_wrapper import LLMWrapper from prompting.rewards.relevance import RelevanceRewardModel from prompting.rewards.reward import BaseRewardConfig, BaseRewardModel -from prompting.tasks.qa import QuestionAnsweringTask +from prompting.tasks.qa import WikiQuestionAnsweringTask from shared.base import Context from shared.timer import Timer @@ -174,7 +174,7 @@ class MultiStepReasoningRewardConfig(BaseRewardConfig): ] -class MultiStepReasoningTask(QuestionAnsweringTask): +class MultiStepReasoningTask(WikiQuestionAnsweringTask): """QuestionAnsweringTasks must be initialised with an LLM pipeline to generate query and reference plus context from a dataset to base the query on""" diff --git a/prompting/tasks/task_registry.py b/prompting/tasks/task_registry.py index 0eca37b7..1d511071 100644 --- a/prompting/tasks/task_registry.py +++ b/prompting/tasks/task_registry.py @@ -34,7 +34,7 @@ def __hash__(self): class 
TaskRegistry(BaseModel): task_configs: ClassVar[list[TaskConfig]] = [ - TaskConfig(task=WikiQuestionAnsweringTask, probability=0.15, datasets=[WikiDataset], reward_model=QARewardConfig), + TaskConfig(task=WikiQuestionAnsweringTask, probability=0.2, datasets=[WikiDataset], reward_model=QARewardConfig), TaskConfig(task=WebQuestionAnsweringTask, probability=0.1, datasets=[DDGDataset], reward_model=QARewardConfig), TaskConfig( task=InferenceTask, From 59ab6ff4ceb99eb1617f7ad43c664c54c7277196 Mon Sep 17 00:00:00 2001 From: richwardle Date: Sat, 4 Jan 2025 06:04:39 +0000 Subject: [PATCH 07/10] precommit changes --- prompting/tasks/qa.py | 3 ++- prompting/tasks/task_registry.py | 8 +++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/prompting/tasks/qa.py b/prompting/tasks/qa.py index dd1e9f44..fdc3d167 100644 --- a/prompting/tasks/qa.py +++ b/prompting/tasks/qa.py @@ -68,6 +68,7 @@ def make_reference(self, dataset_entry: Context): self.reference = self.generate_reference(messages=[{"role": "user", "content": reference_prompt}]) return self.reference + class WebQuestionAnsweringTask(BaseTextTask): """QuestionAnsweringTasks must be initialised with an LLM pipeline to generate query and reference plus context from a dataset to base the query on""" @@ -87,4 +88,4 @@ def make_query(self, dataset_entry: Context): def make_reference(self, dataset_entry: Context): reference_prompt = REFERENCE_PROMPT_TEMPLATE.format(context=dataset_entry.content, question=self.query) self.reference = self.generate_reference(messages=[{"role": "user", "content": reference_prompt}]) - return self.reference \ No newline at end of file + return self.reference diff --git a/prompting/tasks/task_registry.py b/prompting/tasks/task_registry.py index 1d511071..d26dbd06 100644 --- a/prompting/tasks/task_registry.py +++ b/prompting/tasks/task_registry.py @@ -15,7 +15,7 @@ from prompting.tasks.multi_choice import MultiChoiceRewardConfig, MultiChoiceTask from prompting.tasks.multi_step_reasoning import MultiStepReasoningRewardConfig, MultiStepReasoningTask from prompting.tasks.programming_task import ProgrammingRewardConfig, ProgrammingTask -from prompting.tasks.qa import QARewardConfig, WikiQuestionAnsweringTask, WebQuestionAnsweringTask +from prompting.tasks.qa import QARewardConfig, WebQuestionAnsweringTask, WikiQuestionAnsweringTask from prompting.tasks.web_retrieval import WebRetrievalRewardConfig, WebRetrievalTask from shared.base import BaseDataset @@ -34,7 +34,9 @@ def __hash__(self): class TaskRegistry(BaseModel): task_configs: ClassVar[list[TaskConfig]] = [ - TaskConfig(task=WikiQuestionAnsweringTask, probability=0.2, datasets=[WikiDataset], reward_model=QARewardConfig), + TaskConfig( + task=WikiQuestionAnsweringTask, probability=0.2, datasets=[WikiDataset], reward_model=QARewardConfig + ), TaskConfig(task=WebQuestionAnsweringTask, probability=0.1, datasets=[DDGDataset], reward_model=QARewardConfig), TaskConfig( task=InferenceTask, @@ -65,7 +67,7 @@ class TaskRegistry(BaseModel): probability=0.1, datasets=[WikiDataset], reward_model=MultiStepReasoningRewardConfig, - ) + ), ] @classmethod From e259a007dd7611d21609bd828e5d3c7f924654f7 Mon Sep 17 00:00:00 2001 From: richwardle Date: Sat, 4 Jan 2025 06:19:37 +0000 Subject: [PATCH 08/10] update readme to include multistep reasoning descriptions --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index b3914803..d2b3fa9b 100644 --- a/README.md +++ b/README.md @@ -61,6 +61,9 @@ The miner receives a code snippet that is 
incomplete. The task is to complete th ### 6. **Web Retrieval** The miner is given a question based on a random web page and must return a scraped website that contains the answer. This requires searching the web to locate the most accurate and reliable source to provide the answer. The miner is scored based on the embedding similarity between the answer it returns and the original website that the validator generated the reference from. +### 7. **Multistep Reasoning** +The miner is given a complex problem that requires multiple steps to solve. Each step builds upon the previous one, and the miner must provide intermediate results before arriving at the final answer. The validator generates a reference solution using its internal LLM, and the miner is scored based on the accuracy and coherence of the intermediate and final results. + # API Documentation For detailed information on the available API endpoints, request/response formats, and usage examples, please refer to the [API Documentation](./api/API.md). From d082fc34c2282d5ed3380dfab5e2dc709578fd34 Mon Sep 17 00:00:00 2001 From: richwardle Date: Tue, 7 Jan 2025 13:30:10 +0000 Subject: [PATCH 09/10] fix retrieval --- neurons/validator.py | 2 +- prompting/datasets/random_website.py | 14 +++++++------- prompting/rewards/web_retrieval.py | 8 ++++---- prompting/tasks/qa.py | 4 ++-- prompting/tasks/task_registry.py | 4 ++-- prompting/tasks/web_retrieval.py | 4 +++- shared/settings.py | 1 + 7 files changed, 20 insertions(+), 17 deletions(-) diff --git a/neurons/validator.py b/neurons/validator.py index 9d7e6305..a8735229 100644 --- a/neurons/validator.py +++ b/neurons/validator.py @@ -22,7 +22,7 @@ torch.multiprocessing.set_start_method("spawn", force=True) -NEURON_SAMPLE_SIZE = 100 +NEURON_SAMPLE_SIZE = 100 #TODO: Should add this to constants.py def create_loop_process(task_queue, scoring_queue, reward_events): diff --git a/prompting/datasets/random_website.py b/prompting/datasets/random_website.py index 3058812a..4061e889 100644 --- a/prompting/datasets/random_website.py +++ b/prompting/datasets/random_website.py @@ -23,15 +23,15 @@ class DDGDataset(BaseDataset): english_words: list[str] = None def search_random_term(self, retries: int = 3) -> tuple[Optional[str], Optional[list[dict[str, str]]]]: - try: - ddg = PatchedDDGS(proxy=shared_settings.PROXY_URL, verify=False) - for _ in range(retries): - random_words = " ".join(random.sample(ENGLISH_WORDS, 5)) + ddg = PatchedDDGS(proxy=shared_settings.PROXY_URL, verify=False) + for _ in range(retries): + random_words = " ".join(random.sample(ENGLISH_WORDS, 4)) + try: results = list(ddg.text(random_words)) if results: return random_words, results - except Exception as ex: - logger.error(f"Failed to get search results from DuckDuckGo: {ex}") + except Exception as ex: + logger.error(f"Failed to get search results from DuckDuckGo: {ex}") return None, None @staticmethod @@ -44,7 +44,7 @@ def extract_website_content(url: str) -> Optional[str]: logger.error(f"Failed to extract content from website {url}: {ex}") def next(self) -> Optional[DDGDatasetEntry]: - search_term, results = self.search_random_term(retries=3) + search_term, results = self.search_random_term(retries=5) if not results: return None website_url = results[0]["href"] diff --git a/prompting/rewards/web_retrieval.py b/prompting/rewards/web_retrieval.py index 592ef9d2..7c258082 100644 --- a/prompting/rewards/web_retrieval.py +++ b/prompting/rewards/web_retrieval.py @@ -60,16 +60,16 @@ def reward(self, reference: str, response_event: 
DendriteResponseEvent, **kwargs continue dataset_entry = DDGDatasetEntry.model_validate_json(json.loads(reference)) - search_term = dataset_entry.search_term + query = dataset_entry.query reference_content = dataset_entry.website_content # Similarity between search term and miner's scraped content. - search_response_sim = self._cosine_similarity(content1=search_term, content2=response_content) + search_response_sim = self._cosine_similarity(content1=query, content2=response_content) # Similarity between search term and relevant section of content. search_relevant_sim = 0 if response_relevant is not None: - search_relevant_sim = self._cosine_similarity(content1=search_term, content2=response_relevant) + search_relevant_sim = self._cosine_similarity(content1=query, content2=response_relevant) # If the URL provided in the completion is valid. valid_url_score = 0 @@ -77,7 +77,7 @@ def reward(self, reference: str, response_event: DendriteResponseEvent, **kwargs valid_url_score = self._cosine_similarity(content1=response_content, content2=response_url_scraped) # Similarity between search term and reference content. - search_reference_sim = self._cosine_similarity(content1=search_term, content2=reference_content) + search_reference_sim = self._cosine_similarity(content1=query, content2=reference_content) score = (search_response_sim + valid_url_score + search_relevant_sim) / 3 if abs(search_response_sim - search_reference_sim) > _SEARCH_TERM_THRESH: logger.info( diff --git a/prompting/tasks/qa.py b/prompting/tasks/qa.py index fdc3d167..314121d8 100644 --- a/prompting/tasks/qa.py +++ b/prompting/tasks/qa.py @@ -81,11 +81,11 @@ class WebQuestionAnsweringTask(BaseTextTask): reference: str | None = None def make_query(self, dataset_entry: Context): - query_prompt = QUERY_PROMPT_TEMPLATE.format(context=dataset_entry.content) + query_prompt = QUERY_PROMPT_TEMPLATE.format(context=dataset_entry.website_content) self.query = self.generate_query(messages=[query_prompt]) return self.query def make_reference(self, dataset_entry: Context): - reference_prompt = REFERENCE_PROMPT_TEMPLATE.format(context=dataset_entry.content, question=self.query) + reference_prompt = REFERENCE_PROMPT_TEMPLATE.format(context=dataset_entry.website_content, question=self.query) self.reference = self.generate_reference(messages=[{"role": "user", "content": reference_prompt}]) return self.reference diff --git a/prompting/tasks/task_registry.py b/prompting/tasks/task_registry.py index d26dbd06..8dee0ee2 100644 --- a/prompting/tasks/task_registry.py +++ b/prompting/tasks/task_registry.py @@ -35,9 +35,9 @@ def __hash__(self): class TaskRegistry(BaseModel): task_configs: ClassVar[list[TaskConfig]] = [ TaskConfig( - task=WikiQuestionAnsweringTask, probability=0.2, datasets=[WikiDataset], reward_model=QARewardConfig + task=WikiQuestionAnsweringTask, probability=0.05, datasets=[WikiDataset], reward_model=QARewardConfig ), - TaskConfig(task=WebQuestionAnsweringTask, probability=0.1, datasets=[DDGDataset], reward_model=QARewardConfig), + TaskConfig(task=WebQuestionAnsweringTask, probability=0.25, datasets=[DDGDataset], reward_model=QARewardConfig), TaskConfig( task=InferenceTask, probability=0.1, diff --git a/prompting/tasks/web_retrieval.py b/prompting/tasks/web_retrieval.py index f595a0ba..7d5f4b19 100644 --- a/prompting/tasks/web_retrieval.py +++ b/prompting/tasks/web_retrieval.py @@ -43,5 +43,7 @@ def make_query(self, dataset_entry: DDGDatasetEntry) -> str: return self.query def make_reference(self, dataset_entry: DDGDatasetEntry) -> str: 
- self.reference = json.dumps(dataset_entry.model_dump_json()) + ref_dict = dataset_entry.model_dump_json() + ref_dict['query'] = self.query + self.reference = json.dumps(ref_dict) return self.reference diff --git a/shared/settings.py b/shared/settings.py index 1be9d5ea..e4df024f 100644 --- a/shared/settings.py +++ b/shared/settings.py @@ -113,6 +113,7 @@ class SharedSettings(BaseSettings): SUBTENSOR_NETWORK: Optional[str] = Field(None, env="SUBTENSOR_NETWORK") MAX_ALLOWED_VRAM_GB: int = Field(62, env="MAX_ALLOWED_VRAM_GB") LLM_MAX_MODEL_LEN: int = Field(4096, env="LLM_MAX_MODEL_LEN") + PROXY_URL: Optional[str] = Field(None, env="PROXY_URL") LLM_MODEL: str = Field("hugging-quants/Meta-Llama-3.1-70B-Instruct-AWQ-INT4", env="LLM_MODEL") SAMPLING_PARAMS: dict[str, Any] = { "temperature": 0.7, From 432446ec918189c039fe58154fd2962998415de8 Mon Sep 17 00:00:00 2001 From: richwardle Date: Tue, 7 Jan 2025 13:58:44 +0000 Subject: [PATCH 10/10] precommit fixes --- neurons/validator.py | 2 +- prompting/tasks/web_retrieval.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/neurons/validator.py b/neurons/validator.py index a8735229..9098a680 100644 --- a/neurons/validator.py +++ b/neurons/validator.py @@ -22,7 +22,7 @@ torch.multiprocessing.set_start_method("spawn", force=True) -NEURON_SAMPLE_SIZE = 100 #TODO: Should add this to constants.py +NEURON_SAMPLE_SIZE = 100 # TODO: Should add this to constants.py def create_loop_process(task_queue, scoring_queue, reward_events): diff --git a/prompting/tasks/web_retrieval.py b/prompting/tasks/web_retrieval.py index 7d5f4b19..7d09fc13 100644 --- a/prompting/tasks/web_retrieval.py +++ b/prompting/tasks/web_retrieval.py @@ -44,6 +44,6 @@ def make_query(self, dataset_entry: DDGDatasetEntry) -> str: def make_reference(self, dataset_entry: DDGDatasetEntry) -> str: ref_dict = dataset_entry.model_dump_json() - ref_dict['query'] = self.query + ref_dict["query"] = self.query self.reference = json.dumps(ref_dict) return self.reference
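For readers following the last two patches, below is a minimal, self-contained sketch of how the web-retrieval reference could round-trip between the task (which attaches the generated query) and the reward model (which reads the query and page content back out). `WebRetrievalEntry`, `make_reference`, and `read_reference` are simplified stand-ins for the repository's `DDGDatasetEntry` and the task/reward methods, and the sketch serializes via pydantic's `model_dump()` before attaching the query; this is an illustrative assumption, not the exact code in the diff.

```python
import json
from typing import Optional

from pydantic import BaseModel


class WebRetrievalEntry(BaseModel):
    # Hypothetical stand-in for the repository's DDGDatasetEntry; field names mirror the diff.
    search_term: str
    website_url: str
    website_content: str
    query: Optional[str] = None  # attached by the task once the query has been generated


def make_reference(entry: WebRetrievalEntry, query: str) -> str:
    # Serialize to a plain dict first so the generated query can be attached,
    # then encode the whole thing once as a JSON string for transport.
    ref_dict = entry.model_dump()
    ref_dict["query"] = query
    return json.dumps(ref_dict)


def read_reference(reference: str) -> WebRetrievalEntry:
    # The reward side parses the JSON reference back into the entry model,
    # recovering both the page content and the query that was asked.
    return WebRetrievalEntry.model_validate_json(reference)


if __name__ == "__main__":
    entry = WebRetrievalEntry(
        search_term="example search term",
        website_url="https://example.com",
        website_content="Example page content that contains the answer.",
    )
    reference = make_reference(entry, query="What does the example page contain?")
    parsed = read_reference(reference)
    print(parsed.query, parsed.website_url)
```

Carrying the query inside the serialized reference is what lets the reward model compare the miner's scraped content against the exact question that was asked, rather than against the original random search term.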