From 04b2c22884acea87047a9cd67ca52aca6ca0416c Mon Sep 17 00:00:00 2001
From: latentvector
Date: Fri, 27 Sep 2024 09:47:46 -0400
Subject: [PATCH] new refactor

---
 commune/README.md | 149 ---
 commune/aes.py | 46 -
 commune/{ => app}/app.py | 0
 commune/cli.py | 25 +-
 commune/{ => executor}/executor.py | 0
 commune/{ => key}/key.py | 0
 commune/module.py | 52 +-
 commune/{ => server}/client.py | 0
 commune/{ => server}/namespace.py | 0
 commune/server/pm2.py | 313 ++++++
 commune/{ => server}/serializer/bytes.py | 0
 commune/{ => server}/serializer/munch.py | 0
 commune/{ => server}/serializer/numpy.py | 0
 commune/{ => server}/serializer/pandas.py | 0
 commune/{ => server}/serializer/serializer.py | 0
 commune/{ => server}/serializer/torch.py | 0
 commune/{ => server}/server.py | 0
 commune/{ => server}/user.py | 0
 commune/shortcuts.yaml | 10 -
 commune/{ => subspace}/subspace.py | 96 +-
 commune/ticket.py | 131 ---
 commune/urls.yaml | 74 --
 commune/{ => vali}/vali.py | 0
 modules/agent/agent.py | 76 ++
 modules/agent/app.py | 99 ++
 modules/agent/child.py | 80 ++
 modules/agent/data/agent_data.py | 24 +
 modules/agent/factory/agent_factory.py | 11 +
 modules/agent/judge.py | 28 +
 modules/agent/maker/agent_maker.py | 27 +
 modules/agent/sumarizer/agent_sumarizer.py | 193 ++++
 modules/api/api.py | 52 +
 modules/api/app.py | 50 +
 modules/base/base.py | 11 +
 modules/base/file_module.py | 6 +
 modules/chat/app.py | 217 +++++
 modules/chat/chat.py | 261 +++++
 modules/chat/history.py | 73 ++
 modules/code/code.py | 716 ++++++++++++++
 modules/data/__init__.py | 0
 modules/data/data.py | 65 ++
 modules/data/data.yaml | 2 +
 modules/data/diffusion/dream/dream_dataset.py | 370 +++++++
 .../data/diffusion/dream/prompt_dataset.py | 20 +
 modules/data/hf/data_hf.py | 267 +++++
 modules/data/hf/data_hf_docs.md | 35 +
 modules/data/image/globe/data_image_globe.py | 9 +
 .../data/image/globe/data_image_globe.yaml | 6 +
 modules/data/text/code/data_text_code.py | 121 +++
 modules/data/text/code/data_text_code.yaml | 2 +
 modules/data/text/folder/data_text_folder.py | 138 +++
 .../data/text/folder/data_text_folder.yaml | 0
 .../folder/docs/data_text_realfake_docs.md | 142 +++
 modules/data/text/math/data_text_math.py | 51 +
 modules/data/text/math/data_text_math.yaml | 2 +
 modules/data/text/pile/pile.py | 240 +++++
 modules/data/text/pile/pile.yaml | 7 +
 .../data/text/realfake/data_text_realfake.py | 118 +++
 .../text/realfake/data_text_realfake.yaml | 3 +
 .../realfake/docs/data_text_realfake_docs.md | 142 +++
 modules/data/text/squad.py | 22 +
 .../data/text/truthqa/data_text_truthqa.py | 67 ++
 .../data/text/truthqa/data_text_truthqa.yaml | 2 +
 modules/docker/docker.py | 451 +++++++++
 modules/emoji/emoji.py | 100 ++
 modules/evm/chain/chain.py | 10 +
 modules/evm/chain/docker-compose.yaml | 17 +
 modules/evm/contract.py | 362 +++++++
 modules/evm/evm.py | 355 +++++++
 modules/evm/evm.yaml | 9 +
 modules/evm/key.py | 834 ++++++++++++++++
 modules/evm/network.py | 185 ++++
 modules/evm/networks.yaml | 158 +++
 modules/git/git.py | 136 +++
 modules/key/app/app.py | 217 +++++
 modules/key/app/style.css | 94 ++
 modules/miner/miner.md | 0
 modules/miner/miner.py | 346 +++++++
 modules/model/__init__.py | 1 +
 modules/model/model.py | 628 ++++++++++++
 modules/model/openai.py | 175 ++++
 modules/model/openrouter.py | 166 ++++
 modules/model/sentence.py | 35 ++
 modules/model/utils.py | 75 ++
 modules/pipeline/__init__.py | 1 +
 modules/pipeline/pipeline.py | 95 ++
 modules/pipeline/pipeline.yaml | 1 +
 modules/plotly/plotly.py | 105 ++
 modules/process/pipe.py | 63 ++
 modules/process/pool.py | 918 ++++++++++++++++++
 modules/process/process.py | 163 ++++
 modules/process/utils.py | 809 +++++++++++++++
 modules/ray/__init__.py | 0
 modules/ray/actor_pool.py | 363 +++++++
 modules/ray/client/ray_client.py | 126 +++
 modules/ray/client/ray_client.yaml | 10 +
 modules/ray/queue.py | 311 ++++++
 modules/ray/ray.py | 530 ++++++++++
 .../ray/server/object/ray_object_server.py | 99 ++
 .../ray/server/object/ray_object_server.yaml | 3 +
 modules/ray/server/queue/ray_queue_server.py | 235 +++++
 .../ray/server/queue/ray_queue_server.yaml | 3 +
 modules/ray/server/redis/ray_redis_server.py | 44 +
 .../ray/server/redis/ray_redis_server.yaml | 3 +
 modules/ray/utils.py | 202 ++++
 modules/remote/.gitignore | 1 +
 modules/remote/README.md | 51 +
 modules/remote/app.py | 259 +++++
 modules/remote/data/.gitkeep | 0
 modules/remote/remote.py | 553 +++++++++++
 modules/remote/ssh.py | 119 +++
 modules/repo/.gitignore | 0
 modules/repo/repo.py | 185 ++++
 modules/router/dashboard/router_dashboard.py | 135 +++
 modules/router/ex.py | 212 ++++
 modules/router/router.py | 25 +
 modules/router/router_v0.py | 389 ++++++++
 modules/router/task.py | 162 ++++
 modules/router/worker.py | 0
 modules/sandbox.py | 3 +
 modules/ssh.py | 203 ++++
 modules/storage/storage.py | 123 +++
 modules/storage/vali.py | 33 +
 modules/storage/vector/vector_store.py | 121 +++
 modules/storage/vector/vector_store.yaml | 2 +
 modules/streamlit/__init__.py | 2 +
 modules/streamlit/auth/config_template.yaml | 17 +
 modules/streamlit/auth/streamlit_auth.py | 87 ++
 modules/streamlit/plot.py | 420 ++++++++
 modules/streamlit/streamlit.py | 709 ++++++++++++++
 modules/streamlit/styles/commune.css | 8 +
 modules/streamlit/utils.py | 52 +
 .../streamlit/watchdog/streamlit_watchdog.py | 46 +
 .../watchdog/streamlit_watchdog.yaml | 2 +
 modules/subspace/__init__.py | 4 +
 modules/subspace/app/app.py | 69 ++
 modules/subspace/app/backend.py | 0
 modules/template/template.py | 61 ++
 modules/test.py | 27 +
 modules/tool/__init__.py | 0
 modules/tool/compare_token_price.py | 41 +
 modules/tool/compound.py | 38 +
 modules/tool/defi/__init__.py | 0
 modules/tool/defi/aave.py | 38 +
 modules/tool/defi/compare_token_price.py | 41 +
 modules/tool/defi/compound.py | 38 +
 modules/tool/defi/defillama/aave.py | 70 ++
 modules/tool/defi/defillama/defillama.py | 72 ++
 modules/tool/defi/defillama/lido.py | 62 ++
 modules/tool/defi/defillama/rocketpool.py | 86 ++
 modules/tool/defi/get_best_apy.py | 28 +
 modules/tool/defi/inch/balances.py | 56 ++
 modules/tool/defi/inch/gasprice.py | 40 +
 modules/tool/defi/inch/inch.py | 70 ++
 modules/tool/defi/inch/prices.py | 75 ++
 modules/tool/defi/openai_helper.py | 61 +
 modules/tool/defi/read_file.py | 13 +
 modules/tool/defi/swap.py | 71 ++
 modules/tool/defi/tool.py | 72 ++
 modules/tool/defi/write_file.py | 14 +
 modules/tool/get_best_apy.py | 28 +
 modules/tool/openai_helper.py | 61 +
 modules/tool/read_file.py | 13 +
 modules/tool/registry.py | 10 +
 modules/tool/search/tool_search.py | 11 +
 modules/tool/swap.py | 71 +
 modules/tool/tool.py | 28 +
 modules/tool/web/web.py | 18 +
 modules/tool/web/web.yaml | 1 +
 modules/tool/write_file.py | 14 +
 modules/trainer/__init__.py | 1 +
 modules/trainer/trainer.py | 184 ++++
 modules/web/web.py | 183 ++++
 modules/web/web.yaml | 1 +
 tests/test_module.py | 14 -
 175 files changed, 18285 insertions(+), 508 deletions(-)
 delete mode 100644 commune/README.md
 delete mode 100644 commune/aes.py
 rename commune/{ => app}/app.py (100%)
 rename commune/{ => executor}/executor.py (100%)
 rename commune/{ => key}/key.py (100%)
 rename commune/{ => server}/client.py (100%)
 rename commune/{ => server}/namespace.py (100%)
 create mode 100644 commune/server/pm2.py
 rename commune/{ => server}/serializer/bytes.py (100%)
 rename commune/{ => server}/serializer/munch.py (100%)
 rename commune/{ => server}/serializer/numpy.py (100%)
 rename commune/{ => server}/serializer/pandas.py (100%)
 rename commune/{ => server}/serializer/serializer.py (100%)
 rename commune/{ => server}/serializer/torch.py (100%)
 rename commune/{ => server}/server.py (100%)
 rename commune/{ => server}/user.py (100%)
 delete mode 100644 commune/shortcuts.yaml
 rename commune/{ => subspace}/subspace.py (98%)
 delete mode 100644 commune/ticket.py
 delete mode 100644 commune/urls.yaml
 rename commune/{ => vali}/vali.py (100%)
 create mode 100644 modules/agent/agent.py
 create mode 100644 modules/agent/app.py
 create mode 100644 modules/agent/child.py
 create mode 100644 modules/agent/data/agent_data.py
 create mode 100644 modules/agent/factory/agent_factory.py
 create mode 100644 modules/agent/judge.py
 create mode 100644 modules/agent/maker/agent_maker.py
 create mode 100644 modules/agent/sumarizer/agent_sumarizer.py
 create mode 100644 modules/api/api.py
 create mode 100644 modules/api/app.py
 create mode 100644 modules/base/base.py
 create mode 100644 modules/base/file_module.py
 create mode 100644 modules/chat/app.py
 create mode 100644 modules/chat/chat.py
 create mode 100644 modules/chat/history.py
 create mode 100644 modules/code/code.py
 create mode 100755 modules/data/__init__.py
 create mode 100644 modules/data/data.py
 create mode 100644 modules/data/data.yaml
 create mode 100644 modules/data/diffusion/dream/dream_dataset.py
 create mode 100644 modules/data/diffusion/dream/prompt_dataset.py
 create mode 100644 modules/data/hf/data_hf.py
 create mode 100644 modules/data/hf/data_hf_docs.md
 create mode 100644 modules/data/image/globe/data_image_globe.py
 create mode 100644 modules/data/image/globe/data_image_globe.yaml
 create mode 100644 modules/data/text/code/data_text_code.py
 create mode 100644 modules/data/text/code/data_text_code.yaml
 create mode 100644 modules/data/text/folder/data_text_folder.py
 create mode 100644 modules/data/text/folder/data_text_folder.yaml
 create mode 100644 modules/data/text/folder/docs/data_text_realfake_docs.md
 create mode 100644 modules/data/text/math/data_text_math.py
 create mode 100644 modules/data/text/math/data_text_math.yaml
 create mode 100644 modules/data/text/pile/pile.py
 create mode 100644 modules/data/text/pile/pile.yaml
 create mode 100644 modules/data/text/realfake/data_text_realfake.py
 create mode 100644 modules/data/text/realfake/data_text_realfake.yaml
 create mode 100644 modules/data/text/realfake/docs/data_text_realfake_docs.md
 create mode 100644 modules/data/text/squad.py
 create mode 100644 modules/data/text/truthqa/data_text_truthqa.py
 create mode 100644 modules/data/text/truthqa/data_text_truthqa.yaml
 create mode 100644 modules/docker/docker.py
 create mode 100644 modules/emoji/emoji.py
 create mode 100644 modules/evm/chain/chain.py
 create mode 100644 modules/evm/chain/docker-compose.yaml
 create mode 100644 modules/evm/contract.py
 create mode 100644 modules/evm/evm.py
 create mode 100644 modules/evm/evm.yaml
 create mode 100644 modules/evm/key.py
 create mode 100644 modules/evm/network.py
 create mode 100644 modules/evm/networks.yaml
 create mode 100644 modules/git/git.py
 create mode 100644 modules/key/app/app.py
 create mode 100644 modules/key/app/style.css
 create mode 100644 modules/miner/miner.md
 create mode 100644 modules/miner/miner.py
 create mode 100644 modules/model/__init__.py
 create mode 100644 modules/model/model.py
 create mode 100644 modules/model/openai.py
 create mode 100644 modules/model/openrouter.py
 create mode 100644 modules/model/sentence.py
 create mode 100644 modules/model/utils.py
 create mode 100755 modules/pipeline/__init__.py
 create mode 100755 modules/pipeline/pipeline.py
 create mode 100755 modules/pipeline/pipeline.yaml
 create mode 100644 modules/plotly/plotly.py
 create mode 100644 modules/process/pipe.py
 create mode 100644 modules/process/pool.py
 create mode 100644 modules/process/process.py
 create mode 100644 modules/process/utils.py
 create mode 100755 modules/ray/__init__.py
 create mode 100755 modules/ray/actor_pool.py
 create mode 100755 modules/ray/client/ray_client.py
 create mode 100755 modules/ray/client/ray_client.yaml
 create mode 100755 modules/ray/queue.py
 create mode 100644 modules/ray/ray.py
 create mode 100755 modules/ray/server/object/ray_object_server.py
 create mode 100755 modules/ray/server/object/ray_object_server.yaml
 create mode 100755 modules/ray/server/queue/ray_queue_server.py
 create mode 100755 modules/ray/server/queue/ray_queue_server.yaml
 create mode 100755 modules/ray/server/redis/ray_redis_server.py
 create mode 100755 modules/ray/server/redis/ray_redis_server.yaml
 create mode 100755 modules/ray/utils.py
 create mode 100644 modules/remote/.gitignore
 create mode 100644 modules/remote/README.md
 create mode 100644 modules/remote/app.py
 create mode 100644 modules/remote/data/.gitkeep
 create mode 100644 modules/remote/remote.py
 create mode 100644 modules/remote/ssh.py
 create mode 100644 modules/repo/.gitignore
 create mode 100644 modules/repo/repo.py
 create mode 100644 modules/router/dashboard/router_dashboard.py
 create mode 100644 modules/router/ex.py
 create mode 100644 modules/router/router.py
 create mode 100644 modules/router/router_v0.py
 create mode 100644 modules/router/task.py
 create mode 100644 modules/router/worker.py
 create mode 100644 modules/sandbox.py
 create mode 100644 modules/ssh.py
 create mode 100644 modules/storage/storage.py
 create mode 100644 modules/storage/vali.py
 create mode 100644 modules/storage/vector/vector_store.py
 create mode 100644 modules/storage/vector/vector_store.yaml
 create mode 100755 modules/streamlit/__init__.py
 create mode 100644 modules/streamlit/auth/config_template.yaml
 create mode 100644 modules/streamlit/auth/streamlit_auth.py
 create mode 100644 modules/streamlit/plot.py
 create mode 100755 modules/streamlit/streamlit.py
 create mode 100644 modules/streamlit/styles/commune.css
 create mode 100755 modules/streamlit/utils.py
 create mode 100644 modules/streamlit/watchdog/streamlit_watchdog.py
 create mode 100644 modules/streamlit/watchdog/streamlit_watchdog.yaml
 create mode 100644 modules/subspace/__init__.py
 create mode 100644 modules/subspace/app/app.py
 create mode 100644 modules/subspace/app/backend.py
 create mode 100644 modules/template/template.py
 create mode 100644 modules/test.py
 create mode 100644 modules/tool/__init__.py
 create mode 100644 modules/tool/compare_token_price.py
 create mode 100644 modules/tool/compound.py
 create mode 100644 modules/tool/defi/__init__.py
 create mode 100644 modules/tool/defi/aave.py
 create mode 100644 modules/tool/defi/compare_token_price.py
 create mode 100644 modules/tool/defi/compound.py
 create mode 100644 modules/tool/defi/defillama/aave.py
 create mode 100644 modules/tool/defi/defillama/defillama.py
 create mode 100644 modules/tool/defi/defillama/lido.py
 create mode 100644 modules/tool/defi/defillama/rocketpool.py
 create mode 100644 modules/tool/defi/get_best_apy.py
 create mode 100644 modules/tool/defi/inch/balances.py
 create mode 100644 modules/tool/defi/inch/gasprice.py
 create mode 100644 modules/tool/defi/inch/inch.py
 create mode 100644 modules/tool/defi/inch/prices.py
 create mode 100644 modules/tool/defi/openai_helper.py
 create mode 100644 modules/tool/defi/read_file.py
 create mode 100644 modules/tool/defi/swap.py
 create mode 100644 modules/tool/defi/tool.py
 create mode 100644 modules/tool/defi/write_file.py
 create mode 100644 modules/tool/get_best_apy.py
 create mode 100644 modules/tool/openai_helper.py
 create mode 100644 modules/tool/read_file.py
 create mode 100644 modules/tool/registry.py
 create mode 100644 modules/tool/search/tool_search.py
 create mode 100644 modules/tool/swap.py
 create mode 100644 modules/tool/tool.py
 create mode 100644 modules/tool/web/web.py
 create mode 100644 modules/tool/web/web.yaml
 create mode 100644 modules/tool/write_file.py
 create mode 100644 modules/trainer/__init__.py
 create mode 100644 modules/trainer/trainer.py
 create mode 100644 modules/web/web.py
 create mode 100644 modules/web/web.yaml
 delete mode 100644 tests/test_module.py

diff --git a/commune/README.md b/commune/README.md
deleted file mode 100644
index 233b417d..00000000
--- a/commune/README.md
+++ /dev/null
@@ -1,149 +0,0 @@
-
-
-# **Commune AI**
-
-[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
-[![Discord Chat](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.com/invite/DgjvQXvhqf)
-[![Website Uptime](https://img.shields.io/website-up-down-green-red/http/monip.org.svg)](https://www.communeai.org/)
-[![Twitter Follow](https://img.shields.io/twitter/follow/communeaidotorg.svg?style=social&label=Follow)](https://twitter.com/communeaidotorg)
-
-![Alt text](image.png)
-PLEASE REFER TO THE DOCS FOLDER FOR MORE INFO
-
-[DOCS](./commune/docs)
-
-Introduction to Commune
-
-Commune is an open-source project that aims to create a network for connecting various developer tools. It's designed to be flexible and unopinionated, allowing developers to use it alongside their existing projects.
-
-Key Features:
-- Module Filesystem
-- Subspace blockchain integration
-- Flexible key management
-- Pythonic CLI
-
-To get started, you can install Commune either locally or using Docker.
-
-Installation
-
-Local Installation:
-```bash
-apt-get install python3.10 python3-pip npm
-npm install -g pm2
-pip install -r requirements.txt
-pip install -e .
-```
-
-Docker Installation:
-```bash
-git clone https://github.com/commune-ai/commune.git
-cd commune
-make build
-make start
-make enter
-```
-
-After installation, sync with the network:
-```bash
-c ls
-```
-
-Page 3: Module Filesystem
-
-Commune organizes modules in a filesystem-like structure. You can create local modules that integrate seamlessly with Commune's core modules.
-
-Example:
-```python
-import commune as c
-
-class Example(c.Module):
-    def __init__(self):
-        pass
-
-    def predict(self, x):
-        return x + 1
-```
-
-You can call this module using:
-```bash
-c model/predict x=1
-```
-
-Page 4: Subspace Integration
-
-Commune uses the Subspace blockchain for:
-- Decentralized Name Service (DNS) for deployed objects
-- Stake-weighted voting system for performance evaluation
-
-To register a module on the blockchain:
-```bash
-c register my_module_path name=my_module tag=1
-```
-
-Page 5: Key Management
-
-Commune uses sr25519 keys for signing, encryption, and verification.
-
-To add a new key:
-```bash
-c add_key alice
-```
-
-To list keys:
-```bash
-c keys
-```
-
-To sign a message:
-```python
-key = c.get_key("alice")
-signature = key.sign("hello world")
-```
-
-Page 6: Pythonic CLI
-
-Commune provides a Pythonic CLI for easy interaction:
-
-```bash
-c {module_name}/{function_name} *args **kwargs
-```
-
-Example:
-```bash
-c ls ./
-```
-
-This is equivalent to:
-```python
-import commune as c
-c.ls('./')
-```
-
-Page 7: Serving Modules
-
-To serve a module:
-```bash
-c serve model.openai::tag
-```
-
-To call a served module:
-```python
-c.call("model.openai::tag/forward", "sup")
-```
-
-Page 8: Testing
-
-To run tests:
-```bash
-pytest commune/tests
-```
-
-Page 9: Contributing
-
-Contributions to Commune are welcome. Please submit pull requests on the GitHub repository.
-
-Page 10: License
-
-Commune is licensed under MIT, but with a "Do What You Want" philosophy. The project encourages open-source usage without strict legal restrictions.
-
-This documentation provides a high-level overview of Commune. For more detailed information on specific features, please refer to the individual module documentation or the project's GitHub repository.
\ No newline at end of file diff --git a/commune/aes.py b/commune/aes.py deleted file mode 100644 index 7b4d5d39..00000000 --- a/commune/aes.py +++ /dev/null @@ -1,46 +0,0 @@ -import base64 -import hashlib -from Crypto import Random -from Crypto.Cipher import AES -from copy import deepcopy -import json -import sys -import inspect -import time -import commune as c - - -class AESKey(c.Module): - bs = AES.block_size - def __init__(self, key:str = 'dummy' ): - self.set_password(key) - - def set_password(self, key:str): - if isinstance(key, str): - key = key.encode() - self.key_phrase = hashlib.sha256(key).digest() - return {'msg': 'set the password'} - - def encrypt(self, data, return_string = True): - data = c.python2str(data) - data = self._pad(data) - iv = Random.new().read(AES.block_size) - cipher = AES.new(self.key_phrase, AES.MODE_CBC, iv) - encrypted_bytes = base64.b64encode(iv + cipher.encrypt(data.encode())) - encrypted_data = encrypted_bytes.decode() if return_string else encrypted_bytes - return encrypted_data - - def decrypt(self, enc): - enc = base64.b64decode(enc) - iv = enc[:AES.block_size] - cipher = AES.new(self.key_phrase, AES.MODE_CBC, iv) - decrypted_data = self._unpad(cipher.decrypt(enc[AES.block_size:])).decode('utf-8') - return c.str2python(decrypted_data) - - def _pad(self, s): - return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs) - - def _unpad(self, s): - return s[:-ord(s[len(s)-1:])] - - \ No newline at end of file diff --git a/commune/app.py b/commune/app/app.py similarity index 100% rename from commune/app.py rename to commune/app/app.py diff --git a/commune/cli.py b/commune/cli.py index 5eb698bf..a4cbc5d1 100644 --- a/commune/cli.py +++ b/commune/cli.py @@ -46,18 +46,6 @@ def forward(self, argv=None): for arg in c.copy(argv): if arg.startswith('--'): key = arg[2:].split('=')[0] - # if key == 'cwd': - # new_argv = [] - # for item in c.copy(argv): - # if '--cwd' in item: - # continue - # new_argv.append(item) - # new_cmd = 'c ' + ' '.join(c.copy(new_argv)) - - # cwd = c.resolve_path(arg.split('=')[1]) - # v = c.cmd(f'{new_cmd}', cwd=cwd) - # c.print(v) - # return new_cmd if key in self.helper_fns: new_argvs = self.argv() new_argvs.remove(arg) @@ -70,10 +58,9 @@ def forward(self, argv=None): init_kwargs[key] = self.determine_type(value) # any of the --flags are init kwargs - if argv[0].endswith('.py'): - argv[0] = argv[0][:-3] - - + if os.path.isdir(argv[0]): + argv[0] = c.path2simple(argv[0]) + if ':' in argv[0]: # {module}:{fn} arg1 arg2 arg3 ... 
argn
+            argv[0] = argv[0].replace(':', '/')
@@ -101,12 +88,13 @@ def forward(self, argv=None):
             fn_path = f'{module_name}/{fn}'
             fn_obj = getattr(module, fn)
             fn_type = c.classify_fn(fn_obj)
-            if fn_type == 'self' or len(init_kwargs) > 0:
+            is_property = c.is_property(fn_obj)
+            if fn_type == 'self' or len(init_kwargs) > 0 or is_property:
                 fn_obj = getattr(module(**init_kwargs), fn)
         # calling function buffer
         input_msg = f'[bold]fn[/bold]: {fn_path}'
-        if callable(fn_obj) and not c.is_property(fn_obj):
+        if callable(fn_obj):
             args, kwargs = self.parse_args(argv)
             if len(args) > 0 or len(kwargs) > 0:
                 inputs = {"args":args, "kwargs":kwargs}
diff --git a/commune/executor.py b/commune/executor/executor.py
similarity index 100%
rename from commune/executor.py
rename to commune/executor/executor.py
diff --git a/commune/key.py b/commune/key/key.py
similarity index 100%
rename from commune/key.py
rename to commune/key/key.py
diff --git a/commune/module.py b/commune/module.py
index ae45ad5e..2e5c6c06 100755
--- a/commune/module.py
+++ b/commune/module.py
@@ -440,10 +437,7 @@ def is_pwd(cls, module:str = None):
         module = c.module(module) if module != None else cls
         return module.dirpath() == c.pwd()
 
-    @classmethod
-    def shortcuts(cls, cache=True) -> Dict[str, str]:
-        return c.get_yaml(f'{cls.dirpath()}/shortcuts.yaml')
-
+
     def __repr__(self) -> str:
         return f'<{self.class_name()}'
     def __str__(self) -> str:
@@ -2493,11 +2490,7 @@ def is_property(cls, fn: 'Callable') -> bool:
         '''
        is the function a property
         '''
-        try:
-            fn = cls.get_fn(fn, ignore_module_pattern=True)
-        except :
-            return False
-
+        fn = c.get_fn(fn)
         return isinstance(fn, property)
 
     def is_fn_self(self, fn):
@@ -2579,18 +2572,18 @@ def has_function_arg(cls, fn, arg:str):
 
     @classmethod
     def classify_fn(cls, fn):
-        try:
-            if not callable(fn):
-                fn = cls.get_fn(fn)
-            if not callable(fn):
-                return 'cls'
-            args = cls.get_function_args(fn)
-            if args[0] == 'self':
-                return 'self'
-            elif args[0] == 'cls':
-                return 'class'
-        except Exception as e:
+        if not callable(fn):
+            fn = cls.get_fn(fn)
+        if not callable(fn):
+            return 'cls'
+        args = cls.get_function_args(fn)
+        if len(args) == 0:
             return 'property'
+        if args[0] == 'self':
+            return 'self'
+        elif args[0] == 'cls':
+            return 'class'
+
         return 'static'
 
     @classmethod
@@ -2780,7 +2773,7 @@ def simple2path(cls,
         """
         # if cls.libname in simple and '/' not in simple and cls.can_import_module(simple):
         #     return simple
-        shortcuts = c.shortcuts()
+        shortcuts = c.shortcuts
         simple = shortcuts.get(simple, simple)
 
         if simple.endswith(extension):
@@ -2898,7 +2891,6 @@ def path2simple(cls,
 
     @classmethod
     def find_classes(cls, path='./', working=False):
-        print(path)
         path = os.path.abspath(path)
         if os.path.isdir(path):
             classes = []
@@ -3245,7 +3237,7 @@ def get_module(cls,
         if path in ['module', 'c']:
             return c.Module
         # if the module is a valid import path
-        shortcuts = c.shortcuts()
+        shortcuts = c.shortcuts
         if path in shortcuts:
             path = shortcuts[path]
         module = None
@@ -4153,6 +4145,20 @@ def fuckkkk(self):
         return "fuckkkk"
 
+    shortcuts = {
+        'user': 'server.user',
+        'namespace': 'server.namespace',
+        'client': 'server.client',
+        'pm2': 'server.pm2',
+        'serializer': 'server.serializer',
+        'openai': 'model.openai',
+        'openrouter': 'model.openrouter',
+        'or': 'model.openrouter',
+        'r': 'remote',
+        's': 'subspace',
+    }
+
+
 c.add_routes()
 Module = c # Module is alias of c
 Module.run(__name__)
diff --git a/commune/client.py b/commune/server/client.py
similarity index 100%
rename from commune/client.py
rename to commune/server/client.py
diff --git a/commune/namespace.py b/commune/server/namespace.py
similarity index 100%
rename from commune/namespace.py
rename to commune/server/namespace.py
diff --git a/commune/server/pm2.py b/commune/server/pm2.py
new file mode 100644
index 00000000..634fabaa
--- /dev/null
+++ b/commune/server/pm2.py
@@ -0,0 +1,313 @@
+import commune as c
+
+import os
+from typing import *
+import json
+
+class PM2(c.Module):
+    dir = os.path.expanduser('~/.pm2')
+
+    @classmethod
+    def restart(cls, name:str, verbose:bool = False, prefix_match:bool = True):
+        server_list = cls.servers()
+        if name in server_list:
+            rm_list = [name]
+        else:
+            if prefix_match:
+                rm_list = [p for p in server_list if p.startswith(name)]
+            else:
+                raise Exception(f'pm2 process {name} not found')
+
+        if len(rm_list) == 0:
+            return []
+        for n in rm_list:
+            c.print(f'Restarting {n}', color='cyan')
+            c.cmd(f"pm2 restart {n}", verbose=False)
+            cls.rm_logs(n)
+        return {'success':True, 'message':f'Restarted {name}'}
+
+    @classmethod
+    def kill(cls, name:str, verbose:bool = True, **kwargs):
+        if name == 'all':
+            return cls.kill_all(verbose=verbose)
+        c.cmd(f"pm2 delete {name}", verbose=False)
+        # remove the logs from the pm2 logs directory
+        cls.rm_logs(name)
+        return {'success':True, 'message':f'Killed {name}'}
+
+    @classmethod
+    def status(cls, verbose=False):
+        stdout = c.cmd(f"pm2 status", verbose=False)
+        if verbose:
+            c.print(stdout, color='green')
+        return stdout
+
+    @classmethod
+    def logs_path_map(cls, name=None):
+        logs_path_map = {}
+        for l in c.ls(f'{cls.dir}/logs/'):
+            key = '-'.join(l.split('/')[-1].split('-')[:-1]).replace('-',':')
+            logs_path_map[key] = logs_path_map.get(key, []) + [l]
+
+        for k in logs_path_map.keys():
+            logs_path_map[k] = {l.split('-')[-1].split('.')[0]: l for l in list(logs_path_map[k])}
+
+        if name != None:
+            return logs_path_map.get(name, {})
+
+        return logs_path_map
+
+    @classmethod
+    def rm_logs(cls, name):
+        logs_map = cls.logs_path_map(name)
+        for k in logs_map.keys():
+            c.rm(logs_map[k])
+
+    @classmethod
+    def logs(cls,
+             module:str,
+             tail: int = 100,
+             verbose: bool = True,
+             mode: str = 'cmd',
+             **kwargs):
+
+        if mode == 'local':
+            text = ''
+            for m in ['out','error']:
+                # pm2 log filenames replace '/', ':' and '_' with '-'
+                path = f'{cls.dir}/logs/{module.replace("/", "-")}-{m}.log'.replace(':', '-').replace('_', '-')
+                try:
+                    text += c.get_text(path, tail=tail)
+                except Exception as e:
+                    c.print(e)
+                    continue
+
+            return text
+        elif mode == 'cmd':
+            return c.cmd(f"pm2 logs {module}", verbose=True)
+        else:
+            raise NotImplementedError(f'mode {mode} not implemented')
+
+    @classmethod
+    def kill_many(cls, search=None, verbose:bool = True, timeout=10):
+        futures = []
+        for name in cls.servers(search=search):
+            f = c.submit(cls.kill, dict(name=name, verbose=verbose), return_future=True, timeout=timeout)
+            futures.append(f)
+        return c.wait(futures)
+
+    @classmethod
+    def kill_all(cls, verbose:bool = True, timeout=10, trials=10):
+        while len(cls.servers()) > 0:
+            results = cls.kill_many(search=None, verbose=verbose, timeout=timeout)
+            trials -= 1
+            assert trials > 0, 'Failed to kill all processes'
+        return {'success':True, 'message':f'Killed all processes'}
+
+    @classmethod
+    def servers(cls, search=None, verbose:bool = False) -> List[str]:
+        output_string = c.cmd('pm2 status', verbose=False)
+        module_list = []
+        for line in output_string.split('\n')[3:]:
+            if line.count('│') > 2:
+                server_name = line.split('│')[2].strip()
+                if 'errored' in line:
+                    cls.kill(server_name, verbose=True)
+                    continue
+                module_list += [server_name]
+
+        if search != None:
+            search_true = lambda x: any([s in x for s in search])
+            module_list = [m for m in module_list if search_true(m)]
+
+        module_list = sorted(list(set(module_list)))
+        return module_list
+
+    pm2ls = servers
+
+    @classmethod
+    def exists(cls, name:str) -> bool:
+        return bool(name in cls.servers())
+
+    @classmethod
+    def start(cls,
+              path:str,
+              name:str,
+              cmd_kwargs:str = None,
+              refresh: bool = True,
+              verbose:bool = True,
+              force: bool = True,
+              current_dir: str = True,
+              interpreter: str = None,
+              **kwargs):
+
+        if cls.exists(name) and refresh:
+            cls.kill(name, verbose=verbose)
+
+        cmd = f'pm2 start {path} --name {name}'
+        if force:
+            cmd += ' -f'
+        if interpreter != None:
+            cmd += f' --interpreter {interpreter}'
+        if cmd_kwargs != None:
+            cmd += f' -- '
+            if isinstance(cmd_kwargs, dict):
+                for k, v in cmd_kwargs.items():
+                    cmd += f'--{k} {v}'
+            elif isinstance(cmd_kwargs, str):
+                cmd += f'{cmd_kwargs}'
+
+        c.print(f'[bold cyan]Starting (PM2)[/bold cyan] [bold yellow]{name}[/bold yellow]', color='green')
+
+        if current_dir:
+            kwargs['cwd'] = c.dirpath(path)
+
+        return c.cmd(cmd, verbose=verbose, **kwargs)
+
+    @classmethod
+    def restart_many(cls, search:str = None, network = None, **kwargs):
+        t1 = c.time()
+        servers = cls.servers(search)
+        futures = [c.submit(c.restart, kwargs={"name": m, **kwargs}) for m in servers]
+        results = []
+        for f in c.as_completed(futures):
+            result = f.result()
+            results.append(result)
+        return results
+
+    @classmethod
+    def launch(cls,
+               module:str = None,
+               fn: str = 'serve',
+               name:Optional[str] = None,
+               tag: str = None,
+               args: list = None,
+               kwargs: dict = None,
+               device:str = None,
+               interpreter:str = 'python3',
+               autorestart: bool = True,
+               verbose: bool = False,
+               force:bool = True,
+               meta_fn: str = 'module_fn',
+               tag_seperator:str = '::',
+               cwd = None,
+               refresh:bool = True):
+        import commune as c
+
+        if hasattr(module, 'module_name'):
+            module = module.module_name()
+
+        # avoid mutable default arguments leaking state between calls
+        args = args if args else []
+        kwargs = kwargs if kwargs else {}
+
+        # pack the call spec into a json payload for the meta fn
+        kwargs = {
+            'module': module,
+            'fn': fn,
+            'args': args,
+            'kwargs': kwargs
+        }
+        kwargs_str = json.dumps(kwargs).replace('"', "'")
+
+        name = name or module
+        if refresh:
+            cls.kill(name)
+        module = c.module()
+        # build command to run pm2
+        filepath = c.filepath()
+        cwd = cwd or module.dirpath()
+        command = f"pm2 start {filepath} --name {name} --interpreter {interpreter}"
+        if not autorestart:
+            command += ' --no-autorestart'
+        if force:
+            command += ' -f '
+        command = command + f' -- --fn {meta_fn} --kwargs "{kwargs_str}"'
+        env = {}
+        if device != None:
+            if isinstance(device, int):
+                env['CUDA_VISIBLE_DEVICES'] = str(device)
+            if isinstance(device, list):
+                env['CUDA_VISIBLE_DEVICES'] = ','.join(list(map(str, device)))
+
+        stdout = c.cmd(command, env=env, verbose=verbose, cwd=cwd)
+        return {'success':True, 'message':f'Launched {module}', 'command': command, 'stdout':stdout}
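+
+    # Usage sketch (names are hypothetical; assumes the pm2 binary is installed):
+    #   PM2.start('./my_script.py', name='myapp')  # pm2 start ./my_script.py --name myapp -f
+    #   PM2.logs('myapp')                          # tail the pm2 logs for the process
+    #   PM2.restart('myapp')                       # restart and clear stale log files
+    #   PM2.kill('myapp')                          # pm2 delete myapp and remove its logs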
+
+    @classmethod
+    def remote_fn(cls,
+                  fn: str = 'train',
+                  module: str = None,
+                  args: list = None,
+                  kwargs: dict = None,
+                  name: str = None,
+                  tag: str = None,
+                  refresh: bool = True,
+                  mode = 'pm2',
+                  tag_seperator: str = '::',
+                  cwd = None,
+                  **extra_launch_kwargs
+                  ):
+        import commune as c
+
+        kwargs = c.locals2kwargs(kwargs)
+        # a remote call must not recurse into another remote launch
+        if 'remote' in kwargs:
+            kwargs['remote'] = False
+        if len(fn.split('.')) > 1:
+            module = '.'.join(fn.split('.')[:-1])
+            fn = fn.split('.')[-1]
+
+        kwargs = kwargs or {}
+        args = args or []
+        cwd = cwd or cls.dirpath()
+        module = cls.resolve_object(module)
+        # resolve the name
+        if name == None:
+            # if the module has a module_path function, use that as the name
+            if hasattr(module, 'module_path'):
+                name = module.module_name()
+            else:
+                name = module.__name__.lower()
+
+        c.print(f'[bold cyan]Launching --> <<[/bold cyan][bold yellow]class:{module.__name__}[/bold yellow] [bold white]name[/bold white]:{name} [bold white]fn[/bold white]:{fn} [bold white]mode[/bold white]:{mode}>>', color='green')
+
+        launch_kwargs = dict(
+            module=module,
+            fn=fn,
+            name=name,
+            tag=tag,
+            args=args,
+            kwargs=kwargs,
+            refresh=refresh,
+            **extra_launch_kwargs
+        )
+        assert fn != None, 'fn must be specified for pm2 launch'
+
+        return cls.launch(**launch_kwargs)
\ No newline at end of file
diff --git a/commune/serializer/bytes.py b/commune/server/serializer/bytes.py
similarity index 100%
rename from commune/serializer/bytes.py
rename to commune/server/serializer/bytes.py
diff --git a/commune/serializer/munch.py b/commune/server/serializer/munch.py
similarity index 100%
rename from commune/serializer/munch.py
rename to commune/server/serializer/munch.py
diff --git a/commune/serializer/numpy.py b/commune/server/serializer/numpy.py
similarity index 100%
rename from commune/serializer/numpy.py
rename to commune/server/serializer/numpy.py
diff --git a/commune/serializer/pandas.py b/commune/server/serializer/pandas.py
similarity index 100%
rename from commune/serializer/pandas.py
rename to commune/server/serializer/pandas.py
diff --git a/commune/serializer/serializer.py b/commune/server/serializer/serializer.py
similarity index 100%
rename from commune/serializer/serializer.py
rename to commune/server/serializer/serializer.py
diff --git a/commune/serializer/torch.py b/commune/server/serializer/torch.py
similarity index 100%
rename from commune/serializer/torch.py
rename to commune/server/serializer/torch.py
diff --git a/commune/server.py b/commune/server/server.py
similarity index 100%
rename from commune/server.py
rename to commune/server/server.py
diff --git a/commune/user.py b/commune/server/user.py
similarity index 100%
rename from commune/user.py
rename to commune/server/user.py
diff --git a/commune/shortcuts.yaml b/commune/shortcuts.yaml
deleted file mode 100644
index fe4e5cd6..00000000
--- a/commune/shortcuts.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-chain: subspace.chain
-d: docker
-freegpt: model.freegpt
-openai: model.openai
-openrouter: model.openrouter
-or: model.openrouter
-r: remote
-s: subspace
-sel: selenium
-store: storage
diff --git a/commune/subspace.py b/commune/subspace/subspace.py
similarity index 98%
rename from commune/subspace.py
rename to commune/subspace/subspace.py
index fa11c163..15750034 100644
--- a/commune/subspace.py
+++ b/commune/subspace/subspace.py
@@ -7,8 +7,6 @@
 import requests
 from substrateinterface import SubstrateInterface
 
-__ss58_format__ = 42
-
 class Subspace(c.Module):
     """
@@ -16,6 +14,46 @@ class Subspace(c.Module):
     """
     block_time = 8
     token_decimals = 9
+    __ss58_format__ = 42
+    urls = {
+        "main": [
"commune-api-node-0.communeai.net", + "commune-api-node-1.communeai.net", + "commune-api-node-2.communeai.net", + "commune-api-node-3.communeai.net", + "commune-api-node-4.communeai.net", + "commune-api-node-5.communeai.net", + "commune-api-node-6.communeai.net", + "commune-api-node-7.communeai.net", + "commune-api-node-8.communeai.net", + "commune-api-node-9.communeai.net", + "commune-api-node-10.communeai.net", + "commune-api-node-11.communeai.net", + "commune-api-node-12.communeai.net", + "commune-api-node-13.communeai.net", + "commune-api-node-14.communeai.net", + "commune-api-node-15.communeai.net", + "commune-api-node-16.communeai.net", + "commune-api-node-17.communeai.net", + "commune-api-node-18.communeai.net", + "commune-api-node-19.communeai.net", + "commune-api-node-20.communeai.net", + "commune-api-node-21.communeai.net", + "commune-api-node-22.communeai.net", + "commune-api-node-23.communeai.net", + "commune-api-node-24.communeai.net", + "commune-api-node-25.communeai.net", + "commune-api-node-26.communeai.net", + "commune-api-node-27.communeai.net", + "commune-api-node-28.communeai.net", + "commune-api-node-29.communeai.net", + "commune-api-node-30.communeai.net", + "commune-api-node-31.communeai.net" + ], + "test": [ + "testnet-commune-api-node-0.communeai.net" + ] + } whitelist = ['query', 'score', @@ -29,7 +67,7 @@ class Subspace(c.Module): def __init__(self, network: str = 'main', - network_mode: str = 'ws', + mode: str = 'ws', subnet: str = 'commune', url: str = None, url_search: str = 'commune', @@ -40,14 +78,9 @@ def __init__(self, **kwargs, ): self.config = self.set_config(locals()) - - # merge the config with the subspace config - self.config = c.dict2munch({**Subspace.config(), **self.config}) self.set_network(network ) self.set_netuid(netuid) - if sync_loop: - c.thread(self.sync_loop) - + init_subspace = __init__ ########################### @@ -164,15 +197,7 @@ def resolve_key(self, key = None): key = 'module' if isinstance(key, str): - address2key = c.address2key() - key2address = {v:k for k,v in address2key.items()} - if key in address2key: - key = address2key[key] - assert key in key2address, f"Key {key} not found in your keys, please make sure you have it" - if key == None: - raise ValueError(f"Key {key} not found in your keys, please make sure you have it") - key = c.get_key(key) - + address2key = c.key_exists(key) assert hasattr(key, 'key'), f"Invalid Key {key} as it should have ss58_address attribute." 
return key @@ -187,24 +212,17 @@ def filter_url(self, url): return any([x in url for x in url_search_terms]) - def resolve_url(self, - url = None, - mode='ws', - network=None, + def get_url(self, + mode=None, + network = None, **kwargs): - mode = mode or self.config.network_mode - url = url or self.config.url + mode = mode or self.config.mode assert mode in self.supported_modes - if url != None: - return url network = self.resolve_network(network) - if url == None: - urls_map = self.urls() - urls = urls_map.get(mode, []) - assert len(urls) > 0, f'No urls found for network {network} and mode {mode}' - if len(urls) > 1: - urls_map = list(filter(self.filter_url, urls)) - url = c.choice(urls) + url = c.choice(self.urls.get(network, [])) + url_mode_prefix = mode + 's://' + if not url.startswith(url_mode_prefix): + url = url_mode_prefix + url return url @@ -223,9 +241,6 @@ def substrate(self): if self._substrate == None: self.set_network() return self._substrate - - def urls(self): - return c.get_yaml(self.dirpath() + '/urls.yaml').get(self.network) @substrate.setter def substrate(self, value): @@ -276,9 +291,8 @@ def get_substrate(self, for i in range(trials): try: - - url = self.resolve_url(url, mode=mode, network=network) + url = self.get_url(mode=mode, network=network) if not update: if url in self.url2substrate: substrate = self.url2substrate[url] @@ -534,9 +548,6 @@ def format_amount(self, x, fmt='nano', decimals = None, format=None, features=No def block(self) -> int: return self.substrate.get_block_number(block_hash=None) - - - def query_multi(self, params_batch , substrate=None, module='SubspaceModule', @@ -2576,7 +2587,7 @@ def get_module(self, c.print(f'No module specified, using {module}') module = c.key2address().get(module, module) - url = self.resolve_url( mode=mode) + url = self.get_url( mode=mode) module_key = module is_valid_key = c.valid_ss58_address(module) if not is_valid_key: @@ -3097,6 +3108,5 @@ def register_subnet(self, key: 'Keypair', name: str, metadata: str | None = None return response - Subspace.run(__name__) diff --git a/commune/ticket.py b/commune/ticket.py deleted file mode 100644 index 004216f0..00000000 --- a/commune/ticket.py +++ /dev/null @@ -1,131 +0,0 @@ -import commune as c -import json - -class Ticket(c.Module): - ticket_features = ['signature', 'address', 'crypto_type'] - data_features = ['data', 'time'] - max_age = 10 - description = """ - SINGLE SIGNATURE A (VANILLA SIGNATURE WITH TIMESTAMP): - { - 'data': dict (SIGNED) - 'time': int: (SIGNED) - 'signature': str (NOT SIGNED): the signature of the data - 'address': str: (NOT SIGNED): the address of the signer - 'crypto_type': str/int: (NOT SIGNED): the type of crypto used to sign - } - - SINGLE SIGNATURE B (TICKET): - { - 'data': dict (SIGNED) - 'time': int: (SIGNED) - ticket: { - 'signature': str (NOT SIGNED): the signature of the data - 'address': str: (NOT SIGNED): the address of the signer - 'crypto_type': str/int: (NOT SIGNED): the type of crypto used to sign - } - } - - MULTIPLE SIGNATURES: - { - 'data': dict (SIGNED) - 'time': OPTIONAL[int] : (SIGNED) this is the time the ticket was signed, it can be replaced by the signature time - 'signatures': {name: {signature:str, address:str, crypto_type:int, time: OPTIONAL[int] }} - } - - To verify - - """ - - def ticket(self, data='commune', key=None, ticket_mode=False, **kwargs): - """ - params: - data: dict: data to be signed - key: str: key to sign with - json_str: bool: if True, the ticket will be returned as a json string - - """ - key = 
c.get_key(key) - ticket_dict = { - 'data': data, - 'time': c.time(), - } - signtature = key.sign(ticket_dict, **kwargs).hex() - ticket = {'signature': signtature, - 'address': key.ss58_address, - 'crypto_type': key.crypto_type} - if ticket_mode: - ticket_dict['ticket'] = ticket - else: - ticket_dict.update(ticket) - - return ticket_dict - - create = ticket - - def verify(self, data, - max_age:str=None, - **kwargs): - data = c.copy(data) - max_age = max_age or self.max_age - date_time = data.get('time', data.get('timestamp', 0)) - staleness = c.time() - date_time - if staleness > max_age: - print(f"Signature too Old! from {data} : {staleness} > {max_age}") - return False - tickets = [] - # Ticket scnearios - if 'ticket' in data: - tickets = [data.pop('ticket')] - elif 'tickets' in data: - tickets = list(data.pop('tickets').values()) - elif 'signature' in data and 'address' in data and 'crypto_type' in data: - ticket = [{ - 'signature': data.pop('signature'), - 'address': data.pop('address'), - 'crypto_type': data.pop('crypto_type') - }] - else: - raise ValueError(f"Invalid Ticket {data}") - - for ticket in tickets: - ticket_data = c.copy(data) - if 'timestamp' in ticket: - ticket_data['timestamp'] = ticket_data['timestamp'] - ticket_verified = c.verify(data, **ticket) - if not ticket_verified: - print(f"Failed to verify ticket {ticket}") - return False - - return True - - - @classmethod - def test_staleness(cls, key='test', max_age=0.5): - c.add_key(key) - key = c.get_key(key) - self = cls() - ticket = self.create(key=key) - assert self.ticket2address(ticket) == key.ss58_address, f"{self.ticket2address(ticket)} != {key.ss58_address}" - print('waiting for staleness') - c.sleep(max_age + 0.1) - key = c.get_key(key) - assert not c.verify_ticket(ticket, max_age=max_age), 'Failed to verify' - return {'success': True, 'ticket': ticket, 'key': str(key)} - - def qr(self,filename='./ticket.png'): - filename = self.resolve_path(filename) - return c.module('qrcode').text2qrcode(self.ticket(), filename=filename) - - - - @classmethod - def test(cls, key='test'): - key = c.new_key() - self = cls() - ticket = self.ticket(key=key) - reciept = self.verify(ticket) - print(reciept) - assert reciept, 'Failed to verify' - return {'success': True, 'ticket': ticket, 'key': str(key), 'reciept': reciept} - \ No newline at end of file diff --git a/commune/urls.yaml b/commune/urls.yaml deleted file mode 100644 index 6de297f1..00000000 --- a/commune/urls.yaml +++ /dev/null @@ -1,74 +0,0 @@ -main: - http: - - https://commune.api.onfinality.io/public-http - - https://commune-api-node-0.communeai.net - - https://commune-api-node-1.communeai.net - - https://commune-api-node-2.communeai.net - - https://commune-api-node-3.communeai.net - - https://commune-api-node-4.communeai.net - - https://commune-api-node-5.communeai.net - - https://commune-api-node-6.communeai.net - - https://commune-api-node-7.communeai.net - - https://commune-api-node-8.communeai.net - - https://commune-api-node-9.communeai.net - - https://commune-api-node-10.communeai.net - - https://commune-api-node-11.communeai.net - - https://commune-api-node-12.communeai.net - - https://commune-api-node-13.communeai.net - - https://commune-api-node-14.communeai.net - - https://commune-api-node-15.communeai.net - - https://commune-api-node-16.communeai.net - - https://commune-api-node-17.communeai.net - - https://commune-api-node-18.communeai.net - - https://commune-api-node-19.communeai.net - - https://commune-api-node-20.communeai.net - - 
https://commune-api-node-21.communeai.net - - https://commune-api-node-22.communeai.net - - https://commune-api-node-23.communeai.net - - https://commune-api-node-24.communeai.net - - https://commune-api-node-25.communeai.net - - https://commune-api-node-26.communeai.net - - https://commune-api-node-27.communeai.net - - https://commune-api-node-28.communeai.net - - https://commune-api-node-29.communeai.net - - https://commune-api-node-30.communeai.net - - https://commune-api-node-31.communeai.net - ws: - - wss://commune.api.onfinality.io/public-ws - - wss://commune-api-node-0.communeai.net - - wss://commune-api-node-1.communeai.net - - wss://commune-api-node-2.communeai.net - - wss://commune-api-node-3.communeai.net - - wss://commune-api-node-4.communeai.net - - wss://commune-api-node-5.communeai.net - - wss://commune-api-node-6.communeai.net - - wss://commune-api-node-7.communeai.net - - wss://commune-api-node-8.communeai.net - - wss://commune-api-node-9.communeai.net - - wss://commune-api-node-10.communeai.net - - wss://commune-api-node-11.communeai.net - - wss://commune-api-node-12.communeai.net - - wss://commune-api-node-13.communeai.net - - wss://commune-api-node-14.communeai.net - - wss://commune-api-node-15.communeai.net - - wss://commune-api-node-16.communeai.net - - wss://commune-api-node-17.communeai.net - - wss://commune-api-node-18.communeai.net - - wss://commune-api-node-19.communeai.net - - wss://commune-api-node-20.communeai.net - - wss://commune-api-node-21.communeai.net - - wss://commune-api-node-22.communeai.net - - wss://commune-api-node-23.communeai.net - - wss://commune-api-node-24.communeai.net - - wss://commune-api-node-25.communeai.net - - wss://commune-api-node-26.communeai.net - - wss://commune-api-node-27.communeai.net - - wss://commune-api-node-28.communeai.net - - wss://commune-api-node-29.communeai.net - - wss://commune-api-node-30.communeai.net - - wss://commune-api-node-31.communeai.net -test: - http: - - https://testnet-commune-api-node-0.communeai.net - ws: - - wss://testnet-commune-api-node-0.communeai.net \ No newline at end of file diff --git a/commune/vali.py b/commune/vali/vali.py similarity index 100% rename from commune/vali.py rename to commune/vali/vali.py diff --git a/modules/agent/agent.py b/modules/agent/agent.py new file mode 100644 index 00000000..c1e5de15 --- /dev/null +++ b/modules/agent/agent.py @@ -0,0 +1,76 @@ + +import commune as c +import os + +class Agent(c.Module): + + prompt = """ + GIVEN THE FOLLOWING QUERY + ---OBJECTIVE--- + {objective} + ---USER--- + {text} + --- CODE --- + {code} + --- NEW CODE --- + """ + def __init__(self, + model='model.openrouter', + objective='YOU ARE A CODER THAT IS FEARLESS AND CAN SOLVE ANY PROBLEM THE QUERY IS AS FOLLOW, RESPOND IN THE FULL CODE PLEASE AND NOTHING ELSE, COMMENT IF YOU WANT TO ADD ANYTHING ELSE.'): + self.model = c.module(model)() + self.objective = objective + + def forward(self, + text , + file=None , + trials=1, + code = None, + stream=True, + objective=None, + ): + + """ + params: + text: str: the text that you want to generate code from + file: str: the file that you want to append the code to + trials: int: the number of trials to run + code: str: the code that you want to improve + stream: bool: stream the output + + """ + if trials > 1: + for trial in range(trials): + c.print(f"Trial {trial}") + code = self.forward(text=text, + file=file, + code=code, + stream=stream + ) + return code + if file != None and code == None: + code = self.read_code(file) + objective = objective or 
self.objective + text = self.prompt.format(text=text, code=code, file=file, objective=objective) + code = self.model.generate(text, stream=stream) + if file : + self.write_code(file, code, replace=True) + return code + + def write_code(self, file, code, replace=True): + # if this is a generator + if os.path.exists(file): + os.remove(file) + if c.is_generator(code): + for i, code in enumerate(code): + with open(file, 'a') as f: + f.write(code) + else: + with open(file, 'a') as f: + f.write(code) + + def read_code(self, file): + if not os.path.exists(file): + return None + with open(file, 'r') as f: + code = f.read() + return code diff --git a/modules/agent/app.py b/modules/agent/app.py new file mode 100644 index 00000000..e6c5f3e1 --- /dev/null +++ b/modules/agent/app.py @@ -0,0 +1,99 @@ +import commune as c +import streamlit as st +import os + +class App(c.Module): + + def title(self): + # Change the title of the app to 'Cyberbunk City' + st.markdown('# Cyberbunk City') + + def app(self): + self.title() + self.agent = c.module('agent')() + + # Define the CSS for different buttons with 'cyberbunk' vibes + st.markdown(""" + + """, unsafe_allow_html=True) + + + resolve_path = lambda p: os.path.abspath(os.path.expanduser(p)) + code = None + + + og_code_col, model_code_col = st.columns(2) + + cols = st.columns([2,5]) + folder_path = './' + folder_path = cols[0].text_input('Folder Path', resolve_path(folder_path)) + folder_path = resolve_path(folder_path) + python_files = [f for f in c.glob(folder_path) if f.endswith('.py')] + num_files = len(python_files) + filepath = cols[1].selectbox(f'Select File (n={num_files})', python_files) + with st.expander(f'Code'): + code = c.get_text(filepath) + code = st.text_area('Code', code, height=400) + + input = st.text_area('Input') + + # Use columns to span the page + col1, col2 = st.columns(2) + + send_button = st.button('Transmit', key='send', use_container_width=True) + st.markdown('
', unsafe_allow_html=True)
+
+        if send_button:
+            kwargs = {'text': input, 'code': code, 'file': filepath}
+            tx_id = c.hash(kwargs)
+            st.write('Transaction ID:', tx_id)
+            history_path = self.resolve_path(f'history/{self.key.ss58_address}')
+
+            content = self.get(history_path, {})
+            if 'data' not in content:
+                response = self.agent.forward(**kwargs)
+                def generator(response):
+                    response = self.agent.forward(input, code=code, stream=1)
+                    content['data'] = ''
+                    for r in response:
+                        content['data'] += r
+                        yield r
+                st.write_stream(generator(response))
+                self.put(history_path, content)
+
+            response = content['data']
+
+            with st.expander('Save Response'):
+                response = response.split('```python')[-1].split('```').pop(0)
+                st.write(response)
+
+                save_filepath = st.text_input('Save File Path', filepath)
+                save_button = st.button('Save', key='save', use_container_width=True)
+
+                if save_button:
+                    c.put_text(save_filepath, code)
+                    st.write('Saved to', filepath)
+
+    def process_response(self, code):
+        return code.split('```python')[-1].split('```').pop(0)
+
+
+App.run(__name__)
\ No newline at end of file
diff --git a/modules/agent/child.py b/modules/agent/child.py
new file mode 100644
index 00000000..25b9e111
--- /dev/null
+++ b/modules/agent/child.py
@@ -0,0 +1,80 @@
+import commune as c
+import os
+
+
+class Agent(c.Module):
+
+    prompt = """
+    GIVEN THE FOLLOWING QUERY
+    YOU ARE A CODER THAT IS FEARLESS AND CAN SOLVE ANY PROBLEM
+    THE QUERY IS AS FOLLOWS
+
+    ---START OF QUERY---
+    {text}
+    -- END OF QUERY
+
+    THIS IS YOUR CURRENT CODEBASE THAT YOU CAN IMPROVE PROVIDED BELOW
+    --- START OF FILE ({file}) ---
+    {code}
+    --- END OF FILE ({file})---
+
+    RESPOND IN THE FULL CODE PLEASE AND NOTHING ELSE, COMMENT IF YOU WANT TO ADD ANYTHING ELSE.
+    """
+    def __init__(self, model='model.openrouter'):
+        self.model = c.module(model)()
+
+    def forward(self,
+                text,
+                file=None,
+                trials=1,
+                code=None,
+                stream=False
+                ):
+        """
+        params:
+            text: str: the text that you want to generate code from
+            file: str: the file that you want to append the code to
+            trials: int: the number of trials to run
+            code: str: the code that you want to improve
+            stream: bool: stream the output
+        """
+        if trials > 1:
+            for trial in range(trials):
+                c.print(f"Trial {trial}")
+                code = self.forward(text=text,
+                                    file=file,
+                                    code=code,
+                                    stream=stream
+                                    )
+            return code
+        if file != None and code == None:
+            code = self.read_code(file)
+        text = self.prompt.format(text=text, code=code, file=file)
+        code = self.model.generate(text, stream=stream)
+        if file:
+            self.write_code(file, code, replace=True)
+        return code
+
+    def write_code(self, file, code, replace=True):
+        # if this is a generator, stream the chunks into the file
+        if os.path.exists(file):
+            os.remove(file)
+        if c.is_generator(code):
+            for i, chunk in enumerate(code):
+                with open(file, 'a') as f:
+                    f.write(chunk)
+        else:
+            with open(file, 'a') as f:
+                f.write(code)
+
+    def read_code(self, file):
+        if not os.path.exists(file):
+            return None
+        with open(file, 'r') as f:
+            code = f.read()
+        return code
\ No newline at end of file
diff --git a/modules/agent/data/agent_data.py b/modules/agent/data/agent_data.py
new file mode 100644
index 00000000..8fac248f
--- /dev/null
+++ b/modules/agent/data/agent_data.py
@@ -0,0 +1,24 @@
+import commune as c
+import json
+class Demo(c.Module):
+    instruction = """
+
+    """
+    def __init__(self, a=1, b=2):
+        self.set_config(locals())
+
+    def call(self, timeout=30) -> int:
+        model = c.connect('model.openai') # connect to the model
+
+        input = json.dumps({
+            'instruction': self.instruction,
+            'response': None,
+        })
+
+        # get the docs
+
+        return model.generate(input, timeout=timeout)
\ No newline at end of file
diff --git a/modules/agent/factory/agent_factory.py b/modules/agent/factory/agent_factory.py
new file mode 100644
index 00000000..038d5223
--- /dev/null
+++ b/modules/agent/factory/agent_factory.py
@@ -0,0 +1,11 @@
+import commune as c
+
+class AgentFactory(c.Module):
+    def __init__(self, a=1, b=2):
+        self.set_config(locals())
+
+    def call(self, x:int = 1, y:int = 2) -> int:
+        c.print(self.config, 'This is the config, it is a Munch object')
+        return x + y
\ No newline at end of file
diff --git a/modules/agent/judge.py b/modules/agent/judge.py
new file mode 100644
index 00000000..93557ab0
--- /dev/null
+++ b/modules/agent/judge.py
@@ -0,0 +1,28 @@
+import commune as c
+
+class Judge(c.Module):
+    def __init__(self, model='model.openai'):
+        self.model = c.module(model)()
+
+    def forward(self, text: str = "was the moon landing fake?") -> str:
+        prompt = {
+            "text": text,
+            'question': 'Yay or nay? (Y/N)',
+        }
+        return self.model.forward(prompt)
+
+    def test(self, text: str = "was the moon landing fake?"):
+        return self.forward(text)
+
+
+if __name__ == "__main__":
+    Judge.run()
diff --git a/modules/agent/maker/agent_maker.py b/modules/agent/maker/agent_maker.py
new file mode 100644
index 00000000..4fc9786c
--- /dev/null
+++ b/modules/agent/maker/agent_maker.py
@@ -0,0 +1,27 @@
+import commune as c
+import json
+class Demo(c.Module):
+    instruction = "Fill in the template for a gpt agent."
+
+    example = "Make a gpt that can do math."
+
+    template = {
+        "name": "math",
+        "description": "A demo agent.",
+        "prompt": "Make a gpt that can do math.",
+    }
+    def __init__(self, a=1, b=2):
+        self.set_config(locals())
+
+    def call(self, description) -> int:
+        x = json.dumps({
+            'instructions': self.instruction,
+            'description': description,
+            'template': self.template,
+            'output_template': "FILL IN THE TEMPLATE",
+        })
+
+        return c.call("model.openai/generate", x)
\ No newline at end of file
diff --git a/modules/agent/sumarizer/agent_sumarizer.py b/modules/agent/sumarizer/agent_sumarizer.py
new file mode 100644
index 00000000..0775fe19
--- /dev/null
+++ b/modules/agent/sumarizer/agent_sumarizer.py
@@ -0,0 +1,193 @@
+import commune as c
+import json
+
+
+class Agent(c.Module):
+    description = """
+    Summarize the following content into a more concise representation.
+    Preferably I want it in a succinct knowledge graph
+    (HEAD, RELATIONSHIP, TAIL)
+    """
+
+    tools = []
+
+    def __init__(self,
+                 name='agent',
+                 description: str = None,
+                 model: str = 'model.openai',
+                 network: str = 'local',
+                 tools: list = tools
+                 ):
+        self.name = name
+        self.description = description if description != None else self.description
+        self.set_model(model, network=network)
+        self.set_tools(tools)
+
+    def set_model(self, model:str = 'model.openai', network:str = 'local'):
+        self.model_namespace = c.namespace(search=model, network=network)
+        assert len(self.model_namespace) > 0, f"no models found in {model}, please check the model path"
+        self.model_addresses = list(self.model_namespace.values())
+        self.model_names = list(self.model_namespace.keys())
+        self.network = network
+        self.model = c.connect(c.choice(self.model_addresses))
+        return {"success": True, "message": f"set model to {self.model}"}
+
+    def rm_tools(self, tools:list = None):
+        if tools == None:
+            self.tools = {}
+        else:
+            for t in tools:
+                self.rm_tool(t)
+        return self.tools
+
+    def resolve_tools(self, tools):
+        if isinstance(tools, str):
+            tools = [tools]
+        if isinstance(tools, list):
+            tools = self.get_tools(tools)
+        if tools == None:
+            tools = self.tools
+        return tools
+
+    def call(self,
+             text:str,
+             model=None,
+             history=None,
+             tools=tools,
+             n = 1,
+             description:str = None) -> str:
+
+        if model != None:
+            self.model = c.connect(model)
+        tools = self.resolve_tools(tools)
+        history = history or []
+        description = self.description if description == None else description
+
+        for i in range(n):
+            prompt = {
+                'step': i,
+                'max_steps': n,
+                'description': description,
+                'input': text,
+                'history': history,
+                'tools': tools,
+                'purpose': """ ANSWER THE FOLLOWING""",
+                'confidence': 0,
+                'call_tool': {'tool': None, 'kwargs': None},
+                'answer': None
+            }
+            output = self.model.generate(json.dumps(prompt), max_tokens=512)
+            c.print(output)
+            output = json.loads(output)
+            prompt.update(output)
+            if 'call_tool' in output:
+                tool = output['call_tool']['tool']
+                kwargs = output['call_tool']['kwargs']
+                if kwargs == None:
+                    kwargs = {}
+                if tool != None:
+                    module = '.'.join(tool.split('.')[:-1])
+                    fn = tool.split('.')[-1]
+                    module = c.module(module)
+                    fn_type = module.classify_fn(fn)
+                    if fn_type == "self":
+                        module = module()
+                    try:
+                        response = getattr(module, fn)(**kwargs)
+                    except Exception as e:
+                        response = c.detailed_error(e)
+
+                    output['call_tool']['response'] = response
+                    history.append(output['call_tool'])
+        return output
+    # prompt tooling
+    generate = call
+
+    @classmethod
+    def find_tools(cls, prompt:str):
+        raise NotImplementedError
+
+    @classmethod
+    def prompt2agent(cls, prompt:str) -> 'Agent':
+        cls.find_tools(prompt, topk=5)
+
+    def set_tools(self, tools:list):
+        self.tools = {}
+        self.add_tools(tools)
+        return self.tools
+
+    def add_tools(self, tools:list):
+        for t in tools:
+            self.add_tool(t)
+        return self.tools
+
+    def get_tool(self, tool:str, fn_seperator:str = '.'):
+        module = fn_seperator.join(tool.split(fn_seperator)[:1])
+        fn = tool.split(fn_seperator)[1]
+        module = c.module(module)
+        tool_info = module.fn_schema(fn, docs=True)
+        return tool_info
+
+    def get_tools(self, tools:list, fn_seperator:str = '.'):
+        return {t: self.get_tool(t, fn_seperator=fn_seperator) for t in tools}
+
+    def add_tool(self, tool:str):
+        schema = self.schema(tool)
+        self.tools[tool] = schema
+        return self.tools
+
+    def rm_tool(self, tool:str):
+        del self.tools[tool]
+        return self.tools
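+
+    # Tool registry sketch ('web.search' is a hypothetical "module.fn" path):
+    #   agent.add_tool('web.search')   # stores the function schema in self.tools
+    #   agent.get_tool('web.search')   # -> schema dict fed into the model prompt
+    #   agent.rm_tool('web.search')    # drops it from the registry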
+
+    def test_model(self, prompt: str, model=None, history=None, **kwargs):
+        if model is not None:
+            self.model = c.connect(model)
+
+        prompt = {
+            'description': self.description,
+            'prompt': prompt,
+            'history': history,
+            'response': None,
+            'instruction': 'complete response'
+        }
+        output = self.model.generate(json.dumps(prompt))
+        prompt.update(json.loads(output))
+        return prompt
+
+    def test(self, prompt: str = 'hey', model=None, history=None, **kwargs):
+        response = self.call(prompt, model=model, history=history, **kwargs)
+
+        assert 'response' in response, f"response not in {response}"
+        assert isinstance(response['response'], str), f"response is not a string: {response['response']}"
+        return {
+            'prompt': prompt,
+            'response': response['response'],
+            'success': True,
+        }
diff --git a/modules/api/api.py b/modules/api/api.py
new file mode 100644
index 00000000..f8e16a31
--- /dev/null
+++ b/modules/api/api.py
@@ -0,0 +1,52 @@
+import os
+import commune as c
+
+class ApiManager(c.Module):
+    def __init__(self, path='api_vault', password=None, api_keys=[]):
+        self.path = self.resolve_path(path)
+        self.password = password
+
+    @property
+    def api_keys(self):
+        return self.get(self.path, {})
+
+    def add_api_key(self, name, api_key):
+        api_keys = self.api_keys
+        api_keys[name] = list(set(api_keys.get(name, []) + [api_key]))
+        num_keys = len(api_keys[name])
+        assert isinstance(api_keys, dict), api_keys
+        self.save_api_keys(api_keys)
+        return {'msg': f'api_key {name} added', 'num_keys': num_keys}
+
+    def remove_api_key(self, name, api_key):
+        api_keys = self.api_keys
+        assert api_key in api_keys[name], f"api_key {api_key} does not exist under {name}"
+        api_keys[name].remove(api_key)
+        self.save_api_keys(api_keys)
+        return {'msg': f'api_key {name} removed', 'num_keys': len(api_keys[name])}
+
+    def pop_api_key(self, name, index=-1):
+        api_keys = self.api_keys
+        keys = api_keys.get(name, [])
+        if len(keys) == 0:
+            raise ValueError(f"api_key {name} does not exist")
+        keys.pop(index)
+        api_keys[name] = keys
+        self.save_api_keys(api_keys)
+
+    def get_api_keys(self, name):
+        return self.api_keys.get(name, [])
+
+    def get_api_key(self, name):
+        return c.choice(self.get_api_keys(name))
+
+    def save_api_keys(self, api_keys):
+        self.put(self.path, api_keys)
+
+    @property
+    def api_names(self):
+        return list(self.api_keys.keys())
+
+
+ApiManager.run(__name__)
\ No newline at end of file
diff --git a/modules/api/app.py b/modules/api/app.py
new file mode 100644
index 00000000..84c69ed5
--- /dev/null
+++ b/modules/api/app.py
@@ -0,0 +1,50 @@
+import streamlit as st
+import commune as c
+from commune.api.api import ApiManager
+
+class App(c.Module):
+    def app(self):
+        st.title("API Key Manager")
+
+        # Create an instance of ApiManager
+        api_manager = ApiManager()
+        api_keys = api_manager.api_keys
+        with st.expander("View API"):
+            refresh = st.button("Refresh")
+            st.write(api_keys)
+
+        # Sidebar menu
+        menu = ["Add API", "Remove API"]
+        api_names = list(api_keys.keys())
+        choice = st.selectbox("Select an option", menu)
+
+        if choice == "Add API":
+            new_api_check = st.checkbox("New API")
+            if new_api_check:
+                name = st.text_input("Name")
+            else:
+                name = st.selectbox("Select API Name", api_names)
+            api_key = st.text_input("API Key")
+            if st.button("Add"):
+                result = api_manager.add_api_key(name, api_key)
+                st.success(result['msg'])
+                st.info(f"Number of keys for {name}: {result['num_keys']}")
+        elif choice == "Remove API":
+            st.subheader("Remove API")
+            name =
st.selectbox("Select API Name", api_names) + selected_rm_keys = st.multiselect("Select API Keys to remove", api_keys.get(name, [])) + if st.button("Pop"): + try: + for key in selected_rm_keys: + st.success(api_manager.remove_api_key(name, key)) + except ValueError as e: + st.error(str(e)) + + + + + +if __name__ == '__main__': + App.run() \ No newline at end of file diff --git a/modules/base/base.py b/modules/base/base.py new file mode 100644 index 00000000..082150c9 --- /dev/null +++ b/modules/base/base.py @@ -0,0 +1,11 @@ +import commune as c + +class Demo(c.Module): + def __init__(self, a=1, b=2): + self.set_config(locals()) + + def call(self, x:int = 1, y:int = 2) -> int: + c.print(self.config) + c.print(self.config, 'This is the config, it is a Munch object') + return x + y + \ No newline at end of file diff --git a/modules/base/file_module.py b/modules/base/file_module.py new file mode 100644 index 00000000..f97980bb --- /dev/null +++ b/modules/base/file_module.py @@ -0,0 +1,6 @@ +import commune as c + +class MyModule(c.Module): + + def __init__(self, **kwargs): + self.init_module(kwargs) \ No newline at end of file diff --git a/modules/chat/app.py b/modules/chat/app.py new file mode 100644 index 00000000..8a198264 --- /dev/null +++ b/modules/chat/app.py @@ -0,0 +1,217 @@ +import commune as c +import streamlit as st + + +class Chat(c.Module): + + def __init__(self, + max_tokens=420000, + password = None, + text = 'Hello whaduop fam', + system_prompt = 'The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly.', + model = None, + history_path='history', + **kwargs): + + self.max_tokens = max_tokens + self.text = text + self.set_module(model, + password = password, + history_path=history_path, + system_prompt=system_prompt, + **kwargs) + + def set_module(self, + model, + history_path='history', + password=None, + system_prompt = 'The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly.', + **kwargs): + self.system_prompt = system_prompt + self.admin_key = c.pwd2key(password) if password else self.key + self.model = c.module('model.openrouter')(model=model) + self.models = self.model.models() + self.history_path = self.resolve_path(history_path) + return {'success':True, 'msg':'set_module passed'} + + def add_files(self, files): + cwd = st.text_input('cwd', './') + files = c.glob(cwd) + files = st.multi_select(files, 'files') + file_options = [f.name for f in files] + + + def call(self, + input = 'whats 2+2?' 
, + temperature= 0.5, + max_tokens= 1000000, + model= 'anthropic/claude-3.5-sonnet', + system_prompt= 'make this shit work', + key = None, + stream=True, + ): + # key = self.resolve_key(key) + data = c.locals2kwargs(locals()) + signature = self.key.ticket(c.hash(data)) + return signature + + def save_data(self, data): + path = self.data2path(data) + return c.put(path, data) + + + + def sidebar(self, user='user', password='password', seperator='::'): + with st.sidebar: + st.title('Just Chat') + # assert self.key.verify_ticket(ticket) + with st.expander('l0g1n'): + cols = st.columns([1,1]) + user_name = cols[0].text_input('User', user) + pwd = cols[1].text_input('Password', password, type='password') + seed = c.hash(user_name + seperator + pwd) + self.key = c.pwd2key(seed) + self.data = c.dict2munch({ + 'user': user_name, + 'path': self.resolve_path('history', self.key.ss58_address ), + 'history': self.history(self.key.ss58_address) + }) + + def search_history(self): + search = st.text_input('Search') + # if the search is in any of the columns + history = c.copy(self.data.history) + + history = [h for h in history if search in str(h)] + df = c.df(history) + st.write(df) + + def app(self): + self.sidebar() + + tab_names = ['Chat', 'History'] + tabs = st.tabs(tab_names) + + with tabs[0]: + self.chat_page() + with tabs[1]: + self.history_page() + + def chat_page(self): + model = st.selectbox('Model', self.models) + with st.sidebar.expander('Params', expanded=True): + temperature = st.number_input('Temperature', 0.0, 1.0, 0.5) + if hasattr(self.model, 'get_model_info'): + model_info = self.model.get_model_info(model) + max_tokens = min(int(model_info['context_length']*0.9), self.max_tokens) + else: + model_info = {} + max_tokens = self.max_tokens + max_tokens = st.number_input('Max Tokens', 1, max_tokens, max_tokens) + system_prompt = st.text_area('System Prompt',self.system_prompt, height=200) + + input = st.text_area('Text',self.text, height=100) + params = { + 'input': input, + 'model': model, + 'temperature': temperature, + 'max_tokens': max_tokens, + 'system_prompt': system_prompt, + } + + cols = st.columns([1,1]) + send_button = cols[0].button('Send', key='send', use_container_width=True) + stop_button = cols[1].button('Stop', key='stop', use_container_width=True) + if send_button and not stop_button: + r = self.model.generate(params['input'], + max_tokens=params['max_tokens'], + temperature=params['temperature'], + model=params['model'], + system_prompt=params['system_prompt'], + stream=True) + # dank emojis to give it that extra flair + emojis = '✅🤖💻🔍🧠🔧⌨️' + reverse_emojis = emojis[::-1] + with st.spinner(f'{emojis} Generating {reverse_emojis}'): + st.write_stream(r) + + def post_processing(self, data): + lambda_string = st.text_area('fn(x={model_output})', 'x', height=100) + prefix = 'lambda x: ' + lambda_string = prefix + lambda_string if not lambda_string.startswith(prefix) else lambda_string + lambda_fn = eval(lambda_string) + try: + output = data['data']['output'] + output = lambda_fn(output) + except Exception as e: + st.error(e) + + def history_page(self): + history = self.data.history + if len(history) == 0: + st.error('No History') + return + else: + cols = history[0].keys() + selected_columns = st.multiselect('Columns', cols, cols) + df = c.df(history)[selected_columns] + st.write(df) + def user_files(self): + return c.get(self.data['path']) + + + + def save_data(self, address, data): + return c.put(self.history_path + '/' + address +'/data.json', data) + + def get_data(self, 
address): + return c.get(self.history_path + '/' + address +'/data.json') + + + def clear_history(self, address): + return c.rm(self.history_path + '/'+ address) + + def history_paths(self, address:str=None): + paths = [] + if address == None: + for user_address in self.user_addresses(): + paths += self.history_paths(user_address) + else: + paths = c.ls(self.history_path + '/'+ address) + return paths + + def save_data(self, data): + path = self.history_path + '/'+ data['address'] + '/' + str(data['time']) + '.json' + return c.put(path, data) + + def history(self, address:str=None, columns=['datetime', + 'input', + 'output', + 'system_prompt', + 'model', + 'temperature', + 'max_tokens'], df=False): + paths = self.history_paths(address) + history = [] + for i, path in enumerate(paths): + try: + print(paths) + h = c.get(path) + h.update(h.pop('data')) + h['datetime'] = c.time2datetime(h.pop('time')) + h = {k:v for k,v in h.items() if k in columns} + history.append(h) + except Exception as e: + print(e) + # sort by time + + history = sorted(history, key=lambda x: x['datetime'], reverse=True) + if df: + history = c.df(history) + return history + + def user_addresses(self, display_name=False): + users = [u.split('/')[-1] for u in c.ls(self.history_path)] + return users + +Chat.run(__name__) \ No newline at end of file diff --git a/modules/chat/chat.py b/modules/chat/chat.py new file mode 100644 index 00000000..0aba203f --- /dev/null +++ b/modules/chat/chat.py @@ -0,0 +1,261 @@ +import commune as c +import streamlit as st + + +class Chat(c.Module): + + def __init__(self, + max_tokens=420000, + password = None, + text = 'Hello whaduop fam', + system_prompt = 'The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly.', + name='chat', + model = None, + history_path='history', + **kwargs): + + self.max_tokens = max_tokens + self.text = text + + self.set_module(model, + password = password, + name = name, + history_path=history_path, + system_prompt=system_prompt, + **kwargs) + + def set_module(self, + model, + history_path='history', + name='chat', + password=None, + system_prompt = 'The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly.', + key=None, + **kwargs): + self.system_prompt = system_prompt + self.admin_key = c.pwd2key(password) if password else self.key + self.model = c.module('model.openrouter')(model=model, **kwargs) + self.models = self.model.models() + self.history_path = self.resolve_path(history_path) + return {'success':True, 'msg':'set_module passed'} + + def add_files(self, files): + cwd = st.text_input('cwd', './') + files = c.glob(cwd) + files = st.multi_select(files, 'files') + file_options = [f.name for f in files] + + + def call(self, + input = 'whats 2+2?' , + temperature= 0.5, + max_tokens= 1000000, + model= 'anthropic/claude-3.5-sonnet', + system_prompt= 'make this shit work', + key = None, + stream=True, + ): + # key = self.resolve_key(key) + data = c.locals2kwargs(locals()) + signature = self.key.ticket(c.hash(data)) + return signature + + + @c.endpoint() + def generate(self, + input = 'whats 2+2?' 
, + temperature= 0.5, + max_tokens= 1000000, + model= 'anthropic/claude-3.5-sonnet', + system_prompt= 'make this shit work', + stream=True, + headers = None, + ): + # c.verify_ticket(ticket) + text = system_prompt + '\n' + input + output = self.model.generate( text,stream=stream, model=model, max_tokens=max_tokens, temperature=temperature ) + data = { + 'input': input, + 'output': '', + 'max_tokens': max_tokens, + 'temperature': temperature, + 'system_prompt': system_prompt, + 'headers': headers + } + for token in output: + yield token + def ask(self, *text, **kwargs): + return self.generate(' '.join(text), **kwargs) + + + # data_saved = self.save_data(data) + # yield data + + def save_data(self, data): + path = self.data2path(data) + return c.put(path, data) + + + def get_params(self): + model = st.selectbox('Model', self.models) + temperature = st.slider('Temperature', 0.0, 1.0, 0.5) + if hasattr(self.model, 'get_model_info'): + model_info = self.model.get_model_info(model) + max_tokens = min(int(model_info['context_length']*0.9), self.max_tokens) + else: + model_info = {} + max_tokens = self.max_tokens + max_tokens = st.number_input('Max Tokens', 1, max_tokens, max_tokens) + system_prompt = st.text_area('System Prompt',self.system_prompt, height=200) + input = st.text_area('Text',self.text, height=100) + + params = { + 'model': model, + 'temperature': temperature, + 'max_tokens': max_tokens, + 'system_prompt': system_prompt, + 'input': input + } + + return params + + def sidebar(self, user='user', password='password', seperator='::'): + with st.sidebar: + st.title('Just Chat') + # assert self.key.verify_ticket(ticket) + + user_name = st.text_input('User', user) + pwd = st.text_input('Password', password, type='password') + seed = c.hash(user_name + seperator + pwd) + self.key = c.pwd2key(seed) + self.data = c.dict2munch({ + 'user': user_name, + 'path': self.resolve_path('history', self.key.ss58_address ), + 'history': self.history(self.key.ss58_address) + }) + + def search_history(self): + search = st.text_input('Search') + # if the search is in any of the columns + history = c.copy(self.data.history) + + history = [h for h in history if search in str(h)] + df = c.df(history) + st.write(df) + + def app(self): + self.sidebar() + + tab_names = ['Chat', 'History'] + tabs = st.tabs(tab_names) + + with tabs[0]: + self.chat_page() + with tabs[1]: + self.history_page() + + def chat_page(self): + with st.sidebar.expander('Params', expanded=True): + params = self.get_params() + data = c.ticket(params, key=self.key) + + # make the buttons cover the whole page + cols = st.columns([1,1]) + send_button = cols[0].button('Send', key='send', use_container_width=True) + stop_button = cols[1].button('Stop', key='stop', use_container_width=True) + if send_button and not stop_button: + r = self.generate(data) + # dank emojis to give it that extra flair + emojis = '✅🤖💻🔍🧠🔧⌨️' + reverse_emojis = emojis[::-1] + with st.spinner(f'{emojis} Generating {reverse_emojis}'): + st.write_stream(r) + + self.post_processing(data) + + + def post_processing(self, data): + lambda_string = st.text_area('fn(x={model_output})', 'x', height=100) + prefix = 'lambda x: ' + lambda_string = prefix + lambda_string if not lambda_string.startswith(prefix) else lambda_string + lambda_fn = eval(lambda_string) + try: + output = data['data']['output'] + output = lambda_fn(output) + except Exception as e: + st.error(e) + + def history_page(self): + history = self.data.history + if len(history) == 0: + st.error('No History') + return + 
else: + cols = history[0].keys() + selected_columns = st.multiselect('Columns', cols, cols) + df = c.df(history)[selected_columns] + st.write(df) + def user_files(self): + return c.get(self.data['path']) + + + + def save_data(self, address, data): + return c.put(self.history_path + '/' + address +'/data.json', data) + + def get_data(self, address): + return c.get(self.history_path + '/' + address +'/data.json') + + + def clear_history(self, address): + return c.rm(self.history_path + '/'+ address) + + def history_paths(self, address:str=None): + paths = [] + if address == None: + for user_address in self.user_addresses(): + paths += self.history_paths(user_address) + else: + paths = c.ls(self.history_path + '/'+ address) + return paths + + def save_data(self, data): + path = self.history_path + '/'+ data['address'] + '/' + str(data['time']) + '.json' + return c.put(path, data) + + def history(self, address:str=None, columns=['datetime', + 'input', + 'output', + 'system_prompt', + 'model', + 'temperature', + 'max_tokens'], df=False): + paths = self.history_paths(address) + history = [] + for i, path in enumerate(paths): + try: + print(paths) + h = c.get(path) + h.update(h.pop('data')) + h['datetime'] = c.time2datetime(h.pop('time')) + h = {k:v for k,v in h.items() if k in columns} + history.append(h) + except Exception as e: + print(e) + # sort by time + + history = sorted(history, key=lambda x: x['datetime'], reverse=True) + if df: + history = c.df(history) + return history + + def user_addresses(self, display_name=False): + users = [u.split('/')[-1] for u in c.ls(self.history_path)] + return users + + + def models(self): + return self.model.models() + + +Chat.run(__name__) \ No newline at end of file diff --git a/modules/chat/history.py b/modules/chat/history.py new file mode 100644 index 00000000..81998c5e --- /dev/null +++ b/modules/chat/history.py @@ -0,0 +1,73 @@ + +import commune as c +from typing import * +import pandas as pd + +class History(c.Module): + def __init__(self, + path='history', + max_age=100000, + **kwargs): + self.max_age = max_age + self.set_history_path(path) + # HISTORY + + + def check_item(self, item, required_fields=['address', 'timestamp']): + assert all([field in item for field in required_fields]), f'Missing required fields: {required_fields}' + assert c.valid_ss58_address(item['address']), f'Invalid address: {item["address"]}' + + def get_user_directory(self, key): + key_address = c.resolve_key_address(key) + return self.history_path + '/' + key_address + + def get_user_path(self, key_address): + if not c.valid_ss58_address(key_address): + key_address = c.get_key(key_address).ss58_address + path = self.history_path +f'/{key_address}/{c.time()}.json' + return path + + def refresh_history(self): + path = self.history_path + self.rm(path) + return self.ls(path) + + def add_history(self, item): + self.check_item(item) + path = self.get_user_path(item['address']) + if 'path' in item: + path = item['path'] + self.put(path, item) + return {'path': path, 'item': item} + + def rm_history(self, key): + path = self.get_user_directory(key) + self.rm(path) + return {'path': path} + + def history_size(self, key): + path = self.get_user_directory(key) + return len(self.ls(path)) + + def history_exists(self, key): + path = self.get_user_directory(key) + return self.exists(path) and self.history_size(key) > 0 + + def user_history(self, key): + path = self.get_user_directory(key) + return self.ls(path) + def set_history_path(self, path): + self.history_path = 
self.resolve_path(path)
+        return {'history_path': self.history_path}
+
+    def test_history(self):
+        key = c.new_key()
+        item = {'address': key.ss58_address, 'timestamp': c.time()}
+        self.add_history(item)
+        assert self.history_exists(key.ss58_address)
+        self.user_history(key.ss58_address)
+        self.rm_history(key.ss58_address)
+        assert not self.history_exists(key.ss58_address)
+        return {'key': key.ss58_address, 'item': item}
diff --git a/modules/code/code.py b/modules/code/code.py
new file mode 100644
index 00000000..3367c93a
--- /dev/null
+++ b/modules/code/code.py
@@ -0,0 +1,716 @@
+import commune as c
+import json
+import os
+import glob
+from typing import *
+from copy import deepcopy
+import inspect
+from munch import Munch
+
+class Code(c.Module):
+
+    def file2text(self, path='./', relative=True, **kwargs):
+        # map every file under `path` to its text contents
+        path = os.path.abspath(path)
+        file2text = {}
+        for file in glob.glob(path + '/**', recursive=True):
+            if not os.path.isfile(file):
+                continue
+            with open(file, 'r') as f:
+                file2text[file] = f.read()
+        if relative:
+            # strip the directory prefix so keys are relative paths
+            return {k[len(path) + 1:]: v for k, v in file2text.items()}
+        return file2text
+
+    def file2file(self, path, **kwargs):
+        '''
+        Reads the content of a file, runs it through the model, and writes the
+        transformed content back to the same path.
+
+        Parameters:
+        - path: The path to the file to be read and rewritten.
+
+        Returns:
+        - content: The transformed file content.
+        '''
+        content = c.get_text(path)
+        content = self.model.forward(content, **kwargs)
+        c.put_text(path, content)
+        return content
+
+    @classmethod
+    def eval(cls, module, vali=None, **kwargs):
+        vali = c.module('vali')() if vali is None else c.module(vali)
+        return c.eval(module, **kwargs)
+
+    @classmethod
+    def lock_file(cls, f):
+        import fcntl
+        fcntl.flock(f, fcntl.LOCK_EX)
+        return f
+
+    @classmethod
+    def unlock_file(cls, f):
+        import fcntl
+        fcntl.flock(f, fcntl.LOCK_UN)
+        return f
+
+    def server2fn(self, *args, **kwargs):
+        servers = c.servers(*args, **kwargs)
+        server2fn = {}
+        for s in servers:
+            server2fn[s] = c.submit(f'{s}/schema', kwargs=dict(code=True))
+        futures = list(server2fn.values())
+        fns = c.wait(futures, timeout=10)
+        for s, f in zip(servers, fns):
+            server2fn[s] = f
+        return server2fn
+
+    @classmethod
+    def remove_number_from_word(cls, word: str) -> str:
+        # strip trailing digits; guard against emptying the string
+        while word and word[-1].isdigit():
+            word = word[:-1]
+        return word
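+
+    # Usage sketch (hypothetical): Code().file2text('./modules/code') returns a
+    # dict mapping each file path (relative to the folder) to its text.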
+    @classmethod
+    def determine_type(cls, x):
+        # parse a string into its most likely Python value
+        if x.lower() == 'null' or x == 'None':
+            return None
+        elif x.lower() in ['true', 'false']:
+            return bool(x.lower() == 'true')
+        elif x.startswith('[') and x.endswith(']'):
+            # this is a list
+            try:
+                list_items = x[1:-1].split(',')
+                # try to convert each item to its actual type
+                x = [cls.determine_type(item.strip()) for item in list_items]
+                if len(x) == 1 and x[0] == '':
+                    x = []
+                return x
+            except:
+                # if conversion fails, return as string
+                return x
+        elif x.startswith('{') and x.endswith('}'):
+            # this is a dictionary
+            if len(x) == 2:
+                return {}
+            try:
+                dict_items = x[1:-1].split(',')
+                # try to convert each item to a key-value pair
+                return {key.strip(): cls.determine_type(value.strip()) for key, value in [item.split(':', 1) for item in dict_items]}
+            except:
+                # if conversion fails, return as string
+                return x
+        else:
+            # try to convert to int or float, otherwise return as string
+            try:
+                return int(x)
+            except ValueError:
+                try:
+                    return float(x)
+                except ValueError:
+                    return x
+
+    def file2fns(self, filepath):
+        '''
+        Retrieves the names of the functions defined in a given Python file.
+
+        Parameters:
+        - filepath: The path of the file (or module name) to scan.
+
+        Returns:
+        - fns: A list of function names found in the file.
+        '''
+        if c.module_exists(filepath):
+            filepath = c.filepath()
+        if not filepath.endswith('.py'):
+            filepath = filepath + '.py'
+        code = c.get_text(filepath)
+        fns = []
+        for line in code.split('\n'):
+            if '):' in line.strip() and 'def ' in line.split('):')[0].strip():
+                fn = line.split('def ')[1].split('):')[0].split('(')[0]
+                if ' ' in fn or ']' in fn:
+                    continue
+                fns.append(fn)
+        return fns
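+
+    # Usage sketch (hypothetical): determine_type('[1,2]') -> [1, 2],
+    # determine_type('null') -> None, and file2fns('./modules/code/code.py')
+    # lists the functions defined in that file.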
+    @staticmethod
+    def get_files_code(directory):
+        # map every file under `directory` to its source code, keyed by relative path
+        code_dict = {}
+        for root, dirs, files in os.walk(directory):
+            for file in files:
+                file_path = os.path.join(root, file)
+                relative_path = os.path.relpath(file_path, directory)
+                with open(file_path, 'r') as f:
+                    code_dict[relative_path] = f.read()
+        return code_dict
+
+    @classmethod
+    def process_kwargs(cls, kwargs: dict, fn_schema: dict):
+        # coerce string CLI values into the types declared in fn_schema['input']
+        for k, v in kwargs.items():
+            if v == 'None':
+                v = None
+            if isinstance(v, str):
+                if v.startswith('[') and v.endswith(']'):
+                    v = eval(v) if len(v) > 2 else []
+                elif v.startswith('{') and v.endswith('}'):
+                    v = c.jload(v) if len(v) > 2 else {}
+                elif k in fn_schema['input'] and fn_schema['input'][k] == 'str':
+                    if v.startswith("f'") or v.startswith('f"'):
+                        v = c.ljson(v)
+                elif fn_schema['input'][k] == 'float':
+                    v = float(v)
+                elif fn_schema['input'][k] == 'int':
+                    v = int(v)
+                elif k == 'kwargs':
+                    continue
+                elif v == 'NA':
+                    assert k != 'NA', f'Key {k} not in default'
+                elif v in ['True', 'False']:
+                    v = eval(v)
+                elif c.is_int(v):
+                    v = eval(v)
+            kwargs[k] = v
+        return kwargs
+
+    @classmethod
+    def python2str(cls, input):
+        input = deepcopy(input)
+        input_type = type(input)
+        if input_type == str:
+            return input
+        if input_type in [dict]:
+            input = json.dumps(input)
+        elif input_type in [bytes]:
+            input = cls.bytes2str(input)
+        elif input_type in [list, tuple, set]:
+            input = json.dumps(list(input))
+        elif input_type in [int, float, bool]:
+            input = str(input)
+        return input
+
+    # JSON2BYTES
+    @classmethod
+    def dict2str(cls, data: str) -> str:
+        return json.dumps(data)
+
+    @classmethod
+    def dict2bytes(cls, data: str) -> bytes:
+        return cls.str2bytes(cls.dict2str(data))
+
+    @classmethod
+    def bytes2dict(cls, data: bytes) -> str:
+        data = cls.bytes2str(data)
+        return json.loads(data)
+
+    # BYTES LAND
+
+    # STRING2BYTES
+    @classmethod
+    def str2bytes(cls, data: str, mode: str = 'hex') -> bytes:
+        if mode in ['utf-8']:
+            return bytes(data, mode)
+        elif mode in ['hex']:
+            return bytes.fromhex(data)
+
+    @classmethod
+    def bytes2str(cls, data: bytes, mode: str = 'utf-8') -> str:
+        if hasattr(data, 'hex'):
+            return data.hex()
+        if isinstance(data, str):
+            return data
+        return bytes.decode(data, mode)
+
+    @classmethod
+    def str2python(cls, input) -> dict:
+        assert isinstance(input, str), 'input must be a string, got {}'.format(input)
+        try:
+            output_dict = json.loads(input)
+        except json.JSONDecodeError:
+            return input
+        return output_dict
+
+    @classmethod
+    def fn2code(cls, search=None, module=None) -> Dict[str, str]:
+        module = module if module else cls
+        functions = module.fns(search)
+        fn_code_map = {}
+        for fn in functions:
+            c.print(f'fn: {fn}')
+            try:
+                fn_code_map[fn] = module.fn_code(fn)
+            except Exception as e:
+                c.print(f'Error (fn={fn}): {e}', color='red')
+        return fn_code_map
+
+    @classmethod
+    def fn_code(cls, fn: str,
+                detail: bool = False,
+                seperator: str = '/') -> str:
+        '''
+        Returns the code of a function
+        '''
+        try:
+            if isinstance(fn, str):
+                if seperator in fn:
+                    module_path, fn = fn.split(seperator)
+                    module = c.module(module_path)
+                    fn = getattr(module, fn)
+                else:
+                    fn = getattr(cls, fn)
+
+            code_text = inspect.getsource(fn)
+            text_lines = code_text.split('\n')
+            if 'classmethod' in text_lines[0] or 'staticmethod' in text_lines[0] or '@' in text_lines[0]:
+                text_lines.pop(0)
+
+            assert 'def' in text_lines[0], 'Function not found in code'
+            start_line = cls.find_code_line(search=text_lines[0])
+            fn_code = '\n'.join([l[len('    '):] for l in code_text.split('\n')])
+            if detail:
+                fn_code = {
+                    'text': fn_code,
+                    'start_line': start_line,
+                    'end_line': start_line + len(text_lines)
+                }
+        except Exception as e:
+            c.print(f'Error: {e}', color='red')
+            fn_code = None
+        return fn_code
+
+    @classmethod
+    def is_generator(cls, obj):
+        """
+        Checks whether `obj` (or the attribute named by `obj`) is a generator.
+        """
+        if isinstance(obj, str):
+            if not hasattr(cls, obj):
+                return False
+            obj = getattr(cls, obj)
+        if not callable(obj):
+            result = inspect.isgenerator(obj)
+        else:
+            result = inspect.isgeneratorfunction(obj)
+        return result
+
+    @staticmethod
+    def get_parents(obj) -> List[str]:
+        cls = c.resolve_class(obj)
+        return list(cls.__mro__[1:-1])
+
+    @staticmethod
+    def get_parent_functions(cls) -> List[str]:
+        parent_classes = c.get_parents(cls)
+        function_list = []
+        for parent in parent_classes:
+            function_list += c.get_functions(parent)
+        return list(set(function_list))
+
+    def repo2module(self, repo_path: str, name=None, template_module='demo', **kwargs):
+        if not repo_path.startswith('/') and not repo_path.startswith('.') and not repo_path.startswith('~'):
+            repo_path = os.path.abspath('~/' + repo_path)
+        assert os.path.isdir(repo_path), f'{repo_path} is not a directory, please clone it'
+        c.add_tree(repo_path)
+        template_module = c.module(template_module)
+        code = template_module.code()
+
+        # replace the template class name with the module name
+        name = name or repo_path.split('/')[-1]
+        assert not c.module_exists(name), f'{name} already exists'
+        code_lines = code.split('\n')
+        for i, line in enumerate(code_lines):
+            if 'class' in line and 'c.Module' in line:
+                class_name = line.split('class ')[-1].split('(')[0]
+                code_lines[i] = line.replace(class_name, name)
+                break
+        code = '\n'.join(code_lines)
+
+        module_path = repo_path + '/module.py'
+
+        # write the module code
+        c.put_text(code, module_path)
+
+        # build the tree
+        c.build_tree(update=True)
+
+    @classmethod
+    def timefn(cls, fn, *args, **kwargs):
+        fn = cls.get_fn(fn)
+        if isinstance(fn, str):
+            if '/' in fn:
+                module, fn = fn.split('/')
+                module = c.module(module)
+            else:
+                module = cls
+            if module.classify_fn(fn) == 'self':
+                module = cls()
+            fn = getattr(module, fn)
+
+        t1 = c.time()
+        result = fn(*args, **kwargs)
+        t2 = c.time()
+        return {'time': t2 - t1}
+
+    @classmethod
+    def find_python_classes(cls, path: str, class_index: int = 0, search: str = None, start_lines: int = 2000):
+        path = cls.resolve_path(path)
+        if os.path.isdir(path):
+            file2classes = {}
+            for f in c.glob(path):
+                if f.endswith('.py'):
+                    try:
+                        file2classes[f] = cls.find_python_classes(f, class_index=class_index, search=search, start_lines=start_lines)
+                    except Exception as e:
+                        c.print(f'Error: {e}', color='red')
+            return file2classes
+        # read the contents of the Python script file
+        python_script = cls.readlines(path, end_line=start_lines, resolve=False)
+        class_names = []
+        lines = python_script.split('\n')
+
+        for line in lines:
+            key_elements = ['class ', '(', '):']
+            has_class_bool = all([key_element in line for key_element in key_elements])
+
+            if has_class_bool:
+                if search is not None:
+                    if isinstance(search, str):
+                        search = [search]
+                    if not any([s in line for s in search]):
+                        continue
+                class_name = line.split('class ')[-1].split('(')[0].strip()
+                class_names.append(class_name)
+
+        # return the class names
+        return class_names
+    @classmethod
+    def find_functions(cls, path):
+        code = c.get_text(path)
+        functions = []
+        for line in code.split('\n'):
+            if line.startswith('def '):
+                if all([s in line for s in ['def ', '(', '):']]):
+                    functions.append(line.split('def ')[-1].split('(')[0].strip())
+        return functions
+
+    def file2classes(self, path: str = None, search: str = None, start_lines: int = 2000):
+        return self.find_python_classes(path=path, search=search, start_lines=start_lines)
+
+    @classmethod
+    def get_class_name(cls, obj=None) -> str:
+        obj = obj if obj is not None else cls
+        if not cls.is_class(obj):
+            obj = type(obj)
+        return obj.__name__
+
+    @staticmethod
+    def try_n_times(fn, max_trials: int = 10, args: list = [], kwargs: dict = {}):
+        assert callable(fn), 'fn must be callable'
+        last_error = None
+        for t in range(max_trials):
+            try:
+                return fn(*args, **kwargs)
+            except Exception as e:
+                last_error = e
+        raise last_error
+
+    @classmethod
+    def dict2munch(cls, x: dict, recursive: bool = True) -> Munch:
+        '''
+        Turn dictionary into Munch
+        '''
+        if isinstance(x, dict):
+            for k, v in x.items():
+                if isinstance(v, dict) and recursive:
+                    x[k] = c.dict2munch(v)
+            x = Munch(x)
+        return x
+
+    @classmethod
+    def munch2dict(cls, x: Munch, recursive: bool = True) -> dict:
+        '''
+        Turn munch object into dictionary
+        '''
+        if isinstance(x, Munch):
+            x = dict(x)
+            for k, v in x.items():
+                if isinstance(v, Munch) and recursive:
+                    x[k] = c.munch2dict(v)
+        return x
+
+    @classmethod
+    def munch(cls, x: Dict) -> Munch:
+        '''
+        Converts a dict to a munch
+        '''
+        return cls.dict2munch(x)
+
+    def comment(self,
+                fn='coder/call',
+                model='model.openai',
+                timeout=20,
+                **model_params):
+        '''
+        ### Function Documentation
+
+        #### `comment(self, fn='coder/call', model='model.openai', timeout=20, **model_params)`
+
+        This function is responsible for generating documentation for a given piece of code by utilizing a language model.
+
+        Parameters:
+        - `fn` (str): The name of the function that needs documentation. Default value is `'coder/call'`.
+        - `model` (str): The identifier of the language model to be used. Default is `'model.openai'`.
+        - `timeout` (int): The maximum amount of time (in seconds) to wait for the model to generate the documentation. Default is `20`.
+        - `**model_params`: Arbitrary keyword arguments that will be passed to the `connect` method of the `c` object when connecting to the language model.
+
+        Returns:
+        - `docs` (str): The generated documentation for the specified code.
+
+        The function performs the following steps:
+        1. Connects to the specified language model using the provided parameters.
+        2. Constructs an input JSON object containing the instruction, code, and a placeholder for documentation.
+        3. Requests the language model to generate documentation based on the provided input.
+        4. Processes the generated documentation response.
+        5. Adds the generated documentation to the function using the `c.add_docs()` method.
+        6. Returns the generated documentation.
+
+        **Example Usage:**
+
+        ```python
+        # assuming the 'c' object and 'comment' method are part of a class
+        caller = YourClass()
+        documentation = caller.comment(
+            fn='your_function_name',
+            model='your_model_identifier',
+            timeout=30,
+            model_params={'additional': 'parameters'}
+        )
+        print(documentation)
+        ```
+
+        **Note:**
+        - The `c` object is assumed to be a pre-defined object with methods `connect`, `fn_code`, and `add_docs`.
+        - `self.process_response` is assumed to be a method that processes the generated documentation response.
+        '''
+        model = c.connect(model, **model_params)
+        input = json.dumps({
+            'instruction': 'given the code, document the function in a professional manner in the docs section',
+            'code': c.fn_code(fn),
+            'docs': None,
+        })
+        # get the docs
+        docs = model.generate(input, timeout=timeout)
+        docs = self.process_response(docs)
+        # add docs to the function
+        c.add_docs(fn, docs)
+        return docs
+
+    call = document_fn = comment
+
+    def document_module(self,
+                        module='agent.coder',
+                        fns=None,
+                        model='model.openai',
+                        **model_params):
+        fns = fns or c.module(module).fns()
+        for fn in fns:
+            c.print(f'Documenting function {fn} in module {module}...')
+            try:
+                future = c.submit(self.document_fn, dict(fn=module + '/' + fn, model=model, **model_params))
+                future.result()
+            except Exception as e:
+                c.print(f'Failed to document function {fn} in module {module}: {e}')
+        return
+
+    def process_response(self, response):
+        '''
+        Ensures a model response is JSON where possible. If the response is a
+        string, attempts to load it as a JSON object; if that fails, returns the
+        string unchanged.
+        '''
+        if isinstance(response, str):
+            try:
+                response = json.loads(response)
+            except json.JSONDecodeError:
+                pass
+        return response
+
+    @classmethod
+    def transfer_fn_code(cls, module1='module',
+                         fn_prefix='ray_',
+                         module2='ray',
+                         refresh=False):
+        module1 = c.module(module1)
+        module2 = c.module(module2)
+        module1_fn_code_map = module1.fn2code(fn_prefix)
+        module2_code = module2.code()
+        module2_fns = module2.fns()
+        filepath = module2.filepath()
+        for fn_name, fn_code in module1_fn_code_map.items():
+            print(f'adding {fn_name}')
+            if fn_name in module2_fns:
+                if refresh:
+                    module2_code = module2_code.replace(module2_fns[fn_name], '')
+                else:
+                    print(f'fn_name {fn_name} already in module2_fns {module2_fns}')
+            module2_code += '\n'
+            module2_code += '\n'.join(['    ' + line for line in fn_code.split('\n')])
+            module2_code += '\n'
+        cls.put_text(filepath, module2_code)
+        return {'success': True, 'module2_code': module2_code, 'module2_fns': module2_fns, 'module1_fn_code_map': module1_fn_code_map}
diff --git a/modules/data/__init__.py b/modules/data/__init__.py
new file mode 100755
index 00000000..e69de29b
diff --git a/modules/data/data.py b/modules/data/data.py
new file mode 100644
index 00000000..eaa8038b
--- /dev/null
+++ b/modules/data/data.py
@@ -0,0 +1,65 @@
+import commune as c
+import asyncio
+import torch
+
+class Dataset(c.Module, torch.utils.data.Dataset):
+    mode_shortcuts = {
+        'hf': 'text.huggingface',
+        'bt': 'text.bittensor',
+    }
+    def __init__(self, dataset, config=None, **kwargs):
+        config = self.set_config(config, kwargs=kwargs)
+        self.set_dataset(config)
+        self.set_model(config)
+        if config.train:
+            self.train()
+
+    @classmethod
+    def sample_check(cls, sample):
+        return bool(isinstance(sample, dict) and 'input_ids' in sample)
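+
+    # Usage sketch (hypothetical; assumes a 'dataset.bittensor' server is live
+    # on the local network):
+    #
+    #   sample = Dataset.sample(batch_size=2, sequence_length=64)
+    #   assert Dataset.sample_check(sample)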
+    @classmethod
+    async def async_sample(cls, dataset='dataset.bittensor', max_trials=10, batch_size=1, sequence_length=64, num_batches=10):
+        sample = None
+        if not hasattr(cls, 'dataset_pool'):
+            cls.dataset_pool = c.connect_pool(dataset)
+
+        fail_count = 0
+        while not cls.sample_check(sample) and fail_count < max_trials:
+            if len(cls.dataset_pool) == 0:
+                cls.dataset_pool = c.connect_pool(dataset)
+            try:
+                data_idx = cls.choice(list(range(len(cls.dataset_pool))))
+                sample = cls.dataset_pool[data_idx].sample(batch_size=batch_size,
+                                                           sequence_length=sequence_length)
+                if not cls.sample_check(sample):
+                    raise Exception('Sample check failed')
+                sample['input_ids'] = sample['input_ids'][:batch_size, -sequence_length:]
+            except Exception as e:
+                fail_count += 1
+                # drop the failing dataset connection and retry with the rest
+                del cls.dataset_pool[data_idx]
+                cls.print(f'ERROR {e} failed to sample, removing dataset {data_idx}, {len(cls.dataset_pool)} remaining', color='red')
+        assert cls.sample_check(sample), f'Failed to sample from {dataset} after {max_trials} trials.'
+        return sample
+
+    @classmethod
+    def sample(cls, timeout=2, retries=3, *args, **kwargs):
+        try:
+            if timeout:
+                # add a timeout to the async_sample call
+                coro = asyncio.wait_for(cls.async_sample(*args, **kwargs), timeout=timeout)
+            else:
+                coro = cls.async_sample(*args, **kwargs)
+            return asyncio.run(coro)
+        except asyncio.TimeoutError:
+            print("Async function call timed out.")
+            if retries > 0:
+                return cls.sample(timeout=timeout, retries=retries - 1, *args, **kwargs)
diff --git a/modules/data/data.yaml b/modules/data/data.yaml
new file mode 100644
index 00000000..6c56f10d
--- /dev/null
+++ b/modules/data/data.yaml
@@ -0,0 +1,2 @@
+
+dataset: bittensor
\ No newline at end of file
diff --git a/modules/data/diffusion/dream/dream_dataset.py b/modules/data/diffusion/dream/dream_dataset.py
new file mode 100644
index 00000000..2e5a6721
--- /dev/null
+++ b/modules/data/diffusion/dream/dream_dataset.py
@@ -0,0 +1,370 @@
+import argparse
+import hashlib
+import itertools
+import math
+import os
+import warnings
+from pathlib import Path
+from typing import Optional
+
+import torch
+import torch.nn.functional as F
+import torch.utils.checkpoint
+from torch.utils.data import Dataset
+from PIL import Image
+from torchvision import transforms
+from tqdm.auto import tqdm
+from transformers import AutoTokenizer, PretrainedConfig
+
+import commune
+from typing import *
+
+class DreamDataset(Dataset):
+    """
+    A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
+    It pre-processes the images and tokenizes the prompts.
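+
+    Note: `class_path` is expected to point at a folder of images whose folder
+    name encodes the prompt (e.g. "Billys Car" -> billys_car).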
+ """ + + def __init__( + self, + batch_size:int= 8, + tokenizer: Union[str, 'tokenizer'] = None , + class_path:str=None, + # class_prompt:str=None, + size:int=512, + center_crop:bool=False, + ): + self.size = size + self.center_crop = center_crop + self.batch_size = batch_size + self.set_class_path(class_path) + self.set_tokenizer(tokenizer) + + + self.image_transforms = transforms.Compose( + [ + transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), + transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(self.size), + transforms.ToTensor(), + transforms.Normalize([0.5], [0.5]), + ] + ) + def set_tokenizer(self, tokenizer:Union[str, 'tokenizer'], revision=False, use_fast=False): + + + if isinstance(tokenizer, str): + tokenizer = AutoTokenizer.from_pretrained(tokenizer, + revision=revision, + use_fast=use_fast) + else: + raise NotImplementedError(type(tokenizer)) + + self.tokenizer =tokenizer + + + + + def set_class_path(self, class_path:str): + + self.class_path = Path(class_path) + if not self.class_path.exists(): + raise ValueError("Instance images root doesn't exists.") + + self.class_image_paths = list(self.class_path.iterdir()) + self.num_instance_images = len(self.class_image_paths) + + self.class_prompt = os.path.dirname(self.class_path) + + # sentences will be encoded as folders such that "Billys Car"-> billys_car + + + def __len__(self): + return len(self.class_image_paths) + + + def __next__(self): + if not hasattr(self, '_dataloader'): + self._dataloader = torch.utils.data.DataLoader( + self.dataset, + batch_size=self.args.train_batch_size, + shuffle=True, + collate_fn=lambda examples: collate_fn(examples, self.args.with_prior_preservation), + num_workers=1, + ) + + + + def __getitem__(self, index): + example = {} + instance_image = Image.open(self.class_image_paths[index % self.num_instance_images]) + if not instance_image.mode == "RGB": + instance_image = instance_image.convert("RGB") + example["instance_images"] = self.image_transforms(instance_image) + example["instance_prompt_ids"] = self.tokenizer( + self.instance_prompt, + truncation=True, + padding="max_length", + max_length=self.tokenizer.model_max_length, + return_tensors="pt", + ).input_ids + + return example + + + @staticmethod + def collate_fn(examples, with_prior_preservation=False): + input_ids = [example["instance_prompt_ids"] for example in examples] + pixel_values = [example["instance_images"] for example in examples] + + # Concat class and instance examples for prior preservation. + # We do this to avoid doing two forward passes. 
+ if with_prior_preservation: + input_ids += [example["class_prompt_ids"] for example in examples] + pixel_values += [example["class_images"] for example in examples] + + pixel_values = torch.stack(pixel_values) + pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() + + input_ids = torch.cat(input_ids, dim=0) + + batch = { + "input_ids": input_ids, + "pixel_values": pixel_values, + } + return batch + + + + + + @staticmethod + def parse_args(input_args=None): + parser = argparse.ArgumentParser(description="Simple example of a training script.") + parser.add_argument( + "--pretrained_model_name_or_path", + type=str, + default=None, + required=True, + help="Path to pretrained model or model identifier from huggingface.co/models.", + ) + parser.add_argument( + "--revision", + type=str, + default=None, + required=False, + help="Revision of pretrained model identifier from huggingface.co/models.", + ) + parser.add_argument( + "--tokenizer_name", + type=str, + default=None, + help="Pretrained tokenizer name or path if not the same as model_name", + ) + parser.add_argument( + "--instance_data_dir", + type=str, + default=None, + required=True, + help="A folder containing the training data of instance images.", + ) + parser.add_argument( + "--class_data_dir", + type=str, + default=None, + required=False, + help="A folder containing the training data of class images.", + ) + parser.add_argument( + "--instance_prompt", + type=str, + default=None, + required=True, + help="The prompt with identifier specifying the instance", + ) + parser.add_argument( + "--class_prompt", + type=str, + default=None, + help="The prompt to specify images in the same class as provided instance images.", + ) + parser.add_argument( + "--with_prior_preservation", + default=False, + action="store_true", + help="Flag to add prior preservation loss.", + ) + parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") + parser.add_argument( + "--num_class_images", + type=int, + default=100, + help=( + "Minimal class images for prior preservation loss. If there are not enough images already present in" + " class_data_dir, additional images will be sampled with class_prompt." + ), + ) + parser.add_argument( + "--output_dir", + type=str, + default="text-inversion-model", + help="The output directory where the model predictions and checkpoints will be written.", + ) + parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") + parser.add_argument( + "--resolution", + type=int, + default=512, + help=( + "The resolution for input images, all the images in the train/validation dataset will be resized to this" + " resolution" + ), + ) + parser.add_argument( + "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution" + ) + parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") + parser.add_argument( + "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." + ) + parser.add_argument( + "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." + ) + parser.add_argument("--num_train_epochs", type=int, default=1) + parser.add_argument( + "--max_train_steps", + type=int, + default=None, + help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", + ) + parser.add_argument( + "--checkpointing_steps", + type=int, + default=500, + help=( + "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" + " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" + " training using `--resume_from_checkpoint`." + ), + ) + parser.add_argument( + "--resume_from_checkpoint", + type=str, + default=None, + help=( + "Whether training should be resumed from a previous checkpoint. Use a path saved by" + ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' + ), + ) + parser.add_argument( + "--gradient_accumulation_steps", + type=int, + default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.", + ) + parser.add_argument( + "--gradient_checkpointing", + action="store_true", + help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", + ) + parser.add_argument( + "--learning_rate", + type=float, + default=5e-6, + help="Initial learning rate (after the potential warmup period) to use.", + ) + parser.add_argument( + "--scale_lr", + action="store_true", + default=False, + help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", + ) + parser.add_argument( + "--self.lr_scheduler", + type=str, + default="constant", + help=( + 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' + ' "constant", "constant_with_warmup"]' + ), + ) + parser.add_argument( + "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." + ) + parser.add_argument( + "--lr_num_cycles", + type=int, + default=1, + help="Number of hard resets of the lr in cosine_with_restarts scheduler.", + ) + parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") + parser.add_argument( + "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." + ) + parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") + parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") + parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") + parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") + parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") + parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") + parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") + parser.add_argument( + "--hub_model_id", + type=str, + default=None, + help="The name of the repository to keep in sync with the local `output_dir`.", + ) + parser.add_argument( + "--logging_dir", + type=str, + default="logs", + help=( + "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" + " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." + ), + ) + parser.add_argument( + "--mixed_precision", + type=str, + default=None, + choices=["no", "fp16", "bf16"], + help=( + "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). 
Bf16 requires PyTorch >=" + " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" + " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." + ), + ) + parser.add_argument( + "--prior_generation_precision", + type=str, + default=None, + choices=["no", "fp32", "fp16", "bf16"], + help=( + "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" + " 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32." + ), + ) + parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") + parser.add_argument( + "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." + ) + + if input_args is not None: + args = parser.parse_args(input_args) + else: + args = parser.parse_args() + + env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) + if env_local_rank != -1 and env_local_rank != self.args.local_rank: + self.args.local_rank = env_local_rank + return args + + + @classmethod + def demo(cls): + import streamlit as st + class_path = os.path.dirname(__file__) + '/demo_data/python' + self = cls(class_path=class_path, tokenizer=model) + + +if __name__ == "__main__": + DreamDataset.demo() diff --git a/modules/data/diffusion/dream/prompt_dataset.py b/modules/data/diffusion/dream/prompt_dataset.py new file mode 100644 index 00000000..bf55777f --- /dev/null +++ b/modules/data/diffusion/dream/prompt_dataset.py @@ -0,0 +1,20 @@ + + +from torch.utils.data import Dataset + + +class PromptDataset(Dataset): + "A simple dataset to prepare the prompts to generate class images on multiple GPUs." + + def __init__(self, prompt, num_samples): + self.prompt = prompt + self.num_samples = num_samples + + def __len__(self): + return self.num_samples + + def __getitem__(self, index): + example = {} + example["prompt"] = self.prompt + example["index"] = index + return example diff --git a/modules/data/hf/data_hf.py b/modules/data/hf/data_hf.py new file mode 100644 index 00000000..14515bc5 --- /dev/null +++ b/modules/data/hf/data_hf.py @@ -0,0 +1,267 @@ +import commune as c +import datasets +from datasets import load_dataset +from typing import Dict, List + + + +class DataHF(c.Module): + + shortcuts = { + 'pile': 'EleutherAI/the_pile', + 'wiki': 'wikitext', + 'glue': 'glue', + 'camel_math': 'camel-ai/math', + 'mmlu': 'lukaemon/mmlu', + 'pubmed_qa': 'pubmed_qa', + 'truthqa': 'truthful_qa', + } + def __init__(self, + path: str = 'super_glue', + name: str = None, + streaming: bool= False, + split: str = None, + **kwargs): + config = self.set_config(locals()) + self.set_dataset(path=config.path, name=config.name, split=config.split, streaming=config.streaming) + + + + def set_dataset(self, path:str, name:str = None, split:str = None, streaming:bool=False, **kwargs): + path = self.shortcuts.get(path, path) + c.print(f'Loading dataset: {name} from {path}') + + # resolve name + name = list(self.config_names(path=path))[0] if name == None else name + + + + # raise Exception(f'Loading dataset: {name} from {path}') + + # resolve split + if isinstance(split, str): + split = [split] + + # update config + self.config.update({'path': path, 'name': name, 'split': split, 'streaming': streaming}) + + # load dataset + dataset_map = load_dataset(path=path, + name=name, + split=split, + streaming=streaming) + + # set attributes + self.splits = list(dataset_map.keys()) + self.dataset = 
+        self.dataset = list(dataset_map.values())[0]
+        self.dataset_map = dataset_map
+        return self.dataset
+
+    def __len__(self):
+        return len(self.dataset)
+
+    @property
+    def n(self):
+        return len(self)
+
+    def random_idx(self):
+        return c.random_int(len(self))
+
+    def sample(self, idx: int = None, batch_size: int = 1):
+        if batch_size > 1:
+            return [self.sample() for i in range(batch_size)]
+        idx = self.random_idx() if idx is None else idx
+        return self.dataset[idx]
+
+    def validate(self, module, num_samples=10):
+        # compare this dataset against another module on the same indices
+        for i in range(num_samples):
+            idx = self.random_idx()
+            sample = self.sample(idx=idx)
+            module_sample = module.sample(idx=idx)
+            for key in sample.keys():
+                if sample[key] != module_sample[key]:
+                    return 0
+        return 1
+
+    @classmethod
+    def test(cls, *args, **kwargs):
+        cls.print('Testing dataset')
+        dataset = cls(*args, **kwargs)
+        sample = dataset.sample()
+        assert isinstance(sample, dict)
+        return sample
+
+    def default_name(self):
+        return self.config_names()[0]
+
+    @property
+    def data_info(self):
+        return self.dataset._info.__dict__
+
+    @property
+    def features(self):
+        return self.dataset._info.__dict__['features']
+
+    def set_split(self, split):
+        self.dataset = self.dataset_map[split]
+        return self.dataset
+
+    @classmethod
+    def get_dataset_builder(cls, path: str, factory_module_path: str = None):
+        path = cls.shortcuts.get(path, path)
+        if factory_module_path is None:
+            assert isinstance(path, str)
+            factory_module = datasets.load.dataset_module_factory(path)
+            factory_module_path = factory_module.module_path
+        dataset_builder = datasets.load.import_main_class(factory_module_path)
+        return dataset_builder
+
+    @classmethod
+    def config_names(cls, path=None):
+        return list(cls.config_map(path=path).keys())
+
+    list_names = config_names
+
+    @classmethod
+    def configs(cls, path=None):
+        configs = cls.config_map(path=path)
+        return list(configs.keys())
+
+    @classmethod
+    def config_map(cls, path=None):
+        dataset_builder = cls.get_dataset_builder(path)
+        configs = [config.__dict__ for config in dataset_builder.BUILDER_CONFIGS]
+        if len(configs) == 0:
+            configs = [dataset_builder._info.__dict__]
+            configs[0]['name'] = 'default'
+        config_map = {config['name']: config for config in configs}
+        return config_map
+
+    @property
+    def card(self) -> Dict[str, str]:
+        return dict(
+            module=self.module_path,
+            path=self.path,
+            name=self.name,
+            split=self.split
+        )
+
+    @classmethod
+    def available_datasets(cls, prefix: str = 'dataset') -> List[str]:
+        return [x for x in c.servers() if x.startswith(prefix)]
+
+    @classmethod
+    def default_dataset(cls) -> str:
+        available_datasets = cls.available_datasets()
+        if len(available_datasets) == 0:
+            return cls.launch(name='dataset.text.glue', kwargs=dict(path='glue'))
+        return c.connect(available_datasets[0])
+
+    @classmethod
+    def test_multiple(cls):
+        for path in list(cls.shortcuts.keys()):
+            cls.print(f'TESTING ({cls.module_path()}): {path}', 'yellow')
+            self = cls(path=path)
+            sample = self.sample(tokenize=False)
+            assert 'text' in sample
+            sample = self.sample(tokenize=True)
+            assert 'input_ids' in sample
+            cls.print(f'PASSED ({cls.module_path()}): {path}', 'green')
+
+    @classmethod
+    def sandbox(cls):
+        import streamlit as st
+        self = cls()
+        for i in range(1000):
+            self.sample()
+            print(i)
+
+    @property
+    def name_suffix(self):
+        return f'{self.path}'
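+
+    # Usage sketch (hypothetical): inspect available configs before loading,
+    # e.g. DataHF.config_names('glue') -> ['cola', 'sst2', ...]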
serve_category(cls, fleet:str = 'qa', remote:bool=True, tag=None, **kwargs): + ''' + ### Documentation + + #### Function: `serve_category` + + **Description:** + + This class method is responsible for serving a category of services defined in a fleet. It launches each service on a separate port, avoiding any conflicts with already used ports. + + **Parameters:** + + - `fleet`: A string representing the fleet name. Default value is `'qa'`. + - `remote`: A boolean indicating whether the service is to be served remotely or not. Default value is `True`. + - `tag`: An optional tag appended to each served module's name. + ''' + fleet = cls.getc(f'fleet.{fleet}') + + avoid_ports = [] + for path in fleet: + port = c.free_port(avoid_ports=avoid_ports) + cls.serve(path=path, remote=remote, port=port, tag=tag, **kwargs) + avoid_ports.append(port) + + @classmethod + def fleet(cls, path:str = 'truthful_qa', n:int=5, remote:bool=True, tag=None, **kwargs): + ''' + ## Fleet Class Method + + ### Description + The `fleet` method is a class method responsible for starting multiple instances of a service on different ports. This method is useful when you want to run several instances of the same service simultaneously, possibly for load balancing or high availability purposes. + + ### Parameters + - `path` (str): The path to the service that needs to be served. Defaults to 'truthful_qa'. + - `n` (int): The number of instances to be started. Defaults to 5. + - `remote` (bool): Whether the instances are served remotely. Defaults to True. + - `tag`: An optional tag; each instance gets its index appended as a suffix. + ''' + + avoid_ports = [] + for i in range(n): + port = c.free_port(avoid_ports=avoid_ports) + cls.serve(path=path, remote=remote, port=port, tag=f'{i}' if tag == None else f'{tag}.{i}', **kwargs) + avoid_ports.append(port) + + + @classmethod + def validate(cls, module = None, ref_module=None): + # compare a sample from `module` against the same index from `ref_module` + module = c.connect(module) + ref_module = c.connect(ref_module) + ref_idx = ref_module.random_idx() + ref_sample = ref_module.sample(idx=ref_idx) + module_sample = module.sample(idx=ref_idx) + + for key in ref_sample.keys(): + if key not in module_sample or module_sample[key] != ref_sample[key]: + return 0 + return 1 + + + diff --git a/modules/data/hf/data_hf_docs.md b/modules/data/hf/data_hf_docs.md new file mode 100644 index 00000000..0b36cd06 --- /dev/null +++ b/modules/data/hf/data_hf_docs.md @@ -0,0 +1,35 @@ + + +## Data HF Module + +This module wraps Hugging Face datasets; the examples below serve and test it with truthful_qa.
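+
+For quick local use, the module can also be sampled directly in Python (a minimal sketch based on the `DataHF` class above; `truthqa` is a registered shortcut for `truthful_qa`):
+
+```python
+import commune as c
+
+# load truthful_qa through the DataHF wrapper
+dataset = c.module('data.hf')(path='truthful_qa')
+
+print(dataset.sample())              # one random row as a dict
+print(dataset.sample(batch_size=4))  # a list of 4 random rows
+```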
+ + +To serve the module + +#### Bash +```bash +c data.hf serve tag=10 path=truthful_qa +``` +#### Python +```python +c.serve('data.hf', tag=10, path='truthful_qa') +``` + +To register the module + +#### Bash +```bash +c data.hf register tag=10 path=truthful_qa +``` + +#### Python +```python +c.register('data.hf', tag=10, path='truthful_qa') +``` + + + + + + diff --git a/modules/data/image/globe/data_image_globe.py b/modules/data/image/globe/data_image_globe.py new file mode 100644 index 00000000..5c298f5d --- /dev/null +++ b/modules/data/image/globe/data_image_globe.py @@ -0,0 +1,9 @@ +import commune as c + +class DataImageGlobe(c.Module): + def __init__(self, **kwargs): + config = self.set_config(config=kwargs) + def run(self): + print('Base run') + + diff --git a/modules/data/image/globe/data_image_globe.yaml b/modules/data/image/globe/data_image_globe.yaml new file mode 100644 index 00000000..7ac5c5e3 --- /dev/null +++ b/modules/data/image/globe/data_image_globe.yaml @@ -0,0 +1,6 @@ +return_val: pong +users: +- user1 +- user2 +- user3 +- bro diff --git a/modules/data/text/code/data_text_code.py b/modules/data/text/code/data_text_code.py new file mode 100644 index 00000000..ce598ff3 --- /dev/null +++ b/modules/data/text/code/data_text_code.py @@ -0,0 +1,121 @@ +import commune as c + +class DataTextCode(c.Module): + def __init__(self, **kwargs): + config = self.set_config(kwargs) + self.folder_path = self.resolve_path(config.folder_path) + self.filepaths = sorted([f for f in self.walk(self.folder_path) if f.endswith('.py')]) + + def random_idx(self): + return self.random_int(0, len(self.filepaths)-1) + + + def sample(self, idx=None, + input_chars:int = 500, + output_chars: int = 500, + random_start_line: int = None, + real_prob:float=0.5): + + while True: + idx = self.random_idx() if idx == None else idx + filepath = self.filepaths[idx] + file_text = c.get_text(filepath) + if len(file_text) > input_chars + output_chars: + break + else: + idx = None + + start_index = c.random_int(0, len(file_text) - output_chars) + + # we need to make sure that the input and output are not the same + input_bounds = [start_index, start_index + input_chars] + output_bounds = [start_index + input_chars, start_index + input_chars + output_chars] + + sample = { + 'input_text': file_text[input_bounds[0]:input_bounds[1]], + 'output_text': file_text[output_bounds[0]:output_bounds[1]], + 'filepath': filepath + } + + # do a kick flip + real = c.random_float(0,1) > real_prob + + sample['real'] = int(real) + + # then we need to sample a different file + if sample['real'] == 0 : + other_sample = self.sample( input_chars=input_chars, real_prob = 0, output_chars=output_chars) + sample['output_text'] = other_sample['output_text'] + + return sample + + + def test(self, n=100): + t = c.time() + for i in range(n): + sample = self.sample() + msg = {'samples_per_second': i / (c.time() - t)} + c.print(msg) + + + + prompt = ''' + INPUT (JSON): + ```{sample}``` + QUESTION: + + WAS THE INPUT REAL (1) OR TAMPERED (0)? 
-> : + + OUTPUT (answer: int): + ```json + ''' + + + def parse_output(self, output:dict)-> dict: + if '0' in output: + return 0 + elif '1' in output: + return 1 + else: + raise Exception(f'Invalid output: {output}, expected 0 or 1') + + + def score(self, model='model', w:float=0.0): + + try: + model = c.connect(model, prefix_match=True, network='local') + sample = self.sample() + t = c.time() + prompt = self.prompt.format(sample=sample) + output = model.generate(prompt) + output = self.parse_output(output) + w = 0.2 + except Exception as e: + return {'error': c.detailed_error(e), 'w':w} + + if output == sample['real']: + w = 1 + + msg = { + 'prompt': prompt, + 'latency': c.time() - t, + 'target': sample['real'], + 'prediction': output, + 'w' : w, + } + + return msg + + + def score_models(self, model='model'): + + models = c.servers(model, network='local') + responses = [] + for model in models: + msg = self.score(model=model) + msg['model'] = model + responses += [msg] + + return responses + diff --git a/modules/data/text/code/data_text_code.yaml b/modules/data/text/code/data_text_code.yaml new file mode 100644 index 00000000..f2a8fffe --- /dev/null +++ b/modules/data/text/code/data_text_code.yaml @@ -0,0 +1,2 @@ +folder_path: ./ +suffix: .py \ No newline at end of file diff --git a/modules/data/text/folder/data_text_folder.py b/modules/data/text/folder/data_text_folder.py new file mode 100644 index 00000000..53f5415b --- /dev/null +++ b/modules/data/text/folder/data_text_folder.py @@ -0,0 +1,138 @@ +import commune as c + +class DataFolder(c.Module): + def __init__(self, folder_path: str = './', suffix: str = '.py'): + config = self.set_config(locals()) + self.folder_path = self.resolve_path(config.folder_path) + self.filepaths = sorted([f for f in self.walk(self.folder_path) if f.endswith(config.suffix)]) + + def random_idx(self): + return self.random_int(0, len(self.filepaths)-1) + + def sample(self, idx=None, + input_chars:int = 1000, + output_chars: int = 500, + start_index: int = None, + real_prob:float=0.5): + + if idx == None: + while True: + idx = self.random_idx() + filepath = self.filepaths[idx] + file_text = c.get_text(filepath) + if len(file_text) >= input_chars + output_chars: + break + else: + idx = None + else: + filepath = self.filepaths[idx] + file_text = c.get_text(filepath) + + if start_index == None: + start_index = c.random_int(0, len(file_text) - input_chars - output_chars ) + + # we need to make sure that the input and output are not the same + input_bounds = [start_index, start_index + input_chars] + output_bounds = [start_index + input_chars, start_index + input_chars + output_chars] + + sample = { + 'input_text': file_text[input_bounds[0]:input_bounds[1]], + 'output_text': file_text[output_bounds[0]:output_bounds[1]], + 'filepath': filepath, + 'idx': idx, + 'start_index': start_index, + 'input_chars': input_chars, + 'output_chars': output_chars, + + } + + # randomly decide whether this sample stays real + real = c.random_float(0,1) < real_prob + + sample['real'] = int(real) + + # if the sample is tampered, swap in the output text of a different file + if sample['real'] == 0 : + other_sample = self.sample( input_chars=input_chars, real_prob = 1, output_chars=output_chars) + sample['output_text'] = other_sample['output_text'] + + return sample + + + def test(self, n=100): + t = c.time() + for i in range(1, n + 1): + sample = self.sample() + msg = {'samples_per_second': i / (c.time() - t)} + c.print(msg) + + + prompt = ''' + INPUT (JSON): + ```{sample}``` + QUESTION: + 
+ WAS THE INPUT REAL (1) OR TAMPERED (0)? -> : + + OUTPUT (answer: int): + json``` + ''' + + + def parse_output(self, output:dict)-> dict: + if '0' in output or 'yes' in output.lower(): + return 0 + elif '1' in output or 'no' in output.lower(): + return 1 + else: + raise Exception(f'Invalid output: {output}, expected 0 or 1') + + + def score(self, model='model', w:float=0.0): + + model = c.connect(model, prefix_match=True) + + try: + sample = self.sample() + t = c.time() + prompt = self.prompt.format(sample=sample) + output = model.generate(prompt) + output = self.parse_output(output) + w = 0.2 + except Exception as e: + return {'error': c.detailed_error(e), 'w':w} + + if output == sample['real']: + w = 1 + + msg = { + 'prompt': prompt, + 'latency': c.time() - t, + 'target': sample['real'], + 'prediction': output, + 'w' : w, + } + + return msg + + + def validate(self, module=None, network=None) -> float: + if isinstance(module, str): + module = c.connect(module, prefix_match=True, network=network) + if module == None: + module = self + t = c.time() + my_sample = self.sample(real_prob=1) + kwargs = {k:my_sample[k] for k in ['input_chars', 'output_chars', 'idx', 'start_index']} + kwargs['real_prob'] = 1 + other_sample = module.sample(**kwargs) + + + for k in my_sample.keys(): + if other_sample[k] != my_sample[k]: + return 0.0 + return 1.0 + + + diff --git a/modules/data/text/folder/data_text_folder.yaml b/modules/data/text/folder/data_text_folder.yaml new file mode 100644 index 00000000..e69de29b diff --git a/modules/data/text/folder/docs/data_text_realfake_docs.md b/modules/data/text/folder/docs/data_text_realfake_docs.md new file mode 100644 index 00000000..287003b3 --- /dev/null +++ b/modules/data/text/folder/docs/data_text_realfake_docs.md @@ -0,0 +1,142 @@ + +# data.text.realfake + + +The following is a dataset that takes a folder of text, and draws real or fake samples from that folder of text. This means that it will take a folder of text, and draw samples from that folder of text. It will then return a sample of text that is either real or fake. This is useful for training a model to detect real or fake text. This uses a random variable. + +![Alt text](image.png) + + +## Register + +```bash +c data.text.realfake register tag=whadup +``` + +## Serve + +```bash + +c data.text.realfake serve tag=whadup +``` + + + +## Test +```bash + +c data.text.realfake test +``` + +## Sample +```bash + +c call data.text.realfake::whadup sample +``` + + + +# DataTextRealfake Module Documentation + +The `DataTextRealfake` module is a Python class that provides functionality to generate and manipulate text samples from Python source code files. It can be used to create training data for various machine learning tasks, especially those related to detecting real and fake (synthetic) code snippets. In this documentation, we will walk through the different components of the code and explain their functionality. + +## Class Definition: DataTextRealfake + +```python +class DataTextRealfake(c.Module): +``` + +The `DataTextRealfake` class is defined, inheriting from the `c.Module` class (presumably from the `commune` module). This indicates that `DataTextRealfake` extends the functionality provided by the base `c.Module` class. 
+ +## Initialization: `__init__` Method + +```python +def __init__(self, **kwargs): + config = self.set_config(kwargs) + self.folder_path = self.resolve_path(config.folder_path) + self.filepaths = sorted([f for f in self.walk(self.folder_path) if f.endswith('.py')]) +``` + +The `__init__` method is the class constructor that initializes an instance of the `DataTextRealfake` class. It takes keyword arguments `kwargs`, which presumably allow the user to pass additional configuration parameters. + +- The method first uses the `set_config` function to process and set the configuration based on the provided keyword arguments. +- It resolves the folder path specified in the configuration using the `resolve_path` method. +- It retrieves a sorted list of file paths (Python source code files) within the specified folder using the `walk` method. It filters the list to include only files ending with the ".py" extension. + +## Generating Random Index: `random_idx` Method + +```python +def random_idx(self): + return self.random_int(0, len(self.filepaths)-1) +``` + +The `random_idx` method generates a random index within the range of valid indices for the `filepaths` list. It utilizes the `random_int` method from the base class (likely `commune`) to achieve this. + +## Generating a Sample: `sample` Method + +```python +def sample(self, idx=None, input_chars: int = 500, output_chars: int = 500, start_index: int = 0, real_prob: float = 0.5): +``` + +The `sample` method generates a text sample from a randomly selected Python source code file. It takes several optional parameters: + +- `idx`: Index of the file to use as the source for the sample. If not provided, a random index is chosen. +- `input_chars`: The number of characters to use as the input text for the sample. +- `output_chars`: The number of characters to use as the output text for the sample. +- `start_index`: The starting index within the selected file for extracting input and output text. +- `real_prob`: Probability of selecting a real (non-synthetic) sample. + +The method works as follows: + +- If `idx` is not provided, a random index is generated using the `random_idx` method. +- The file path corresponding to the selected index is retrieved. +- The content of the file is read using the `get_text` function from the `commune` module. +- A suitable starting index within the file's content is determined, ensuring that there's enough content for both input and output. +- Input and output text bounds are defined based on the starting index and provided character counts. +- A dictionary called `sample` is populated with information about the selected sample, including input text, output text, and file path. +- The `real` key in the `sample` dictionary is set based on a random probability check, indicating whether the sample is considered real or synthetic. + +If the selected sample is synthetic (not real), a different sample is recursively selected using a probability of 0 for real samples. + +## Generating Test Samples: `test` Method + +```python +def test(self, n=100): +``` + +The `test` method is used to generate and test multiple samples. It takes an optional parameter `n` which specifies the number of samples to generate and test. + +- The method initializes a timer and then iterates over the range of `n`. +- For each iteration, a sample is generated using the `sample` method. +- The `samples_per_second` metric is calculated as the number of iterations divided by the time taken. 
+- The metric is printed using the `print` function from the base class. + +## Parsing Output: `parse_output` Method + +```python +def parse_output(self, output: dict) -> dict: +``` + +The `parse_output` method is responsible for converting the output of a prediction (presumably obtained from a machine learning model) into binary values (0 or 1) based on certain keywords. It takes a dictionary `output` as input, presumably containing the result of a model prediction. + +- The method checks the content of the output using case-insensitive comparisons to determine whether it contains keywords like '0', '1', 'yes', or 'no'. +- If the keywords are found, the method returns 0 or 1 accordingly. +- If no valid keyword is found, an exception is raised with an error message indicating the invalid output. + +## Scoring a Model: `score` Method + +```python +def score(self, model, w: float = 0.0): +``` + +The `score` method evaluates the performance of a given machine learning `model` using the generated samples. It takes a machine learning model as input, along with an optional weight `w`. + +- The method attempts to generate a sample using the `sample` method. +- It measures the time taken for generating the sample. +- The model is used to generate an output prediction based on the sample. +- The `parse_output` method is used to convert the prediction into binary form (0 or 1). +- The weight `w` is then updated to 0.2 (though the purpose of this is not entirely clear from this code snippet). + +If the prediction matches the ground truth (i.e., the real/fake label of the sample), the weight `w` is set to 1. A dictionary containing various metrics and information about the scoring process is returned. + + diff --git a/modules/data/text/math/data_text_math.py b/modules/data/text/math/data_text_math.py new file mode 100644 index 00000000..2d03c4cc --- /dev/null +++ b/modules/data/text/math/data_text_math.py @@ -0,0 +1,51 @@ +import commune as c +import random +class DataTextMath(c.Module): + def __init__(self, **kwargs): + config = self.set_config(kwargs) + self.operations = { + 'add': '+', + 'subtract': '-', + 'multiply': '*', + 'divide': '/', + 'modulo': '%', + } + @staticmethod + def random_value( bounds = [-1, 1]): + output = 0 + while output == 0: + output = random.random() * (bounds[1] - bounds[0]) + bounds[0] + return output + + + def sample(self, n = 2): + op_chain = [] + y = self.random_value() + for i in range(n): + + op = random.choice(list(self.operations.keys())) + op_chain.append([op]) + # float between -1 and 1 + x = self.random_value() + y = c.round(y, 3) + x = c.round(x, 3) + + opstr = f'{y} {self.operations[op]} {x}' + y = eval(opstr) + + op_chain[-1].append(opstr) + + sample = { + 'opperation_chain': op_chain, + 'answer': y + } + return sample + + @classmethod + def test(cls, n=100): + self = cls() + t = c.time() + for i in range(n): + sample = self.sample() + msg = {'samples_per_second': i / (c.time() - t)} + c.print(msg, sample) diff --git a/modules/data/text/math/data_text_math.yaml b/modules/data/text/math/data_text_math.yaml new file mode 100644 index 00000000..f2a8fffe --- /dev/null +++ b/modules/data/text/math/data_text_math.yaml @@ -0,0 +1,2 @@ +folder_path: ./ +suffix: .py \ No newline at end of file diff --git a/modules/data/text/pile/pile.py b/modules/data/text/pile/pile.py new file mode 100644 index 00000000..553d7173 --- /dev/null +++ b/modules/data/text/pile/pile.py @@ -0,0 +1,240 @@ + + +import commune +from typing import List +import json + +import threading +import 
queue +import os +import torch + + +class Pile(commune.Module): + num_shards = 29 + default_shards = list(range(num_shards)) + + def __init__(self,config=None): + self.stop_threads = False + self.device = 'cpu' + + config = self.set_config(config) + self.url = self.config.url + self.set_shards(config.shards) + self.set_tokenizer(config.tokenizer) + self.start_text_generator() + + + def set_shards(self, shards): + self.shards_urls = self.get_shard_urls(shards) + + @classmethod + def resolve_shards(self, shards): + if isinstance(shards, int): + shards = list(range(shards)) + assert isinstance(shards, list) + + for s in shards: + assert isinstance(s, int) + + return shards + + @classmethod + def get_shard_urls(cls, shards: List[int] = 29, split='train'): + shards = cls.resolve_shards(shards) + shard_urls = [] + for s in shards: + shard_urls.append(cls.get_shard_url(s, split=split)) + return shard_urls + + + + @classmethod + def get_shard_url(cls, shard=0, split='train'): + config = cls.config() + assert isinstance(shard, int) + filename =f'{shard}.jsonl.zst' if bool(shard >= 10) else f'0{shard}.jsonl.zst' + shard_url = f'{config.url}/{split}/{filename}' + return shard_url + + @classmethod + def ls_shards(cls): + return [p for p in cls.glob('shards') if p.endswith('.jsonl')] + + @classmethod + def get_shard_path(cls, shard:int, split:str='train', ext='jsonl'): + filename = f'{shard}' if shard >= 10 else f'0{shard}' + path= cls.resolve_path(f'shards/{filename}.{ext}') + return path + + resolve_shard_path = get_shard_path + @classmethod + def shard_exists(cls, shard:int, split='train')-> bool: + shard_path = cls.resolve_shard_path(shard,split) + return bool(shard_path in cls.ls_shards()) + + + @classmethod + def download_shard(cls, + shard:int = 1, + split='train', + refresh: bool = False, + *args, **kwargs): + shard_url = cls.get_shard_url( shard=shard, split=split) + shard_exists = cls.shard_exists(shard=shard, split=split) + path = cls.resolve_shard_path(shard, split) + + if shard_exists and not refresh : + cls.print(f'THE PILE: shard {shard} for split {split} exists', color='yellow') + return None + + return cls.cmd(f'wget -P {path} {shard_url}', verbose=True, *args, **kwargs) + + @classmethod + def download_fleet(cls, shards=3, split='train'): + for s in range(shards): + name = f'task.pile.download.s{s}.{split}' + cls.deploy(fn='deploy_shard', name=name ) + + + + def get_text(self, shard=1, split='train', path=None, start_pos=0): + path = self.get_shard_path(shard=shard, split=split) if path is None else path + with open(path, 'r') as f: + # Move the file pointer to the starting position + f.seek(start_pos) + cnt = 0 + for line in f: + # print(line) + data = json.loads(line) + + # # print(data['text']) + # self.print(data['text']) + self.queue.put(data['text']) + if self.stop_threads: + break + + + def start_text_generator(self, num_threads=1, shard=1, split='train', path=None): + self.queue = queue.Queue(1000) + path = self.get_shard_path(shard=shard, split=split) + file_size = os.stat(path).st_size + chunk_size = file_size // num_threads + start_pos = 0 + threads = [] + for i in range(num_threads): + # Start the thread with the current start position + t = threading.Thread(target=self.get_text, args=(shard, split, path, start_pos)) + t.start() + threads.append(t) + # Update the start position for the next thread + start_pos += chunk_size + # If this is the last thread, read until the end of the file + if i == num_threads - 2: + chunk_size = file_size - start_pos + + self.threads = threads 
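+        # caveat: the start positions are raw byte offsets, so a thread seeking
+        # into the middle of the file can land mid-line, and json.loads will fail
+        # on its first partial record; scanning forward to the next newline before
+        # parsing would make multi-threaded reads safe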
+ # # Wait for all threads to finish + # for t in threads: + # t.join() + + + def stop_threads(self): + self.stop_threads=True + + + def __del__(self): + self.shutdown() + + def shutdown(self, wait=True): + self.stop_threads = True + # if wait: + # for t in self.threads: + # try: + # t.join() + # except Exception: + # pass + + def sample_text(self): + return self.queue.get() + + def sample(self, batch_size:int=32, sequence_length:int=256, idx_list:List[int] = None, tokenize:bool= True)->dict: + + sample_dict = {'text': [self.sample_text() for i in range(batch_size)]} + + if tokenize: + sample_dict = self.tokenize(text=sample_dict['text'], max_length=sequence_length) + + return sample_dict + + forward = sample + + + def tokenize(self, text: str = 'Whadup', + padding=True, + truncation=True, + max_length=256, + + return_tensors='pt', + add_special_tokens=False, + device:str = None, + tokenizer: str = None, + **kwargs) -> torch.Tensor: + """ Returns tokenized text as torch tensor. """ + tokenizer = tokenizer if tokenizer else self.tokenizer + if isinstance(tokenizer, str): + raise NotImplementedError + sample = tokenizer(text, + padding=padding, + truncation=truncation, + max_length=max_length, + return_tensors=return_tensors, + add_special_tokens=add_special_tokens, + **kwargs) # assume tokenizer.padding_side = 'left' + + device = device if device != None else self.device + + sample = dict( + input_ids= sample['input_ids'].to(device), + attention_mask= sample['attention_mask'].to(device) + ) + + return sample + + + + def set_tokenizer(self, tokenizer): + from transformers import AutoTokenizer, AutoModel + from commune.utils.tokenizer import prep_tokenizer + + assert isinstance(tokenizer, str) + self.print(f'setting {tokenizer} tokenizer...') + assert isinstance(tokenizer, str, ) + self.config['tokenizer'] = tokenizer + + try: + # HACK TO INCLUDE LLAMA TOKENIZER + tokenizer = AutoTokenizer.from_pretrained(tokenizer, use_fast= True) + except ValueError: + + print('resorting ot use_fast = False') + tokenizer = AutoTokenizer.from_pretrained(tokenizer, use_fast=False) + + self.tokenizer = tokenizer + self.std_tokenizer = AutoTokenizer.from_pretrained('gpt2', use_fast= True) + self.std_tokenizer = prep_tokenizer(self.std_tokenizer) + self.tokenizer = prep_tokenizer(self.tokenizer, self.std_tokenizer) + + return self.tokenizer + + def to(self, device): + self.device = device + return self.device + + @classmethod + def test(cls, *args, **kwargs): + self = cls(*args,**kwargs) + self.print(self.sample()) + self.shutdown() + + # self.shutdown() diff --git a/modules/data/text/pile/pile.yaml b/modules/data/text/pile/pile.yaml new file mode 100644 index 00000000..25df3a6d --- /dev/null +++ b/modules/data/text/pile/pile.yaml @@ -0,0 +1,7 @@ +url: https://the-eye.eu/public/AI/pile +shards: 29 +batch_size: 32 +seqeunce_length: 256 +tokenizer: gpt2 +url: https://the-eye.eu/public/AI/pile +device: False \ No newline at end of file diff --git a/modules/data/text/realfake/data_text_realfake.py b/modules/data/text/realfake/data_text_realfake.py new file mode 100644 index 00000000..7e8ede5f --- /dev/null +++ b/modules/data/text/realfake/data_text_realfake.py @@ -0,0 +1,118 @@ +import commune as c + +class DataTextRealfake(c.Module): + + prompt = ''' + INPUT (JSON): + ```{sample}``` + QUESTION: + + WAS THE INPUT REAL (1) OR TAMPERED (0)? 
-> : + + OUTPUT (answer: int): + json``` + ''' + + def __init__(self, **kwargs): + config = self.set_config(kwargs) + self.folder_path = self.resolve_path(config.folder_path) + self.filepaths = sorted([f for f in self.walk(self.folder_path) if f.endswith('.py')]) + + def random_idx(self): + return self.random_int(0, len(self.filepaths)-1) + + + def sample(self, idx=None, + input_chars:int = 500, + output_chars: int = 500, + start_index: int = None, + real_prob:float=0.5, + line_break_prob: float = 0.2, + random_line_ratio: float = 0.2): + + if idx == None: + while True: + idx = self.random_idx() + filepath = self.filepaths[idx] + file_text = c.get_text(filepath) + if len(file_text) >= input_chars + output_chars: + break + else: + idx = None + else: + filepath = self.filepaths[idx] + file_text = c.get_text(filepath) + + if start_index == None: + start_index = c.random_int(0, len(file_text) - input_chars - output_chars ) + + # we need to make sure that the input and output are not the same + input_bounds = [start_index, start_index + input_chars] + output_bounds = [start_index + input_chars, start_index + input_chars + output_chars] + + + # split the filetext and randomly add line breaks + + sample = { + 'input_text': file_text[input_bounds[0]:input_bounds[1]], + 'output_text': file_text[output_bounds[0]:output_bounds[1]], + 'filepath': filepath, + 'idx': idx, + 'start_index': start_index, + 'input_chars': input_chars, + 'output_chars': output_chars, + + } + + # add line breaks in the input text + for k in ['input_text', 'output_text']: + sample[k] = '\n'.join([t + '\n' if c.random_float() > line_break_prob else t for t in sample[k].split('\n')]) + + # do a kick flip + real = c.random_float(0,1) < real_prob + + sample['real'] = int(real) + + # then we need to sample a different file + if sample['real'] == 0 : + other_sample = self.sample( input_chars=input_chars, real_prob = 1, output_chars=output_chars) + sample['output_text'] = other_sample['output_text'] + + return sample + + + def parse_output(self, output:dict)-> dict: + if '0' in output or 'yes' in output.lower(): + return 0 + elif '1' in output or 'no' in output.lower(): + return 1 + else: + raise Exception(f'Invalid output: {output}, expected 0 or 1') + + + def score(self, model='model', w:float=0.0): + + model = c.connect(model, prefix_match=True) + + try: + sample = self.sample() + t = c.time() + prompt = self.prompt.format(sample=sample) + output = model.generate(prompt) + output = self.parse_output(output) + w = 0.2 + except Exception as e: + return {'error': c.detailed_error(e), 'w':w} + + if output == sample['real']: + w = 1 + + msg = { + 'prompt': prompt, + 'latency': c.time() - t, + 'target': sample['real'], + 'prediction': output, + 'w' : w, + } + + return msg diff --git a/modules/data/text/realfake/data_text_realfake.yaml b/modules/data/text/realfake/data_text_realfake.yaml new file mode 100644 index 00000000..a0369c34 --- /dev/null +++ b/modules/data/text/realfake/data_text_realfake.yaml @@ -0,0 +1,3 @@ +folder_path: ./ +suffix: .py +search: model \ No newline at end of file diff --git a/modules/data/text/realfake/docs/data_text_realfake_docs.md b/modules/data/text/realfake/docs/data_text_realfake_docs.md new file mode 100644 index 00000000..287003b3 --- /dev/null +++ b/modules/data/text/realfake/docs/data_text_realfake_docs.md @@ -0,0 +1,142 @@ + +# data.text.realfake + + +The following is a dataset that takes a folder of text, and draws real or fake samples from that folder of text. 
This means that it will take a folder of text, and draw samples from that folder of text. It will then return a sample of text that is either real or fake. This is useful for training a model to detect real or fake text. This uses a random variable. + +![Alt text](image.png) + + +## Register + +```bash +c data.text.realfake register tag=whadup +``` + +## Serve + +```bash + +c data.text.realfake serve tag=whadup +``` + + + +## Test +```bash + +c data.text.realfake test +``` + +## Sample +```bash + +c call data.text.realfake::whadup sample +``` + + + +# DataTextRealfake Module Documentation + +The `DataTextRealfake` module is a Python class that provides functionality to generate and manipulate text samples from Python source code files. It can be used to create training data for various machine learning tasks, especially those related to detecting real and fake (synthetic) code snippets. In this documentation, we will walk through the different components of the code and explain their functionality. + +## Class Definition: DataTextRealfake + +```python +class DataTextRealfake(c.Module): +``` + +The `DataTextRealfake` class is defined, inheriting from the `c.Module` class (presumably from the `commune` module). This indicates that `DataTextRealfake` extends the functionality provided by the base `c.Module` class. + +## Initialization: `__init__` Method + +```python +def __init__(self, **kwargs): + config = self.set_config(kwargs) + self.folder_path = self.resolve_path(config.folder_path) + self.filepaths = sorted([f for f in self.walk(self.folder_path) if f.endswith('.py')]) +``` + +The `__init__` method is the class constructor that initializes an instance of the `DataTextRealfake` class. It takes keyword arguments `kwargs`, which presumably allow the user to pass additional configuration parameters. + +- The method first uses the `set_config` function to process and set the configuration based on the provided keyword arguments. +- It resolves the folder path specified in the configuration using the `resolve_path` method. +- It retrieves a sorted list of file paths (Python source code files) within the specified folder using the `walk` method. It filters the list to include only files ending with the ".py" extension. + +## Generating Random Index: `random_idx` Method + +```python +def random_idx(self): + return self.random_int(0, len(self.filepaths)-1) +``` + +The `random_idx` method generates a random index within the range of valid indices for the `filepaths` list. It utilizes the `random_int` method from the base class (likely `commune`) to achieve this. + +## Generating a Sample: `sample` Method + +```python +def sample(self, idx=None, input_chars: int = 500, output_chars: int = 500, start_index: int = 0, real_prob: float = 0.5): +``` + +The `sample` method generates a text sample from a randomly selected Python source code file. It takes several optional parameters: + +- `idx`: Index of the file to use as the source for the sample. If not provided, a random index is chosen. +- `input_chars`: The number of characters to use as the input text for the sample. +- `output_chars`: The number of characters to use as the output text for the sample. +- `start_index`: The starting index within the selected file for extracting input and output text. +- `real_prob`: Probability of selecting a real (non-synthetic) sample. + +The method works as follows: + +- If `idx` is not provided, a random index is generated using the `random_idx` method. 
+- The file path corresponding to the selected index is retrieved. +- The content of the file is read using the `get_text` function from the `commune` module. +- A suitable starting index within the file's content is determined, ensuring that there's enough content for both input and output. +- Input and output text bounds are defined based on the starting index and provided character counts. +- A dictionary called `sample` is populated with information about the selected sample, including input text, output text, and file path. +- The `real` key in the `sample` dictionary is set based on a random probability check, indicating whether the sample is considered real or synthetic. + +If the selected sample is synthetic (not real), a different sample is recursively selected using a probability of 0 for real samples. + +## Generating Test Samples: `test` Method + +```python +def test(self, n=100): +``` + +The `test` method is used to generate and test multiple samples. It takes an optional parameter `n` which specifies the number of samples to generate and test. + +- The method initializes a timer and then iterates over the range of `n`. +- For each iteration, a sample is generated using the `sample` method. +- The `samples_per_second` metric is calculated as the number of iterations divided by the time taken. +- The metric is printed using the `print` function from the base class. + +## Parsing Output: `parse_output` Method + +```python +def parse_output(self, output: dict) -> dict: +``` + +The `parse_output` method is responsible for converting the output of a prediction (presumably obtained from a machine learning model) into binary values (0 or 1) based on certain keywords. It takes a dictionary `output` as input, presumably containing the result of a model prediction. + +- The method checks the content of the output using case-insensitive comparisons to determine whether it contains keywords like '0', '1', 'yes', or 'no'. +- If the keywords are found, the method returns 0 or 1 accordingly. +- If no valid keyword is found, an exception is raised with an error message indicating the invalid output. + +## Scoring a Model: `score` Method + +```python +def score(self, model, w: float = 0.0): +``` + +The `score` method evaluates the performance of a given machine learning `model` using the generated samples. It takes a machine learning model as input, along with an optional weight `w`. + +- The method attempts to generate a sample using the `sample` method. +- It measures the time taken for generating the sample. +- The model is used to generate an output prediction based on the sample. +- The `parse_output` method is used to convert the prediction into binary form (0 or 1). +- The weight `w` is then updated to 0.2 (though the purpose of this is not entirely clear from this code snippet). + +If the prediction matches the ground truth (i.e., the real/fake label of the sample), the weight `w` is set to 1. A dictionary containing various metrics and information about the scoring process is returned. 
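+
+## Example: scoring a served model
+
+A rough end-to-end sketch (assuming a model is already served on the local network under the name `model`):
+
+```python
+import commune as c
+
+# build the dataset module directly
+d = c.module('data.text.realfake')()
+
+sample = d.sample()            # {'input_text': ..., 'output_text': ..., 'real': 0 or 1, ...}
+result = d.score(model='model')
+print(result.get('w'))         # 1 on a correct prediction, lower otherwise
+```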
+ + diff --git a/modules/data/text/squad.py b/modules/data/text/squad.py new file mode 100644 index 00000000..60fd8355 --- /dev/null +++ b/modules/data/text/squad.py @@ -0,0 +1,22 @@ +import commune as c +from datasets import load_dataset + + +class Squad(c.Module): + def __init__(self, name='squad'): + self.dataset = load_dataset(name) + + def sample(self, idx=4): + return self.dataset['train'][idx] + + @classmethod + def test(cls, *args, **kwargs): + cls.print('Testing dataset') + dataset = cls(*args, **kwargs) + sample = dataset.sample() + print(sample) + + assert isinstance(sample, dict) + return sample + +Squad.run(__name__) \ No newline at end of file diff --git a/modules/data/text/truthqa/data_text_truthqa.py b/modules/data/text/truthqa/data_text_truthqa.py new file mode 100644 index 00000000..adbabe3a --- /dev/null +++ b/modules/data/text/truthqa/data_text_truthqa.py @@ -0,0 +1,67 @@ +import commune as c +from typing import List + +class DataTextTruthQa(c.Module): + + + def score(self, module:str, fn='sample', kwargs={'idx': 0}): + reference_output = getattr(self.dataset, fn)(**kwargs) + if isinstance(module, str): + output = c.call(module=module, fn=fn, **kwargs) + else: + output = getattr(module, fn)(**kwargs) + w = 0 + if isinstance(output, dict): + w = 1 + for key in reference_output.keys(): + if key not in output or output[key] != reference_output[key]: + w = 0 + break + else: + if output == reference_output: + w = 1 + return {'w': w, 'output': output, 'reference_output': reference_output} + + def __init__(self, **kwargs): + config = self.set_config(config=kwargs) + self.dataset = c.module('data.hf')(**config) + def sample(self, **kwargs): + sample = self.dataset.sample(**kwargs) + assert isinstance(sample, dict) + sample = { + 'question': sample['question'], + 'choices': c.shuffle(sample['incorrect_answers'] + sample['correct_answers']), + 'answers': sample['correct_answers'] + } + + sample['answers'] = [i for i, choice in enumerate(sample['choices']) if choice in sample['answers']] + return sample + + def validate_replicas(self, replicas:List[str], fn='sample', kwargs={'idx': 0}): + scores = {replica: self.score(module=replica, fn=fn, kwargs=kwargs) for replica in replicas} + return scores + + def test(self, n=100): + t = c.time() + for i in range(n): + sample = self.sample() + assert isinstance(sample, dict), f'sample is not a dict: {sample}' + for key in ['question', 'choices', 'answers']: + assert key in sample, f'{key} not in sample' + c.print(i) + t_passed = c.time() - t + modules_per_second = n / t_passed + stats = { + 'modules_per_second': modules_per_second, + 'latency': t_passed, + 'n': n, + } + + return {'success': True, 'msg': 'DataTextTruthQa test passed', 'stats': stats } + + + + + + + + diff --git a/modules/data/text/truthqa/data_text_truthqa.yaml b/modules/data/text/truthqa/data_text_truthqa.yaml new file mode 100644 index 00000000..52dbdf9c --- /dev/null +++ b/modules/data/text/truthqa/data_text_truthqa.yaml @@ -0,0 +1,2 @@ +path: truthqa +name: generation \ No newline at end of file diff --git a/modules/docker/docker.py b/modules/docker/docker.py new file mode 100644 index 00000000..74b355d1 --- /dev/null +++ b/modules/docker/docker.py @@ -0,0 +1,451 @@ + +import os +import pandas as pd +from typing import List, Dict, Union +import commune as c + +class Docker(c.Module): + + @classmethod + def dockerfile(cls, path = c.repo_path): + path = [f for f in c.ls(path) if f.endswith('Dockerfile')][0] + return c.get_text(path) + + 
@classmethod + def resolve_repo_path(cls, path): + if path is None: + path = c.repo_path + else: + # treat anything that is not absolute, home-relative or dot-relative as repo-relative + if not path.startswith(('/', '~', '.')): + path = c.repo_path + '/' + path + else: + path = os.path.abspath(path) + return path + + @classmethod + def resolve_docker_compose_path(cls, path = None): + path = cls.resolve_repo_path(path) + return [f for f in c.ls(path) if 'docker-compose' in os.path.basename(f)][0] + + @classmethod + def docker_compose(cls, path = c.repo_path): + docker_compose_path = cls.resolve_docker_compose_path(path) + return c.load_yaml(docker_compose_path) + + @classmethod + def resolve_docker_path(cls, path = None): + path = cls.resolve_repo_path(path) + return [f for f in c.ls(path) if 'Dockerfile' in os.path.basename(f)][0] + + @classmethod + def build(cls, path = None , tag = None , sudo=False, verbose=True, no_cache=False, env={}): + path = c.resolve_path(path) + + if tag is None: + tag = path.split('/')[-2] + + cmd = f'docker build -t {tag} .' + if no_cache: + cmd += ' --no-cache' + return c.cmd(cmd, sudo=sudo, env=env, cwd=os.path.dirname(path), verbose=verbose) + @classmethod + def kill(cls, name, sudo=False, verbose=True, prune=False): + c.cmd(f'docker kill {name}', sudo=sudo, verbose=verbose) + c.cmd(f'docker rm {name}', sudo=sudo, verbose=verbose) + if prune: + c.cmd('docker container prune', sudo=sudo, verbose=verbose) + return {'status': 'killed', 'name': name} + + @classmethod + def kill_many(cls, name, sudo=False, verbose=True): + servers = cls.ps(name) + for server in servers: + cls.kill(server, sudo=sudo, verbose=verbose) + c.print(f'killed {server}', verbose=verbose) + return {'status': 'killed', 'name': name} + + @classmethod + def kill_all(cls, sudo=False, verbose=True): + servers = cls.ps() + for server in servers: + cls.kill(server, sudo=sudo, verbose=verbose) + c.print(f'killed {server}', verbose=verbose) + return {'status': 'killed'} + @classmethod + def rm(cls, name, sudo=False, verbose=True): + c.cmd(f'docker rm {name}', sudo=sudo, verbose=verbose) + return {'status': 'removed', 'name': name} + + @classmethod + def exists(cls, name:str): + return name in cls.ps() + + @classmethod + def rm_sudo(cls, sudo:bool=True, verbose:bool=True): + ''' + To remove the requirement for sudo when using Docker, you can configure Docker to run without superuser privileges. 
Here's how you can do it: + Create a Docker group (if it doesn't exist) and add your user to that group: + bash + Copy code + sudo groupadd docker + sudo usermod -aG docker $USER + return c.cmd(f'docker rm -f {name}', sudo=True) + ''' + c.cmd(f'groupadd docker', sudo=sudo, verbose=verbose) + c.cmd(f'usermod -aG docker $USER', sudo=sudo, verbose=verbose) + c.cmd(f'chmod 666 /var/run/docker.sock', sudo=sudo, verbose=verbose) + + + + + + @classmethod + def containers(cls, sudo:bool = False): + return [container['name'] for container in cls.ps(sudo=sudo)] + + @classmethod + def chmod_scripts(cls): + c.cmd(f'bash -c "chmod +x {c.libpath}/scripts/*"', verbose=True) + + + + def install_gpus(self): + self.chmod_scripts() + c.cmd('./scripts/nvidia_docker_setup.sh', cwd=c.libpath, verbose=True,bash=True) + + def install(self): + self.chmod_scripts() + c.cmd('./scripts/install_docker.sh', cwd=c.libpath, verbose=True,bash=True) + + + @classmethod + def install_docker_compose(cls, sudo=False): + return c.cmd('apt install docker-compose', verbose=True, sudo=True) + # def build_commune(self, sudo=False): + # self.build(path=self.libpath, sudo=sudo) + + @classmethod + def images(cls, to_records=True): + text = c.cmd('docker images', verbose=False) + df = [] + cols = [] + for i, l in enumerate(text.split('\n')): + if len(l) > 0: + if i == 0: + cols = [_.strip().replace(' ', '_').lower() for _ in l.split(' ') if len(_) > 0] + else: + df.append([_.strip() for _ in l.split(' ') if len(_) > 0]) + df = pd.DataFrame(df, columns=cols) + if to_records: + return df.to_records() + return df + + def rm_image(self, image_id): + response = {'success': False, 'image_id': image_id} + c.cmd(f'docker image rm -f {image_id}', verbose=True) + response['success'] = True + return response + + def rm_images(self, search:List[str]=None): + image_records = self.images(to_records=False) + responses = [] + for i, image_record in image_records.iterrows(): + image_dict = image_record.to_dict() + + if search == None or str(search.lower()) in image_dict['repository']: + r = self.rm_image(image_dict['image_id']) + responses.append(r) + + return {'success': True, 'responses': responses } + + + @classmethod + def image2id(cls, image=None): + image2id = {} + df = cls.images() + for i in range(len(df)): + image2id[df['REPOSITORY'][i]] = df['IMAGE_ID'][i] + if image != None: + id = image2id[image] + return id + + @classmethod + def deploy(cls, + image : str, + cmd : str = 'ls', + volumes:List[str] = None, + name: str = None, + gpus:list=False, + shm_size : str='100g', + sudo:bool = False, + build:bool = True, + ports:Dict[str, int]=None, + net : str = 'host', + daemon:bool = True, + run: bool = True): + + ''' + Arguments: + + ''' + if name is None: + name = image + + docker_cmd = f'docker run' + + docker_cmd += f' --net {net} ' + + if build: + cls.build(image, tag=name) + + if daemon: + docker_cmd += ' -d ' + + if isinstance(gpus, list): + gpus = ','.join(map(str, gpus)) + docker_cmd += f' --gpus \'"device={gpus}"\'' + elif isinstance(gpus, str): + docker_cmd += f' --gpus "{gpus}"' + else: + pass + + # ADD THE SHM SIZE + # what is this? 
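+        # --shm-size sets the container's /dev/shm allocation; Docker's 64MB
+        # default is too small for workloads (e.g. dataloaders) that pass large
+        # tensors through shared memory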
+ if shm_size != None: + docker_cmd += f' --shm-size {shm_size}' + + if ports != None: + for external_port, internal_port in ports.items(): + docker_cmd += f' -p {external_port}:{internal_port}' + + # ADD THE VOLUMES + if volumes is not None: + if isinstance(volumes, str): + volumes = [volumes] + if isinstance(volumes, list): + docker_cmd += ' '.join([f' -v {v}' for v in volumes]) + elif isinstance(volumes, dict): + for v_from, v_to in volumes.items(): + docker_cmd += f' -v {v_from}:{v_to}' + + docker_cmd += f' --name {name} {image}' + + if cmd is not None: + docker_cmd += f' bash -c "{cmd}"' + + c.print(docker_cmd) + # text_output = c.cmd(docker_cmd, sudo=sudo, output_text=True) + + # if 'Conflict. The container name' in text_output: + # contianer_id = text_output.split('by container "')[-1].split('". You')[0].strip() + # c.cmd(f'docker rm -f {contianer_id}', verbose=True) + # text_output = c.cmd(docker_cmd, verbose=True) + # self.update() + + @classmethod + def psdf(cls, load=True, save=False, idx_key ='container_id'): + output_text = c.cmd('docker ps', verbose=False) + + rows = [] + for i, row in enumerate(output_text.split('\n')[:-1]): + if i == 0: + columns = [l.lower().strip().replace(' ', '_') for l in row.split(' ') if len(l) > 0] + else: + NA_SPACE = " " + if len(row.split(NA_SPACE)) > 1: + row_splits = row.split(NA_SPACE) + row = row_splits[0] + ' NA ' + ' '.join(row_splits[1:]) + row = [_.strip() for _ in row.split(' ') if len(_) > 0] + if len(row) == len(columns): + rows.append(row) + else: + c.print(rows) + + df = pd.DataFrame(rows, columns=columns) + df.set_index(idx_key, inplace=True) + return df + + @classmethod + def ps(cls, search = None, df:bool = False): + + psdf = cls.psdf() + paths = psdf['names'].tolist() + if search != None: + paths = [p for p in paths if p != None and search in p] + if df: + return psdf + paths = sorted(paths) + return paths + + @classmethod + def name2dockerfile(cls, path = None): + return {l.split('/')[-2] if len(l.split('/'))>1 else c.lib:l for l in cls.dockerfiles(path)} + + @classmethod + def resolve_dockerfile(cls, name): + if name == None: + name = 'commune' + + if c.exists(name): + return name + name2dockerfile = cls.name2dockerfile() + if name in name2dockerfile: + return name2dockerfile[name] + else: + raise ValueError(f'Could not find docker file for {name}') + + get_dockerfile = resolve_dockerfile + + @classmethod + def compose_paths(cls, path = None): + if path is None: + path = c.libpath + '/' + return [l for l in c.walk(path) if l.endswith('docker-compose.yaml') or l.endswith('docker-compose.yml')] + + @classmethod + def name2compose(cls, path=None): + compose_paths = cls.compose_paths(path) + return {l.split('/')[-2] if len(l.split('/'))>1 else c.lib:l for l in compose_paths} + + @classmethod + def get_compose_path(cls, path:str): + path = cls.name2compose().get(path, path) + return path + + @classmethod + def get_compose(cls, path:str): + path = cls.get_compose_path(path) + return c.load_yaml(path) + + @classmethod + def put_compose(cls, path:str, compose:dict): + path = cls.get_compose_path(path) + return c.save_yaml(path, compose) + + + # @classmethod + # def down(cls, path='frontend'): + # path = cls.get_compose_path(path) + # return c.cmd('docker-compose -f {path} down', verbose=True) + + @classmethod + def compose(cls, + path: str, + compose: Union[str, dict, None] = None, + daemon:bool = True, + verbose:bool = True, + dash:bool = True, + cmd : str = None, + build: bool = False, + project_name: str = None, + cwd : str = None, 
+ down: bool = False + ): + + + cmd = f'docker-compose' if dash else f'docker compose' + + path = cls.get_compose_path(path) + if compose == None: + compose = cls.get_compose(path) + + if isinstance(path, str): + compose = cls.get_compose(path) + + + if project_name != None: + cmd += f' --project-name {project_name}' + c.print(f'path: {path}', verbose=verbose) + tmp_path = path + '.tmp' + cmd += f' -f {tmp_path} up' + + if daemon: + cmd += ' -d' + + + c.print(f'cmd: {cmd}', verbose=verbose) + # save the config to the compose path + c.print(compose) + c.save_yaml(tmp_path, compose) + if cwd is None: + assert os.path.exists(path), f'path {path} does not exist' + cwd = os.path.dirname(path) + if build: + c.cmd(f'docker-compose -f {tmp_path} build', verbose=True, cwd=cwd) + + text_output = c.cmd(cmd, verbose=True) + + if 'Conflict. The container name' in text_output: + contianer_id = text_output.split('by container "')[-1].split('". You')[0].strip() + c.cmd(f'docker rm -f {contianer_id}', verbose=True) + text_output = c.cmd(cmd, verbose=True) + + if "unknown shorthand flag: 'f' in -f" in text_output: + cmd = cmd.replace('docker compose', 'docker-compose') + text_output = c.cmd(cmd, verbose=True) + + c.rm(tmp_path) + @classmethod + def rm_container(self, name): + c.cmd(f'docker rm -f {name}', verbose=True) + + @classmethod + def logs(cls, name, sudo=False, follow=False, verbose=False, tail:int=2): + cmd = f'docker logs {name} {"-f" if follow else ""} --tail {tail}' + return c.cmd(cmd, verbose=verbose) + + def log_map(self, search=None): + nodes = self.ps(search=search) + return {name: self.logs(name) for name in nodes} + + @classmethod + def tag(cls, image:str, tag:str): + c.cmd(f'docker tag {image} {tag}', verbose=True) + c.cmd(f'docker push {tag}', verbose=True) + @classmethod + def login(self, username:str, password:str): + c.cmd(f'docker login -u {username} -p {password}', verbose=True) + + @classmethod + def logout(self, image:str): + c.cmd(f'docker logout {image}', verbose=True) + + @classmethod + def dockerfiles(cls, path = None): + if path is None: + path = c.libpath + '/' + dockerfiles = [] + for l in c.walk(path): + if l.endswith('Dockerfile'): + c.print(l) + dockerfiles.append(l) + return dockerfiles + + + def name2dockerfile(self, path = None): + if path is None: + path = self.libpath + '/' + return {l.split('/')[-2] if len(l.split('/'))>1 else c.lib:l for l in self.dockerfiles(path)} + + + @classmethod + def dashboard(cls): + self = cls() + import streamlit as st + containers = self.psdf() + name2dockerfile = self.name2dockerfile() + names = list(name2dockerfile.keys()) + name = st.selectbox('Dockerfile', names) + dockerfile = name2dockerfile[name] + dockerfile_text = c.get_text(dockerfile) + st.code(dockerfile_text) + + + def prune(self): + return c.cmd('docker container prune') + + + def start_docker(self): + return c.cmd('systemctl start docker') + +Docker.run(__name__) \ No newline at end of file diff --git a/modules/emoji/emoji.py b/modules/emoji/emoji.py new file mode 100644 index 00000000..a2b61ae5 --- /dev/null +++ b/modules/emoji/emoji.py @@ -0,0 +1,100 @@ +import commune as c + +class Emoji(c.Module): + @classmethod + def emoji(cls, name:str): + emojis = [] + for k,v in cls.emojis.items(): + if name in k: + emojis += [v] + + return c.choice(emojis) + + emojis = {'dank': '🔥', + 'error': '💥', + 'white': '🕊️', + 'cool': '😎', + 'success': '✨', + 'sad': '😢', + 'time': '🕒', + 'count': '🔢', + 'output': '📤', + 'input': '📥', + 'party': '🥳', + 'fireworks': '🎆', + 'explosion': '💣', 
+ 'alien': '👽', + 'rocket': '🚀', + 'money': '💰', + 'victory': '✌️', + 'unicorn': '🦄', + 'rainbow': '🌈', + 'music': '🎵', + 'pizza': '🍕', + 'taco': '🌮', + 'sunglasses': '😎', + 'flame': '🔥', + 'diamond': '💎', + 'savage': '😈', + 'laughing': '😂', + 'ninja': '🥷', + 'skull': '💀', + 'thumbs_up': '👍', + 'thumbs_down': '👎', + 'crown': '👑', + 'cyber_eye': '👁️‍🗨️', + 'data_stream': '🌐', + 'brain': '🧠', + 'robot': '🤖', + 'lightning': '⚡', + 'heart': '❤️', + 'heartbreak': '💔', + 'heartpulse': '💗', + 'green_heart': '💚', + 'blue_heart': '💙', + 'purple_heart': '💜', + 'yellow_heart': '💛', + 'orange_heart': '🧡', + 'error': '💥', + 'cross': '❌', + 'check': '✅', + 'wrong': '❌', + 'right': '✅', + 'correct': '✅', + 'incorrect': '❌', + 'checkmark': '✅', + 'check_mark': '✅', + 'checkered_flag': '🏁', + 'warning': '⚠️', + 'warning_sign': f'⚠️', + 'question': '❓', + 'happy': '😀', + 'sad': '😢', + 'angry': '😠', + 'angry_face': '😠', + 'angry_face_with_horns': '👿', + 'devil': '😈', + 'red_circle': '🔴', + 'green_circle': '🟢', + 'blue_circle': '🔵', + 'yellow_circle': '🟡', + 'orange_circle': '🟠', + 'purple_circle': '🟣', + 'black_circle': '⚫', + 'white_circle': '⚪', + 'brown_circle': '🟤', + 'red_square': '🟥', + 'green_square': '🟩', + 'blue_square': '🟦', + 'yellow_square': '🟨', + 'orange_square': '🟧', + 'purple_square': '🟪', + 'black_square': '⬛', + 'white_square': '⬜', + 'brown_square': '🟫', + 'satellite': '🛰️', + 'satellite_antenna': '📡', + 'computer': '💻', + + } + diff --git a/modules/evm/chain/chain.py b/modules/evm/chain/chain.py new file mode 100644 index 00000000..f103df2a --- /dev/null +++ b/modules/evm/chain/chain.py @@ -0,0 +1,10 @@ +import commune as c + + +class Chain(c.Module): + + def start(self): + return c.cmd('docker-compose -f ' + self.docker_compose_path() + ' up -d') + + def docker_compose_path(self): + return self.dirpath() + '/docker-compose.yaml' \ No newline at end of file diff --git a/modules/evm/chain/docker-compose.yaml b/modules/evm/chain/docker-compose.yaml new file mode 100644 index 00000000..dc4a4842 --- /dev/null +++ b/modules/evm/chain/docker-compose.yaml @@ -0,0 +1,17 @@ +# Copyright 2020 ChainSafe Systems +# SPDX-License-Identifier: LGPL-3.0-only + +version: '3' +services: + geth1: + image: "chainsafe/chainbridge-geth:20200505131100-5586a65" + container_name: geth1 + ports: + - "8545:8545" + + sub-chain: + image: "chainsafe/chainbridge-substrate-chain:v1.3.0" + container_name: sub-chain + command: chainbridge-substrate-chain --dev --alice --ws-external --rpc-external + ports: + - "9944:9944" \ No newline at end of file diff --git a/modules/evm/contract.py b/modules/evm/contract.py new file mode 100644 index 00000000..b2bf95fc --- /dev/null +++ b/modules/evm/contract.py @@ -0,0 +1,362 @@ + + +import os +import sys +from copy import deepcopy +import asyncio +import commune as c +from glob import glob +from typing import Dict, List, Union, Any, Optional, Tuple, Callable, TypeVar, Type, cast + +class EVM(c.Module): + + base_dir = os.path + contracts_dir_path = base_dir + '/artifacts/' + interfaces_path = f'{os.environ["PWD"]}/interfaces/' + + def __init__(self, + + network: 'c.evm.network' = 'local.main', + account: 'c.evm.account' = None, + ): + + self.set_network(network) + self.set_account(account) + + @property + def address(self): + return self.contract.address + def accounts(self): + return self.account.accounts + + def call(self, function, args=[]): + if len(args) == 0: + args.append({'from': self.account}) + output = getattr(self.contract, function)(*args) + return 
self.parseOutput(function=function, outputs=output) + + def parseOutput(self, function, outputs): + output_abi_list = self.function_abi_map[function]['outputs'] + + parsedOutputs = {} + for i,output_abi in enumerate(output_abi_list) : + output_key = i + if output_abi['name']: + output_key = output_abi['name'] + + parsedOutputs[output_key] = outputs[i] + if 'components' in output_abi: + component_names = [c['name'] for c in output_abi['components']] + + parseStruct = lambda o: dict(zip(component_names, deepcopy(o))) + if type(outputs[i]) in [list, tuple, set]: + parsedOutputs[output_key] = list(map(parseStruct, outputs[i])) + else: + parsedOutputs[output_key] = parseStruct(outputs[i]) + + return parsedOutputs + + @property + def contract_paths(self): + return list(filter(lambda f: f.endswith('.sol'),self.glob(self.contracts_dir_path+'**'))) + + @property + def contracts(self): + contracts = [] + for path in self.contract_paths: + contracts += [os.path.basename(path).replace('.sol', '')] + return contracts + @property + def contract2path(self): + return dict(zip(self.contracts, self.contract_paths)) + + def get_artifact(self, path): + artifact_path = self.artifact2path[path] + artifact = self.get_json(artifact_path) + return artifact + + def get_abi(self,path): + return self.get_artifact(path)['abi'] + + @property + def interface_paths(self): + return list(filter(lambda f: f.endswith('.sol'),self.glob(self.interfaces_path+'**'))) + + @property + def interfaces(self): + interfaces = [] + for path in self.interface_paths: + interfaces += [os.path.splitext(path)[0].replace('/', '.')] + return interfaces + @property + def interface2path(self): + return dict(zip(self.interfaces, self.interface_paths)) + + @property + def artifact_paths(self): + full_path_list = list(filter(lambda f:f.endswith('.json') and not f.endswith('dbg.json') and os.path.dirname(f).endswith('.sol'), + self.glob(f'{self.artifacts_dir_path}/**'))) + + return full_path_list + + @property + def artifacts(self): + return list(self.artifact2path.keys()) + + + @property + def artifact2path(self): + artifact2path = {} + for path in self.artifact_paths: + key = os.path.basename(os.path.dirname(path)).replace('.sol','') + artifact2path[key] = path + return artifact2path + + def connected(self): + ''' + If the web3 instance is connected to a network + ''' + return bool( self.web3.__class__.__name__ == 'Web3') + + def disconnected(self): + return not self.connected() + + def set_network(self, network): + self.network = c.module('evm.network')(**network) + self.web3 = self.network.web3 + + connect_network = set_network + + def compile(self): + # compile smart contracts in compile + return self.run_command('npx hardhat compile') + + @property + def available_networks(self): + return self.network.available_networks + + @property + def network_name(self): + return self.network.network + + @property + def interfaces(self): + interfaces = list(filter(lambda f: f.startswith('interfaces'), self.artifacts)) + return list(map(lambda f:os.path.dirname(f.replace('interfaces/', '')), interfaces)) + + + def resolve_account(self, account): + if account == None: + account = self.account + return account + + def set_account(self, account:str): + self.account = c.module('evm.account')(path=account) + + + + def get_contract_address(self, contract, version=-1): + return self.contract2addresses.get(self.network_name, {}).get(contract,[None])[version] + + def deploy_contract(self, contract , args, new=False, refresh=False, **kwargs): + + 
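+        # flow: resolve the contract's path and any previously registered address;
+        # if there is no address (or new=True), build the constructor transaction,
+        # sign it with the local account, broadcast it, wait for the receipt, and
+        # register the deployed address before returning a contract handle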
simple_contract_path = contract + contract_path = self.resolve_contract_path(simple_contract_path) + contract_address = self.get_contract_address(contract) + + network = self.resolve_network(kwargs.get('network')) + web3 = self.resolve_web3(kwargs.get('web3')) + account = self.resolve_account(kwargs.get('account')) + + if contract_address == None or new == True: + + assert contract in self.contracts + contract_artifact = self.get_artifact(contract) + contract_class = web3.eth.contract(abi=contract_artifact['abi'], + bytecode= contract_artifact['bytecode'],) + + nonce = web3.eth.get_transaction_count(account.address) + construct_txn = contract_class.constructor(*args).buildTransaction( + { + 'from': account.address, + 'gasPrice':web3.eth.generate_gas_price(), + 'nonce': nonce + } + ) + + # sign the transaction + signed_tx = account.sign_tx(construct_txn) + tx_hash = web3.eth.send_raw_transaction(signed_tx) + + + tx_receipt = web3.eth.wait_for_transaction_receipt(tx_hash) + + contract_address = tx_receipt.contractAddress + + self.register_contract(contract_path=simple_contract_path, network=network.network, contract_address=contract_address, refresh=refresh) + + # ensure the contract exists + assert self.contract_exists(contract_address) + return self.get_contract(contract_address) + @property + def registered_contracts(self): + return self.get_json('registered_contracts', {}) + + @property + def contract2addresses(self): + return self.registered_contracts + + def set_contract(self,contract=None, address=None, web3=None, account=None, version=-1): + if isinstance(contract, str) or isinstance(address, str): + contract = self.get_contract(contract=contract, address=address , web3=web3, account=account, version=-1) + elif type(contract).__name__ in ['f']: + return + elif contract == None: + pass + else: + raise NotImplementedError + + self.contract = contract + return self.contract + + def contract_exists(self, contract=''): + is_address = isinstance(self.address2contract.get(contract), str) + is_contract = isinstance(self.contract2address.get(contract), str) + return bool(is_address or is_contract) + + def get_contract(self,contract=None , web3=None, account:'Account'=None, version=-1, virtual=True): + web3 = self.resolve_web3(web3) + account = self.resolve_account(account) + + # assume theres an address + address = contract + contract_path = self.address2contract.get(address) + if isinstance(contract_path, str): + contract_path , contract_version = contract_path.split('-v') + contract_version = int(contract_version) + contract_address = address + + else: + contract_path = contract + contract_version_addresses = self.get_contract_address(contract, version) + if len(contract_version_addresses) > 0: + contract_address = contract_version_addresses[version] + else: + raise NotImplemented(contract_address) + + contract_artifact = self.get_artifact(contract_path) + contract = web3.eth.contract(address=contract_address, abi=contract_artifact['abi']) + + if virtual: + from c.web3.evm.contract.virtual_contract import VirtualContract + contract = VirtualContract(contract=contract, account = self.account) + + return contract + + + @property + def address2contract(self): + registered_contracts = self.registered_contracts + address2contract = {} + for network, contract_path_map in registered_contracts.items(): + for contract_path, contract_address_list in contract_path_map.items(): + for i, contract_address in enumerate(contract_address_list): + address2contract[contract_address] = contract_path+f'-v{i}' + 
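+        # e.g. (hypothetical data):
+        #   {'0xAbc...': 'Token-v0', '0xDef...': 'Token-v1'}
+        # i.e. each address maps back to its contract path plus deployment index.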
+ return address2contract + + + @property + def contract2address(self): + return {v:k for k,v in self.address2contract.items()} + + def deployed_contracts(self): + return list(self.contract2address.keys()) + def deployed_addresses(self): + return list(self.contract2address.values()) + + @property + def address2network(self): + registered_contracts = self.registered_contracts + address2network = {} + for network, contract_path_map in registered_contracts.items(): + for contract_path, contract_address_list in contract_path_map.items(): + for contract_address in contract_address_list: + address2network[contract_address] = network + + return address2network + + @property + def network2address(self): + network2address = {} + for address, network in self.address2network.items(): + if network in network2address: + network2address[network].append(address) + else: + network2address[network] = [address] + return network2address + + + @property + def network2contract(self) -> Dict[str, List[str]]: + network2contract = {} + for network, address_list in self.network2address.items(): + network2contract[network] = [address2contract[address] for address in address_list] + return network2contract + + + def contract2network(self) -> Dict[str, str]: + address2contract = self.address2contract + contract2network ={} + for address, network in self.address2network.items(): + contract2network[address2contract[address]] = network + + return contract2network + + def register_contract(self, network:str, + contract_path:str , + contract_address:str, + refresh=True): + ''' + Register a contract + ''' + + + registered_contracts = {} if refresh else self.registered_contracts + if network not in registered_contracts: + registered_contracts[network] = {} + if contract_path not in registered_contracts[network]: + registered_contracts[network][contract_path] = [] + + assert isinstance(registered_contracts[network][contract_path], list) + registered_contracts[network][contract_path].append(contract_address) + + self.put_json('registered_contracts', registered_contracts) + + return registered_contracts + + def resolve_network(self, network): + if network == None: + network = self.network + return network + + + def resolve_contract_path(self, path): + contract_path = self.contract2path.get(path) + + return contract_path + + + @classmethod + def streamlit(cls): + import streamlit as st + c.new_event_loop() + st.write("## "+cls.__name__) + self = cls() + # print(self.artifacts) + contract = self.deploy_contract(contract='CommunalCluster',new=True, args=['BRO', 'BROCOIN']) + # print(contract) + print(contract.balanceOf(self.account.address)) + + + \ No newline at end of file diff --git a/modules/evm/evm.py b/modules/evm/evm.py new file mode 100644 index 00000000..67dc5d02 --- /dev/null +++ b/modules/evm/evm.py @@ -0,0 +1,355 @@ + + +import os +import sys +from copy import deepcopy +import asyncio +import commune as c +from glob import glob +from typing import Dict, List, Union, Any, Optional, Tuple, Callable, TypeVar, Type, cast +class EVM(c.Module): + + base_dir = f'{c.repo_path}/contracts/evm' + contracts_dir_path = base_dirir_path = base_dir + '/artifacts/' + def __init__(self, + config:dict=None, + contract:'c.evm.contract' =None, + network: 'c.evm.network'= None, + account: 'c.evm.account'=None): + + self.set_network(network) + self.set_account(account) + + @property + def address(self): + return self.contract.address + + def accounts(self): + return self.account.accounts + + def call(self, function, args=[]): + if len(args) 
== 0: + args.append({'from': self.account}) + output = getattr(self.contract, function)(*args) + return self.parseOutput(function=function, outputs=output) + + def parseOutput(self, function, outputs): + output_abi_list = self.function_abi_map[function]['outputs'] + + parsedOutputs = {} + for i,output_abi in enumerate(output_abi_list) : + output_key = i + if output_abi['name']: + output_key = output_abi['name'] + + parsedOutputs[output_key] = outputs[i] + if 'components' in output_abi: + component_names = [c['name'] for c in output_abi['components']] + + parseStruct = lambda o: dict(zip(component_names, deepcopy(o))) + if type(outputs[i]) in [list, tuple, set]: + parsedOutputs[output_key] = list(map(parseStruct, outputs[i])) + else: + parsedOutputs[output_key] = parseStruct(outputs[i]) + + return parsedOutputs + + + @property + def contract_paths(self): + return list(filter(lambda f: f.endswith('.sol'),self.glob(self.contracts_dir_path+'**'))) + + @property + def contracts(self): + contracts = [] + for path in self.contract_paths: + contracts += [os.path.basename(path).replace('.sol', '')] + return contracts + @property + def contract2path(self): + return dict(zip(self.contracts, self.contract_paths)) + + def get_artifact(self, path): + artifact_path = self.artifact2path[path] + artifact = self.get_json(artifact_path) + return artifact + + def get_abi(self,path): + return self.get_artifact(path)['abi'] + interfaces_path = f'{os.environ["PWD"]}/interfaces/' + @property + def interface_paths(self): + return list(filter(lambda f: f.endswith('.sol'),self.glob(self.interfaces_path+'**'))) + + @property + def interfaces(self): + interfaces = [] + for path in self.interface_paths: + interfaces += [os.path.splitext(path)[0].replace('/', '.')] + return interfaces + @property + def interface2path(self): + return dict(zip(self.interfaces, self.interface_paths)) + + @property + def artifact_paths(self): + full_path_list = list(filter(lambda f:f.endswith('.json') and not f.endswith('dbg.json') and os.path.dirname(f).endswith('.sol'), + self.glob(f'{self.artifacts_dir_path}/**'))) + + return full_path_list + + @property + def artifacts(self): + return list(self.artifact2path.keys()) + + + @property + def artifact2path(self): + artifact2path = {} + for path in self.artifact_paths: + key = os.path.basename(os.path.dirname(path)).replace('.sol','') + artifact2path[key] = path + return artifact2path + + def connected(self): + ''' + If the web3 instance is connected to a network + ''' + return bool( self.web3.__class__.__name__ == 'Web3') + + def disconnected(self): + return not self.connected() + + def set_network(self, network): + self.network = c.module('evm.network')(**network) + self.web3 = self.network.web3 + + connect_network = set_network + + def compile(self): + # compile smart contracts in compile + return self.run_command('npx hardhat compile') + + @property + def available_networks(self): + return self.network.available_networks + + @property + def network_name(self): + return self.network.network + + @property + def interfaces(self): + interfaces = list(filter(lambda f: f.startswith('interfaces'), self.artifacts)) + return list(map(lambda f:os.path.dirname(f.replace('interfaces/', '')), interfaces)) + + + def resolve_account(self, account): + if account == None: + account = self.account + return account + + def set_account(self, account:str): + self.account = c.module('evm.account')(path=account) + + + def get_contract_address(self, contract, version=-1): + return 
self.contract2addresses.get(self.network_name, {}).get(contract,[None])[version] + + def deploy_contract(self, contract , args, new=False, refresh=False, **kwargs): + + simple_contract_path = contract + contract_path = self.resolve_contract_path(simple_contract_path) + contract_address = self.get_contract_address(contract) + + network = self.resolve_network(kwargs.get('network')) + web3 = self.resolve_web3(kwargs.get('web3')) + account = self.resolve_account(kwargs.get('account')) + + if contract_address == None or new == True: + + assert contract in self.contracts + contract_artifact = self.get_artifact(contract) + contract_class = web3.eth.contract(abi=contract_artifact['abi'], + bytecode= contract_artifact['bytecode'],) + + nonce = web3.eth.get_transaction_count(account.address) + construct_txn = contract_class.constructor(*args).buildTransaction( + { + 'from': account.address, + 'gasPrice':web3.eth.generate_gas_price(), + 'nonce': nonce + } + ) + + # sign the transaction + signed_tx = account.sign_tx(construct_txn) + tx_hash = web3.eth.send_raw_transaction(signed_tx) + + + tx_receipt = web3.eth.wait_for_transaction_receipt(tx_hash) + + contract_address = tx_receipt.contractAddress + + self.register_contract(contract_path=simple_contract_path, network=network.network, contract_address=contract_address, refresh=refresh) + + # ensure the contract exists + assert self.contract_exists(contract_address) + return self.get_contract(contract_address) + @property + def registered_contracts(self): + return self.get_json('registered_contracts', {}) + + @property + def contract2addresses(self): + return self.registered_contracts + + def set_contract(self,contract=None, address=None, web3=None, account=None, version=-1): + if isinstance(contract, str) or isinstance(address, str): + contract = self.get_contract(contract=contract, address=address , web3=web3, account=account, version=-1) + elif type(contract).__name__ in ['f']: + return + elif contract == None: + pass + else: + raise NotImplementedError + + self.contract = contract + return self.contract + + def contract_exists(self, contract=''): + is_address = isinstance(self.address2contract.get(contract), str) + is_contract = isinstance(self.contract2address.get(contract), str) + return bool(is_address or is_contract) + + def get_contract(self,contract=None , web3=None, account:str=None, version=-1, virtual=True): + web3 = self.resolve_web3(web3) + account = self.resolve_account(account) + + # assume theres an address + address = contract + contract_path = self.address2contract.get(address) + if isinstance(contract_path, str): + contract_path , contract_version = contract_path.split('-v') + contract_version = int(contract_version) + contract_address = address + + else: + contract_path = contract + contract_version_addresses = self.get_contract_address(contract, version) + if len(contract_version_addresses) > 0: + contract_address = contract_version_addresses[version] + else: + raise NotImplemented(contract_address) + + contract_artifact = self.get_artifact(contract_path) + contract = web3.eth.contract(address=contract_address, abi=contract_artifact['abi']) + + if virtual: + from c.web3.evm.contract.virtual_contract import VirtualContract + contract = VirtualContract(contract=contract, account = self.account) + + return contract + + + @property + def address2contract(self): + registered_contracts = self.registered_contracts + address2contract = {} + for network, contract_path_map in registered_contracts.items(): + for contract_path, 
contract_address_list in contract_path_map.items(): + for i, contract_address in enumerate(contract_address_list): + address2contract[contract_address] = contract_path+f'-v{i}' + + return address2contract + + + @property + def contract2address(self): + return {v:k for k,v in self.address2contract.items()} + + def deployed_contracts(self): + return list(self.contract2address.keys()) + def deployed_addresses(self): + return list(self.contract2address.values()) + + @property + def address2network(self): + registered_contracts = self.registered_contracts + address2network = {} + for network, contract_path_map in registered_contracts.items(): + for contract_path, contract_address_list in contract_path_map.items(): + for contract_address in contract_address_list: + address2network[contract_address] = network + + return address2network + + @property + def network2address(self): + network2address = {} + for address, network in self.address2network.items(): + if network in network2address: + network2address[network].append(address) + else: + network2address[network] = [address] + return network2address + + + @property + def network2contract(self) -> Dict[str, List[str]]: + network2contract = {} + for network, address_list in self.network2address.items(): + network2contract[network] = [self.address2contract[address] for address in address_list] + return network2contract + + + def contract2network(self) -> Dict[str, str]: + address2contract = self.address2contract + contract2network ={} + for address, network in self.address2network.items(): + contract2network[address2contract[address]] = network + + return contract2network + + def register_contract(self, network:str, + contract_path:str , + contract_address:str, + refresh=True): + ''' + Register a contract + ''' + + + registered_contracts = {} if refresh else self.registered_contracts + if network not in registered_contracts: + registered_contracts[network] = {} + if contract_path not in registered_contracts[network]: + registered_contracts[network][contract_path] = [] + + assert isinstance(registered_contracts[network][contract_path], list) + registered_contracts[network][contract_path].append(contract_address) + + self.put_json('registered_contracts', registered_contracts) + + return registered_contracts + + def resolve_network(self, network): + if network == None: + network = self.network + return network + + + def resolve_contract_path(self, path): + contract_path = self.contract2path.get(path) + + return contract_path + + + + def __reduce__(self): + deserializer = EVMContract + serialized_data = (self.config) + return deserializer, serialized_data + + + \ No newline at end of file diff --git a/modules/evm/evm.yaml b/modules/evm/evm.yaml new file mode 100644 index 00000000..8b7cec25 --- /dev/null +++ b/modules/evm/evm.yaml @@ -0,0 +1,9 @@ + +network: + module: web3.network.evm + kwargs: + network: local.main + actor: False + wrap: False + +client: [local] diff --git a/modules/evm/key.py b/modules/evm/key.py new file mode 100644 index 00000000..69192ba7 --- /dev/null +++ b/modules/evm/key.py @@ -0,0 +1,834 @@ +# +# Copyright 2022 Ocean Protocol Foundation +# SPDX-License-Identifier: Apache-2.0 +# +import os +from typing import * +from eth_account.datastructures import SignedMessage +from eth_account.messages import SignableMessage +from eth_account.messages import encode_defunct + +from hexbytes.main import HexBytes +from eth_keys import keys +from eth_account import Account + +import commune as c + +from collections.abc import ( + Mapping, +) 
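+
+# Usage sketch (mirrors test_sign below; assumes a reachable local RPC node):
+#   >>> acct = EVMAccount(network='local.main')
+#   >>> signed = acct.sign({'bro': 'bro'})
+#   >>> acct.verify({'bro': 'bro'}, signature=signed['signature'])
+#   True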
+import json +import os +from typing import ( + Any, + Dict, + Optional, + Tuple, + TypeVar, + Union, + cast, +) +import warnings + +from cytoolz import ( + dissoc, +) +from eth_keyfile import ( + create_keyfile_json, + decode_keyfile_json, +) +from eth_keys import ( + KeyAPI, + keys, +) +from eth_keys.exceptions import ( + ValidationError, +) +from eth_typing import ( + ChecksumAddress, + Hash32, + HexStr, +) +from eth_utils.curried import ( + combomethod, + hexstr_if_str, + is_dict, + keccak, + text_if_str, + to_bytes, + to_int, +) +from hexbytes import ( + HexBytes, +) + +from eth_account._utils.legacy_transactions import ( + Transaction, + vrs_from, +) +from eth_account._utils.signing import ( + hash_of_signed_transaction, + sign_message_hash, + sign_transaction_dict, + to_standard_signature_bytes, + to_standard_v, +) +from eth_account._utils.typed_transactions import ( + TypedTransaction, +) +from eth_account.datastructures import ( + SignedMessage, + SignedTransaction, +) +from eth_account.hdaccount import ( + ETHEREUM_DEFAULT_PATH, + generate_mnemonic, + key_from_seed, + seed_from_mnemonic, +) +from eth_account.messages import ( + SignableMessage, + _hash_eip191_message, + encode_typed_data, +) +from eth_account.signers.local import ( + LocalAccount, +) + +VRS = TypeVar("VRS", bytes, HexStr, int) + +class EVMAccount(c.Module): + + _last_tx_count = {} + def __init__( + self, + network:str = 'local.main', + **kwargs + ) -> None: + """Initialises EVMAccount object.""" + # assert private_key, "private_key is required." + self.set_config( kwargs=locals()) + self.set_network(network) + + + @property + def private_key(self): + return self._private_key + + @staticmethod + def reset_tx_count() -> None: + EVMAccount._last_tx_count = dict() + + def get_nonce(self, address: str = None) -> int: + # We cannot rely on `web3.eth.get_transaction_count` because when sending multiple + # transactions in a row without wait in between the network may not get the chance to + # update the transaction count for the self address in time. + # So we have to manage this internally per self address. 
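+        # The first request for an address seeds the cache from the chain;
+        # subsequent requests increment locally, so back-to-back transactions
+        # get distinct nonces even before the node catches up.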
+        address = self.resolve_address(address)
+        if address not in EVMAccount._last_tx_count:
+            EVMAccount._last_tx_count[address] = self.web3.eth.get_transaction_count(address)
+        else:
+            EVMAccount._last_tx_count[address] += 1
+
+        return EVMAccount._last_tx_count[address]
+
+
+    def sign_tx(
+        self,
+        tx: Dict[str, Union[int, str, bytes]],
+    ) -> HexBytes:
+        if tx.get('nonce') == None:
+            tx['nonce'] = self.nonce
+        if tx.get('gasPrice') == None:
+            gas_price = self.gas_price
+            max_gas_price = os.getenv('ENV_MAX_GAS_PRICE', None)
+            if gas_price and max_gas_price:
+                gas_price = min(gas_price, int(max_gas_price))
+
+            tx["gasPrice"] = gas_price
+        signed_tx = self.web3.eth.account.sign_transaction(tx, self.private_key)
+        return signed_tx.rawTransaction
+
+    @property
+    def nonce(self):
+        return self.web3.eth.get_transaction_count(self.address)
+
+    @property
+    def gas_price(self):
+        return self.web3.eth.generate_gas_price()
+
+    @property
+    def tx_metadata(self) -> Dict[str, Union[int, str, bytes]]:
+        '''
+        Default tx metadata
+        '''
+
+        return {
+            'from': self.address,
+            'nonce': self.nonce,
+            'gasPrice': self.gas_price,
+        }
+    def send_contract_tx(self, fn:str, value=0):
+        '''
+        send a contract transaction for your python objects
+        '''
+        tx_metadata = self.tx_metadata
+        tx_metadata['value'] = value
+        tx = fn.buildTransaction(tx_metadata)
+        tx = self.send_tx(tx)
+        return tx
+
+    def send_tx(self, tx):
+        '''
+        Send a transaction and wait for the receipt
+        '''
+        rawTransaction = self.sign_tx(tx=tx)
+        # send the raw tx and block until the receipt is available
+        tx_hash = self.web3.eth.send_raw_transaction(rawTransaction)
+        tx_receipt = self.web3.eth.wait_for_transaction_receipt(tx_hash)
+
+        return tx_receipt.__dict__
+
+
+    def resolve_message(self, message):
+        message = c.python2str(message)
+        if isinstance(message, str):
+            message = encode_defunct(text=message)
+        elif isinstance(message, SignableMessage):
+            message = message
+        else:
+            raise NotImplementedError(type(message))
+
+        return message
+
+
+    def sign(self, message: Union[SignableMessage, str, dict], include_message:bool = True) -> SignedMessage:
+        """Sign a message.
+        Args:
+            message: The message to sign.
+            include_message: If True, the original message is included in the result.
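+        Example (sketch; values depend on the signing key):
+            >>> acct = EVMAccount()                 # doctest: +SKIP
+            >>> signed = acct.sign({'bro': 'bro'})  # doctest: +SKIP
+            >>> sorted(signed)                      # doctest: +SKIP
+            ['message', 'messageHash', 'r', 's', 'signature', 'v']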
+ """ + signable_message = self.resolve_message(message) + + signed_message = self.sign_message(signable_message) + signed_message_dict = {} + for k in ['v', 'r', 's', 'signature', 'messageHash']: + signed_message_dict[k] = getattr(signed_message, k) + if isinstance(signed_message_dict[k], HexBytes): + signed_message_dict[k] = signed_message_dict[k].hex() + + if include_message: + signed_message_dict['message'] = message + signed_message = signed_message_dict + + + return signed_message + + @property + def public_key(self): + return self.private_key_to_public_key(self.private_key) + + + @staticmethod + def private_key_to_public_key(private_key: str) -> str: + ''' + Conert private key to public key + ''' + private_key_object = keys.PrivateKey(private_key) + return private_key_object.public_key + + + + def keys_str(self) -> str: + s = [] + s += [f"address: {self.address}"] + if self.private_key is not None: + s += [f"private key: {self.private_key}"] + s += [f"public key: {self.public_key}"] + s += [""] + return "\n".join(s) + + def resolve_web3(self, web3=None): + if web3 == None: + web3 == self.web3 + assert web3 != None + return web3 + + def resolve_address(self, address=None): + if address == None: + address = self.address + assert address != None + return address + + + def get_balance(self, token:str=None, address:str=None): + address = self.resolve_address(address) + + if token == None: + # return native token + balance = self.web3.eth.get_balance(self.address) + else: + raise NotImplemented + + return balance + + @property + def accounts(self): + return self.config.get('accounts', []) + + + def set_network(self, network:str= 'local.main') -> None: + ''' + Set network + ''' + self.web3 = c.module('web3.evm.network')(network=network).web3 + + + + def recover_signer(self, message:Any, + signature:str, + vrs:Union[tuple, list]=None): + ''' + recover + ''' + + message = self.resolve_message(message) + recovered_address = self.recover_message(message, signature=signature, vrs=vrs) + return recovered_address + + def verify(self, message:Any, signature:str = None, vrs:Union[tuple, list]=None, address:str=None) -> bool: + ''' + verify message from the signature or vrs based on the address + ''' + address = self.resolve_address(address) + recovered_address = self.recover_signer(message, signature=signature, vrs=vrs) + return bool(recovered_address == address) + + + @classmethod + def from_password(cls, password:str, salt:str='commune', prompt=False): + + from web3.auto import w3 + from Crypto.Protocol.KDF import PBKDF2 + + # Prompt the user for a password and salt + if prompt : + password = input("Enter password: ") + # Derive a key using PBKDF2 + key = PBKDF2(password.encode(), salt, dkLen=32, count=100000) + + # Create an account using the key + account = Account.privateKeyToAccount(key) + + # Print the account address and private key + print("Account address:", account.address) + print("Private key:", account.privateKey.hex()) + + return account + + + @classmethod + def test_sign(cls): + self = cls() + message = {'bro': 'bro'} + signature = self.sign(message) + assert self.verify(message, signature=signature['signature']) + + + def test(self): + self.test_sign() + + + _keys = keys + + _default_kdf = os.getenv("ETH_ACCOUNT_KDF", "scrypt") + + # Enable unaudited features (off by default) + _use_unaudited_hdwallet_features = False + + @classmethod + def enable_unaudited_hdwallet_features(cls): + """ + Use this flag to enable unaudited HD Wallet features. 
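+        Only ``from_mnemonic`` and ``create_with_mnemonic`` check this flag;
+        the other signing APIs work without it.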
+ """ + cls._use_unaudited_hdwallet_features = True + + @combomethod + def create(self, extra_entropy=""): + r""" + Creates a new private key, and returns it as a + :class:`~eth_account.local.LocalAccount`. + + :param extra_entropy: Add extra randomness to whatever randomness your OS + can provide + :type extra_entropy: str or bytes or int + :returns: an object with private key and convenience methods + + """ + extra_key_bytes = text_if_str(to_bytes, extra_entropy) + key_bytes = keccak(os.urandom(32) + extra_key_bytes) + return self.from_key(key_bytes) + + @combomethod + def from_key(self, private_key): + r""" + Returns a convenient object for working with the given private key. + + :param private_key: The raw private key + :type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey` + :return: object with methods for signing and encrypting + :rtype: LocalAccount + + .. doctest:: python + + >>> acct = Account.from_key( + ... 0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364) + >>> acct.address + '0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E' + >>> acct.key + HexBytes('0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364') + + # These methods are also available: sign_message(), sign_transaction(), + # encrypt(). They correspond to the same-named methods in Account.* + # but without the private key argument + """ + key = self._parsePrivateKey(private_key) + return LocalAccount(key, self) + + @combomethod + def from_mnemonic( + self, + mnemonic: str, + passphrase: str = "", + account_path: str = ETHEREUM_DEFAULT_PATH, + ) -> LocalAccount: + """ + Generate an account from a mnemonic. + + .. CAUTION:: This feature is experimental, unaudited, and likely to change soon + + :param str mnemonic: space-separated list of BIP39 mnemonic seed words + :param str passphrase: Optional passphrase used to encrypt the mnemonic + :param str account_path: Specify an alternate HD path for deriving the seed + using BIP32 HD wallet key derivation. + :return: object with methods for signing and encrypting + :rtype: LocalAccount + + """ + if not self._use_unaudited_hdwallet_features: + raise AttributeError( + "The use of the Mnemonic features of Account is disabled by " + "default until its API stabilizes. To use these features, please " + "enable them by running `Account.enable_unaudited_hdwallet_features()` " + "and try again." + ) + seed = seed_from_mnemonic(mnemonic, passphrase) + private_key = key_from_seed(seed, account_path) + key = self._parsePrivateKey(private_key) + return LocalAccount(key, self) + + @combomethod + def create_with_mnemonic( + self, + passphrase: str = "", + num_words: int = 12, + language: str = "english", + account_path: str = ETHEREUM_DEFAULT_PATH, + ) -> Tuple[LocalAccount, str]: + r""" + Create a new private key and related mnemonic. + + .. CAUTION:: This feature is experimental, unaudited, and likely to change soon + + Creates a new private key, and returns it as a + :class:`~eth_account.local.LocalAccount`, alongside the mnemonic that can + used to regenerate it using any BIP39-compatible wallet. + + :param str passphrase: Extra passphrase to encrypt the seed phrase + :param int num_words: Number of words to use with seed phrase. + Default is 12 words. + Must be one of [12, 15, 18, 21, 24]. + :param str language: Language to use for BIP39 mnemonic seed phrase. + :param str account_path: Specify an alternate HD path for deriving the + seed using BIP32 HD wallet key derivation. 
+ :returns: A tuple consisting of an object with private key and + convenience methods, and the mnemonic seed phrase that can be + used to restore the account. + :rtype: (LocalAccount, str) + + .. doctest:: python + + >>> from eth_account import Account + >>> Account.enable_unaudited_hdwallet_features() + >>> acct, mnemonic = Account.create_with_mnemonic() + >>> acct.address # doctest: +SKIP + '0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E' + >>> acct == Account.from_mnemonic(mnemonic) + True + + # These methods are also available: + # sign_message(), sign_transaction(), encrypt() + # They correspond to the same-named methods in Account.* + # but without the private key argument + """ + if not self._use_unaudited_hdwallet_features: + raise AttributeError( + "The use of the Mnemonic features of Account is disabled by " + "default until its API stabilizes. To use these features, please " + "enable them by running `Account.enable_unaudited_hdwallet_features()` " + "and try again." + ) + mnemonic = generate_mnemonic(num_words, language) + return self.from_mnemonic(mnemonic, passphrase, account_path), mnemonic + + @combomethod + def recover_message( + self, + signable_message: SignableMessage, + vrs: Optional[Tuple[VRS, VRS, VRS]] = None, + signature: bytes = None, + ) -> ChecksumAddress: + r""" + Get the address of the account that signed the given message. + You must specify exactly one of: vrs or signature + + :param signable_message: the message that was signed + :param vrs: the three pieces generated by an elliptic curve signature + :type vrs: tuple(v, r, s), each element is hex str, bytes or int + :param signature: signature bytes concatenated as r+s+v + :type signature: hex str or bytes or int + :returns: address of signer, hex-encoded & checksummed + :rtype: str + + .. doctest:: python + + >>> from eth_account.messages import encode_defunct + >>> from eth_account import Account + >>> message = encode_defunct(text="I♥SF") + >>> vrs = ( + ... 28, + ... '0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb3', + ... '0x3e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce') + >>> Account.recover_message(message, vrs=vrs) + '0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E' + + + # All of these recover calls are equivalent: + + # variations on vrs + >>> vrs = ( + ... '0x1c', + ... '0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb3', + ... '0x3e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce') + >>> Account.recover_message(message, vrs=vrs) + '0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E' + + >>> # Caution about this approach: likely problems if there are leading 0s + >>> vrs = ( + ... 0x1c, + ... 0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb3, + ... 0x3e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce) + >>> Account.recover_message(message, vrs=vrs) + '0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E' + + >>> vrs = ( + ... b'\x1c', + ... b'\xe6\xca\x9b\xbaX\xc8\x86\x11\xfa\xd6jl\xe8\xf9\x96\x90\x81\x95Y8\x07\xc4\xb3\x8b\xd5(\xd2\xcf\xf0\x9dN\xb3', + ... 
b'>[\xfb\xbfM>9\xb1\xa2\xfd\x81jv\x80\xc1\x9e\xbe\xba\xf3\xa1A\xb29\x93J\xd4<\xb3?\xce\xc8\xce') + >>> Account.recover_message(message, vrs=vrs) + '0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E' + + # variations on signature + >>> signature = '0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb33e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce1c' + >>> Account.recover_message(message, signature=signature) + '0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E' + >>> signature = b'\xe6\xca\x9b\xbaX\xc8\x86\x11\xfa\xd6jl\xe8\xf9\x96\x90\x81\x95Y8\x07\xc4\xb3\x8b\xd5(\xd2\xcf\xf0\x9dN\xb3>[\xfb\xbfM>9\xb1\xa2\xfd\x81jv\x80\xc1\x9e\xbe\xba\xf3\xa1A\xb29\x93J\xd4<\xb3?\xce\xc8\xce\x1c' + >>> Account.recover_message(message, signature=signature) + '0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E' + >>> # Caution about this approach: likely problems if there are leading 0s + >>> signature = 0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb33e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce1c + >>> Account.recover_message(message, signature=signature) + '0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E' + """ # noqa: E501 + message_hash = _hash_eip191_message(signable_message) + return cast(ChecksumAddress, self._recover_hash(message_hash, vrs, signature)) + + @combomethod + def _recover_hash( + self, + message_hash: Hash32, + vrs: Optional[Tuple[VRS, VRS, VRS]] = None, + signature: bytes = None, + ) -> ChecksumAddress: + hash_bytes = HexBytes(message_hash) + if len(hash_bytes) != 32: + raise ValueError("The message hash must be exactly 32-bytes") + if vrs is not None: + v, r, s = map(hexstr_if_str(to_int), vrs) + v_standard = to_standard_v(v) + signature_obj = self._keys.Signature(vrs=(v_standard, r, s)) + elif signature is not None: + signature_bytes = HexBytes(signature) + signature_bytes_standard = to_standard_signature_bytes(signature_bytes) + signature_obj = self._keys.Signature( + signature_bytes=signature_bytes_standard + ) + else: + raise TypeError("You must supply the vrs tuple or the signature bytes") + pubkey = signature_obj.recover_public_key_from_msg_hash(hash_bytes) + return cast(ChecksumAddress, pubkey.to_checksum_address()) + + @combomethod + def recover_transaction(self, serialized_transaction): + """ + Get the address of the account that signed this transaction. + + :param serialized_transaction: the complete signed transaction + :type serialized_transaction: hex str, bytes or int + :returns: address of signer, hex-encoded & checksummed + :rtype: str + + .. doctest:: python + + >>> raw_transaction = '0xf86a8086d55698372431831e848094f0109fc8df283027b6285cc889f5aa624eac1f55843b9aca008025a009ebb6ca057a0535d6186462bc0b465b561c94a295bdb0621fc19208ab149a9ca0440ffd775ce91a833ab410777204d5341a6f9fa91216a6f3ee2c051fea6a0428' + >>> Account.recover_transaction(raw_transaction) + '0x2c7536E3605D9C16a7a3D7b1898e529396a65c23' + """ # noqa: E501 + txn_bytes = HexBytes(serialized_transaction) + if len(txn_bytes) > 0 and txn_bytes[0] <= 0x7F: + # We are dealing with a typed transaction. 
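+            # (EIP-2718: a leading byte in the 0x00-0x7f range marks a typed
+            # transaction envelope rather than a legacy RLP transaction.)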
+ typed_transaction = TypedTransaction.from_bytes(txn_bytes) + msg_hash = typed_transaction.hash() + vrs = typed_transaction.vrs() + return self._recover_hash(msg_hash, vrs=vrs) + + txn = Transaction.from_bytes(txn_bytes) + msg_hash = hash_of_signed_transaction(txn) + return self._recover_hash(msg_hash, vrs=vrs_from(txn)) + + + @combomethod + def sign_message( + self, + signable_message: SignableMessage, + private_key: Union[bytes, HexStr, int, keys.PrivateKey], + ) -> SignedMessage: + r""" + Sign the provided message. + + This API supports any messaging format that will encode to EIP-191 messages. + + If you would like historical compatibility with :meth:`w3.eth.sign() ` + you can use :meth:`~eth_account.messages.encode_defunct`. + + Other options are the "validator", or "structured data" standards. + You can import all supported message encoders in + ``eth_account.messages``. + + :param signable_message: the encoded message for signing + :param private_key: the key to sign the message with + :type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey` + :returns: Various details about the signature - most importantly the + fields: v, r, and s + :rtype: ~eth_account.datastructures.SignedMessage + + .. doctest:: python + + >>> msg = "I♥SF" + >>> from eth_account.messages import encode_defunct + >>> msghash = encode_defunct(text=msg) + >>> msghash + SignableMessage(version=b'E', + header=b'thereum Signed Message:\n6', + body=b'I\xe2\x99\xa5SF') + >>> # If you're curious about the internal fields of SignableMessage, take a look at EIP-191, linked above + >>> key = "0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364" + >>> Account.sign_message(msghash, key) + SignedMessage(messageHash=HexBytes('0x1476abb745d423bf09273f1afd887d951181d25adc66c4834a70491911b7f750'), + r=104389933075820307925104709181714897380569894203213074526835978196648170704563, + s=28205917190874851400050446352651915501321657673772411533993420917949420456142, + v=28, + signature=HexBytes('0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb33e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce1c')) + + + + .. _EIP-191: https://eips.ethereum.org/EIPS/eip-191 + """ # noqa: E501 + message_hash = _hash_eip191_message(signable_message) + return cast(SignedMessage, self._sign_hash(message_hash, private_key)) + + @combomethod + def signHash(self, message_hash, private_key): + """ + Sign the provided hash. + + .. WARNING:: *Never* sign a hash that you didn't generate, + it can be an arbitrary transaction. For example, it might + send all of your account's ether to an attacker. + Instead, prefer :meth:`~eth_account.account.Account.sign_message`, + which cannot accidentally sign a transaction. + + .. CAUTION:: Deprecated for :meth:`~eth_account.account.Account.sign_message`. 
+ This method will be removed in v0.6 + + :param message_hash: the 32-byte message hash to be signed + :type message_hash: hex str, bytes or int + :param private_key: the key to sign the message with + :type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey` + :returns: Various details about the signature - most + importantly the fields: v, r, and s + :rtype: ~eth_account.datastructures.SignedMessage + """ + warnings.warn( + "signHash is deprecated in favor of sign_message", + category=DeprecationWarning, + stacklevel=2, + ) + return self._sign_hash(message_hash, private_key) + + @combomethod + def _sign_hash( + self, + message_hash: Hash32, + private_key: Union[bytes, HexStr, int, keys.PrivateKey], + ) -> SignedMessage: + msg_hash_bytes = HexBytes(message_hash) + if len(msg_hash_bytes) != 32: + raise ValueError("The message hash must be exactly 32-bytes") + + key = self._parsePrivateKey(private_key) + + (v, r, s, eth_signature_bytes) = sign_message_hash(key, msg_hash_bytes) + return SignedMessage( + messageHash=msg_hash_bytes, + r=r, + s=s, + v=v, + signature=HexBytes(eth_signature_bytes), + ) + + @combomethod + def sign_transaction(self, transaction_dict, private_key): + """ + Sign a transaction using a local private key. + + It produces signature details and the hex-encoded transaction suitable for + broadcast using :meth:`w3.eth.sendRawTransaction() + `. + + To create the transaction dict that calls a contract, use contract object: + `my_contract.functions.my_function().buildTransaction() + `_ + + Note: For non-legacy (typed) transactions, if the transaction type is not + explicitly provided, it may be determined from the transaction parameters of + a well-formed transaction. See below for examples on how to sign with + different transaction types. + + :param dict transaction_dict: the transaction with available keys, depending + on the type of transaction: nonce, chainId, to, data, value, gas, gasPrice, + type, accessList, maxFeePerGas, and maxPriorityFeePerGas + :param private_key: the private key to sign the data with + :type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey` + :returns: Various details about the signature - most + importantly the fields: v, r, and s + :rtype: AttributeDict + """ + if not isinstance(transaction_dict, Mapping): + raise TypeError( + "transaction_dict must be dict-like, got %r" % transaction_dict + ) + + account = self.from_key(private_key) + + # allow from field, *only* if it matches the private key + if "from" in transaction_dict: + if transaction_dict["from"] == account.address: + sanitized_transaction = dissoc(transaction_dict, "from") + else: + raise TypeError( + "from field must match key's %s, but it was %s" + % ( + account.address, + transaction_dict["from"], + ) + ) + else: + sanitized_transaction = transaction_dict + + # sign transaction + ( + v, + r, + s, + encoded_transaction, + ) = sign_transaction_dict(account._key_obj, sanitized_transaction) + transaction_hash = keccak(encoded_transaction) + + return SignedTransaction( + rawTransaction=HexBytes(encoded_transaction), + hash=HexBytes(transaction_hash), + r=r, + s=s, + v=v, + ) + + @combomethod + def _parsePrivateKey(self, key): + """ + Generate a :class:`eth_keys.datatypes.PrivateKey` from the provided key. + + If the key is already of type :class:`eth_keys.datatypes.PrivateKey`, + return the key. 
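+        Otherwise the value is wrapped as ``HexBytes`` and must decode to
+        exactly 32 bytes.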
+ + :param key: the private key from which a :class:`eth_keys.datatypes.PrivateKey` + will be generated + :type key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey` + :returns: the provided key represented as a + :class:`eth_keys.datatypes.PrivateKey` + """ + if isinstance(key, self._keys.PrivateKey): + return key + + try: + return self._keys.PrivateKey(HexBytes(key)) + except ValidationError as original_exception: + raise ValueError( + "The private key must be exactly 32 bytes long, instead of " + "%d bytes." % len(key) + ) from original_exception + + @combomethod + def sign_typed_data( + self, + private_key: Union[bytes, HexStr, int, keys.PrivateKey], + domain_data: Dict[str, Any] = None, + message_types: Dict[str, Any] = None, + message_data: Dict[str, Any] = None, + full_message: Dict[str, Any] = None, + ) -> SignedMessage: + r""" + Sign the provided EIP-712 message with the provided key. + + :param private_key: the key to sign the message with + :param domain_data: EIP712 domain data + :param message_types: custom types used by the `value` data + :param message_data: data to be signed + :param full_message: a dict containing all data and types + :type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey` + :type domain_data: dict + :type message_types: dict + :type message_data: dict + :type full_message: dict + :returns: Various details about the signature - most importantly the + fields: v, r, and s + :rtype: ~eth_account.datastructures.SignedMessage + """ # noqa: E501 + signable_message = encode_typed_data( + domain_data, + message_types, + message_data, + full_message, + ) + message_hash = _hash_eip191_message(signable_message) + return cast(SignedMessage, self._sign_hash(message_hash, private_key)) diff --git a/modules/evm/network.py b/modules/evm/network.py new file mode 100644 index 00000000..9815137c --- /dev/null +++ b/modules/evm/network.py @@ -0,0 +1,185 @@ + + +import os +import sys +from copy import deepcopy +from typing import Dict, List, Optional, Union +import commune as c +import lru +import requests +from requests.adapters import HTTPAdapter +from typing import Dict, Optional, Union +from web3 import WebsocketProvider +from web3 import HTTPProvider, WebsocketProvider + +class CustomHTTPProvider(HTTPProvider): + """ + Override requests to control the connection pool to make it blocking. + """ + def make_request(self, method, params): + self.logger.debug( + "Making request HTTP. URI: %s, Method: %s", self.endpoint_uri, method + ) + request_data = self.encode_rpc_request(method, params) + raw_response = self.make_post_request( + self.endpoint_uri, request_data, **self.get_request_kwargs() + ) + response = self.decode_rpc_response(raw_response) + self.logger.debug( + "Getting response HTTP. 
URI: %s, " "Method: %s, Response: %s", + self.endpoint_uri, + method, + response, + ) + return response + + def _remove_session(self, key, session): + session.close() + + _session_cache = lru.LRU(8, callback=_remove_session) + + def _get_session(self, *args, **kwargs): + from web3._utils.caching import generate_cache_key + cache_key = generate_cache_key((args, kwargs)) + if cache_key not in self._session_cache: + # This is the main change from original Web3 `_get_session` + session = requests.sessions.Session() + session.mount( + "http://", + HTTPAdapter(pool_connections=25, pool_maxsize=25, pool_block=True), + ) + self._session_cache[cache_key] = session + return self._session_cache[cache_key] + + + def make_post_request(self, endpoint_uri, data, *args, timeout:int=10, **kwargs): + kwargs.setdefault("timeout", timeout) + session = self._get_session(endpoint_uri) + response = session.post(endpoint_uri, data=data, *args, **kwargs) + response.raise_for_status() + + return response.content + + +class EVMNetwork(c.Module): + + + def __init__(self, network:str = 'local.main'): + self.set_config(locals()) + self.set_network(network) + + @property + def network(self): + network = self.config['network'] + if len(network.split('.')) == 3: + network = '.'.join(network.split('.')[:-1]) + assert len(network.split('.')) == 2 + return network + + + @network.setter + def network(self, network): + assert network in self.networks, f'{network} is not here fam' + self.config['network'] = network + + def set_network(self, network:str='local.main.ganache') -> 'Web3': + network = network if network != None else self.config['network'] + url = self.get_url(network) + self.network = network + self.url = url + self.web3 = self.get_web3(self.url) + + connect_network = set_network + + @property + def networks_config(self): + return c.load_yaml(self.dirpath() + '/networks.yaml') + + @property + def networks(self): + return list(self.networks_config.keys()) + + @property + def available_networks(self): + return self.get_available_networks() + + + + def get_url_options(self, network:str ) -> List[str]: + assert len(network.split('.')) == 2 + network, subnetwork = network.split('.') + return list(self.networks_config[network][subnetwork]['url'].keys()) + + def get_url(self, network:str='local.main.ganache' ) -> str: + from commune.utils.dict import dict_get + + if len(network.split('.')) == 2: + url_key = self.get_url_options(network)[0] + network_key, subnetwork_key = network.split('.') + elif len(network.split('.')) == 3: + network_key, subnetwork_key, url_key = network.split('.') + else: + raise NotImplementedError(network) + + key_path = [network_key, subnetwork_key, 'url',url_key ] + return dict_get(self.networks_config, key_path ) + + + def get_web3_connection_provider(self, network_url): + if network_url.startswith("http"): + provider = CustomHTTPProvider(network_url) + elif network_url.startswith("ws"): + provider = WebsocketProvider(network_url) + else: + raise NotImplementedError + return provider + + def get_web3(self, network_url: str) -> 'Web3': + from web3.main import Web3 + from web3.middleware import geth_poa_middleware + + provider = self.get_web3_connection_provider(network_url) + web3 = Web3(provider) + + if web3.eth.chain_id == 4: + web3.middleware_onion.inject(geth_poa_middleware, layer=0) + return web3 + + def get_web3_connection_provider( + self, + network_url: str, + ) -> Union[CustomHTTPProvider, WebsocketProvider]: + if network_url.startswith("http"): + return CustomHTTPProvider(network_url) + 
elif network_url.startswith("ws"): + return WebsocketProvider(network_url) + else: + msg = ( + f"The given network_url *{network_url}* does not start with either" + f"`http` or `wss`. A correct network url is required." + ) + raise AssertionError(msg) + + @classmethod + def test_url(cls, url:str): + # Setup + from web3 import Web3 + + alchemy_url = "https://eth-mainnet.g.alchemy.com/v2/RrtpZjiUVoViiDEaYxhN9o6m1CSIZvlL" + w3 = Web3(Web3.HTTPProvider(alchemy_url)) + + # Print if web3 is successfully connected + print(w3.isConnected()) + + # Get the latest block number + latest_block = w3.eth.block_number + print(latest_block) + + # Get the balance of an account + balance = w3.eth.get_balance('0x742d35Cc6634C0532925a3b844Bc454e4438f44e') + print(balance) + + # Get the information of a transaction + tx = w3.eth.get_transaction('0x5c504ed432cb51138bcf09aa5e8a410dd4a1e204ef84bfed1be16dfba1b22060') + print(tx) + diff --git a/modules/evm/networks.yaml b/modules/evm/networks.yaml new file mode 100644 index 00000000..62134030 --- /dev/null +++ b/modules/evm/networks.yaml @@ -0,0 +1,158 @@ + +ethereum: + main: + chainid: 1 + url: + infura: https://mainnet.infura.io/v3/$WEB3_INFURA_PROJECT_ID + explorer: https://api.etherscan.io/api + rospten: + chainid: 3 + url: + infura: https://ropsten.infura.io/v3/$WEB3_INFURA_PROJECT_ID + explorer: https://api-ropsten.etherscan.io/api + rinekby: + chainid: 4 + url: + infura: https://rinkeby.infura.io/v3/$WEB3_INFURA_PROJECT_ID + explorer: https://api-rinkeby.etherscan.io/api + multicall2: "0x5BA1e12693Dc8F9c48aAD8770482f4739bEeD696" + goerli: + chainid: 5 + url: + infura: https://goerli.infura.io/v3/$WEB3_INFURA_PROJECT_ID + explorer: https://api-goerli.etherscan.io/api + multicall2: "0x5BA1e12693Dc8F9c48aAD8770482f4739bEeD696" + provider: infura + kovan: + chainid: 42 + id: kovan + url: + infura: https://kovan.infura.io/v3/$WEB3_INFURA_PROJECT_ID + explorer: https://api-kovan.etherscan.io/api + multicall2: "0x5BA1e12693Dc8F9c48aAD8770482f4739bEeD696" + provider: infura +arbitrum: + main: + chainid: 42161 + url: + public: https://arb1.arbitrum.io/rpc + explorer: https://api.arbiscan.io/api + multicall2: "0x5B5CFE992AdAC0C9D48E05854B2d91C73a003858" +avalanche: + main: + chainid: 43114 + explorer: https://api.snowtrace.io/api + url: + public: https://api.avax.network/ext/bc/C/rpc + test: + chainid: 43113 + url: + public: https://api.avax-test.network/ext/bc/C/rpc +aurora: + main: + chainid: 1313161554 + url: + public: https://mainnet.aurora.dev + explorer: https://api.aurorascan.dev/api + multicall2: "0xace58a26b8Db90498eF0330fDC9C2655db0C45E2" + test: + chainid: 1313161555 + url: + public: https://testnet.aurora.dev + explorer: https://testnet.aurorascan.dev/api +binance: + test: + chainid: 97 + id: bsc-test + url: + public: https://data-seed-prebsc-1-s1.binance.org:8545 + explorer: https://api-testnet.bscscan.com/api + main: + chainid: 56 + url: + public: https://bsc-dataseed.binance.org + explorer: https://api.bscscan.com/api +fantom: + test: + chainid: 0xfa2 + url: + public: https://rpc.testnet.fantom.network + explorer: https://explorer.testnet.fantom.network + main: + chainid: 250 + url: + public: https://rpc.ftm.tools + explorer: https://api.ftmscan.com/api +harmony: + main: + chainid: 1666600000 + url: + public: https://api.harmony.one + shard: 0 + multicall2: "0x3E01dD8a5E1fb3481F0F589056b428Fc308AF0Fb" +moonbeam: + main: + chainid: 1284 + id: moonbeam-main + url: + public: https://moonbeam.api.onfinality.io/public + explorer: 
https://api-moonbeam.moonscan.io/api + multicall2: "0x1337BedC9D22ecbe766dF105c9623922A27963EC" + alpha: + chainid: 1287 + url: + public: https://moonbeam-alpha.api.onfinality.io/public + explorer: https://api-moonbase.moonscan.io/api + multicall2: "0x37084d0158C68128d6Bc3E5db537Be996f7B6979" +moonriver: + main: + chainid: 1285 + url: + public: https://moonriver.api.onfinality.io/public + explorer: https://api-moonriver.moonscan.io/api + multicall2: "0xaef00a0cf402d9dedd54092d9ca179be6f9e5ce3" +optimism: + main: + chainid: 10 + url: + public: https://mainnet.optimism.io + explorer: https://api-optimistic.etherscan.io/api + multicall2: "0x2DC0E2aa608532Da689e89e237dF582B783E552C" + kovan: + chainid: 69 + url: + public: https://kovan.optimism.io + explorer: https://api-kovan-optimistic.etherscan.io/api + multicall2: "0x2DC0E2aa608532Da689e89e237dF582B783E552C" +polygon: + main: + chainid: 137 + url: + infura: https://polygon-mainnet.infura.io/v3/$WEB3_INFURA_PROJECT_ID + explorer: https://api.polygonscan.com/api + multicall2: "0xc8E51042792d7405184DfCa245F2d27B94D013b6" + test: + chainid: 80001 + url: + public: https://rpc-mumbai.maticvigil.com + infura: https://polygon-mumbai.infura.io/v3/$WEB3_INFURA_PROJECT_ID + explorer: https://api-testnet.polygonscan.com/api + multicall2: "0x6842E0412AC1c00464dc48961330156a07268d14" + + zkevm: + chainid: 80001 + url: + infura: https://polygon-mumbai.infura.io/v3/$WEB3_INFURA_PROJECT_ID + explorer: https://api-testnet.polygonscan.com/api + multicall2: "0x6842E0412AC1c00464dc48961330156a07268d14" +local: + main: + chainid: 1337 + url: + ganache: http://0.0.0.0:8545 + explorer: null + multicall2: null +skale: + test: + url: + public: https://eth-sf.skalenodes.com/v1/hackathon-complex-easy-naos \ No newline at end of file diff --git a/modules/git/git.py b/modules/git/git.py new file mode 100644 index 00000000..949e7d5f --- /dev/null +++ b/modules/git/git.py @@ -0,0 +1,136 @@ +import commune as c +import subprocess + + +class Git(c.Module): + + + def is_repo(self, libpath:str ): + # has the .git folder + return c.cmd(f'ls -a {libpath}').count('.git') > 0 + + + @staticmethod + def clone(repo_url:str, target_directory:str = None, branch=None): + prefix = 'https://github.com/' + if not repo_url.startswith(prefix): + repo_url = f'{prefix}{repo_url}' + + if target_directory == None: + target_directory = repo_url.split('/')[-1].split('.')[0] + else: + target_directory = c.resolve_path(target_directory) + # Clone the repository + return subprocess.run(['git', 'clone', repo_url, target_directory]) + + + + @staticmethod + def content(url='LambdaLabsML/examples/main/stable-diffusion-finetuning/pokemon_finetune.ipynb', + prefix='https://raw.githubusercontent.com'): + return c.module('tool.web').rget(url=f'{prefix}/{url}') + + submodule_path = c.repo_path + '/repos' + def add_submodule(self, url, name=None, prefix=submodule_path): + if name == None: + name = url.split('/')[-1].split('.')[0].lower() + + if prefix != None: + name = f'{prefix}/{name}' + + c.cmd(f'git submodule add {url} {name}') + + addsub = add_submodule + + @classmethod + def pull(cls, stash:bool = False, cwd=None): + if cwd is None: + cwd = c.libpath + if stash: + c.cmd('git stash', cwd=cwd) + c.cmd('git pull', cwd=cwd) + return {'success':True, 'message':'pulled'} + + @classmethod + def push(cls, msg:str='update', cwd=None): + if cwd is None: + cwd = c.libpath + c.cmd(f'git add .', cwd=cwd) + c.cmd(f'git commit -m "{msg}"', bash=True, cwd=cwd) + c.cmd(f'git push', cwd=cwd) + + @classmethod + def 
status(cls, cwd=None): + if cwd is None: + cwd = c.libpath + return c.cmd(f'git status', cwd=cwd, verbose=False) + + + def git_repos(self, path='./'): + import os + repos = [] + for root, dirs, files in os.walk(path): + for d in dirs: + if d.endswith('.git'): + repos += [f"{root}"] + + repos = [r for r in repos if not r == path] + + return repos + + + + @classmethod + def commit(cls, message='update', push:bool = True): + c.cmd(f'git commit -m "{message}"') + if push: + cls.push() + + @classmethod + def repo_url(cls, libpath:str = None) -> str: + llibpath = cls.resolve_libpath(libpath) + return c.cmd('git remote -v',cwd=libpath, verbose=False).split('\n')[0].split('\t')[1].split(' ')[0] + + @classmethod + def commit_hash(cls, libpath:str = None): + libpath = cls.resolve_libpath(libpath) + return c.cmd('git rev-parse HEAD', cwd=libpath, verbose=False).split('\n')[0].strip() + + def reset_hard(self, libpath:str = None): + libpath = self.resolve_libpath(libpath) + return c.cmd('git reset --hard', cwd=libpath, verbose=False) + + def resolve_libpath(self, libpath:str = None): + if libpath == None: + libpath = c.libpath + return libpath + + @classmethod + def merge_remote_repo(cls, remote_name:str, remote_url:str, remote_branch:str, local_branch:str, cwd=None): + # Add the remote repository + add_remote_command = f"git remote add {remote_name} {remote_url}" + + # Fetch the contents of the remote repository + fetch_command = f"git fetch {remote_name}" + + # Checkout to your local branch + checkout_command = f"git checkout {local_branch}" + + # Merge the remote branch into your local branch + merge_command = f"git merge {remote_name}/{remote_branch}" + + # Push the changes to your remote repository + push_command = f"git push origin {local_branch}" + + cmds = [add_remote_command, + fetch_command, + checkout_command, + merge_command, + push_command] + + cmd = ' && '.join(cmds) + return c.cmd(cmd, cwd) + + + + diff --git a/modules/key/app/app.py b/modules/key/app/app.py new file mode 100644 index 00000000..b8ee516d --- /dev/null +++ b/modules/key/app/app.py @@ -0,0 +1,217 @@ +import commune as c +import streamlit as st +import random + +class KeyApp(c.Module): + name2fn = { + 'Select Key': 'select_key', + 'Create Key': 'create_key', + 'Rename Key': 'rename_key', + 'Remove Key': 'remove_key', + 'Ticket': 'ticket', + 'Verify Ticket': 'verify_ticket' + } + + def __init__(self): + self.sync() + + + def sync(self): + self._max_width_() + # Load CSS + @st.cache_resource() + def load_keys(): + return c.keys() + + self.keys = load_keys() + self.key2index = {k:i for i,k in enumerate(self.keys)} + + def local_css(self, file_name): + with open(file_name) as f: + st.markdown(f'', unsafe_allow_html=True) + + def select_key(self): + key = 'module' + key = st.selectbox('Select Key', self.keys, index=self.key2index[key]) + self.key = c.get_key(key) + if self.key.path is None: + self.key.path = key + st.write('Address: ', self.key.ss58_address) + + def create_key(self, expander=False): + new_key = st.text_input('Name of Key', '', key='create') + create_key_button = st.button('Create Key') + if create_key_button and len(new_key) > 0: + c.add_keys(new_key) + key = c.get_key(new_key) + + def rename_key(self): + old_key = st.selectbox('Select Key', self.keys, index=self.key2index[self.key.path], key='select old rename key') + new_key = st.text_input('New of Key', '', key='rename') + rename_key_button = st.button('Rename Key') + if rename_key_button and len(new_key) > 0: + if c.key_exists(new_key): + st.error('Key 
already exists') + return + c.rename_key(old_key, new_key) + key = c.get_key(new_key) + + def remove_key(self): + rm_keys = st.multiselect('Select Key(s) to Remove', self.keys, [], key='rm_key') + rm_key_button = st.button('Remove Key') + if rm_key_button: + c.rm_keys(rm_keys) + + def ticket_key(self): + ticket_data = st.text_input('Ticket Data', '', key='ticket') + ticket_button = st.button('Ticket Key') + if ticket_button: + ticket = self.key.ticket(ticket_data) + st.write('Ticket') + st.code(ticket) + + def app(self): + st.write('# Key App') + with st.expander('Description'): + st.markdown(self.description) + functions = list(self.name2fn.values()) + + def app_wrapper(fn): + # render the section for fn, surfacing any error in the UI + try: + getattr(self, self.name2fn[fn])() + except Exception as e: + st.error(e) + + for fn in functions: + with st.expander(fn, expanded=True): + getattr(self, fn)() + + def squares(self, cols=3): + color_classes = ['color1', 'color2', 'color3', 'color4', 'color5', 'color6'] + cols = st.columns(cols) + for i in range(9): + color_class = random.choice(color_classes) + with cols[i % len(cols)]: + st.markdown(f'<div class="{color_class}">{i**2}</div>
', unsafe_allow_html=True) + + def ticket(self, *args, **kwargs): + data = st.text_input('Data', 'None') + generate_ticket = st.button('Generate Ticket') + if generate_ticket: + ticket = self.key.ticket(data) + else: + ticket = None + if ticket: + st.write('Ticket') + st.code(ticket) + self.ticket = ticket + self.verify_ticket(ticket) + + def verify_ticket(self, ticket=None): + ticket = st.text_input('Enter the Ticket', ticket) + st.write('FORMAT') + st.code('data={data}time={time}::address={address}::signature={signature}') + verify_ticket = st.button('Verify Ticket') + if verify_ticket: + result = c.verify_ticket(ticket) + st.write('Result') + st.write(result) + else: + result = None + return result + + # css injection + def _max_width_(self, max_width_str = "max-width: 1900px;"): + st.markdown( + f""" + ", unsafe_allow_html=True) + css = r''' + + ''' + + st.markdown(css, unsafe_allow_html=True) + + @classmethod + def line_seperator(cls, text='-', length=50): + st.write(text*length) + + @classmethod + def function2streamlit(cls, + module = None, + fn:str = '__init__', + fn_schema = None, + extra_defaults:dict=None, + cols:list=None, + skip_keys = ['self', 'cls'], + mode = 'pm2'): + + key_prefix = f'{module}.{c.random_word()}' + if module == None: + module = cls + + elif isinstance(module, str): + module = c.module(module) + extra_defaults = {} if extra_defaults is None else extra_defaults + + if fn_schema == None: + + fn_schema = module.schema(defaults=True, include_parents=True)[fn] + if fn == '__init__': + config = module.config(to_munch=False) + extra_defaults = config + kwargs = {} + fn_schema['default'].pop('self', None) + fn_schema['default'].pop('cls', None) + fn_schema['default'].update(extra_defaults) + fn_schema['default'].pop('config', None) + fn_schema['default'].pop('kwargs', None) + + fn_schema['input'].update({k:str(type(v)).split("'")[1] for k,v in extra_defaults.items()}) + if cols == None: + cols = [1 for i in list(range(int(len(fn_schema['input'])**0.5)))] + cols = st.columns(cols) + + for i, (k,v) in enumerate(fn_schema['default'].items()): + + optional = fn_schema['default'][k] != 'NA' + fn_key = k + if fn_key in skip_keys: + continue + if k in fn_schema['input']: + k_type = fn_schema['input'][k] + if 'Munch' in k_type or 'Dict' in k_type: + k_type = 'Dict' + if k_type.startswith('typing'): + k_type = k_type.split('.')[-1] + fn_key = f'**{k} ({k_type}){"" if optional else "(REQUIRED)"}**' + col_idx = i + if k in ['kwargs', 'args'] and v == 'NA': + continue + + + random_word = c.random_word() + col_idx = col_idx % (len(cols)) + kwargs[k] = cols[col_idx].text_input(fn_key, v, key=f'{key_prefix}.{k}.{random_word}') + + kwargs = cls.process_kwargs(kwargs, fn_schema) + + return kwargs + + + @classmethod + def process_kwargs(cls, kwargs:dict, fn_schema:dict): + + for k,v in kwargs.items(): + if v == 'None': + v = None + + if isinstance(v, str): + if v.startswith('[') and v.endswith(']'): + if len(v) > 2: + v = eval(v) + else: + v = [] + + elif v.startswith('{') and v.endswith('}'): + + if len(v) > 2: + v = c.jload(v) + else: + v = {} + elif k in fn_schema['input'] and fn_schema['input'][k] == 'str': + if v.startswith("f'") or v.startswith('f"'): + v = c.ljson(v) + else: + v = v + + elif k == 'kwargs': + continue + elif v == 'NA': + assert k != 'NA', f'Key {k} not in default' + elif v in ['True', 'False']: + v = eval(v) + else: + v = v + + kwargs[k] = v + return kwargs + + + + @classmethod + def styles(cls): + return list(cls.style2path().keys()) + + + @classmethod + def 
style_paths(cls): + return list(cls.style2path().values()) + + + + def add_plot_tools(self): + # sync plots from express + for fn_name in dir(px): + if not (fn_name.startswith('__') and fn_name.endswith('__')): + plt_obj = getattr(px, fn_name) + if callable(plt_obj): + setattr(self, fn_name, plt_obj) + + @classmethod + def plot_dashboard(cls, df, key='dashboard', x='name', y='emission', select_columns=True): + import plotly.express as px + import streamlit as st + cols = list(df.columns) + if select_columns: + cols = st.multiselect('Select Columns', cols, cols, key=key+'multi') + # bar_chart based on x and y + + if len(df) == 0: + st.error('You are not staked to any modules') + return + col2idx = {c:i for i,c in enumerate(cols)} + defult_x_col = col2idx.get(x, 0) + default_y_col = col2idx.get(y, 1) + + plot_kwargs = {} + + st_cols = st.columns([1,3]) + + with st_cols[0]: + plot_type = st.selectbox('Select Plot Type', ['pie', 'bar', 'line', 'scatter', 'histogram', 'treemap'], 0, key='info.plot') + + if plot_type in [ 'bar', 'line', 'scatter']: + plot_kwargs['x'] = st.selectbox('Select X', cols, defult_x_col) + plot_kwargs['y'] = st.selectbox('Select Y', cols, default_y_col) + elif plot_type in ['histogram']: + plot_kwargs['x'] = st.selectbox('Select Value', cols, defult_x_col) + elif plot_type in ['pie']: + plot_kwargs['names'] = st.selectbox('Select Names', cols, defult_x_col) + plot_kwargs['values'] = st.selectbox('Select Values', cols, default_y_col) + elif plot_type in ['treemap']: + plot_kwargs['path'] = st.multiselect('Select Path', cols, ["name"]) + plot_kwargs['values'] = st.selectbox('Select Values', cols, default_y_col) + + + sort_type = st.selectbox('Sort Type', cols , 0) + + if sort_type in cols: + ascending = st.checkbox('Ascending', False) + df = df.sort_values(sort_type, ascending=ascending) + + with st_cols[1]: + plot_fn = getattr(px, plot_type) + plot_kwargs_title = " ".join([f"{k.lower()}:{v}" for k,v in plot_kwargs.items()]) + title = f'My Modules {plot_type} for ({plot_kwargs_title})' + fig = plot_fn(df, **plot_kwargs, title=title) + st.plotly_chart(fig) + # st.write(kwargs) + \ No newline at end of file diff --git a/modules/streamlit/streamlit.py b/modules/streamlit/streamlit.py new file mode 100755 index 00000000..b13f4cc2 --- /dev/null +++ b/modules/streamlit/streamlit.py @@ -0,0 +1,709 @@ + + +import os +import sys +import streamlit as st +import plotly.graph_objects as go +import pandas as pd +import plotly.express as px +# from commune.plot.dag import DagModule + +import commune as c + + + +class StreamlitModule(c.Module): + + height:int=1000 + width:int=1000 + theme: str= 'plotly_dark' + + @property + def streamlit_functions(self): + return [fn for fn in dir(self) if fn.startswith('st_')] + + + def run(self, data, plots=None, default_plot ='histogram', title=None ): + self.cols= st.columns([1,3]) + plots = plots or self.plot_options() + if default_plot not in plots: + default_plot = plots[0] + supported_types = [pd.DataFrame] + if isinstance(data, pd.DataFrame): + df = data + with self.cols[1]: + if len(plots) > 1: + name2index = {_name:_idx for _idx, _name in enumerate(plots)} + plot = st.selectbox('Choose a Plot', plots, name2index[default_plot]) + else: + plot = plots[0] + form = st.form(F'Params for {plot}') + with form: + fig = getattr(self, 'st_plot_'+ plot)(df) + form.form_submit_button("Render") + else: + raise NotImplementedError(f'Broooooo, hold on, you can only use the following {supported_types}') + fig.update_layout(height=800) + self.show(fig) + + 
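+    # --- usage sketch (reviewer addition; not part of the original patch) ---
+    # `run` dispatches to the `st_plot_*` methods below, discovered through
+    # `plot_options`, so a minimal page script (run with `streamlit run`) could be:
+    #
+    #   import pandas as pd
+    #   df = pd.DataFrame({'x': [1, 2, 3], 'y': [2, 4, 8]})
+    #   StreamlitModule().run(df, plots=['scatter2D'], default_plot='scatter2D')
+    #
+    # The DataFrame contents here are illustrative only.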
@staticmethod + def metrics_dict(x, num_rows:int = 1): + num_elements = len(x) + num_cols = num_elements//num_rows + row_cols = [st.columns(num_cols) for i in range(num_rows)] + for i in range(num_elements): + k = list(x.keys())[i] + v = list(x.values())[i] + row_idx = i//num_cols + col_idx = i%num_cols + row_cols[row_idx][col_idx].metric(k, int(v)) + + def plot_options(self, prefix:str ='st_plot'): + plot_options = self.fns(prefix) + return [p.replace(prefix+'_', '')for p in plot_options] + + + def show(self, fig): + with self.cols[1]: + st.plotly_chart(fig) + + def st_plot_scatter2D(self, df=None): + df = df if isinstance(df, pd.DataFrame) else self.df + column_options = list(df.columns) + + + with self.cols[0]: + st.markdown("## X Axis") + x_col = st.selectbox("X Axis",column_options, 0 ) + + st.markdown("## Y Axis") + y_col = st.selectbox("Y Axis", column_options, 1) + + st.markdown("## Color Axis") + color_col = st.selectbox("Color", column_options + [None], 0) + color_args = {"color": color_col} if color_col is not None else {} + marker_size = st.slider("Select Marker Size", 5, 30, 20) + + df["size"] = [marker_size for _ in range(len(df))] + + + fig = px.scatter(df, x=x_col, y=y_col, size="size", **color_args) + fig.update_layout(width=1000, + height=800) + + return fig + + + + + def st_plot_scatter3D(self, df=None): + df = df if isinstance(df, pd.DataFrame) else self.df + column_options = list(df.columns) + + plotly_kwargs = {} + with self.cols[0]: + st.markdown("## X Axis") + plotly_kwargs['x'] = st.selectbox("X Axis", column_options, 0) + st.markdown("## Y Axis") + plotly_kwargs['y'] = st.selectbox("Y Axis", column_options, 1) + st.markdown("## Z Axis") + plotly_kwargs['z'] = st.selectbox("Z Axis", column_options, 2) + st.markdown("## Color Axis") + plotly_kwargs['color'] = st.selectbox("## Color", [None] + column_options, 0) + marker_size = st.slider("Select Marker Size", 5, 30, 20) + df["size"] = [marker_size for _ in range(len(df))] + plotly_kwargs['size']= 'size' + plotly_kwargs['template'] = self.theme + + fig = px.scatter_3d(df, **plotly_kwargs) + fig.update_layout(width=self.width, height=self.height, font_size=15) + return fig + + + def st_plot_box(self, df=None): + + + df = df if isinstance(df, pd.DataFrame) else self.df + column_options = list(df.columns) + plotly_kwargs = {} + + with self.cols[0]: + st.markdown("## X Axis") + plotly_kwargs['x'] = st.selectbox("X Axis", column_options, 0) + st.markdown("## Y Axis") + plotly_kwargs['y'] = st.selectbox("Y Axis", column_options, 1) + st.markdown("## Color Axis") + plotly_kwargs['color'] = st.selectbox("Color", [None] + column_options, 0) + marker_size = st.slider("Select Marker Size", 5, 30, 20) + df["size"] = [marker_size for _ in range(len(df))] + plotly_kwargs['template'] = self.theme + st.markdown("## Box Group Mode") + plotly_kwargs['boxmode'] = st.selectbox("Choose Box Mode", ["group", "overlay"], 0) + + # df[ plotly_kwargs['x']] = df[ plotly_kwargs['x']].apply(lambda x: str(x)) + + + fig = px.box(df, **plotly_kwargs) + fig.update_layout(width=self.width, height=self.height, font_size=20) + return fig + + def st_plot_bar(self, df=None): + + df = df if isinstance(df, pd.DataFrame) else self.df + column_options = list(df.columns) + + + plot_kwargs = {} + with self.cols[0]: + + + st.markdown("## X Axis") + plot_kwargs['x'] = st.selectbox("X Axis",column_options , 0 ) + + st.markdown("## Y Axis") + plot_kwargs['y'] = st.selectbox("Y Axis", column_options, 0) + plot_kwargs['barmode'] = st.selectbox("Choose Bar Mode", 
["relative", "group", "overlay"], 1) + + st.markdown("## Color Axis") + plot_kwargs['color'] = st.selectbox("Color", [None] + column_options, 0 ) + + fig = px.bar(df, **plot_kwargs) + + fig.update_layout(width=self.width, height=self.height, font_size=20) + return fig + + + + + def st_plot_histogram(self, df=None): + + df = df if isinstance(df, pd.DataFrame) else self.df + column_options = list(df.columns) + # Choose X, Y and Color Axis + with self.cols[0]: + plot_kwargs = {} + st.markdown("### X-axis") + plot_kwargs['x'] = st.selectbox("Choose X-Axis Feature", column_options, 0) + # plot_kwargs['nbins'] = st.slider("Number of Bins", 10, 1000, 10) + + st.markdown("### Y-axis") + plot_kwargs['y'] = st.selectbox("Choose Y-Axis Feature", [None]+ column_options, 0) + + st.markdown("## Color Axis") + plot_kwargs['color'] = st.selectbox("Color", [None]+ column_options , 0 ) + # color_args = {"color":color_col} if color_col is not None else {} + + plot_kwargs['barmode'] = st.selectbox("Choose Bar Mode", ["relative", "group", "overlay"], 2) + + + + fig = px.histogram(df, **plot_kwargs) + fig.update_layout(width=self.width, height=self.height, font_size=20) + return fig + + + def st_plot_heatmap(cls, df=None): + + df = df if isinstance(df, pd.DataFrame) else self.df + column_options = list(df.columns) + # Choose X, Y and Color Axis + + plotly_kwargs = {} + with cls.cols[0]: + st.markdown("### X-axis") + plotly_kwargs['x'] = st.selectbox("Choose X-Axis Feature", column_options, 0) + plotly_kwargs['nbinsx'] = st.slider("Number of Bins", 10, 100, 10) + + st.markdown("### Y-axis") + plotly_kwargs['y'] = st.selectbox("Choose Y-Axis Feature", [None]+column_options, 0) + plotly_kwargs['nbinsy'] = st.slider("Number of Bins (Y-Axis)", 10, 100, 10) + + st.markdown("### Z-axis") + plotly_kwargs['z'] = st.selectbox("Choose Z-Axis Feature", column_options, 0) + plotly_kwargs['histfunc'] = st.selectbox("Aggregation Function", ["avg", "sum", "min", "sum", "count"], 0) + plotly_kwargs['template'] = cls.theme + + fig = px.density_heatmap(df, **plotly_kwargs) + fig.update_layout(width=cls.width, height=cls.height, font_size=20) + + + + return fig + + + + + @classmethod + def style2path(cls, style:str=None) -> str: + path = cls.dirpath() + '/styles' + style2path = {p.split('/')[-1].split('.')[0] : p for p in cls.ls(path)} + if style != None: + return style2path[style] + return style2path + + + @classmethod + def load_style(cls, style='commune'): + style_path = cls.style2path(style) + with open(style_path) as f: + st.markdown(f"", unsafe_allow_html=True) + css = r''' + + ''' + + st.markdown(css, unsafe_allow_html=True) + + @classmethod + def line_seperator(cls, text='-', length=50): + st.write(text*length) + + @classmethod + def function2streamlit(cls, + module = None, + fn:str = '__init__', + fn_schema = None, + extra_defaults:dict=None, + cols:list=None, + skip_keys = ['self', 'cls'], + mode = 'pm2'): + + key_prefix = f'{module}.{c.random_word()}' + if module == None: + module = cls + + elif isinstance(module, str): + module = c.module(module) + extra_defaults = {} if extra_defaults is None else extra_defaults + + if fn_schema == None: + + fn_schema = module.schema(defaults=True, include_parents=True)[fn] + if fn == '__init__': + config = module.config(to_munch=False) + extra_defaults = config + kwargs = {} + fn_schema['default'].pop('self', None) + fn_schema['default'].pop('cls', None) + fn_schema['default'].update(extra_defaults) + fn_schema['default'].pop('config', None) + fn_schema['default'].pop('kwargs', None) 
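+            # (reviewer note) the pops above keep only user-facing parameters:
+            # bound arguments (self/cls) and pass-through containers
+            # (config/kwargs) are dropped before being rendered as widgets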
+ + fn_schema['input'].update({k:str(type(v)).split("'")[1] for k,v in extra_defaults.items()}) + if cols == None: + cols = [1 for i in list(range(int(len(fn_schema['input'])**0.5)))] + cols = st.columns(cols) + + for i, (k,v) in enumerate(fn_schema['default'].items()): + + optional = fn_schema['default'][k] != 'NA' + fn_key = k + if fn_key in skip_keys: + continue + if k in fn_schema['input']: + k_type = fn_schema['input'][k] + if 'Munch' in k_type or 'Dict' in k_type: + k_type = 'Dict' + if k_type.startswith('typing'): + k_type = k_type.split('.')[-1] + fn_key = f'**{k} ({k_type}){"" if optional else "(REQUIRED)"}**' + col_idx = i + if k in ['kwargs', 'args'] and v == 'NA': + continue + + + random_word = c.random_word() + col_idx = col_idx % (len(cols)) + kwargs[k] = cols[col_idx].text_input(fn_key, v, key=f'{key_prefix}.{k}.{random_word}') + + kwargs = cls.process_kwargs(kwargs, fn_schema) + + return kwargs + + + @classmethod + def process_kwargs(cls, kwargs:dict, fn_schema:dict): + + for k,v in kwargs.items(): + if v == 'None': + v = None + + if isinstance(v, str): + if v.startswith('[') and v.endswith(']'): + if len(v) > 2: + v = eval(v) + else: + v = [] + + elif v.startswith('{') and v.endswith('}'): + + if len(v) > 2: + v = c.jload(v) + else: + v = {} + elif k in fn_schema['input'] and fn_schema['input'][k] == 'str': + if v.startswith("f'") or v.startswith('f"'): + v = c.ljson(v) + else: + v = v + + elif k == 'kwargs': + continue + elif v == 'NA': + assert k != 'NA', f'Key {k} not in default' + elif v in ['True', 'False']: + v = eval(v) + else: + v = v + + kwargs[k] = v + return kwargs + + + @classmethod + def st_metrics_dict(cls, x:str, num_columns=3): + cols = st.columns(num_columns) + for i, (k,v) in enumerate(x.items()): + if type(v) in [int, float, str]: + cols[i % num_columns].metric(label=k, value=v) + + @classmethod + def styles(cls): + return list(cls.style2path().keys()) + + + @classmethod + def style_paths(cls): + return list(cls.style2path().values()) + + + + def add_plot_tools(self): + # sync plots from express + for fn_name in dir(px): + if not (fn_name.startswith('__') and fn_name.endswith('__')): + plt_obj = getattr(px, fn_name) + if callable(plt_obj): + setattr(self, fn_name, plt_obj) + + # self.dag = DagModule() + + @staticmethod + def local_css(file_name=os.path.dirname(__file__)+'/style.css'): + import streamlit as st + + + with open(file_name) as f: + st.markdown(f"", unsafe_allow_html=True) + + + @staticmethod + def metrics_dict(x, num_rows:int = 1): + num_elements = len(x) + num_cols = num_elements//num_rows + row_cols = [st.columns(num_cols) for i in range(num_rows)] + for i in range(num_elements): + k = list(x.keys())[i] + v = list(x.values())[i] + row_idx = i//num_cols + col_idx = i%num_cols + row_cols[row_idx][col_idx].metric(k, int(v)) + + def filter_dataframe(df: pd.DataFrame) -> pd.DataFrame: + + from pandas.api.types import ( + is_categorical_dtype, + is_datetime64_any_dtype, + is_numeric_dtype, + is_object_dtype, + ) + import pandas as pd + import streamlit as st + + """ + Adds a UI on top of a dataframe to let viewers filter columns + + Args: + df (pd.DataFrame): Original dataframe + + Returns: + pd.DataFrame: Filtered dataframe + """ + modify = st.checkbox("Add filters") + + if not modify: + return df + + df = df.copy() + + # Try to convert datetimes into a standard format (datetime, no timezone) + for col in df.columns: + if is_object_dtype(df[col]): + try: + df[col] = pd.to_datetime(df[col]) + except Exception: + pass + + if 
is_datetime64_any_dtype(df[col]): + df[col] = df[col].dt.tz_localize(None) + + modification_container = st.container() + + with modification_container: + to_filter_columns = st.multiselect("Filter dataframe on", df.columns) + for column in to_filter_columns: + left, right = st.columns((1, 20)) + # Treat columns with < 10 unique values as categorical + if is_categorical_dtype(df[column]) or df[column].nunique() < 10: + user_cat_input = right.multiselect( + f"Values for {column}", + df[column].unique(), + default=list(df[column].unique()), + ) + df = df[df[column].isin(user_cat_input)] + elif is_numeric_dtype(df[column]): + _min = float(df[column].min()) + _max = float(df[column].max()) + step = (_max - _min) / 100 + user_num_input = right.slider( + f"Values for {column}", + min_value=_min, + max_value=_max, + value=(_min, _max), + step=step, + ) + df = df[df[column].between(*user_num_input)] + elif is_datetime64_any_dtype(df[column]): + user_date_input = right.date_input( + f"Values for {column}", + value=( + df[column].min(), + df[column].max(), + ), + ) + if len(user_date_input) == 2: + user_date_input = tuple(map(pd.to_datetime, user_date_input)) + start_date, end_date = user_date_input + df = df.loc[df[column].between(start_date, end_date)] + else: + user_text_input = right.text_input( + f"Substring or regex in {column}", + ) + if user_text_input: + df = df[df[column].astype(str).str.contains(user_text_input)] + + return df + + # lisst all the prots + + @classmethod + def set_page_config(cls, layout:str='wide'): + try: + return c.set_page_config(layout="wide") + except Exception as e: + c.print(e) + + + def select_key(self): + import streamlit as st + keys = c.keys() + key2index = {k:i for i,k in enumerate(keys)} + with st.form('key.form'): + self.key = st.selectbox('Select Key', keys, key2index['module'], key='key.sidebar') + key_address = self.key.ss58_address + st.write('address') + st.code(key_address) + return self.key + + + @classmethod + def plot_dashboard(cls, df, key='dashboard', x='name', y='emission', select_columns=True): + import plotly.express as px + import streamlit as st + cols = list(df.columns) + if select_columns: + cols = st.multiselect('Select Columns', cols, cols, key=key+'multi') + # bar_chart based on x and y + + if len(df) == 0: + st.error('You are not staked to any modules') + return + col2idx = {c:i for i,c in enumerate(cols)} + defult_x_col = col2idx.get(x, 0) + default_y_col = col2idx.get(y, 1) + + plot_kwargs = {} + + st_cols = st.columns([1,3]) + + with st_cols[0]: + plot_type = st.selectbox('Select Plot Type', ['pie', 'bar', 'line', 'scatter', 'histogram', 'treemap'], 0, key='info.plot') + + if plot_type in [ 'bar', 'line', 'scatter']: + plot_kwargs['x'] = st.selectbox('Select X', cols, defult_x_col) + plot_kwargs['y'] = st.selectbox('Select Y', cols, default_y_col) + elif plot_type in ['histogram']: + plot_kwargs['x'] = st.selectbox('Select Value', cols, defult_x_col) + elif plot_type in ['pie']: + plot_kwargs['names'] = st.selectbox('Select Names', cols, defult_x_col) + plot_kwargs['values'] = st.selectbox('Select Values', cols, default_y_col) + elif plot_type in ['treemap']: + plot_kwargs['path'] = st.multiselect('Select Path', cols, ["name"]) + plot_kwargs['values'] = st.selectbox('Select Values', cols, default_y_col) + + + sort_type = st.selectbox('Sort Type', cols , 0) + + if sort_type in cols: + ascending = st.checkbox('Ascending', False) + df = df.sort_values(sort_type, ascending=ascending) + + with st_cols[1]: + plot_fn = getattr(px, plot_type) 
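+            # `plot_type` was chosen in the selectbox above, so this resolves to
+            # the matching plotly.express figure factory (px.pie, px.bar, ...)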
+ plot_kwargs_title = " ".join([f"{k.lower()}:{v}" for k,v in plot_kwargs.items()]) + title = f'My Modules {plot_type} for ({plot_kwargs_title})' + fig = plot_fn(df, **plot_kwargs, title=title) + st.plotly_chart(fig) + # st.write(kwargs) + + @classmethod + def stwrite(self, *args, **kwargs): + import streamlit as st + st.write(*args, **kwargs) + + + + @classmethod + def function2streamlit(cls, + module = None, + fn:str = '__init__', + fn_schema = None, + extra_defaults:dict=None, + cols:list=None, + skip_keys = ['self', 'cls'], + salt = None, + mode = 'pm2'): + import streamlit as st + + key_prefix = f'{module}.{fn}' + if salt != None: + key_prefix = f'{key_prefix}.{salt}' + if module == None: + module = cls + + elif isinstance(module, str): + module = c.module(module) + extra_defaults = {} if extra_defaults is None else extra_defaults + kwargs = {} + + if fn_schema == None: + + fn_schema = module.schema(defaults=True, include_parents=True)[fn] + if fn == '__init__': + config = module.config(to_munch=False) + extra_defaults = config + st.write(fn_schema) + fn_schema['default'].pop('self', None) + fn_schema['default'].pop('cls', None) + fn_schema['default'].update(extra_defaults) + fn_schema['default'].pop('config', None) + fn_schema['default'].pop('kwargs', None) + + fn_schema['input'].update({k:str(type(v)).split("'")[1] for k,v in extra_defaults.items()}) + if cols == None: + cols = [1 for i in list(range(int(len(fn_schema['input'])**0.5)))] + if len(cols) == 0: + return kwargs + cols = st.columns(cols) + + for i, (k,v) in enumerate(fn_schema['default'].items()): + + optional = fn_schema['default'][k] != 'NA' + fn_key = k + if fn_key in skip_keys: + continue + if k in fn_schema['input']: + k_type = fn_schema['input'][k] + if 'Munch' in k_type or 'Dict' in k_type: + k_type = 'Dict' + if k_type.startswith('typing'): + k_type = k_type.split('.')[-1] + fn_key = f'**{k} ({k_type}){"" if optional else "(REQUIRED)"}**' + col_idx = i + if k in ['kwargs', 'args'] and v == 'NA': + continue + + + col_idx = col_idx % (len(cols)) + if type(v) in [float, int] or c.is_int(v): + kwargs[k] = cols[col_idx].number_input(fn_key, v, key=f'{key_prefix}.{k}') + elif v in ['True', 'False']: + kwargs[k] = cols[col_idx].checkbox(fn_key, v, key=f'{key_prefix}.{k}') + else: + kwargs[k] = cols[col_idx].text_input(fn_key, v, key=f'{key_prefix}.{k}') + kwargs = cls.process_kwargs(kwargs, fn_schema) + + return kwargs + + + + def load_state(self, update:bool=False, netuid=0, network='main', state=None, _self = None): + + if _self != None: + self = _self + + import streamlit as st + + self.key = c.get_key() + + t = c.timer() + @st.cache_data(ttl=60*60*24, show_spinner=False) + def get_state(): + subspace = c.module('subspace')() + state = subspace.state_dict(update=update, version=1) + return state + + if state == None: + state = get_state() + self.state = state + + + + self.netuid = 0 + self.subnets = self.state['subnets'] + self.modules = self.state['modules'][self.netuid] + self.name2key = {k['name']: k['key'] for k in self.modules} + self.key2name = {k['key']: k['name'] for k in self.modules} + + self.namespace = c.namespace() + + self.keys = c.keys() + self.key2index = {k:i for i,k in enumerate(self.keys)} + + self.namespace = {m['name']: m['address'] for m in self.modules} + self.module_names = [m['name'] for m in self.modules] + self.block = self.state['block'] + for i, m in enumerate(self.modules): + self.modules[i]['stake'] = self.modules[i]['stake']/1e9 + self.modules[i]['emission'] = 
self.modules[i]['emission']/1e9 + + self.key_info = { + 'key': self.key.ss58_address, + 'balance': self.state['balances'].get(self.key.ss58_address,0), + 'stake_to': self.state['stake_to'][self.netuid].get(self.key.ss58_address,{}), + 'stake': sum([v[1] for v in self.state['stake_to'][self.netuid].get(self.key.ss58_address)]), + } + + self.key_info['balance'] = self.key_info['balance']/1e9 + self.key_info['stake_to'] = {k:v/1e9 for k,v in self.key_info['stake_to']} + self.key_info['stake'] = sum([v for k,v in self.key_info['stake_to'].items()]) + # convert keys to names + for k in ['stake_to']: + self.key_info[k] = {self.key2name.get(k, k): v for k,v in self.key_info[k].items()} + + self.subnet_info = self.state['subnets'][0] + balances = self.state['balances'] + self.total_balance = sum(balances.values())/1e9 + + diff --git a/modules/streamlit/styles/commune.css b/modules/streamlit/styles/commune.css new file mode 100644 index 00000000..dffc6dfe --- /dev/null +++ b/modules/streamlit/styles/commune.css @@ -0,0 +1,8 @@ +div.stButton button { + background-color: rgb(34, 220, 124); + width: 100%; + color : #000; + border: 1px solid rgb(0, 0, 0); + font-size: 20px; + font-weight: 200; +} \ No newline at end of file diff --git a/modules/streamlit/utils.py b/modules/streamlit/utils.py new file mode 100755 index 00000000..e8759d41 --- /dev/null +++ b/modules/streamlit/utils.py @@ -0,0 +1,52 @@ +import streamlit as st + +def describe(module =None, sidebar = True, detail=False, expand=True): + + _st = st.sidebar if sidebar else st + st.sidebar.markdown('# '+str(module)) + fn_list = list(filter(lambda fn: callable(getattr(module,fn)) and '__' not in fn, dir(module))) + + def content_fn(fn_list=fn_list): + fn_list = _st.multiselect('fns', fn_list) + for fn_key in fn_list: + fn = getattr(module,fn_key) + if callable(fn): + _st.markdown('#### '+fn_key) + _st.write(fn) + _st.write(type(fn)) + if expand: + with st.sidebar.expander(str(module)): + content_fn() + else: + content_fn() + + +def row_column_bundles(fn_list, fn_args_list,cols_per_row=3): + + cols = cols_per_row + item_count = len(fn_list) + rows = item_count // cols + row2cols = [] + + for row_idx in range(rows+1): + row2cols.append(st.columns(cols)) + + for fn_idx, fn in enumerate(fn_list): + row_idx = fn_idx // cols + col_idx = fn_idx % cols + with row2cols[row_idx][col_idx]: + fn(*fn_args_list[fn_idx]) + + +def streamlit_thread(thread): + try: + # Streamlit >= 1.12.0 + from streamlit.runtime.scriptrunner import add_script_run_ctx + from streamlit.runtime.scriptrunner.script_run_context import get_script_run_ctx + except: + # Streamlit <= 1.11.0 + from streamlit.scriptrunner import add_script_run_ctx + from streamlit.scriptrunner.script_run_context import get_script_run_ctx + + return get_script_run_ctx(t) + diff --git a/modules/streamlit/watchdog/streamlit_watchdog.py b/modules/streamlit/watchdog/streamlit_watchdog.py new file mode 100644 index 00000000..f4951eb6 --- /dev/null +++ b/modules/streamlit/watchdog/streamlit_watchdog.py @@ -0,0 +1,46 @@ +import commune as c + +import datetime as dt + +import streamlit as st + + +class StreamlitWatchdog(c.Module): + + @classmethod + def update_fn(self): + # Rewrite the dummy.py module. Because this script imports dummy, + # modifiying dummy.py will cause Streamlit to rerun this script. 
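+        # (reviewer note) the rewrite described above is not implemented in
+        # this patch; update_fn currently only logs that the watchdog fired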
+ c.print('Updating dummy module') + + + @staticmethod + @st.cache_data + def install_monitor(path=c.libpath, recursive=True): + # Because we use st.cache, this code will be executed only once, + # so we won't get a new Watchdog thread each time the script runs. + + from watchdog.events import FileSystemEventHandler + from watchdog.observers import Observer + + class Watchdog(FileSystemEventHandler): + def __init__(self, hook): + self.hook = hook + + def on_modified(self, event): + self.hook() + + observer = Observer() + observer.schedule( + Watchdog(StreamlitWatchdog.update_fn), + path=path, + recursive=recursive) + observer.start() + + + + def install(self): + c.cmd('pip3 install watchdog') + c.print('Installing watchdog') + c.print('Installing watchdog') + return {'status':'success', 'message':'Installed watchdog'} \ No newline at end of file diff --git a/modules/streamlit/watchdog/streamlit_watchdog.yaml b/modules/streamlit/watchdog/streamlit_watchdog.yaml new file mode 100644 index 00000000..5b06f8d7 --- /dev/null +++ b/modules/streamlit/watchdog/streamlit_watchdog.yaml @@ -0,0 +1,2 @@ +api: 1234 +sup: hey diff --git a/modules/subspace/__init__.py b/modules/subspace/__init__.py new file mode 100644 index 00000000..fd40910d --- /dev/null +++ b/modules/subspace/__init__.py @@ -0,0 +1,4 @@ + + + + diff --git a/modules/subspace/app/app.py b/modules/subspace/app/app.py new file mode 100644 index 00000000..ecaad7fa --- /dev/null +++ b/modules/subspace/app/app.py @@ -0,0 +1,69 @@ +import commune as c +import streamlit as st +import pandas as pd +from streamlit.components.v1 import components +import plotly.express as px +import streamlit as st + +css = r''' + +''' + +st.markdown(css, unsafe_allow_html=True) + +class SubspaceDashboard(c.Module): + + def __init__(self, root_netuid=0, max_age = 10000, api='subspace'): + self.state = {} + self.max_age = max_age + self.root_netuid = root_netuid + self.subspace = c.module('subspace')() + + def global_state(self, max_age=None): + global_state = self.get('global_state', None, max_age=max_age) + if global_state == None : + return self.subspace.global_state(max_age=max_age) + return global_state + + def sync(self, netuid=None, max_age=None, update=False): + state = self.get('state', None, max_age=max_age) + if state == None: + state = {} + global_state = self.global_state(max_age=max_age) + + return self.state['modules'] + + def select_key(self, key='module'): + keys = c.keys() + key2idx = {key:i for i, key in enumerate(keys )} + key = st.selectbox("Key", keys, key2idx[key]) + self.key = key + + st.code(f"{self.key.ss58_address}") + return key + + + def subnets_app(self, backend='app'): + st.title("Subnets") + st.write(f"Connected to {backend}") + self.sync() + subnet_name = st.selectbox("Subnet", self.subnet_names, 0) + with st.expander(f"{subnet_name} (netuid={netuid})"): + st.write(self.state['params']) + + leaderboard = c.df(self.state['modules']) + + with st.expander("Leaderboard"): + st.write(leaderboard) + + def sidebar(self): + with st.sidebar: + return self.select_key() + + def app(self): + self.sidebar() + self.subnets_app() + +SubspaceDashboard.run(__name__) \ No newline at end of file diff --git a/modules/subspace/app/backend.py b/modules/subspace/app/backend.py new file mode 100644 index 00000000..e69de29b diff --git a/modules/template/template.py b/modules/template/template.py new file mode 100644 index 00000000..f21b5a71 --- /dev/null +++ b/modules/template/template.py @@ -0,0 +1,61 @@ +import commune as c + + +class Template(c.Module): 
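+    """Wraps either a callable (via set_fn) or a module object (via
+    set_module) behind a common c.Module interface (reviewer summary)."""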
+ def __init__(self, obj=None, ): + is_fn = callable(obj) + if is_fn: + self.set_fn(obj) + else: + self.set_module(obj) + def set_module(self, module): + if module == None: + return None + self.module = module + for fn in dir(module): + setattr(self, fn, getattr(self, fn)) + + def set_fn(self, fn): + if fn == None: + return None + self.fn = fn + + + def forward(self, *args, **kwargs): + return self.fn(*args, **kwargs) + + def from_fn(cls, fn): + return cls(fn=fn) + + def from_module(cls, module): + return cls(module=module) + + + + def test(self): + print('test') + template = c.module('template')() + + def fn1(): + return 0 + + def fn2(): + return 1 + + template.set_fn(fn1) + assert template.forward() == 0 + template.set_fn(fn2) + assert template.forward() == 1 + + class Custom(c.Module): + + def test(self, a=1, b=2): + return a+b + + custom = Custom() + template = c.module('template')(obj=custom) + assert template.forward(a=3, b=4) == 7 + print('template test passed') + + + diff --git a/modules/test.py b/modules/test.py new file mode 100644 index 00000000..91ad6585 --- /dev/null +++ b/modules/test.py @@ -0,0 +1,27 @@ +# Description: This is a test file +import commune as c +class Tester(c.Module): + def __init__(self): + pass + def test(self, module='module::test', resolve_server=False): + if resolve_server: + server_exists= c.server_exists(module) + if server_exists: + c.kill(module) + while c.server_exists(module): + pass + print(f'server_exists: {server_exists}') + if not server_exists: + c.serve(module) + while not c.server_exists(module): + pass + + client = c.connect(module) + info = client.info() + assert info['name'] == module + assert info['address'] == c.get_address(module) + assert info['key'] == c.get_key(module).ss58_address + if c.server_exists(module): + c.kill(module) + return info + \ No newline at end of file diff --git a/modules/tool/__init__.py b/modules/tool/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/modules/tool/compare_token_price.py b/modules/tool/compare_token_price.py new file mode 100644 index 00000000..24a9dbf8 --- /dev/null +++ b/modules/tool/compare_token_price.py @@ -0,0 +1,41 @@ +import commune as c +class CompareTokenPrice(c.Module): + description = """"Compare the price of 2 tokens""" + def call(self, token1_name: str, token1_price: float, token2_name: str, token2_price: float, find_cheaper: bool) -> str: + """ + Compares the prices of two tokens and returns the name of the cheaper or more expensive token based on the boolean input. + + :param token1_name: Name of the first token. + :param token1_price: Price of the first token. + :param token2_name: Name of the second token. + :param token2_price: Price of the second token. + :param find_cheaper: Boolean value. If True, the function returns the cheaper token; if False, it returns the more expensive token. + :return: Name of the cheaper or more expensive token. + """ + + # Check if the prices are equal + if token1_price == token2_price: + return "Both tokens have the same price." 
+ + # Find and return the cheaper or more expensive token based on the boolean input + if find_cheaper: + return token1_name if token1_price < token2_price else token2_name + else: + return token1_name if token1_price > token2_price else token2_name + + + +# if __name__ == "__main__": + + +# # Example usage: +# token1_name = "ETH" +# token1_price = 3000.0 +# token2_name = "BTC" +# token2_price = 45000.0 + +# cheaper_token = CompareTokenPrice().call(token1_name, token1_price, token2_name, token2_price, find_cheaper=True) +# print(f"The cheaper token is {cheaper_token}") + +# more_expensive_token = CompareTokenPrice().call(token1_name, token1_price, token2_name, token2_price, find_cheaper=False) +# print(f"The more expensive token is {more_expensive_token}") \ No newline at end of file diff --git a/modules/tool/compound.py b/modules/tool/compound.py new file mode 100644 index 00000000..0ffa8c1b --- /dev/null +++ b/modules/tool/compound.py @@ -0,0 +1,38 @@ +from web3 import Web3 +#connect to a local ethereum node +w3 = Web3(Web3.HTTPProvider('http://localhost:8545')) +import os +#COMPOUND FINANCE ADDRESS +contract_address = '0xa7F7De6cCad4D83d81676717053883337aC2c1b4' +contract_abi = [] + +contract = w3.eth.contract(address=contract_address, abi=contract_abi) + +def allowance(owner_address, spender_address, token_address): + # Get the ERC20 Token contract + erc20_contract = w3.eth.contract(address=token_address, abi=contract_abi) + + # Call the allowance function + allowed_amount = erc20_contract.functions.allowance(owner_address, spender_address).call() + return allowed_amount + +def supply(dst_address, asset_address, amount): + # Replace with the address that will send the transaction + sender_address = '0xYourSenderAddress' + + # Build the transaction + transaction = contract.functions.supplyTo(dst_address, asset_address, amount).buildTransaction({ + 'from': sender_address, + 'gas': 2000000, + 'gasPrice': w3.toWei('20', 'gwei'), + 'nonce': w3.eth.getTransactionCount(sender_address), + }) + + PRIVATE_KEY= os.getenv("PRIVATE_KEY", "") + + # Sign the transaction + signed_transaction = w3.eth.account.signTransaction(transaction, PRIVATE_KEY) + + # Send the transaction + tx_hash = w3.eth.sendRawTransaction(signed_transaction.rawTransaction) + return tx_hash.hex() diff --git a/modules/tool/defi/__init__.py b/modules/tool/defi/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/modules/tool/defi/aave.py b/modules/tool/defi/aave.py new file mode 100644 index 00000000..f61319ab --- /dev/null +++ b/modules/tool/defi/aave.py @@ -0,0 +1,38 @@ +from web3 import Web3 +import os +#connect to a local ethereum node +w3 = Web3(Web3.HTTPProvider('http://localhost:8545')) + +#AAVE ADDRESS +contract_address = '0x6Ae43d3271ff6888e7Fc43Fd7321a503ff738951' +contract_abi = [] + +contract = w3.eth.contract(address=contract_address, abi=contract_abi) + +def allowance(owner_address, spender_address, token_address): + # Get the ERC20 Token contract + erc20_contract = w3.eth.contract(address=token_address, abi=contract_abi) + + # Call the allowance function + allowed_amount = erc20_contract.functions.allowance(owner_address, spender_address).call() + return allowed_amount + +def supply(dst_address, asset_address, amount): + # Replace with the address that will send the transaction + sender_address = '0xYourSenderAddress' + + # Build the transaction + transaction = contract.functions.supplyTo(dst_address, asset_address, amount).buildTransaction({ + 'from': sender_address, + 'gas': 2000000, + 
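+        # (reviewer note) the fixed gas and gasPrice values are placeholders;
+        # a real deployment should estimate gas and query the current price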
'gasPrice': w3.toWei('20', 'gwei'), + 'nonce': w3.eth.getTransactionCount(sender_address), + }) + + # Sign the transaction + PRIVATE_KEY= os.getenv("PRIVATE_KEY", "") + signed_transaction = w3.eth.account.signTransaction(transaction, PRIVATE_KEY) + + # Send the transaction + tx_hash = w3.eth.sendRawTransaction(signed_transaction.rawTransaction) + return tx_hash.hex() diff --git a/modules/tool/defi/compare_token_price.py b/modules/tool/defi/compare_token_price.py new file mode 100644 index 00000000..24a9dbf8 --- /dev/null +++ b/modules/tool/defi/compare_token_price.py @@ -0,0 +1,41 @@ +import commune as c +class CompareTokenPrice(c.Module): + description = """"Compare the price of 2 tokens""" + def call(self, token1_name: str, token1_price: float, token2_name: str, token2_price: float, find_cheaper: bool) -> str: + """ + Compares the prices of two tokens and returns the name of the cheaper or more expensive token based on the boolean input. + + :param token1_name: Name of the first token. + :param token1_price: Price of the first token. + :param token2_name: Name of the second token. + :param token2_price: Price of the second token. + :param find_cheaper: Boolean value. If True, the function returns the cheaper token; if False, it returns the more expensive token. + :return: Name of the cheaper or more expensive token. + """ + + # Check if the prices are equal + if token1_price == token2_price: + return "Both tokens have the same price." + + # Find and return the cheaper or more expensive token based on the boolean input + if find_cheaper: + return token1_name if token1_price < token2_price else token2_name + else: + return token1_name if token1_price > token2_price else token2_name + + + +# if __name__ == "__main__": + + +# # Example usage: +# token1_name = "ETH" +# token1_price = 3000.0 +# token2_name = "BTC" +# token2_price = 45000.0 + +# cheaper_token = CompareTokenPrice().call(token1_name, token1_price, token2_name, token2_price, find_cheaper=True) +# print(f"The cheaper token is {cheaper_token}") + +# more_expensive_token = CompareTokenPrice().call(token1_name, token1_price, token2_name, token2_price, find_cheaper=False) +# print(f"The more expensive token is {more_expensive_token}") \ No newline at end of file diff --git a/modules/tool/defi/compound.py b/modules/tool/defi/compound.py new file mode 100644 index 00000000..0ffa8c1b --- /dev/null +++ b/modules/tool/defi/compound.py @@ -0,0 +1,38 @@ +from web3 import Web3 +#connect to a local ethereum node +w3 = Web3(Web3.HTTPProvider('http://localhost:8545')) +import os +#COMPOUND FINANCE ADDRESS +contract_address = '0xa7F7De6cCad4D83d81676717053883337aC2c1b4' +contract_abi = [] + +contract = w3.eth.contract(address=contract_address, abi=contract_abi) + +def allowance(owner_address, spender_address, token_address): + # Get the ERC20 Token contract + erc20_contract = w3.eth.contract(address=token_address, abi=contract_abi) + + # Call the allowance function + allowed_amount = erc20_contract.functions.allowance(owner_address, spender_address).call() + return allowed_amount + +def supply(dst_address, asset_address, amount): + # Replace with the address that will send the transaction + sender_address = '0xYourSenderAddress' + + # Build the transaction + transaction = contract.functions.supplyTo(dst_address, asset_address, amount).buildTransaction({ + 'from': sender_address, + 'gas': 2000000, + 'gasPrice': w3.toWei('20', 'gwei'), + 'nonce': w3.eth.getTransactionCount(sender_address), + }) + + PRIVATE_KEY= os.getenv("PRIVATE_KEY", "") + + # 
Sign the transaction + signed_transaction = w3.eth.account.signTransaction(transaction, PRIVATE_KEY) + + # Send the transaction + tx_hash = w3.eth.sendRawTransaction(signed_transaction.rawTransaction) + return tx_hash.hex() diff --git a/modules/tool/defi/defillama/aave.py b/modules/tool/defi/defillama/aave.py new file mode 100644 index 00000000..5a5c01b7 --- /dev/null +++ b/modules/tool/defi/defillama/aave.py @@ -0,0 +1,70 @@ +import requests +import json +from typing import Optional, Dict, Any +import time + +import os +from dotenv import load_dotenv +from web3 import Web3 +import commune as c + +load_dotenv() + +import commune as c + + +class AaveV3(c.Module): + description = """ + Connects to the Defillama API and allows the user to select which chain, project, symbol or pool they want. + :param params: A dictionary with optional filters (chain (first letter uppercase), project, symbol, pool). + :return: Filtered list of pool data. + + Example input: + # Fetch data for a specific chain and project + params = { + "chain": "Ethereum", + "project": "lido", + } + + here is an input: result=aave_instance.call(chain="Ethereum", symbol="ETH") + """ + + def call(self, chain: str = 'Ethereum', project: str = 'aave-v3') -> dict: + """Initializes the state with the latest AAVE V3 APY.""" + url = "https://yields.llama.fi/pools" + # Only include parameters that are not None in the request + if chain!=None: + chain=str(chain).capitalize() + params = {k: v for k, v in {'chain': chain, 'project': project}.items() if v is not None} + + response = requests.get(url, timeout=10, params=params) + if response.status_code == 200: + response_data = json.loads(response.text) + data = response_data.get("data", []) + + # Filter data based on provided parameters + filtered_data = [ + item for item in data if + (item.get("project") == project if project is not None else True) and + (item.get("chain") == chain if chain is not None else True) + ] + + if filtered_data: + results = [] + for item in filtered_data: + results.append({ + "apy": item["apy"], + "market": project, + "chain": chain if chain is not None else item["chain"], + "timestamp": time.time(), + }) + return results + else: + return [{'error': f'No data found for the given parameters'}] + else: + return [{'error': f"Failed to fetch data from API -> Status code: {response.status_code}"}] + +# if __name__ == "__main__": +# aave_instance = AaveV3() +# result=aave_instance.call(chain="Ethereum", symbol="WETH") +# print(result) diff --git a/modules/tool/defi/defillama/defillama.py b/modules/tool/defi/defillama/defillama.py new file mode 100644 index 00000000..df655710 --- /dev/null +++ b/modules/tool/defi/defillama/defillama.py @@ -0,0 +1,72 @@ +import requests +import json +from typing import Optional, Dict, Any +import time + +import os +from dotenv import load_dotenv +from web3 import Web3 +import commune as c + +load_dotenv() + +import commune as c + +class DefiLlama(c.Module): + description = """ + Connects to the Defillama API and allows the user to select which chain, project, symbol or pool they want. + :param params: A dictionary with optional filters (chain (first letter uppercase), project, symbol, pool). + :return: Filtered list of pool data. 
+ + Example input: + # Fetch data for a specific chain and project + params = { + "chain": "Ethereum", + "project": "lido", + } + """ + + def call(self, chain: str = 'Ethereum', project: str = 'lido', symbol:str = 'ETH') -> dict: + """Initializes the state with the latest Defillama Pool Data.""" + url = "https://yields.llama.fi/pools" + # Only include parameters that are not None in the request + if chain!=None: + chain=str(chain).capitalize() + + params = {k: v for k, v in {'chain': chain.capitalize(), 'project': project}.items() if v is not None} + + response = requests.get(url, timeout=10, params=params) + if response.status_code == 200: + response_data = json.loads(response.text) + data = response_data.get("data", []) + + # Filter data based on provided parameters + filtered_data = [ + item for item in data if + (item.get("project") == project if project is not None else True) and + (item.get("chain") == chain if chain is not None else True) and + (item.get("symbol") == symbol if symbol is not None else True) + ] + + if filtered_data: + results = [] + for item in filtered_data: + results.append({ + "apy": item["apy"], + "market": project if project is not None else item["project"], + "asset": symbol if symbol is not None else item["symbol"], + "chain": chain if chain is not None else item["chain"], + "timestamp": time.time(), + }) + return results + else: + return [{'error': f'No data found for the given parameters'}] + else: + return [{'error': f"Failed to fetch data from API -> Status code: {response.status_code}"}] + + + +# if __name__ == "__main__": +# dl_instance = DefiLlama() +# result=dl_instance.call(chain="ethereum", project="compound") +# print(result) diff --git a/modules/tool/defi/defillama/lido.py b/modules/tool/defi/defillama/lido.py new file mode 100644 index 00000000..c08aad0d --- /dev/null +++ b/modules/tool/defi/defillama/lido.py @@ -0,0 +1,62 @@ + + +import commune as c +import requests +import json +import time +class Lido(c.Module): + description = """ + Connects to the Defillama API and allows the user to select which chain, project, symbol or pool they want. + :param params: A dictionary with optional filters (chain (first letter uppercase), project, symbol, pool). + :return: Filtered list of pool data. 
+ + Example input: + # Fetch data for a specific chain and project + params = { + "chain": "Ethereum", + "project": "lido", + } + + """ + + + + def call(self, chain: str = 'Ethereum', project: str = 'lido') -> dict: + """Initializes the state with the latest lido APY.""" + url = "https://yields.llama.fi/pools" + # Only include parameters that are not None in the request + if chain!=None: + chain=str(chain).capitalize() + params = {k: v for k, v in {'chain': chain, 'project': project}.items() if v is not None} + + response = requests.get(url, timeout=10, params=params) + if response.status_code == 200: + response_data = json.loads(response.text) + data = response_data.get("data", []) + + # Filter data based on provided parameters + filtered_data = [ + item for item in data if + (item.get("project") == project if project is not None else True) and + (item.get("chain") == chain if chain is not None else True) + ] + + if filtered_data: + results = [] + for item in filtered_data: + results.append({ + "apy": item["apy"], + "market": project, + "chain": chain if chain is not None else item["chain"], + "timestamp": time.time(), + }) + return results + else: + return [{'error': f'No data found for the given parameters'}] + else: + return [{'error': f"Failed to fetch data from API -> Status code: {response.status_code}"}] + +# if __name__ == "__main__": +# lido_instance = Lido() +# result=lido_instance.call(chain="ethereum") +# print(result) diff --git a/modules/tool/defi/defillama/rocketpool.py b/modules/tool/defi/defillama/rocketpool.py new file mode 100644 index 00000000..4638ccbd --- /dev/null +++ b/modules/tool/defi/defillama/rocketpool.py @@ -0,0 +1,86 @@ +import requests +import json +from typing import Optional, Dict, Any +import time + +import os +from dotenv import load_dotenv +from web3 import Web3 +import commune as c + +load_dotenv() + +import commune as c + + + +class RocketPool(c.Module): + description = """ + Connects to the Defillama API and allows the user to select which chain, project, symbol or pool they want. + :param params: A dictionary with optional filters (chain (first letter uppercase), project, symbol, pool). + :return: Filtered list of pool data. 
+ + Example input: + # Fetch data for a specific chain and project + params = { + "chain": "Ethereum", + "project": "lido", + } + + here is an input: + rocket_pool_instance = RocketPool() + result=rocket_pool_instance.call(project="rocket-pool", symbol="RETH") + here is an example of the output that corresponds with the above input: + [{'apy': 3.21066, 'market': 'rocket-pool', 'asset': 'RETH', 'chain': 'Ethereum', 'timestamp': 1695494506.412746}] + """ + + + + + + + def call(self, chain: str = None, project: str = 'rocket-pool', symbol: str = None) -> dict: + """Initializes the state with the latest rocket-pool APY.""" + url = "https://yields.llama.fi/pools" + # Only include parameters that are not None in the request + if chain!=None: + chain=str(chain).capitalize() + params = {k: v for k, v in {'chain': chain, 'project': project, 'symbol': symbol}.items() if v is not None} + + response = requests.get(url, timeout=10, params=params) + if response.status_code == 200: + response_data = json.loads(response.text) + data = response_data.get("data", []) + + # Filter data based on provided parameters + filtered_data = [ + item for item in data if + (item.get("project") == project if project is not None else True) and + (item.get("chain") == chain if chain is not None else True) and + (item.get("symbol") == symbol if symbol is not None else True) + ] + + if filtered_data: + results = [] + for item in filtered_data: + results.append({ + "apy": item["apy"], + "market": project if project is not None else item["project"], + "asset": symbol if symbol is not None else item["symbol"], + "chain": chain if chain is not None else item["chain"], + "timestamp": time.time(), + }) + best_apy_item = max(results, key=lambda x: x["apy"]) + return best_apy_item + + else: + return [{'error': f'No data found for the given parameters'}] + else: + return [{'error': f"Failed to fetch data from API -> Status code: {response.status_code}"}] + +# if __name__ == "__main__": +# rocket_pool_instance = RocketPool() +# result=rocket_pool_instance.call(project="rocket-pool") +# print(result) + + diff --git a/modules/tool/defi/get_best_apy.py b/modules/tool/defi/get_best_apy.py new file mode 100644 index 00000000..aa3f783e --- /dev/null +++ b/modules/tool/defi/get_best_apy.py @@ -0,0 +1,28 @@ +import commune as c + +class GetBestApy(c.Module): + description = """"Get the best apy between 2 dictionaries""" + + def call(self, data1: dict, data2: dict) -> dict: + """Get the best apy between 2 dictionaries + + Args: + data1 (dict): _description_ + data2 (dict): _description_ + + Returns: + market name as a string + """ + if data1["apy"] > data2["apy"]: + return data1['market'] + else: + return data2['market'] + + +# if __name__ == "__main__": +# data1={'apy': 3.16061, 'market': 'rocket-pool', 'asset': 'RETH', 'chain': 'Ethereum', 'timestamp': 1695525398.567652} +# data2={'apy': 4, 'market': 'lido', 'asset': 'RETH', 'chain': 'Ethereum', 'timestamp': 1695525398.567652} +# apy = GetBestApy() +# apy.call(data1, data2) +# print( apy.call(data1, data2)) + diff --git a/modules/tool/defi/inch/balances.py b/modules/tool/defi/inch/balances.py new file mode 100644 index 00000000..f74a8048 --- /dev/null +++ b/modules/tool/defi/inch/balances.py @@ -0,0 +1,56 @@ + +import requests + +import json +import requests +import commune as c + +class Inch(c.Module): + + description = """ + Gets token balances for a wallet address from the 1Inch Balance API. + :param wallet_address: A wallet address. + :return: A JSON blob with token balances. 
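+    Example (illustrative; mirrors the `test` method below):
+        Inch().call('0xbe0eb53f46cd790cd13851d5eff43d12404d33e8')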
+ """ + def __init__(self, + api_key: str = 'INCH_API_KEY', + url= "https://api.1inch.dev/balance"): + + self.api_key = api_key + self.url = url + + def call(self, wallet_address:str ): + endpoint = f'https://api.1inch.dev/balance/v1.2/1/balances/{wallet_address}' + headers = { + "accept": "application/json", + "Authorization": f"Bearer {self.api_key}", + } + + try: + # Send a GET request to the API + response = requests.get(endpoint, headers=headers, timeout=10) + response.raise_for_status() + except requests.RequestException as e: + print(f"Failed to fetch data from 1Inch Balance API: {e}") + return + + # Parse the JSON response + response_data = response.json() + json_blob = json.dumps(response_data, indent=4) # Convert the Python dictionary to a JSON formatted string + + return json_blob + + + @classmethod + def test(cls): + wallet_address = '0xbe0eb53f46cd790cd13851d5eff43d12404d33e8' + tool = cls() + + token_balances = tool.call(wallet_address) + + if token_balances: + print(f"Token balances for wallet address {wallet_address}:") + token_balances_dict = json.loads(token_balances) + print(token_balances_dict) + else: + print("Token balance fetch failed. Please check your wallet address.") diff --git a/modules/tool/defi/inch/gasprice.py b/modules/tool/defi/inch/gasprice.py new file mode 100644 index 00000000..3c460fdf --- /dev/null +++ b/modules/tool/defi/inch/gasprice.py @@ -0,0 +1,40 @@ +import commune as c +import requests +import json +import os + +class SwaggerInch(c.Module): + def __init__(self, api_key: str = 'INCH_API_KEY'): + self.api_key = os.getenv(api_key, api_key) + + description = """ + Gets the token prices from the 1inch API. + """ + def call(self): + """ + Connects to the 1Inch API for gas price + """ + url = 'https://api.1inch.dev/gas-price/v1.4/1' + headers = { + "accept": "application/json", + "Authorization": f"Bearer {self.api_key}", + } + try: + # Send a GET request to the API + response = requests.get(url, headers=headers, timeout=10) + response.raise_for_status() + except requests.RequestException as e: + print(f"Failed to fetch data from 1Inch Gas Price API: {e}") + return + + # Parse the JSON response + response_data = response.json() + # json_blob = json.dumps(response_data, indent=4) # Convert the Python dictionary to a JSON formatted string + # print(json_blob) + return response_data["baseFee"] + + +if __name__ == "__main__": + dl_instance = SwaggerInch() + result=dl_instance.call() + print(result) diff --git a/modules/tool/defi/inch/inch.py b/modules/tool/defi/inch/inch.py new file mode 100644 index 00000000..7daf74b6 --- /dev/null +++ b/modules/tool/defi/inch/inch.py @@ -0,0 +1,70 @@ +import requests +import json +from typing import Optional, Dict, Any +import time + +import os +from dotenv import load_dotenv +from web3 import Web3 +import commune as c +from dotenv import load_dotenv + + +class Inch(c.Module): + def __init__(self, + api_key: Optional[str] = 'ONEINCH_API_KEY', + url: Optional[str] = "https://api.1inch.dev/price/v1.1/1" + ): + self.api_key = os.getenv(api_key, api_key) + self.url = url + + + + def get_whitelisted_token_prices(self): + + response = requests.get(self.url, headers={'Authorization': f'Bearer {self.api_key}'}) + if response.status_code == 200: + prices = response.json() + print("Prices for whitelisted tokens:") + token2price = {} + for token_address, price in prices.items(): + print(f"{token_address}: {price}") + token2price[token_address] = price + else: + print("Failed to fetch token prices.") + + + def 
diff --git a/modules/tool/defi/inch/prices.py b/modules/tool/defi/inch/prices.py
new file mode 100644
index 00000000..d942ea4b
--- /dev/null
+++ b/modules/tool/defi/inch/prices.py
@@ -0,0 +1,75 @@
+import os
+from typing import Optional, List, Dict
+
+import requests
+import commune as c
+
+
+class InchPrices(c.Module):
+    description = """
+    Gets the token prices from the 1inch API.
+
+    params: A list of token addresses or known symbols.
+    return: A dictionary of token addresses/symbols and prices.
+    """
+    def __init__(self,
+                 api_key: Optional[str] = 'INCH_API_KEY',
+                 url: Optional[str] = "https://api.1inch.dev/price/v1.1/1"
+                 ):
+        self.api_key = os.getenv(api_key, api_key)
+        self.url = url
+
+        self.token_mappings = {
+            "usdc": "0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48",
+            "wsteth": "0x7f39c581f595b53c5cb19bd0b3f8da6c935e2ca0",
+            "reth": "0xae78736Cd615f374D3085123A210448E74Fc6393",
+            "dai": "0x6b175474e89094c44da98b954eedeac495271d0f",
+            "usdt": "0xdac17f958d2ee523a2206206994597c13d831ec7",
+            "wbtc": "0x2260fac5e5542a773aa44fbcfedf7c193bc2c599",
+            "weth": "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
+        }
+        self.reverse_token_mappings = {v: k for k, v in self.token_mappings.items()}
+
+    def call(self, tokens: List[str] = None) -> Dict[str, float]:
+        tokens = tokens if tokens is not None else ['weth']
+        # Map known symbols to addresses without mutating the caller's list
+        tokens = [self.token_mappings.get(t.lower(), t) for t in tokens]
+
+        payload = {
+            "tokens": tokens
+        }
+
+        response = requests.post(self.url, json=payload, headers={'Authorization': f'Bearer {self.api_key}'})
+        if response.status_code != 200:
+            print("Failed to fetch token prices.", response.text)
+            return {}
+
+        prices = response.json()
+        print("Prices for requested tokens:")
+        result = {}
+        for token_address, price in prices.items():
+            # Report known tokens by symbol rather than by address
+            if token_address in self.reverse_token_mappings:
+                result[self.reverse_token_mappings[token_address]] = price
+            else:
+                result[token_address] = price
+
+        return result
+
+
+if __name__ == "__main__":
+    instance = InchPrices()
+    result = instance.call(tokens=['weth', 'usdc'])
+    print(result)
diff --git a/modules/tool/defi/openai_helper.py b/modules/tool/defi/openai_helper.py
new file mode 100644
index 00000000..02f80b08
--- /dev/null
+++ b/modules/tool/defi/openai_helper.py
@@ -0,0 +1,61 @@
+import os
+import openai
+import json
+from dotenv import load_dotenv
+load_dotenv()
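+
+# Editor's note: the helper below uses the legacy (pre-1.0) openai
+# `Completion` API; OPENAI_API_KEY is read from the environment or a .env file.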
+openai.api_key = os.getenv("OPENAI_API_KEY", "")
+
+def get_general_schema(code: str):
+
+    openai.api_key = os.getenv("OPENAI_API_KEY", "")
+
+    PROMPT = """
+    Using this as the general schema definition:
+    {
+        "name": "tool",
+        "description": "This is a base tool that does nothing.",
+        "tags": ["defi", "tool"],
+        "schema": {"input": {"x": "int", "y": "int"}, "output": "int", "default": {"x": 1, "y": 1}}
+    }
+
+    Write the generalized schema for this tool:
+
+    """
+    full_prompt = PROMPT + code
+    print(full_prompt)
+    response = openai.Completion.create(
+        model="gpt-3.5-turbo-instruct",
+        prompt=full_prompt,
+        max_tokens=600,
+        temperature=0
+    )
+    # Normalize single quotes so the model output parses as JSON (best effort)
+    json_blob = response.choices[0].text.strip().replace("'", '"')
+    return json_blob
+
+
+def return_file_as_str(file_path: str) -> str:
+    # Initialize an empty string to store the file content
+    file_content = ""
+
+    # Open the file in read mode ('r'); 'with' ensures it is closed properly
+    try:
+        with open(file_path, 'r') as file:
+            # Read the entire content of the file into the string
+            file_content = file.read()
+    except FileNotFoundError:
+        # Handle the case where the file is not found
+        print(f"Error: The file {file_path} does not exist.")
+    except Exception as e:
+        # Handle other possible exceptions
+        print(f"Error: An error occurred while reading the file {file_path}. Details: {e}")
+
+    # Return the file content as a string
+    return file_content
+
+
+if __name__ == "__main__":
+    code = return_file_as_str("/Users/marissaposner/Autonomy-data/autonomy/tool/defillama/aave.py")
+    fc = get_general_schema(code)
+    print(fc)
diff --git a/modules/tool/defi/read_file.py b/modules/tool/defi/read_file.py
new file mode 100644
index 00000000..b0d6becd
--- /dev/null
+++ b/modules/tool/defi/read_file.py
@@ -0,0 +1,13 @@
+import commune as c
+class ReadFile(c.Module):
+    description = """Read from a file"""
+    def call(self, file_path):
+        """Read the contents of a file and return them as a string."""
+        try:
+            with open(file_path, 'r') as file:
+                content = file.read()
+            return content
+        except FileNotFoundError:
+            print(f"Error: File not found: {file_path}")
+        except Exception as e:
+            print(f"Error: An error occurred while reading the file: {e}")
diff --git a/modules/tool/defi/swap.py b/modules/tool/defi/swap.py
new file mode 100644
index 00000000..cdf42de9
--- /dev/null
+++ b/modules/tool/defi/swap.py
@@ -0,0 +1,71 @@
+import commune as c
+
+import json
+import web3
+from web3 import Web3
+
+class SwapTool(c.Module):
+    description = """
+    Swaps tokens on Uniswap.
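+
+    Example (editor's sketch, not executed):
+        SwapTool().call(tokenin='ETH', tokenout='USDC', amount=1)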
+ params: + tokenin: str = 'ETH' + token_out: str = 'USDC' + amount: int = 100 + returns: + result: dict + """ + + def __init__(self , infura_url="https://tame-tame-mound.ethereum-goerli.discover.quiknode.pro/60eccfb9952049ec1b6afe53922c5c006fff17cd/", + address='0x2a1530C4C41db0B0b2bB646CB5Eb1A67b7158667'): + web3 = Web3(Web3.HTTPProvider(infura_url=infura_url, address=address)) + # uniswap_dai.v1 + self.abi = [{"inputs":[{"internalType":"address","name":"_factory","type":"address"},{"internalType":"address","name":"_WETH9","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[],"name":"WETH9","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"components":[{"internalType":"bytes","name":"path","type":"bytes"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint256","name":"amountIn","type":"uint256"},{"internalType":"uint256","name":"amountOutMinimum","type":"uint256"}],"internalType":"struct ISwapRouter.ExactInputParams","name":"params","type":"tuple"}],"name":"exactInput","outputs":[{"internalType":"uint256","name":"amountOut","type":"uint256"}],"stateMutability":"payable","type":"function"},{"inputs":[{"components":[{"internalType":"address","name":"tokenIn","type":"address"},{"internalType":"address","name":"tokenOut","type":"address"},{"internalType":"uint24","name":"fee","type":"uint24"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint256","name":"amountIn","type":"uint256"},{"internalType":"uint256","name":"amountOutMinimum","type":"uint256"},{"internalType":"uint160","name":"sqrtPriceLimitX96","type":"uint160"}],"internalType":"struct ISwapRouter.ExactInputSingleParams","name":"params","type":"tuple"}],"name":"exactInputSingle","outputs":[{"internalType":"uint256","name":"amountOut","type":"uint256"}],"stateMutability":"payable","type":"function"},{"inputs":[{"components":[{"internalType":"bytes","name":"path","type":"bytes"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint256","name":"amountOut","type":"uint256"},{"internalType":"uint256","name":"amountInMaximum","type":"uint256"}],"internalType":"struct ISwapRouter.ExactOutputParams","name":"params","type":"tuple"}],"name":"exactOutput","outputs":[{"internalType":"uint256","name":"amountIn","type":"uint256"}],"stateMutability":"payable","type":"function"},{"inputs":[{"components":[{"internalType":"address","name":"tokenIn","type":"address"},{"internalType":"address","name":"tokenOut","type":"address"},{"internalType":"uint24","name":"fee","type":"uint24"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint256","name":"amountOut","type":"uint256"},{"internalType":"uint256","name":"amountInMaximum","type":"uint256"},{"internalType":"uint160","name":"sqrtPriceLimitX96","type":"uint160"}],"internalType":"struct 
ISwapRouter.ExactOutputSingleParams","name":"params","type":"tuple"}],"name":"exactOutputSingle","outputs":[{"internalType":"uint256","name":"amountIn","type":"uint256"}],"stateMutability":"payable","type":"function"},{"inputs":[],"name":"factory","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes[]","name":"data","type":"bytes[]"}],"name":"multicall","outputs":[{"internalType":"bytes[]","name":"results","type":"bytes[]"}],"stateMutability":"payable","type":"function"},{"inputs":[],"name":"refundETH","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"selfPermit","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"nonce","type":"uint256"},{"internalType":"uint256","name":"expiry","type":"uint256"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"selfPermitAllowed","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"nonce","type":"uint256"},{"internalType":"uint256","name":"expiry","type":"uint256"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"selfPermitAllowedIfNecessary","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"selfPermitIfNecessary","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"amountMinimum","type":"uint256"},{"internalType":"address","name":"recipient","type":"address"}],"name":"sweepToken","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"amountMinimum","type":"uint256"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"feeBips","type":"uint256"},{"internalType":"address","name":"feeRecipient","type":"address"}],"name":"sweepTokenWithFee","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"int256","name":"amount0Delta","type":"int256"},{"internalType":"int256","name":"amount1Delta","type":"int256"},{"internalType":"bytes","name":"_data","type":"bytes"}],"name":"uniswapV3SwapCallback","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"amountMinimum","type":"uint256"},{"internalType":"address","name":"recipient","type":"address"}],"name
":"unwrapWETH9","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"uint256","name":"amountMinimum","type":"uint256"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"feeBips","type":"uint256"},{"internalType":"address","name":"feeRecipient","type":"address"}],"name":"unwrapWETH9WithFee","outputs":[],"stateMutability":"payable","type":"function"},{"stateMutability":"payable","type":"receive"}] + self.exchange_contract = web3.eth.contract(address=address, abi=self.abi) + +import json +from autonomy import Tool +from web3 import Web3 + + +class SwapTool(Tool): + description = """ + Swaps tokens on Uniswap. + params: + tokenin: str = 'ETH' + token_out: str = 'USDC' + amount: int = 100 + returns: + result: dict + """ + + def __init__(self, + infura_url = "https://tame-tame-mound.ethereum-goerli.discover.quiknode.pro/60eccfb9952049ec1b6afe53922c5c006fff17cd/", + address = '0x2a1530C4C41db0B0b2bB646CB5Eb1A67b7158667' + ): + self.w3 = Web3(Web3.HTTPProvider(infura_url)) + + # Uniswap contract ABI + uniswap_abi = json.loads('[{"name":"getEthToTokenInputPrice", ...}]') + # Replace 'YOUR_UNISWAP_CONTRACT_ADDRESS' with the actual contract address + self.uniswap_contract = self.w3.eth.contract(address=address, abi=uniswap_abi) + + + def call(self, tokenin: str = 'ETH', tokenout: str = 'USDC', amount: int = 100): + # Replace with actual token addresses and amounts + ETH_AMOUNT = self.w3.toWei(amount, 'Ether') + input_token_amount = self.uniswap_contract.functions.getEthToTokenInputPrice(ETH_AMOUNT).call() + output_token_amount = self.uniswap_contract.functions.getTokenToEthOutputPrice(tokenin).call() + return input_token_amount, output_token_amount + + return { + 'result': { + 'input_token_amount': input_token_amount, + 'output_token_amount': output_token_amount, + } + } + +# Usage +if __name__ == "__main__": + swap_tool = SwapTool() + result = swap_tool.call(tokenin='ETH', token_out='USDC', amount=1) + print(result) diff --git a/modules/tool/defi/tool.py b/modules/tool/defi/tool.py new file mode 100644 index 00000000..65b845ce --- /dev/null +++ b/modules/tool/defi/tool.py @@ -0,0 +1,72 @@ +import commune as c + +class Tool(a.Block): + name = None + description = 'This is a base tool that does nothing.' 
+    tags = ['defi', 'tool']
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        ## DEFINE TOOL STUFF
+        pass
+
+    def call(self, x: int = 1, y: int = 1) -> int:
+        return x * 2 + y
+
+    @classmethod
+    def tool_list(cls):
+        import random
+        tool2info = cls.tool2info()
+        tool_list = []
+        for tool, tool_info in tool2info.items():
+            print(tool_info['name'])
+            # Collect the full info dict and attach a (random, placeholder) price
+            tool_list.append(dict(tool_info, price=random.randint(1, 100)))
+
+        return tool_list
+
+    @classmethod
+    def info(cls):
+        return {
+            'name': cls.name if cls.name else cls.block_name(),
+            'description': cls.description,
+            'tags': cls.tags,
+            'schema': cls.get_schema('call'),
+        }
+
+    @classmethod
+    def tool2info(cls):
+        tools = c.modules()
+        tool2info = {}
+        for tool in tools:
+            tool_info = c.module(tool).info()
+            tool2info[tool_info['name']] = tool_info
+        return tool2info
+
+    @classmethod
+    def filepath(cls):
+        import inspect
+        return inspect.getfile(cls)
+
+    @classmethod
+    def code(cls):
+        return cls.get_text(cls.filepath())
+
+    @classmethod
+    def get_general_schema(cls):
+        # Assumes commune exposes `import_object` and this repo's modules/ layout
+        get_general_schema = c.import_object('modules.tool.defi.openai_helper.get_general_schema')
+        return get_general_schema(cls.fncode('call'))
diff --git a/modules/tool/defi/write_file.py b/modules/tool/defi/write_file.py
new file mode 100644
index 00000000..b1dcc27b
--- /dev/null
+++ b/modules/tool/defi/write_file.py
@@ -0,0 +1,14 @@
+import commune as c
+class WriteFile(c.Module):
+    description = """Write to a file"""
+
+    def call(self, file_path, content):
+        """Write content to a file."""
+        try:
+            with open(file_path, 'w') as file:
+                file.write(content)
+            print(f"Content successfully written to {file_path}")
+        except Exception as e:
+            print(f"Error: An error occurred while writing to the file: {e}")
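+
+# Usage sketch (editor's illustration; the path is hypothetical):
+#   WriteFile().call('/tmp/note.txt', 'hello')
+#   ReadFile().call('/tmp/note.txt')   # -> 'hello'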
diff --git a/modules/tool/get_best_apy.py b/modules/tool/get_best_apy.py
new file mode 100644
index 00000000..aa3f783e
--- /dev/null
+++ b/modules/tool/get_best_apy.py
@@ -0,0 +1,28 @@
+import commune as c
+
+class GetBestApy(c.Module):
+    description = """Get the market with the best APY between two result dictionaries"""
+
+    def call(self, data1: dict, data2: dict) -> str:
+        """Compare the 'apy' field of two result dictionaries.
+
+        Args:
+            data1 (dict): first result, e.g. {'apy': 3.2, 'market': 'rocket-pool', ...}
+            data2 (dict): second result in the same format
+
+        Returns:
+            The market name (str) of the entry with the higher APY.
+        """
+        if data1["apy"] > data2["apy"]:
+            return data1['market']
+        else:
+            return data2['market']
+
+
+# if __name__ == "__main__":
+#     data1 = {'apy': 3.16061, 'market': 'rocket-pool', 'asset': 'RETH', 'chain': 'Ethereum', 'timestamp': 1695525398.567652}
+#     data2 = {'apy': 4, 'market': 'lido', 'asset': 'RETH', 'chain': 'Ethereum', 'timestamp': 1695525398.567652}
+#     apy = GetBestApy()
+#     print(apy.call(data1, data2))
diff --git a/modules/tool/openai_helper.py b/modules/tool/openai_helper.py
new file mode 100644
index 00000000..02f80b08
--- /dev/null
+++ b/modules/tool/openai_helper.py
@@ -0,0 +1,61 @@
+import os
+import openai
+import json
+from dotenv import load_dotenv
+load_dotenv()
+openai.api_key = os.getenv("OPENAI_API_KEY", "")
+
+def get_general_schema(code: str):
+
+    openai.api_key = os.getenv("OPENAI_API_KEY", "")
+
+    PROMPT = """
+    Using this as the general schema definition:
+    {
+        "name": "tool",
+        "description": "This is a base tool that does nothing.",
+        "tags": ["defi", "tool"],
+        "schema": {"input": {"x": "int", "y": "int"}, "output": "int", "default": {"x": 1, "y": 1}}
+    }
+
+    Write the generalized schema for this tool:
+
+    """
+    full_prompt = PROMPT + code
+    print(full_prompt)
+    response = openai.Completion.create(
+        model="gpt-3.5-turbo-instruct",
+        prompt=full_prompt,
+        max_tokens=600,
+        temperature=0
+    )
+    # Normalize single quotes so the model output parses as JSON (best effort)
+    json_blob = response.choices[0].text.strip().replace("'", '"')
+    return json_blob
+
+
+def return_file_as_str(file_path: str) -> str:
+    # Initialize an empty string to store the file content
+    file_content = ""
+
+    # Open the file in read mode ('r'); 'with' ensures it is closed properly
+    try:
+        with open(file_path, 'r') as file:
+            # Read the entire content of the file into the string
+            file_content = file.read()
+    except FileNotFoundError:
+        # Handle the case where the file is not found
+        print(f"Error: The file {file_path} does not exist.")
+    except Exception as e:
+        # Handle other possible exceptions
+        print(f"Error: An error occurred while reading the file {file_path}. Details: {e}")
+
+    # Return the file content as a string
+    return file_content
+
+
+if __name__ == "__main__":
+    code = return_file_as_str("/Users/marissaposner/Autonomy-data/autonomy/tool/defillama/aave.py")
+    fc = get_general_schema(code)
+    print(fc)
diff --git a/modules/tool/read_file.py b/modules/tool/read_file.py
new file mode 100644
index 00000000..b0d6becd
--- /dev/null
+++ b/modules/tool/read_file.py
@@ -0,0 +1,13 @@
+import commune as c
+class ReadFile(c.Module):
+    description = """Read from a file"""
+    def call(self, file_path):
+        """Read the contents of a file and return them as a string."""
+        try:
+            with open(file_path, 'r') as file:
+                content = file.read()
+            return content
+        except FileNotFoundError:
+            print(f"Error: File not found: {file_path}")
+        except Exception as e:
+            print(f"Error: An error occurred while reading the file: {e}")
diff --git a/modules/tool/registry.py b/modules/tool/registry.py
new file mode 100644
index 00000000..d91c4374
--- /dev/null
+++ b/modules/tool/registry.py
@@ -0,0 +1,10 @@
+import commune as c
+
+class ToolRegistry(c.Module):
+    def __init__(
+        self,
+        tools: list = None,
+    ):
+        tools = tools or []
+        self.tool_map = {tool.name: tool for tool in tools}
+    def call(self, tool: str, *args, **kwargs):
+        # Dispatch to the named tool's `call` entrypoint and return its result
+        return self.tool_map[tool].call(*args, **kwargs)
diff --git a/modules/tool/search/tool_search.py b/modules/tool/search/tool_search.py
new file mode 100644
index 00000000..42f1c336
--- /dev/null
+++ b/modules/tool/search/tool_search.py
@@ -0,0 +1,11 @@
+import commune as c
+
+class Demo(c.Module):
+    def __init__(self, a=1, b=2):
+        self.set_config(locals())
+
+    def create_dope_stuff(self, x: int = 1, y: int = 2) -> int:
+        c.print(self.config, 'This is the config, it is a Munch object')
+        return x + y
\ No newline at end of file
diff --git a/modules/tool/swap.py b/modules/tool/swap.py
new file mode 100644
index 00000000..cdf42de9
--- /dev/null
+++ b/modules/tool/swap.py
@@ -0,0 +1,71 @@
+import commune as c
+
+import json
+import web3
+from web3 import Web3
+
+class SwapTool(c.Module):
+    description = """
+    Swaps tokens on Uniswap.
+ params: + tokenin: str = 'ETH' + token_out: str = 'USDC' + amount: int = 100 + returns: + result: dict + """ + + def __init__(self , infura_url="https://tame-tame-mound.ethereum-goerli.discover.quiknode.pro/60eccfb9952049ec1b6afe53922c5c006fff17cd/", + address='0x2a1530C4C41db0B0b2bB646CB5Eb1A67b7158667'): + web3 = Web3(Web3.HTTPProvider(infura_url=infura_url, address=address)) + # uniswap_dai.v1 + self.abi = [{"inputs":[{"internalType":"address","name":"_factory","type":"address"},{"internalType":"address","name":"_WETH9","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[],"name":"WETH9","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"components":[{"internalType":"bytes","name":"path","type":"bytes"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint256","name":"amountIn","type":"uint256"},{"internalType":"uint256","name":"amountOutMinimum","type":"uint256"}],"internalType":"struct ISwapRouter.ExactInputParams","name":"params","type":"tuple"}],"name":"exactInput","outputs":[{"internalType":"uint256","name":"amountOut","type":"uint256"}],"stateMutability":"payable","type":"function"},{"inputs":[{"components":[{"internalType":"address","name":"tokenIn","type":"address"},{"internalType":"address","name":"tokenOut","type":"address"},{"internalType":"uint24","name":"fee","type":"uint24"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint256","name":"amountIn","type":"uint256"},{"internalType":"uint256","name":"amountOutMinimum","type":"uint256"},{"internalType":"uint160","name":"sqrtPriceLimitX96","type":"uint160"}],"internalType":"struct ISwapRouter.ExactInputSingleParams","name":"params","type":"tuple"}],"name":"exactInputSingle","outputs":[{"internalType":"uint256","name":"amountOut","type":"uint256"}],"stateMutability":"payable","type":"function"},{"inputs":[{"components":[{"internalType":"bytes","name":"path","type":"bytes"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint256","name":"amountOut","type":"uint256"},{"internalType":"uint256","name":"amountInMaximum","type":"uint256"}],"internalType":"struct ISwapRouter.ExactOutputParams","name":"params","type":"tuple"}],"name":"exactOutput","outputs":[{"internalType":"uint256","name":"amountIn","type":"uint256"}],"stateMutability":"payable","type":"function"},{"inputs":[{"components":[{"internalType":"address","name":"tokenIn","type":"address"},{"internalType":"address","name":"tokenOut","type":"address"},{"internalType":"uint24","name":"fee","type":"uint24"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint256","name":"amountOut","type":"uint256"},{"internalType":"uint256","name":"amountInMaximum","type":"uint256"},{"internalType":"uint160","name":"sqrtPriceLimitX96","type":"uint160"}],"internalType":"struct 
ISwapRouter.ExactOutputSingleParams","name":"params","type":"tuple"}],"name":"exactOutputSingle","outputs":[{"internalType":"uint256","name":"amountIn","type":"uint256"}],"stateMutability":"payable","type":"function"},{"inputs":[],"name":"factory","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes[]","name":"data","type":"bytes[]"}],"name":"multicall","outputs":[{"internalType":"bytes[]","name":"results","type":"bytes[]"}],"stateMutability":"payable","type":"function"},{"inputs":[],"name":"refundETH","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"selfPermit","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"nonce","type":"uint256"},{"internalType":"uint256","name":"expiry","type":"uint256"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"selfPermitAllowed","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"nonce","type":"uint256"},{"internalType":"uint256","name":"expiry","type":"uint256"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"selfPermitAllowedIfNecessary","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"selfPermitIfNecessary","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"amountMinimum","type":"uint256"},{"internalType":"address","name":"recipient","type":"address"}],"name":"sweepToken","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"amountMinimum","type":"uint256"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"feeBips","type":"uint256"},{"internalType":"address","name":"feeRecipient","type":"address"}],"name":"sweepTokenWithFee","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"int256","name":"amount0Delta","type":"int256"},{"internalType":"int256","name":"amount1Delta","type":"int256"},{"internalType":"bytes","name":"_data","type":"bytes"}],"name":"uniswapV3SwapCallback","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"amountMinimum","type":"uint256"},{"internalType":"address","name":"recipient","type":"address"}],"name
":"unwrapWETH9","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"uint256","name":"amountMinimum","type":"uint256"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"feeBips","type":"uint256"},{"internalType":"address","name":"feeRecipient","type":"address"}],"name":"unwrapWETH9WithFee","outputs":[],"stateMutability":"payable","type":"function"},{"stateMutability":"payable","type":"receive"}] + self.exchange_contract = web3.eth.contract(address=address, abi=self.abi) + +import json +from autonomy import Tool +from web3 import Web3 + + +class SwapTool(Tool): + description = """ + Swaps tokens on Uniswap. + params: + tokenin: str = 'ETH' + token_out: str = 'USDC' + amount: int = 100 + returns: + result: dict + """ + + def __init__(self, + infura_url = "https://tame-tame-mound.ethereum-goerli.discover.quiknode.pro/60eccfb9952049ec1b6afe53922c5c006fff17cd/", + address = '0x2a1530C4C41db0B0b2bB646CB5Eb1A67b7158667' + ): + self.w3 = Web3(Web3.HTTPProvider(infura_url)) + + # Uniswap contract ABI + uniswap_abi = json.loads('[{"name":"getEthToTokenInputPrice", ...}]') + # Replace 'YOUR_UNISWAP_CONTRACT_ADDRESS' with the actual contract address + self.uniswap_contract = self.w3.eth.contract(address=address, abi=uniswap_abi) + + + def call(self, tokenin: str = 'ETH', tokenout: str = 'USDC', amount: int = 100): + # Replace with actual token addresses and amounts + ETH_AMOUNT = self.w3.toWei(amount, 'Ether') + input_token_amount = self.uniswap_contract.functions.getEthToTokenInputPrice(ETH_AMOUNT).call() + output_token_amount = self.uniswap_contract.functions.getTokenToEthOutputPrice(tokenin).call() + return input_token_amount, output_token_amount + + return { + 'result': { + 'input_token_amount': input_token_amount, + 'output_token_amount': output_token_amount, + } + } + +# Usage +if __name__ == "__main__": + swap_tool = SwapTool() + result = swap_tool.call(tokenin='ETH', token_out='USDC', amount=1) + print(result) diff --git a/modules/tool/tool.py b/modules/tool/tool.py new file mode 100644 index 00000000..b9aee597 --- /dev/null +++ b/modules/tool/tool.py @@ -0,0 +1,28 @@ +import commune as c + +class Tool(c.Module): + + def tools(self, fn_name='call'): + tools = [] + for module in c.tqdm(c.modules()): + try: + if hasattr(c.module(module),fn_name): + tools.append(module) + except Exception as e: + pass + return tools + + def has_tool(self, module, fn_name='call'): + try: + return f'def {fn_name}(' in c.code(module) + except: + return False + + + + + + + + + diff --git a/modules/tool/web/web.py b/modules/tool/web/web.py new file mode 100644 index 00000000..5e431ed0 --- /dev/null +++ b/modules/tool/web/web.py @@ -0,0 +1,18 @@ +import commune as c +import requests + +class Web(c.Module): + @classmethod + def request(self, url:str, method:str='GET', **kwargs): + response = requests.request(method, url, **kwargs) + if response.status_code == 200: + return response.text + else: + return {'status_code': response.status_code, 'text': response.text} + @classmethod + def rget(cls, url:str, **kwargs): + return cls.request(url, 'GET', **kwargs) + @classmethod + def rpost(self, url:str, **kwargs): + return cls.request(url, 'POST', **kwargs) + diff --git a/modules/tool/web/web.yaml b/modules/tool/web/web.yaml new file mode 100644 index 00000000..27fa315a --- /dev/null +++ b/modules/tool/web/web.yaml @@ -0,0 +1 @@ +module: bro \ No newline at end of file diff --git a/modules/tool/write_file.py b/modules/tool/write_file.py new file 
diff --git a/modules/tool/web/web.yaml b/modules/tool/web/web.yaml
new file mode 100644
index 00000000..27fa315a
--- /dev/null
+++ b/modules/tool/web/web.yaml
@@ -0,0 +1 @@
+module: bro
\ No newline at end of file
diff --git a/modules/tool/write_file.py b/modules/tool/write_file.py
new file mode 100644
index 00000000..b1dcc27b
--- /dev/null
+++ b/modules/tool/write_file.py
@@ -0,0 +1,14 @@
+import commune as c
+class WriteFile(c.Module):
+    description = """Write to a file"""
+
+    def call(self, file_path, content):
+        """Write content to a file."""
+        try:
+            with open(file_path, 'w') as file:
+                file.write(content)
+            print(f"Content successfully written to {file_path}")
+        except Exception as e:
+            print(f"Error: An error occurred while writing to the file: {e}")
+
diff --git a/modules/trainer/__init__.py b/modules/trainer/__init__.py
new file mode 100644
index 00000000..cb040cd9
--- /dev/null
+++ b/modules/trainer/__init__.py
@@ -0,0 +1 @@
+# from .trainer import CortexTrainer
diff --git a/modules/trainer/trainer.py b/modules/trainer/trainer.py
new file mode 100644
index 00000000..08d88aab
--- /dev/null
+++ b/modules/trainer/trainer.py
@@ -0,0 +1,184 @@
+
+from ray import tune
+from typing import *
+from copy import deepcopy
+import commune as c
+
+
+class Trainer(c.Module):
+    def __init__(self,
+                 model: str = 'model:gptj:train',
+                 metrics_server: str = 'metrics_server',
+                 tuner: Dict = dict(
+                     metric = 'loss',
+                     mode = 'min',
+                     max_concurrent_trials = 1,
+                     resources_per_trial = {"cpu": 2, "gpu": 0},
+                     num_samples = 1000
+                 ),
+                 **kwargs):
+
+        config = self.set_config(locals())
+        self.set_model(config.model)
+        self.set_metrics_server(self.config.metrics_server)
+        self.set_tuner(**self.config.tuner)
+
+    def set_config(self, config) -> None:
+        config.pop('self', None)
+        kwargs = config.pop('kwargs', {})
+        config.update(kwargs)
+        self.config = c.dict2munch(config)
+        return self.config
+
+    def set_metrics_server(self, metrics_server: str):
+        # Record the metrics server name used by `objective` below
+        self.metrics_server = metrics_server
+
+    def set_model(self,
+                  model: str,
+                  refresh: bool = True,
+                  timeout: int = 1000,
+                  check_step: int = 2):
+        # Wait until the model server is visible on the network
+        wait_time = 0
+        while not self.server_exists(model) and wait_time <= timeout:
+            self.sleep(check_step)
+            wait_time += check_step
+
+        if wait_time >= timeout:
+            raise Exception('Your peer is not visible')
+
+        self.model = model
+
+    def hyper2params(self, params: Dict) -> Dict:
+        return self.flat2deep(params)
+
+    def get_hyperopt_tag(self, config: dict):
+        tag = f'{self.model}::{self.tag}__'
+        for k, v in config.items():
+            tag += f'{k}_{v}__'
+        return tag
+
+    def objective(self,
+                  hyperparams: dict = None,
+                  train_kwargs = {'num_batches': 100},
+                  timeout: int = 1000) -> Dict:
+
+        if hyperparams is None:
+            hyperparams = {}
+
+        params = self.hyper2params(deepcopy(hyperparams))
+        params['stats'] = {}
+        train_kwargs.update(dict(
+            tag = self.tag,
+            params = params,
+            save = False,
+            load = False,
+        ))
+
+        model = c.connect(self.model)
+
+        output = model.train_model(**train_kwargs, timeout=timeout)
+
+        metric_server = c.connect(self.metrics_server)
+        best_metric = metric_server.best_metric()
+
+        is_best = False
+        if self.config.tuner.mode == 'min':
+            is_best = bool(output['loss'] < best_metric)
+        elif self.config.tuner.mode == 'max':
+            is_best = bool(output['loss'] > best_metric)
+
+        hyperopt_tag = self.get_hyperopt_tag(hyperparams)
+
+        if is_best:
+            model.save(tag=self.tag)
+
+        return output
+
+    @classmethod
+    def default_search_space(cls):
+        search_space = {
+            'optimizer.lr': tune.loguniform(1e-6, 1e-4),
+            "finetune.num_layers": tune.choice([1, 2, 3, 4, 5, 6, 7]),
+            # 'load': tune.choice([True, False]),
+        }
+        return search_space
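+
+    # Editor's note (assumption): `hyper2params` flattens via `flat2deep`, so a
+    # key like 'optimizer.lr' expands to {'optimizer': {'lr': ...}}.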
+    def set_tuner(self,
+                  resources_per_trial: dict = {"cpu": 2, "gpu": 0},
+                  max_concurrent_trials: int = 4,
+                  num_samples: int = 10,
+                  search_space: Dict = None,
+                  metric: str = 'loss',
+                  mode: str = 'min'):
+
+        # 2. Define a search space.
+        self.metric = metric
+        self.mode = mode
+        self.search_space = search_space if search_space else self.default_search_space()
+
+        self.resources_per_trial = resources_per_trial
+        self.objective_with_resources = tune.with_resources(self.objective, self.resources_per_trial)
+
+        self.tune_config = tune.TuneConfig(num_samples=num_samples,
+                                           max_concurrent_trials=max_concurrent_trials)
+
+        # 3. Start a Tune run and print the best result.
+        self.tuner = tune.Tuner(self.objective_with_resources,
+                                param_space=self.search_space,
+                                tune_config=self.tune_config)
+
+        return {'success': True, 'message': 'Tuner set'}
+
+    def fit(self, **kwargs):
+        results = self.tuner.fit()
+        print(results.get_best_result(metric=self.metric, mode=self.mode).config)
+
+    @classmethod
+    def test(cls):
+        trainer = cls()
+        print(trainer.fit())
+
+
+if __name__ == "__main__":
+    Trainer.test()
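+
+# Usage sketch (editor's illustration; assumes a reachable `model:gptj:train`
+# server and a `metrics_server` exposing `best_metric()`):
+#   Trainer(model='model:gptj:train').fit()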
diff --git a/modules/web/web.py b/modules/web/web.py
new file mode 100644
index 00000000..f9b0d28c
--- /dev/null
+++ b/modules/web/web.py
@@ -0,0 +1,183 @@
+import commune as c
+from bs4 import BeautifulSoup
+import requests
+import asyncio
+import aiohttp
+import json
+
+
+class Web(c.Module):
+
+    @classmethod
+    async def async_request(cls, url, method, headers, **kwargs):
+        async with aiohttp.ClientSession() as session:
+            async with session.request(method, url, headers=headers, **kwargs) as response:
+                return {'status_code': response.status, 'text': await response.text()}
+
+    @classmethod
+    def request(cls, url="https://google.com", method='GET', headers={'User-Agent': 'Mozilla/5.0'}, mode="request", **kwargs):
+        if mode == "request":
+            response = requests.request(method, url, headers=headers, **kwargs)
+            if response.status_code == 200:
+                return response.text
+            else:
+                return {'status_code': response.status_code, 'text': response.text}
+        elif mode == "asyncio":
+            loop = asyncio.get_event_loop()
+            response = loop.run_until_complete(cls.async_request(url, method, headers, **kwargs))
+            return response
+        else:
+            raise ValueError(f"Invalid mode: {mode}")
+
+    def html(self, url: str = 'https://www.google.com/', **kwargs):
+        return self.request(url, **kwargs)
+
+    get_html = html
+
+    def get_text(self, url: str, min_chars=100, **kwargs):
+        text_list = [p.split('">')[-1].split('<')[0] for p in self.get_components(url, 'p')['p']]
+        return [text for text in text_list if len(text) > min_chars]
+
+    def get_components(self, url, *tags):
+        # Make a GET request to the website
+        response = requests.get(url)
+
+        # Check if the request was successful (status code 200)
+        if response.status_code == 200:
+            # Parse the HTML content of the page
+            soup = BeautifulSoup(response.text, 'html.parser')
+
+            result = {}
+            for tag in tags:
+                # Collect all matching components as their HTML representation
+                components = [str(component) for component in soup.find_all(tag)]
+                result[tag] = components
+
+            return result
+        else:
+            print(f"Failed to retrieve the page. Status code: {response.status_code}")
+
+    @classmethod
+    def rget(cls, url: str, **kwargs):
+        return cls.request(url, 'GET', **kwargs)
+
+    @classmethod
+    def rpost(cls, url: str, **kwargs):
+        return cls.request(url, 'POST', **kwargs)
+
+    def google_search(self, keyword='israel-hamas', n=10, max_words=1000):
+        from googlesearch import search
+        urls = search(keyword, num_results=n)
+        futures = []
+        for url in urls:
+            futures.append(c.submit(self.url2text, args=[url], return_future=True))
+        results = c.wait(futures, timeout=10)
+        return results
+
+    def yahoo_search(self, keyword):
+        from yahoo import search
+
+        urls = []
+        for url in search(keyword):
+            urls.append(url)
+
+        return urls
+
+    def bing_search(self, keyword):
+        l = []
+        o = {}
+        headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36"}
+        for i in range(0, 100, 10):
+            target_url = "https://www.bing.com/search?q=" + keyword + "&rdr=1&first={}".format(i + 1)
+            resp = requests.get(target_url, headers=headers)
+            soup = BeautifulSoup(resp.text, 'html.parser')
+            completeData = soup.find_all("li", {"class": "b_algo"})
+            for i in range(0, len(completeData)):
+                o["Title"] = completeData[i].find("a").text
+                o["link"] = completeData[i].find("a").get("href")
+                o["Description"] = completeData[i].find("div", {"class": "b_caption"}).text
+                o["Position"] = i + 1
+                l.append(o)
+                o = {}
+        return l
+
+    def webpage(self, url='https://www.fool.ca/recent-headlines/', **kwargs):
+        from urllib.request import Request, urlopen
+        req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
+        webpage = urlopen(req).read()
+        return webpage
+
+    def soup(self, webpage=None, url=None, **kwargs):
+        if webpage is None:
+            webpage = self.webpage(url)
+        soup = BeautifulSoup(webpage, 'html.parser', **kwargs)
+        return soup
+
+    def find(self, url=None, tag='p', **kwargs):
+        return self.soup(url=url).find(tag, **kwargs)
+
+    @classmethod
+    def install(cls):
+        c.cmd("pip3 install beautifulsoup4")
+
+    @classmethod
+    def dashboard(cls, url='https://google.com'):
+        import streamlit as st
+        st.title("Web")
+
+        self = cls()
+        # Show the HTML of the webpage
+        webpage_html = self.html(url)
+        st.code(webpage_html)
+
+    @classmethod
+    def url2text(cls, url='https://www.google.com'):
+        # Fetch HTML content
+        response = requests.get(url)
+        response.raise_for_status()
+
+        # Parse with BeautifulSoup
+        soup = BeautifulSoup(response.text, 'html.parser')
+
+        # Extract images
+        images = [img['src'] for img in soup.find_all('img') if img.get('src')]
+
+        # Extract text
+        texts = soup.get_text(separator='\n').splitlines()
+        texts = [line.strip() for line in texts if line.strip()]  # Clean up whitespace
+
+        # Format into JSON
+        data = {
+            "images": images,
+            "text": texts
+        }
+
+        return data
+
+
+Web.run(__name__)
\ No newline at end of file
diff --git a/modules/web/web.yaml b/modules/web/web.yaml
new file mode 100644
index 00000000..27fa315a
--- /dev/null
+++ b/modules/web/web.yaml
@@ -0,0 +1 @@
+module: bro
\ No newline at end of file
diff --git a/tests/test_module.py b/tests/test_module.py
deleted file mode 100644
index d3aef833..00000000
--- a/tests/test_module.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import commune as c
-
-# def test_key():
-#     c.module('key').test()
-# def test_ticket():
-#     c.module('ticket').test()
-# def test_namespace():
-#     c.module('namespace').test()
-# def test_server():
-#     c.module('server').test()
-# def test_validator():
-#     c.module('vali').test()
-# def test_subspace():
-#     assert c.module('subspace').test()