feat: add rate limit and faster default speed #35

Draft
wants to merge 10 commits into main
11 changes: 8 additions & 3 deletions ape_alchemy/__init__.py
@@ -1,6 +1,6 @@
from ape import plugins

from .provider import Alchemy
from .provider import Alchemy, AlchemyConfig

NETWORKS = {
"ethereum": [
@@ -32,6 +32,11 @@

@plugins.register(plugins.ProviderPlugin)
def providers():
    for ecosystem_name in NETWORKS:
        for network_name in NETWORKS[ecosystem_name]:
    for ecosystem_name, networks in NETWORKS.items():
        for network_name in networks:
            yield ecosystem_name, network_name, Alchemy


@plugins.register(plugins.Config)
def config_class():
    yield AlchemyConfig
119 changes: 97 additions & 22 deletions ape_alchemy/provider.py
@@ -1,7 +1,9 @@
import os
from typing import Any, Dict, List, Optional
import random
import time
from typing import Any, Dict, List, Optional, cast

from ape.api import ReceiptAPI, TransactionAPI, UpstreamProvider
from ape.api import PluginConfig, ReceiptAPI, TransactionAPI, UpstreamProvider
from ape.exceptions import (
    APINotImplementedError,
    ContractLogicError,
@@ -34,15 +36,54 @@
PRIVATE_TX_BLOCK_WAIT = 25


class AlchemyConfig(PluginConfig):
"""Configuration for Alchemy.

Attributes:
concurrency (int): The maximum number of concurrent requests to make.
Defaults to 1.
block_page_size (int): The maximum number of blocks to fetch in a single request.
Defaults to 250,000.
min_retry_delay (int): The amount of milliseconds to wait before retrying the request.
Defaults to 1000 (one second).
retry_backoff_factor (int): The multiplier applied to the retry delay after each failed
attempt. Defaults to 2.
max_retry_delay (int): The maximum length of the retry delay.
Defaults to 30,000 (30 seconds).
max_retries (int): The maximum number of retries.
Defaults to 3.
retry_jitter (int): A random number of milliseconds up to this limit is added to each retry
delay. Defaults to 250 milliseconds.
"""

concurrency: int = 1 # can't do exponential backoff with multiple threads
Member commented: Does this make sense to expose then?

    block_page_size: int = 25_000_000  # this acts as an upper limit, safe to set very high
    min_retry_delay: int = 1_000  # 1 second
    retry_backoff_factor: int = 2  # exponential backoff
    max_retry_delay: int = 30_000  # 30 seconds
    max_retries: int = 3
    retry_jitter: int = 250  # 250 milliseconds
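
For reference, a minimal standalone sketch (not part of this diff) of the retry schedule these defaults imply: each wait is min_retry_delay * retry_backoff_factor**attempt, capped at max_retry_delay, plus up to retry_jitter milliseconds of random jitter.

import random

# Illustrative only: reproduce the delay schedule implied by the defaults above.
min_retry_delay = 1_000  # ms
retry_backoff_factor = 2
max_retry_delay = 30_000  # ms
max_retries = 3
retry_jitter = 250  # ms

for attempt in range(max_retries):
    base = min(max_retry_delay, min_retry_delay * retry_backoff_factor**attempt)
    delay = base + random.randint(0, retry_jitter)
    print(f"attempt {attempt + 1}: ~{base} ms base delay, {delay} ms with jitter")
# prints roughly 1000, 2000, and 4000 ms base delays for the three attempts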


class Alchemy(Web3Provider, UpstreamProvider):
"""
A web3 provider using an HTTP connection to Alchemy.

Docs: https://docs.alchemy.com/alchemy/

Args:
network_uris: Dict[tuple, str]
A mapping of (ecosystem_name, network_name) -> URI
"""

network_uris: Dict[tuple, str] = {}

def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
alchemy_config = cast(AlchemyConfig, self.config_manager.get_config("alchemy"))
self.concurrency = alchemy_config.concurrency
self.block_page_size = alchemy_config.block_page_size

    @property
    def uri(self):
        """
@@ -179,26 +220,60 @@ def get_virtual_machine_error(self, exception: Exception, **kwargs) -> VirtualMachineError:

        return VirtualMachineError(message=message, txn=txn)

    def _make_request(self, endpoint: str, parameters: Optional[List] = None) -> Any:
        try:
            return super()._make_request(endpoint, parameters)
        except HTTPError as err:
            response_data = err.response.json() if err.response else {}
            if "error" not in response_data:
                raise AlchemyProviderError(str(err)) from err

            error_data = response_data["error"]
            message = (
                error_data.get("message", str(error_data))
                if isinstance(error_data, dict)
                else error_data
            )
            cls = (
                AlchemyFeatureNotAvailable
                if "is not available" in message
                else AlchemyProviderError
            )
            raise cls(message) from err
    def _make_request(
Member commented: this is awesome, but I don't think it'll apply to all the requests. Maybe that is OK for a first pass.

But we need to use something like web3.py middleware so that all requests have rate-limiting handling logic.
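
A rough sketch of that idea (illustrative only; it assumes the function-style middleware API of web3.py v5/v6, and the error string, retry count, and delays here are made up rather than taken from this PR):

import random
import time

from requests import HTTPError


def rate_limit_retry_middleware(make_request, w3):
    # Wraps every JSON-RPC call (e.g. web3.eth.get_balance), not just provider._make_request.
    def middleware(method, params):
        for attempt in range(3):
            try:
                return make_request(method, params)
            except HTTPError as err:
                if "Too Many Requests" not in str(err):
                    raise
                # exponential backoff with a little jitter, in milliseconds
                delay_ms = 1_000 * 2**attempt + random.randint(0, 250)
                time.sleep(delay_ms / 1000)
        return make_request(method, params)  # final attempt; let any error surface

    return middleware


# Usage sketch, assuming access to the provider's Web3 instance:
# w3.middleware_onion.add(rate_limit_retry_middleware)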

Member commented: @wakamex This is still the main reason we haven't hammered this through the gate... .make_request is not called for every request, and it is not guaranteed to be called at all unless we refactor Web3Provider from core or come up with a plan.

Right now, make_request serves as blocker-prevention... Meaning, if an API is not available at the Ape level, you can make a raw request to the provider (whether that be HTTP, WS, or something under the hood is up to the plugin's implementation) and still script out whatever you need. It is not a function called for every request. So we need a way to ensure that every request made to Alchemy is rate-limited, regardless of whether it goes through .make_request (as a work-around or an optimized call) or some other method such as web3.eth.get_balance. Note that this make_request is not the same as web3.provider.make_request.

Member commented: The base class now has this same internal method with different kwargs; I wonder if that will cause problems?

Author commented: If they're strictly after the existing (unchanged) signature, then I think it's probably fine, since they're strictly optional. Baking them into the parameters parameter would clean this up, but I have no idea where that's created or what it's used for.
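
To illustrate the signature concern in the two comments above (illustrative only, with hypothetical class names): trailing keyword arguments that all have defaults keep the subclass compatible with callers that use the base signature, even though the two signatures drift apart.

from typing import Any, List, Optional


class BaseProviderSketch:  # hypothetical stand-in for the core base class
    def _make_request(self, endpoint: str, parameters: Optional[List] = None) -> Any:
        return {"endpoint": endpoint, "parameters": parameters}


class AlchemySketch(BaseProviderSketch):  # hypothetical subclass adding optional retry kwargs
    def _make_request(
        self,
        endpoint: str,
        parameters: Optional[List] = None,
        max_retries: Optional[int] = None,  # strictly optional, appended after the base args
    ) -> Any:
        return super()._make_request(endpoint, parameters)


# A base-style call still works against the subclass, since the extra kwargs have defaults.
AlchemySketch()._make_request("eth_blockNumber")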

        self,
        endpoint: str,
        parameters: Optional[List] = None,
        min_retry_delay: Optional[int] = None,
        retry_backoff_factor: Optional[int] = None,
        max_retry_delay: Optional[int] = None,
        max_retries: Optional[int] = None,
        retry_jitter: Optional[int] = None,
    ) -> Any:
        alchemy_config = cast(AlchemyConfig, self.config_manager.get_config("alchemy"))
        min_retry_delay = (
            min_retry_delay if min_retry_delay is not None else alchemy_config.min_retry_delay
        )
        retry_backoff_factor = (
            retry_backoff_factor
            if retry_backoff_factor is not None
            else alchemy_config.retry_backoff_factor
        )
        max_retry_delay = (
            max_retry_delay if max_retry_delay is not None else alchemy_config.max_retry_delay
        )
        max_retries = max_retries if max_retries is not None else alchemy_config.max_retries
        retry_jitter = retry_jitter if retry_jitter is not None else alchemy_config.retry_jitter
        for attempt in range(max_retries):
            try:
                return super()._make_request(endpoint, parameters)
            except HTTPError as err:
                message = str(err)
                if any(
                    error in message
                    for error in ["exceeded its compute units", "Too Many Requests for url"]
Member commented: I think this is the main part that changes per-provider (the specific error string to look for). I am thinking we may do what you suggested and move some of this into core, so that when using default node connections (ape-geth) we can still receive this benefit.

Maybe we can add a method to the base class that can be overridden, like:

def is_rate_limit_error(self, err):

                ):
                    retry_interval = min(
                        max_retry_delay, min_retry_delay * retry_backoff_factor**attempt
                    )
                    logger.info(
                        "Alchemy compute units exceeded, retrying, attempt %s/%s in %s ms",
                        attempt + 1,
                        max_retries,
                        retry_interval,
                    )
                    delay = retry_interval + random.randint(0, retry_jitter)
                    time.sleep(delay / 1000)
                    continue
                elif "error" not in message:
                    raise AlchemyProviderError(str(err)) from err
                cls = (
                    AlchemyFeatureNotAvailable
                    if "is not available" in message
                    else AlchemyProviderError
                )
                raise cls(message) from err
        raise AlchemyProviderError(f"Rate limit exceeded after {max_retries} attempts.")
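
On the is_rate_limit_error suggestion above, a minimal sketch of how such an overridable hook might look (an illustration of the idea only, not code from this PR or from ape core):

class BaseProviderSketch:  # hypothetical stand-in for the core provider class
    def is_rate_limit_error(self, err: Exception) -> bool:
        # Generic default; provider plugins override this with their own error markers.
        return "Too Many Requests" in str(err)


class AlchemyProviderSketch(BaseProviderSketch):  # hypothetical Alchemy override
    def is_rate_limit_error(self, err: Exception) -> bool:
        message = str(err)
        return any(
            marker in message
            for marker in ("exceeded its compute units", "Too Many Requests for url")
        )

The retry loop above could then call self.is_rate_limit_error(err) instead of matching Alchemy-specific strings inline, which is what would let the backoff logic move into core.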

    def send_private_transaction(self, txn: TransactionAPI, **kwargs) -> ReceiptAPI:
        """