-
-
Notifications
You must be signed in to change notification settings - Fork 13
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
feat: add rate limit and faster default speed #35
base: main
Are you sure you want to change the base?
Changes from 9 commits
a51f2de
eb5de33
39cd057
3268022
0db62f1
25323a6
766a6d3
f332896
ad25ef2
4ef9d1b
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,7 +1,9 @@ | ||
import os | ||
from typing import Any, Dict, List, Optional | ||
import random | ||
import time | ||
from typing import Any, Dict, List, Optional, cast | ||
|
||
from ape.api import ReceiptAPI, TransactionAPI, UpstreamProvider | ||
from ape.api import PluginConfig, ReceiptAPI, TransactionAPI, UpstreamProvider | ||
from ape.exceptions import ( | ||
APINotImplementedError, | ||
ContractLogicError, | ||
|
@@ -34,15 +36,54 @@ | |
PRIVATE_TX_BLOCK_WAIT = 25 | ||
|
||
|
||
class AlchemyConfig(PluginConfig):
    """Configuration for Alchemy.

    Attributes:
        concurrency (int): The maximum number of concurrent requests to make.
            Defaults to 1.
        block_page_size (int): The maximum number of blocks to fetch in a single request.
            Defaults to 25,000,000.
        min_retry_delay (int): The amount of milliseconds to wait before retrying the request.
            Defaults to 1000 (one second).
        retry_backoff_factor (int): The multiplier applied to the retry delay after each failed
            attempt. Defaults to 2.
        max_retry_delay (int): The maximum length of the retry delay.
            Defaults to 30,000 (30 seconds).
        max_retries (int): The maximum number of retries.
            Defaults to 3.
        retry_jitter (int): A random number of milliseconds up to this limit is added to each
            retry delay. Defaults to 250 milliseconds.
    """

    concurrency: int = 1  # can't do exponential backoff with multiple threads
    block_page_size: int = 25_000_000  # this acts as an upper limit, safe to set very high
    min_retry_delay: int = 1_000  # 1 second
    retry_backoff_factor: int = 2  # exponential backoff
    max_retry_delay: int = 30_000  # 30 seconds
    max_retries: int = 3
    retry_jitter: int = 250  # 250 milliseconds
|
||
|
||
class Alchemy(Web3Provider, UpstreamProvider): | ||
""" | ||
A web3 provider using an HTTP connection to Alchemy. | ||
|
||
Docs: https://docs.alchemy.com/alchemy/ | ||
|
||
Args: | ||
network_uris: Dict[tuple, str] | ||
A mapping of (ecosystem_name, network_name) -> URI | ||
""" | ||
|
||
network_uris: Dict[tuple, str] = {} | ||
|
||
def __init__(self, *args, **kwargs):
    """Initialize the provider and apply Alchemy-specific tuning from plugin config."""
    super().__init__(*args, **kwargs)
    # Mirror the request-tuning knobs from the plugin's config section onto
    # the provider instance, where the base provider machinery reads them.
    settings = cast(AlchemyConfig, self.config_manager.get_config("alchemy"))
    self.concurrency = settings.concurrency
    self.block_page_size = settings.block_page_size
||
@property | ||
def uri(self): | ||
""" | ||
|
@@ -179,26 +220,60 @@ def get_virtual_machine_error(self, exception: Exception, **kwargs) -> VirtualMa | |
|
||
return VirtualMachineError(message=message, txn=txn) | ||
|
||
def _make_request(self, endpoint: str, parameters: Optional[List] = None) -> Any:
    """Proxy a JSON-RPC request, translating HTTP failures into Alchemy errors.

    Raises:
        AlchemyFeatureNotAvailable: When the response reports the feature is unavailable.
        AlchemyProviderError: For any other HTTP-level failure.
    """
    try:
        return super()._make_request(endpoint, parameters)
    except HTTPError as err:
        # Prefer the structured error from the JSON-RPC response body, when present.
        payload = err.response.json() if err.response else {}
        if "error" not in payload:
            raise AlchemyProviderError(str(err)) from err

        detail = payload["error"]
        if isinstance(detail, dict):
            message = detail.get("message", str(detail))
        else:
            message = detail

        if "is not available" in message:
            raise AlchemyFeatureNotAvailable(message) from err
        raise AlchemyProviderError(message) from err
def _make_request(
    self,
    endpoint: str,
    parameters: Optional[List] = None,
    min_retry_delay: Optional[int] = None,
    retry_backoff_factor: Optional[int] = None,
    max_retry_delay: Optional[int] = None,
    max_retries: Optional[int] = None,
    retry_jitter: Optional[int] = None,
) -> Any:
    """Make a JSON-RPC request, retrying with exponential backoff on rate limits.

    Args:
        endpoint (str): The RPC method name.
        parameters (Optional[List]): Positional RPC parameters, if any.
        min_retry_delay (Optional[int]): Base retry delay in milliseconds.
            Defaults to the plugin config value.
        retry_backoff_factor (Optional[int]): Multiplier applied to the delay
            per failed attempt. Defaults to the plugin config value.
        max_retry_delay (Optional[int]): Upper bound on the retry delay in
            milliseconds. Defaults to the plugin config value.
        max_retries (Optional[int]): Maximum number of attempts. Defaults to
            the plugin config value.
        retry_jitter (Optional[int]): Maximum random milliseconds added to each
            delay. Defaults to the plugin config value.

    Raises:
        AlchemyFeatureNotAvailable: If the response reports the feature is unavailable.
        AlchemyProviderError: For other provider errors, or when retries are exhausted.
    """
    alchemy_config = cast(AlchemyConfig, self.config_manager.get_config("alchemy"))
    min_retry_delay = (
        min_retry_delay if min_retry_delay is not None else alchemy_config.min_retry_delay
    )
    retry_backoff_factor = (
        retry_backoff_factor
        if retry_backoff_factor is not None
        else alchemy_config.retry_backoff_factor
    )
    max_retry_delay = (
        max_retry_delay if max_retry_delay is not None else alchemy_config.max_retry_delay
    )
    max_retries = max_retries if max_retries is not None else alchemy_config.max_retries
    retry_jitter = retry_jitter if retry_jitter is not None else alchemy_config.retry_jitter
    for attempt in range(max_retries):
        try:
            return super()._make_request(endpoint, parameters)
        except HTTPError as err:
            message = str(err)
            if any(
                error in message
                for error in ("exceeded its compute units", "Too Many Requests for url")
            ):
                # Exponential backoff, capped at max_retry_delay, plus jitter
                # so concurrent callers don't retry in lockstep.
                retry_interval = min(
                    max_retry_delay, min_retry_delay * retry_backoff_factor**attempt
                )
                logger.info(
                    "Alchemy compute units exceeded, retrying, attempt %s/%s in %s ms",
                    attempt + 1,
                    max_retries,
                    retry_interval,
                )
                delay = retry_interval + random.randint(0, retry_jitter)
                time.sleep(delay / 1000)
                continue

            # Not a rate-limit error: pull the structured error out of the
            # JSON-RPC response body. str(err) does not contain the RPC error
            # details, so checking it for "error" was unreliable.
            response_data = err.response.json() if err.response else {}
            if "error" not in response_data:
                raise AlchemyProviderError(message) from err
            error_data = response_data["error"]
            message = (
                error_data.get("message", str(error_data))
                if isinstance(error_data, dict)
                else error_data
            )
            cls = (
                AlchemyFeatureNotAvailable
                if "is not available" in message
                else AlchemyProviderError
            )
            raise cls(message) from err
    raise AlchemyProviderError(f"Rate limit exceeded after {max_retries} attempts.")
|
||
def send_private_transaction(self, txn: TransactionAPI, **kwargs) -> ReceiptAPI: | ||
""" | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Does this make sense to expose then?