-
Notifications
You must be signed in to change notification settings - Fork 222
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Define delayed event ratelimit category #18019
base: develop
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1 @@ | ||
Define ratelimit configuration for delayed event management. |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -228,3 +228,9 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: | |
config.get("remote_media_download_burst_count", "500M") | ||
), | ||
) | ||
|
||
self.rc_delayed_event = RatelimitSettings.parse( | ||
config, | ||
"rc_delayed_event", | ||
defaults={"per_second": 10, "burst_count": 100}, | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 10 per second seems quite high - may as well not have a rate-limit at all at that point? The default for |
||
) |
Original file line number | Diff line number | Diff line change | ||||||
---|---|---|---|---|---|---|---|---|
|
@@ -58,6 +58,7 @@ def __init__(self, hs: "HomeServer"): | |||||||
self._config = hs.config | ||||||||
self._clock = hs.get_clock() | ||||||||
self._request_ratelimiter = hs.get_request_ratelimiter() | ||||||||
self._delayed_event_ratelimiter = hs.get_delayed_event_ratelimiter() | ||||||||
self._event_creation_handler = hs.get_event_creation_handler() | ||||||||
self._room_member_handler = hs.get_room_member_handler() | ||||||||
|
||||||||
|
@@ -227,6 +228,8 @@ async def add( | |||||||
Raises: | ||||||||
SynapseError: if the delayed event fails validation checks. | ||||||||
""" | ||||||||
# Use standard request limiter for scheduling new delayed events. | ||||||||
# TODO: Instead apply ratelimiting based on the scheduled send time. | ||||||||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
Suggested change
|
||||||||
await self._request_ratelimiter.ratelimit(requester) | ||||||||
|
||||||||
self._event_creation_handler.validator.validate_builder( | ||||||||
|
@@ -285,7 +288,7 @@ async def cancel(self, requester: Requester, delay_id: str) -> None: | |||||||
NotFoundError: if no matching delayed event could be found. | ||||||||
""" | ||||||||
assert self._is_master | ||||||||
await self._request_ratelimiter.ratelimit(requester) | ||||||||
await self._delayed_event_ratelimiter.ratelimit(requester) | ||||||||
await self._initialized_from_db | ||||||||
|
||||||||
next_send_ts = await self._store.cancel_delayed_event( | ||||||||
|
@@ -308,7 +311,7 @@ async def restart(self, requester: Requester, delay_id: str) -> None: | |||||||
NotFoundError: if no matching delayed event could be found. | ||||||||
""" | ||||||||
assert self._is_master | ||||||||
await self._request_ratelimiter.ratelimit(requester) | ||||||||
await self._delayed_event_ratelimiter.ratelimit(requester) | ||||||||
await self._initialized_from_db | ||||||||
|
||||||||
next_send_ts = await self._store.restart_delayed_event( | ||||||||
|
@@ -332,6 +335,8 @@ async def send(self, requester: Requester, delay_id: str) -> None: | |||||||
NotFoundError: if no matching delayed event could be found. | ||||||||
""" | ||||||||
assert self._is_master | ||||||||
# Use standard request limiter for sending delayed events on-demand, | ||||||||
# as an on-demand send is similar to sending a regular event. | ||||||||
await self._request_ratelimiter.ratelimit(requester) | ||||||||
await self._initialized_from_db | ||||||||
|
||||||||
|
@@ -415,7 +420,7 @@ def _schedule_next_at(self, next_send_ts: Timestamp) -> None: | |||||||
|
||||||||
async def get_all_for_user(self, requester: Requester) -> List[JsonDict]: | ||||||||
"""Return all pending delayed events requested by the given user.""" | ||||||||
await self._request_ratelimiter.ratelimit(requester) | ||||||||
await self._delayed_event_ratelimiter.ratelimit(requester) | ||||||||
return await self._store.get_all_delayed_events_for_user( | ||||||||
requester.user.localpart | ||||||||
) | ||||||||
|
Original file line number | Diff line number | Diff line change | ||||
---|---|---|---|---|---|---|
|
@@ -109,6 +109,25 @@ | |||||
) | ||||||
self.assertEqual(setter_expected, content.get(setter_key), content) | ||||||
|
||||||
@unittest.override_config({"rc_delayed_event": {"per_second": 0.5, "burst_count": 1}}) | ||||||
def test_get_delayed_events_ratelimit(self) -> None: | ||||||
args = ("GET", PATH_PREFIX) | ||||||
|
||||||
channel = self.make_request(*args) | ||||||
self.assertEqual(HTTPStatus.OK, channel.code, channel.result) | ||||||
|
||||||
channel = self.make_request(*args) | ||||||
self.assertEqual(HTTPStatus.TOO_MANY_REQUESTS, channel.code, channel.result) | ||||||
|
||||||
# Add the current user to the ratelimit overrides, allowing them no ratelimiting. | ||||||
self.get_success( | ||||||
self.hs.get_datastores().main.set_ratelimit_for_user(self.user_id, 0, 0) | ||||||
) | ||||||
|
||||||
# Test that the request isn't ratelimited anymore. | ||||||
channel = self.make_request(*args) | ||||||
self.assertEqual(HTTPStatus.OK, channel.code, channel.result) | ||||||
|
||||||
def test_update_delayed_event_without_id(self) -> None: | ||||||
channel = self.make_request( | ||||||
"POST", | ||||||
|
@@ -206,6 +225,44 @@ | |||||
expect_code=HTTPStatus.NOT_FOUND, | ||||||
) | ||||||
|
||||||
@unittest.override_config({"rc_delayed_event": {"per_second": 0.5, "burst_count": 1}}) | ||||||
def test_cancel_delayed_event_ratelimit(self) -> None: | ||||||
delay_ids = [] | ||||||
for i in range(2): | ||||||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
Suggested change
|
||||||
channel = self.make_request( | ||||||
"POST", | ||||||
_get_path_for_delayed_send(self.room_id, _EVENT_TYPE, 100000), | ||||||
{}, | ||||||
) | ||||||
self.assertEqual(HTTPStatus.OK, channel.code, channel.result) | ||||||
delay_id = channel.json_body.get("delay_id") | ||||||
self.assertIsNotNone(delay_id) | ||||||
delay_ids.append(delay_id) | ||||||
|
||||||
channel = self.make_request( | ||||||
"POST", | ||||||
f"{PATH_PREFIX}/{delay_ids.pop(0)}", | ||||||
{"action": "cancel"}, | ||||||
) | ||||||
self.assertEqual(HTTPStatus.OK, channel.code, channel.result) | ||||||
|
||||||
args = ( | ||||||
"POST", | ||||||
f"{PATH_PREFIX}/{delay_ids.pop(0)}", | ||||||
{"action": "cancel"}, | ||||||
) | ||||||
channel = self.make_request(*args) | ||||||
self.assertEqual(HTTPStatus.TOO_MANY_REQUESTS, channel.code, channel.result) | ||||||
|
||||||
# Add the current user to the ratelimit overrides, allowing them no ratelimiting. | ||||||
self.get_success( | ||||||
self.hs.get_datastores().main.set_ratelimit_for_user(self.user_id, 0, 0) | ||||||
) | ||||||
|
||||||
# Test that the request isn't ratelimited anymore. | ||||||
channel = self.make_request(*args) | ||||||
self.assertEqual(HTTPStatus.OK, channel.code, channel.result) | ||||||
|
||||||
def test_send_delayed_state_event(self) -> None: | ||||||
state_key = "to_send_on_request" | ||||||
|
||||||
|
@@ -250,6 +307,44 @@ | |||||
) | ||||||
self.assertEqual(setter_expected, content.get(setter_key), content) | ||||||
|
||||||
@unittest.override_config({"rc_message": {"per_second": 3, "burst_count": 4}}) | ||||||
def test_send_delayed_event_ratelimit(self) -> None: | ||||||
delay_ids = [] | ||||||
for i in range(2): | ||||||
channel = self.make_request( | ||||||
"POST", | ||||||
_get_path_for_delayed_send(self.room_id, _EVENT_TYPE, 100000), | ||||||
{}, | ||||||
) | ||||||
self.assertEqual(HTTPStatus.OK, channel.code, channel.result) | ||||||
delay_id = channel.json_body.get("delay_id") | ||||||
self.assertIsNotNone(delay_id) | ||||||
delay_ids.append(delay_id) | ||||||
|
||||||
channel = self.make_request( | ||||||
"POST", | ||||||
f"{PATH_PREFIX}/{delay_ids.pop(0)}", | ||||||
{"action": "send"}, | ||||||
) | ||||||
self.assertEqual(HTTPStatus.OK, channel.code, channel.result) | ||||||
|
||||||
args = ( | ||||||
"POST", | ||||||
f"{PATH_PREFIX}/{delay_ids.pop(0)}", | ||||||
{"action": "send"}, | ||||||
) | ||||||
channel = self.make_request(*args) | ||||||
self.assertEqual(HTTPStatus.TOO_MANY_REQUESTS, channel.code, channel.result) | ||||||
|
||||||
# Add the current user to the ratelimit overrides, allowing them no ratelimiting. | ||||||
self.get_success( | ||||||
self.hs.get_datastores().main.set_ratelimit_for_user(self.user_id, 0, 0) | ||||||
) | ||||||
|
||||||
# Test that the request isn't ratelimited anymore. | ||||||
channel = self.make_request(*args) | ||||||
self.assertEqual(HTTPStatus.OK, channel.code, channel.result) | ||||||
|
||||||
def test_restart_delayed_state_event(self) -> None: | ||||||
state_key = "to_send_on_restarted_timeout" | ||||||
|
||||||
|
@@ -309,6 +404,44 @@ | |||||
) | ||||||
self.assertEqual(setter_expected, content.get(setter_key), content) | ||||||
|
||||||
@unittest.override_config({"rc_delayed_event": {"per_second": 0.5, "burst_count": 1}}) | ||||||
def test_restart_delayed_event_ratelimit(self) -> None: | ||||||
delay_ids = [] | ||||||
for i in range(2): | ||||||
channel = self.make_request( | ||||||
"POST", | ||||||
_get_path_for_delayed_send(self.room_id, _EVENT_TYPE, 100000), | ||||||
{}, | ||||||
) | ||||||
self.assertEqual(HTTPStatus.OK, channel.code, channel.result) | ||||||
delay_id = channel.json_body.get("delay_id") | ||||||
self.assertIsNotNone(delay_id) | ||||||
delay_ids.append(delay_id) | ||||||
|
||||||
channel = self.make_request( | ||||||
"POST", | ||||||
f"{PATH_PREFIX}/{delay_ids.pop(0)}", | ||||||
{"action": "restart"}, | ||||||
) | ||||||
self.assertEqual(HTTPStatus.OK, channel.code, channel.result) | ||||||
|
||||||
args = ( | ||||||
"POST", | ||||||
f"{PATH_PREFIX}/{delay_ids.pop(0)}", | ||||||
{"action": "restart"}, | ||||||
) | ||||||
channel = self.make_request(*args) | ||||||
self.assertEqual(HTTPStatus.TOO_MANY_REQUESTS, channel.code, channel.result) | ||||||
|
||||||
# Add the current user to the ratelimit overrides, allowing them no ratelimiting. | ||||||
self.get_success( | ||||||
self.hs.get_datastores().main.set_ratelimit_for_user(self.user_id, 0, 0) | ||||||
) | ||||||
|
||||||
# Test that the request isn't ratelimited anymore. | ||||||
channel = self.make_request(*args) | ||||||
self.assertEqual(HTTPStatus.OK, channel.code, channel.result) | ||||||
|
||||||
def test_delayed_state_events_are_cancelled_by_more_recent_state(self) -> None: | ||||||
state_key = "to_be_cancelled" | ||||||
|
||||||
|
@@ -374,3 +507,8 @@ | |||||
room_id: str, event_type: str, state_key: str, delay_ms: int | ||||||
) -> str: | ||||||
return f"rooms/{room_id}/state/{event_type}/{state_key}?org.matrix.msc4140.delay={delay_ms}" | ||||||
|
||||||
def _get_path_for_delayed_send( | ||||||
room_id: str, event_type: str, delay_ms: int | ||||||
) -> str: | ||||||
return f"rooms/{room_id}/send/{event_type}?org.matrix.msc4140.delay={delay_ms}" |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Why are the defaults so different from
rc_message
which is what is currently used? I expect the default for rc_delayed_event
to be similar.

There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The default is more lax to permit refreshing delayed events often / in a loop without risk of ratelimiting, whereas "actual" messages are sent less frequently / not periodically and benefit from a stricter ratelimit.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
@AndrewFerr could you put that justification in the config documentation? I think it would help sysadmins understand why they may want to configure this new ratelimit.