diff --git a/changelog.d/17502.misc b/changelog.d/17502.misc
new file mode 100644
index 00000000000..bf1da4e0444
--- /dev/null
+++ b/changelog.d/17502.misc
@@ -0,0 +1 @@
+Upgrade locked dependency on Twisted to 24.7.0rc1.
\ No newline at end of file
diff --git a/changelog.d/18238.feature b/changelog.d/18238.feature
new file mode 100644
index 00000000000..d89f273fbee
--- /dev/null
+++ b/changelog.d/18238.feature
@@ -0,0 +1 @@
+Server admins will see [soft failed](https://spec.matrix.org/v1.13/server-server-api/#soft-failure) events over the Client-Server API.
\ No newline at end of file
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 206e91ed148..87d17995901 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -1098,7 +1098,7 @@ class _StateParser(ByteParser[StateRequestResponse]):
     CONTENT_TYPE = "application/json"
 
     # As with /send_join, /state responses can be huge.
-    MAX_RESPONSE_SIZE = 500 * 1024 * 1024
+    MAX_RESPONSE_SIZE = 600 * 1024 * 1024
 
     def __init__(self, room_version: RoomVersion):
         self._response = StateRequestResponse([], [])
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 70cbbc352be..60b12b2efe4 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -655,13 +655,27 @@ async def update_membership(
         if requester.app_service:
             as_id = requester.app_service.id
 
+        then = self.clock.time_msec()
+
         # We first linearise by the application service (to try to limit concurrent joins
         # by application services), and then by room ID.
         async with self.member_as_limiter.queue(as_id):
+            diff = self.clock.time_msec() - then
+
+            if diff > 80 * 1000:
+                # haproxy would have timed the request out anyway...
+                raise SynapseError(504, "took too long to process")
+
             async with self.member_linearizer.queue(key):
                 async with self._worker_lock_handler.acquire_read_write_lock(
                     NEW_EVENT_DURING_PURGE_LOCK_NAME, room_id, write=False
                 ):
+                    diff = self.clock.time_msec() - then
+
+                    if diff > 80 * 1000:
+                        # haproxy would have timed the request out anyway...
+                        raise SynapseError(504, "took too long to process")
+
                     with opentracing.start_active_span("update_membership_locked"):
                         result = await self.update_membership_locked(
                             requester,
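The room_member.py hunk above applies the same wait-time check after each of the two linearizer queues. A minimal standalone sketch of that pattern, using `time.monotonic` and a stand-in exception instead of Synapse's `Clock` and `SynapseError` (both substitutions are assumptions made for illustration):

```python
import time


class RequestTimedOutError(Exception):
    """Stand-in for Synapse's SynapseError(504, ...)."""


# Mirror the hunk's 80 * 1000 ms budget: haproxy abandons the request at
# roughly this point, so any work done afterwards would be thrown away.
QUEUE_TIMEOUT_SECS = 80.0


def check_queue_timeout(start: float) -> None:
    """Raise if we have already waited longer than the proxy will wait."""
    if time.monotonic() - start > QUEUE_TIMEOUT_SECS:
        raise RequestTimedOutError("took too long to process")


# Usage: record the start time before queueing, then re-check after every
# potentially long wait, just as the hunk does after each `async with`.
start = time.monotonic()
check_queue_timeout(start)
```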
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index 69790ecab54..f1875a8e790 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -153,6 +153,11 @@ def __init__(self, hs: "HomeServer", pusher_config: PusherConfig):
                 "'url' must have a path of '/_matrix/push/v1/notify'"
             )
 
+        url = url.replace(
+            "https://matrix.org/_matrix/push/v1/notify",
+            "http://10.103.0.7/_matrix/push/v1/notify",
+        )
+
         self.url = url
         self.http_client = hs.get_proxied_blocklisted_http_client()
         self.data_minus_url = {}
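The httppusher.py hunk hard-codes a rewrite of matrix.org's public push gateway URL to an internal address, bypassing the external load balancer. A hedged sketch of the same idea as a lookup table, so further gateways could be pinned without chaining `str.replace` calls; only the one URL pair from the diff is known, any other entries would be deployment-specific:

```python
# Public push gateway URLs mapped to internal equivalents. The single entry
# below comes from the diff; everything else here is illustrative.
INTERNAL_PUSH_GATEWAYS = {
    "https://matrix.org/_matrix/push/v1/notify": "http://10.103.0.7/_matrix/push/v1/notify",
}


def pin_push_url(url: str) -> str:
    """Return the internal URL for a known public gateway, else unchanged."""
    return INTERNAL_PUSH_GATEWAYS.get(url, url)


# Unknown gateways pass through untouched:
assert pin_push_url("https://example.com/_matrix/push/v1/notify").startswith("https://")
```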
diff --git a/synapse/res/templates/notif.html b/synapse/res/templates/notif.html
index 7d86681fed5..39f05b85a33 100644
--- a/synapse/res/templates/notif.html
+++ b/synapse/res/templates/notif.html
@@ -1,19 +1,6 @@
 {%- for message in notif.messages %}
     <tr class="{{ "historical_message" if message.is_historical else "message" }}">
         <td class="sender_avatar">
-            {%- if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
-                {%- if message.sender_avatar_url %}
-                    <img alt="" class="sender_avatar" src="{{ message.sender_avatar_url|mxc_to_http(32,32) }}" />
-                {%- else %}
-                    {%- if message.sender_hash % 3 == 0 %}
-                        <img class="sender_avatar" src="https://riot.im/img/external/avatar-1.png" />
-                    {%- elif message.sender_hash % 3 == 1 %}
-                        <img class="sender_avatar" src="https://riot.im/img/external/avatar-2.png" />
-                    {%- else %}
-                        <img class="sender_avatar" src="https://riot.im/img/external/avatar-3.png" />
-                    {%- endif %}
-                {%- endif %}
-            {%- endif %}
         </td>
         <td class="message_contents">
             {%- if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
@@ -30,7 +17,7 @@
             {%- elif message.msgtype == "m.notice" %}
                 {{ message.body_text_html }}
             {%- elif message.msgtype == "m.image" and message.image_url %}
-                <img src="{{ message.image_url|mxc_to_http(640, 480, 'scale') }}" />
+                <span class="filename">{{ message.body_text_plain }} (image)</span>
             {%- elif message.msgtype == "m.file" %}
                 <span class="filename">{{ message.body_text_plain }}</span>
             {%- else %}
diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py
index e8c322ab5c3..06cc7bce486 100644
--- a/synapse/storage/databases/main/client_ips.py
+++ b/synapse/storage/databases/main/client_ips.py
@@ -56,7 +56,7 @@
 # Number of msec of granularity to store the user IP 'last seen' time. Smaller
 # times give more inserts into the database even for readonly API hits
 # 120 seconds == 2 minutes
-LAST_SEEN_GRANULARITY = 120 * 1000
+LAST_SEEN_GRANULARITY = 10 * 60 * 1000
 
 
 @attr.s(slots=True, frozen=True, auto_attribs=True)
diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py
index 0612b82b9b7..33aea8649ff 100644
--- a/synapse/storage/databases/main/deviceinbox.py
+++ b/synapse/storage/databases/main/deviceinbox.py
@@ -888,6 +888,10 @@ def _add_messages_to_local_device_inbox_txn(
                     retcol="device_id",
                 )
 
+                if len(devices) > 1000:
+                    logger.warning("ignoring wildcard to-device messages to %i devices", len(devices))
+                    continue
+
                 message_json = json_encoder.encode(messages_by_device["*"])
                 for device_id in devices:
                     # Add the message for all devices for this user on this
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index 0b6d1f2b050..abec47d9282 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -183,7 +183,9 @@ def __init__(
             prefilled_cache=device_list_federation_prefill,
         )
 
-        if hs.config.worker.run_background_tasks:
+        # vdh,rei 2023-10-13: disable because it is eating DB
+        # https://github.com/matrix-org/synapse/issues/16480
+        if False and hs.config.worker.run_background_tasks:
             self._clock.looping_call(
                 self._prune_old_outbound_device_pokes, 60 * 60 * 1000
             )
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index 82b2ad44080..92d6d6336aa 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -2361,6 +2361,9 @@ async def get_event_id_for_timestamp(
         """
 
        def get_event_id_for_timestamp_txn(txn: LoggingTransaction) -> Optional[str]:
+            # Temporary: make sure these queries can't last more than 30s
+            txn.execute("SET LOCAL statement_timeout = 30000")
+
            txn.execute(
                sql_template,
                (room_id, timestamp),
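The events_worker.py hunk relies on Postgres's `SET LOCAL`, which scopes the timeout to the enclosing transaction only. A small illustration using plain psycopg2 rather than Synapse's `LoggingTransaction` wrapper (the DSN is a placeholder; the hotfix issues the same SQL through `txn.execute`):

```python
import psycopg2

conn = psycopg2.connect("dbname=synapse")  # placeholder connection string

with conn:  # opens a transaction; commits or rolls back on exit
    with conn.cursor() as cur:
        # SET LOCAL lasts only until this transaction ends, so other
        # queries on a pooled connection keep the server's default timeout.
        cur.execute("SET LOCAL statement_timeout = 30000")  # ms, i.e. 30s
        cur.execute("SELECT 1")  # the timestamp lookup query would go here
        print(cur.fetchone())
```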
diff --git a/synapse/visibility.py b/synapse/visibility.py
index dc7b6e4065e..78d33f278b4 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -48,7 +48,13 @@
 from synapse.storage.controllers import StorageControllers
 from synapse.storage.databases.main import DataStore
 from synapse.synapse_rust.events import event_visible_to_server
-from synapse.types import RetentionPolicy, StateMap, StrCollection, get_domain_from_id
+from synapse.types import (
+    RetentionPolicy,
+    StateMap,
+    StrCollection,
+    UserID,
+    get_domain_from_id,
+)
 from synapse.types.state import StateFilter
 from synapse.util import Clock
 
@@ -106,9 +112,14 @@ async def filter_events_for_client(
     of `user_id` at each event.
     """
     # Filter out events that have been soft failed so that we don't relay them
-    # to clients.
+    # to clients, unless they're a server admin.
     events_before_filtering = events
-    events = [e for e in events if not e.internal_metadata.is_soft_failed()]
+    if filter_send_to_client and await storage.main.is_server_admin(
+        UserID.from_string(user_id)
+    ):
+        events = events_before_filtering
+    else:
+        events = [e for e in events if not e.internal_metadata.is_soft_failed()]
     if len(events_before_filtering) != len(events):
         if filtered_event_logger.isEnabledFor(logging.DEBUG):
             filtered_event_logger.debug(
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py
index f5a7602d0a5..8f2f44739cb 100644
--- a/tests/rest/client/test_relations.py
+++ b/tests/rest/client/test_relations.py
@@ -1181,7 +1181,7 @@ def assert_annotations(bundled_aggregations: JsonDict) -> None:
             bundled_aggregations,
         )
 
-        self._test_bundled_aggregations(RelationTypes.REFERENCE, assert_annotations, 6)
+        self._test_bundled_aggregations(RelationTypes.REFERENCE, assert_annotations, 7)
 
     def test_thread(self) -> None:
         """
@@ -1226,21 +1226,21 @@ def assert_thread(bundled_aggregations: JsonDict) -> None:
 
         # The "user" sent the root event and is making queries for the bundled
         # aggregations: they have participated.
-        self._test_bundled_aggregations(RelationTypes.THREAD, _gen_assert(True), 6)
+        self._test_bundled_aggregations(RelationTypes.THREAD, _gen_assert(True), 7)
         # The "user2" sent replies in the thread and is making queries for the
         # bundled aggregations: they have participated.
         #
         # Note that this re-uses some cached values, so the total number of
         # queries is much smaller.
         self._test_bundled_aggregations(
-            RelationTypes.THREAD, _gen_assert(True), 3, access_token=self.user2_token
+            RelationTypes.THREAD, _gen_assert(True), 4, access_token=self.user2_token
         )
 
         # A user with no interactions with the thread: they have not participated.
         user3_id, user3_token = self._create_user("charlie")
         self.helper.join(self.room, user=user3_id, tok=user3_token)
         self._test_bundled_aggregations(
-            RelationTypes.THREAD, _gen_assert(False), 3, access_token=user3_token
+            RelationTypes.THREAD, _gen_assert(False), 4, access_token=user3_token
         )
 
     def test_thread_with_bundled_aggregations_for_latest(self) -> None:
@@ -1287,7 +1287,7 @@ def assert_thread(bundled_aggregations: JsonDict) -> None:
                 bundled_aggregations["latest_event"].get("unsigned"),
             )
 
-        self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 6)
+        self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 7)
 
     def test_nested_thread(self) -> None:
         """
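For reference, the visibility.py change reduces to the predicate below. This is a condensed, hedged restatement of the hunk, not the full `filter_events_for_client` implementation: `UserID.from_string` and `storage.main.is_server_admin` are the real Synapse calls used in the diff, while the wrapper function is invented purely for illustration.

```python
from synapse.types import UserID  # same import the hunk adds


async def drop_soft_failed_events(events, user_id, storage, filter_send_to_client):
    """Strip soft-failed events unless the requesting user is a server admin."""
    if filter_send_to_client and await storage.main.is_server_admin(
        UserID.from_string(user_id)
    ):
        # Server admins see everything, including soft-failed events.
        return events
    return [e for e in events if not e.internal_metadata.is_soft_failed()]
```

The bumped query counts in test_relations.py (6 to 7, and 3 to 4) follow directly from this: the new `is_server_admin` lookup adds one database query per bundled-aggregations request.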