|
| 1 | +use std::{ |
| 2 | + collections::{HashMap, HashSet}, |
| 3 | + hash::Hash, |
| 4 | +}; |
| 5 | + |
| 6 | +use relay_base_schema::project::ProjectKey; |
| 7 | + |
| 8 | +use super::{Item, Location, MetricMeta, StartOfDayUnixTimestamp}; |
| 9 | +use crate::{statsd::MetricCounters, MetricResourceIdentifier}; |
| 10 | + |
/// A metrics meta aggregator.
///
/// Aggregates metric metadata based on their scope (project, mri, timestamp) and
/// only keeps the most relevant entries.
///
/// Currently we track the first N amount of unique metric meta elements we get.
///
/// This should represent the actual adoption rate of different code versions.
///
/// This aggregator is purely in memory and will lose its state on restart,
/// which may cause multiple different items being emitted after restarts.
/// For this we have de-duplication in the storage and the volume overall
/// of this happening is small enough to just add it to the storage worst case.
#[derive(Debug)]
pub struct MetaAggregator {
    /// All tracked code locations, keyed by scope (project, MRI, day).
    ///
    /// `hashbrown` is used for its `entry_ref` API, which lets `add_scoped`
    /// look up by `&Scope` without cloning the scope up front.
    locations: hashbrown::HashMap<Scope, HashSet<Location>>,

    /// Maximum tracked locations per scope.
    max_locations: usize,
}
| 32 | + |
| 33 | +impl MetaAggregator { |
| 34 | + /// Creates a new metrics meta aggregator. |
| 35 | + pub fn new(max_locations: usize) -> Self { |
| 36 | + Self { |
| 37 | + locations: hashbrown::HashMap::new(), |
| 38 | + max_locations, |
| 39 | + } |
| 40 | + } |
| 41 | + |
| 42 | + /// Adds a new meta item to the aggregator. |
| 43 | + /// |
| 44 | + /// Returns a new [`MetricMeta`] element when the element should be stored |
| 45 | + /// or sent upstream for storage. |
| 46 | + /// |
| 47 | + /// Returns `None` when the meta item was already seen or is not considered relevant. |
| 48 | + pub fn add(&mut self, project_key: ProjectKey, meta: MetricMeta) -> Option<MetricMeta> { |
| 49 | + let mut send_upstream = HashMap::new(); |
| 50 | + |
| 51 | + for (mri, items) in meta.mapping { |
| 52 | + let scope = Scope { |
| 53 | + timestamp: meta.timestamp, |
| 54 | + project_key, |
| 55 | + mri, |
| 56 | + }; |
| 57 | + |
| 58 | + if let Some(items) = self.add_scoped(&scope, items) { |
| 59 | + send_upstream.insert(scope.mri, items); |
| 60 | + } |
| 61 | + } |
| 62 | + |
| 63 | + if send_upstream.is_empty() { |
| 64 | + return None; |
| 65 | + } |
| 66 | + |
| 67 | + relay_statsd::metric!(counter(MetricCounters::MetaAggregatorUpdate) += 1); |
| 68 | + Some(MetricMeta { |
| 69 | + timestamp: meta.timestamp, |
| 70 | + mapping: send_upstream, |
| 71 | + }) |
| 72 | + } |
| 73 | + |
| 74 | + /// Retrieves all currently relevant metric meta for a project. |
| 75 | + pub fn get_all_relevant(&self, project_key: ProjectKey) -> impl Iterator<Item = MetricMeta> { |
| 76 | + let locations = self |
| 77 | + .locations |
| 78 | + .iter() |
| 79 | + .filter(|(scope, _)| scope.project_key == project_key); |
| 80 | + |
| 81 | + let mut result = HashMap::new(); |
| 82 | + |
| 83 | + for (scope, locations) in locations { |
| 84 | + result |
| 85 | + .entry(scope.timestamp) |
| 86 | + .or_insert_with(|| MetricMeta { |
| 87 | + timestamp: scope.timestamp, |
| 88 | + mapping: HashMap::new(), |
| 89 | + }) |
| 90 | + .mapping |
| 91 | + .entry(scope.mri.clone()) // This clone sucks |
| 92 | + .or_insert_with(Vec::new) |
| 93 | + .extend(locations.iter().cloned().map(Item::Location)); |
| 94 | + } |
| 95 | + |
| 96 | + result.into_values() |
| 97 | + } |
| 98 | + |
| 99 | + /// Remove all contained state related to a project. |
| 100 | + pub fn clear(&mut self, project_key: ProjectKey) { |
| 101 | + self.locations |
| 102 | + .retain(|scope, _| scope.project_key != project_key); |
| 103 | + } |
| 104 | + |
| 105 | + fn add_scoped(&mut self, scope: &Scope, items: Vec<Item>) -> Option<Vec<Item>> { |
| 106 | + // Entry ref needs hashbrown, we would have to clone the scope without or do a separate lookup. |
| 107 | + let locations = self.locations.entry_ref(scope).or_default(); |
| 108 | + let mut send_upstream = Vec::new(); |
| 109 | + |
| 110 | + for item in items { |
| 111 | + match item { |
| 112 | + Item::Location(location) => { |
| 113 | + if locations.len() > self.max_locations { |
| 114 | + break; |
| 115 | + } |
| 116 | + |
| 117 | + if !locations.contains(&location) { |
| 118 | + locations.insert(location.clone()); |
| 119 | + send_upstream.push(Item::Location(location)); |
| 120 | + } |
| 121 | + } |
| 122 | + Item::Unknown => {} |
| 123 | + } |
| 124 | + } |
| 125 | + |
| 126 | + (!send_upstream.is_empty()).then_some(send_upstream) |
| 127 | + } |
| 128 | +} |
| 129 | + |
/// The metadata scope.
///
/// We scope metadata by project, mri and day,
/// represented as a unix timestamp at the beginning of the day.
///
/// The technical scope (e.g. redis key) also includes the organization id, but this
/// can be inferred from the project.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct Scope {
    /// Unix timestamp of the start of the day this metadata belongs to.
    pub timestamp: StartOfDayUnixTimestamp,
    /// The project the metadata was reported for.
    pub project_key: ProjectKey,
    /// The metric resource identifier (MRI) the metadata refers to.
    pub mri: MetricResourceIdentifier<'static>,
}
| 143 | + |
| 144 | +impl From<&Scope> for Scope { |
| 145 | + fn from(value: &Scope) -> Self { |
| 146 | + value.clone() |
| 147 | + } |
| 148 | +} |
0 commit comments