@@ -9,7 +9,9 @@ use std::sync::Arc;
9
9
use std:: time:: Instant ;
10
10
11
11
use actix:: prelude:: * ;
12
+ use actix_web:: http:: Method ;
12
13
use chrono:: SecondsFormat ;
14
+ use futures:: future:: Future ;
13
15
use serde:: { Deserialize , Serialize } ;
14
16
15
17
use relay_common:: ProjectId ;
@@ -18,6 +20,8 @@ use relay_filter::FilterStatKey;
18
20
use relay_general:: protocol:: EventId ;
19
21
use relay_quotas:: { ReasonCode , Scoping } ;
20
22
23
+ use crate :: actors:: upstream:: SendQuery ;
24
+ use crate :: actors:: upstream:: { UpstreamQuery , UpstreamRelay } ;
21
25
use crate :: ServerError ;
22
26
23
27
// Choose the outcome module implementation (either the real one or the fake, no-op one).
@@ -27,6 +31,35 @@ pub use self::processing::*;
27
31
// No-op outcome implementation
28
32
#[ cfg( not( feature = "processing" ) ) ]
29
33
pub use self :: non_processing:: * ;
34
use std::borrow::Cow;

/// Maximum number of outcomes accumulated before a batch is flushed to the
/// upstream immediately, regardless of the flush timer.
const MAX_OUTCOME_BATCH_SIZE: usize = 1000;
/// Maximum time (milliseconds) an outcome waits in the batch before a flush
/// is forced, even if the batch is not full.
// NOTE(review): "MILLSEC" is a misspelling ("MILLIS"/"MS" would be
// conventional); renaming touches call sites, so it is only flagged here.
const MAX_OUTCOME_BATCH_INTERVAL_MILLSEC: u64 = 500;
38
+
39
/// Defines the structure of the HTTP outcomes requests
#[derive(Deserialize, Serialize, Debug, Default)]
#[serde(default)]
pub struct SendOutcomes {
    // The batch of raw outcomes submitted to the upstream in one request.
    pub outcomes: Vec<TrackRawOutcome>,
}
45
+
46
+ impl UpstreamQuery for SendOutcomes {
47
+ type Response = SendOutcomes ;
48
+
49
+ fn method ( & self ) -> Method {
50
+ Method :: POST
51
+ }
52
+
53
+ fn path ( & self ) -> Cow < ' static , str > {
54
+ Cow :: Borrowed ( "/api/0/relays/outcomes/" )
55
+ }
56
+ }
57
+
58
+ /// Defines the structure of the HTTP outcomes responses for successful requests
59
+ #[ derive( Serialize , Debug ) ]
60
+ pub struct SendOutcomesResponse {
61
+ // nothing yet, future features will go here
62
+ }
30
63
31
64
/// Tracks an outcome of an event.
32
65
///
@@ -315,6 +348,8 @@ mod processing {
315
348
316
349
use crate :: metrics:: RelayCounters ;
317
350
use crate :: service:: ServerErrorKind ;
351
+ use crate :: utils:: run_later;
352
+ use std:: time:: Duration ;
318
353
319
354
type ThreadedProducer = rdkafka:: producer:: ThreadedProducer < DefaultProducerContext > ;
320
355
@@ -329,10 +364,16 @@ mod processing {
329
364
/// Actor that tracks outcomes, forwarding them either to Kafka (when
/// processing is enabled) or, batched, to the upstream Relay over HTTP.
pub struct OutcomeProducer {
    config: Arc<Config>,
    // Kafka producer; `None` when processing is not enabled (see `create`).
    producer: Option<ThreadedProducer>,
    // Address of the upstream actor used to submit HTTP outcome batches.
    upstream: Addr<UpstreamRelay>,
    // Outcomes accumulated for the next HTTP batch.
    unsent_outcomes: Vec<TrackRawOutcome>,
    // True while a delayed flush of `unsent_outcomes` is pending.
    send_scheduled: bool,
}
333
371
334
372
impl OutcomeProducer {
335
- pub fn create ( config : Arc < Config > ) -> Result < Self , ServerError > {
373
+ pub fn create (
374
+ config : Arc < Config > ,
375
+ upstream : Addr < UpstreamRelay > ,
376
+ ) -> Result < Self , ServerError > {
336
377
let future_producer = if config. processing_enabled ( ) {
337
378
let mut client_config = ClientConfig :: new ( ) ;
338
379
for config_p in config. kafka_config ( ) {
@@ -349,10 +390,13 @@ mod processing {
349
390
Ok ( Self {
350
391
config,
351
392
producer : future_producer,
393
+ upstream,
394
+ unsent_outcomes : Vec :: new ( ) ,
395
+ send_scheduled : false ,
352
396
} )
353
397
}
354
398
355
- fn send_kafka_message ( & mut self , message : TrackRawOutcome ) -> Result < ( ) , OutcomeError > {
399
+ fn send_kafka_message ( & self , message : TrackRawOutcome ) -> Result < ( ) , OutcomeError > {
356
400
log:: trace!( "Tracking outcome: {:?}" , message) ;
357
401
358
402
let producer = match self . producer {
@@ -385,8 +429,38 @@ mod processing {
385
429
}
386
430
}
387
431
388
- fn send_http_message ( & mut self , _message : TrackRawOutcome ) -> Result < ( ) , OutcomeError > {
389
- unimplemented ! ( )
432
+ fn send_batch ( & mut self , context : & mut Context < Self > ) {
433
+ let request = SendOutcomes {
434
+ outcomes : self . unsent_outcomes . drain ( ..) . collect ( ) ,
435
+ } ;
436
+ self . send_scheduled = false ;
437
+
438
+ self . upstream
439
+ . send ( SendQuery ( request) )
440
+ . map ( |_| ( ) )
441
+ . map_err ( |_| ( ) )
442
+ . into_actor ( self )
443
+ . spawn ( context) ;
444
+ }
445
+
446
+ fn send_http_message (
447
+ & mut self ,
448
+ message : TrackRawOutcome ,
449
+ context : & mut Context < Self > ,
450
+ ) -> Result < ( ) , OutcomeError > {
451
+ self . unsent_outcomes . push ( message) ;
452
+ if self . unsent_outcomes . len ( ) >= MAX_OUTCOME_BATCH_SIZE {
453
+ self . send_batch ( context)
454
+ } else if !self . send_scheduled {
455
+ self . send_scheduled = true ;
456
+ run_later (
457
+ Duration :: from_millis ( MAX_OUTCOME_BATCH_INTERVAL_MILLSEC ) ,
458
+ Self :: send_batch,
459
+ )
460
+ . spawn ( context)
461
+ }
462
+
463
+ Ok ( ( ) )
390
464
}
391
465
}
392
466
@@ -417,11 +491,11 @@ mod processing {
417
491
418
492
impl Handler < TrackRawOutcome > for OutcomeProducer {
419
493
type Result = Result < ( ) , OutcomeError > ;
420
- fn handle ( & mut self , message : TrackRawOutcome , _ctx : & mut Self :: Context ) -> Self :: Result {
494
+ fn handle ( & mut self , message : TrackRawOutcome , ctx : & mut Self :: Context ) -> Self :: Result {
421
495
if self . config . processing_enabled ( ) {
422
496
self . send_kafka_message ( message)
423
497
} else if self . config . emit_outcomes ( ) {
424
- self . send_http_message ( message)
498
+ self . send_http_message ( message, ctx )
425
499
} else {
426
500
Ok ( ( ) ) // processing not enabled and emit_outcomes disabled
427
501
}
@@ -440,11 +514,15 @@ mod non_processing {
440
514
441
515
/// Outcome producer used when the `processing` feature is disabled.
pub struct OutcomeProducer {
    config: Arc<Config>,
    // Address of the upstream actor; mirrors the processing implementation.
    // NOTE(review): not used in the code visible here — confirm it is
    // consumed elsewhere in this module.
    upstream: Addr<UpstreamRelay>,
}
444
519
445
520
impl OutcomeProducer {
446
- pub fn create ( config : Arc < Config > ) -> Result < Self , ServerError > {
447
- Ok ( Self { config } )
521
+ pub fn create (
522
+ config : Arc < Config > ,
523
+ upstream : Addr < UpstreamRelay > ,
524
+ ) -> Result < Self , ServerError > {
525
+ Ok ( Self { config, upstream } )
448
526
}
449
527
}
450
528
0 commit comments