 //! to specific topics and partitions, respectively.
 //!
 use std::collections::HashMap;
+use std::sync::atomic::AtomicU32;
 use std::sync::Arc;
 
+use event_listener::Event;
+use tokio::select;
 use tracing::instrument;
 use async_lock::RwLock;
 use anyhow::Result;
 
+use fluvio_future::task::spawn;
+use fluvio_future::timer::sleep;
 use fluvio_protocol::record::ReplicaKey;
 use fluvio_protocol::record::Record;
 use fluvio_compression::Compression;
 #[cfg(feature = "compress")]
 use fluvio_sc_schema::topic::CompressionAlgorithm;
+use fluvio_sc_schema::topic::TopicSpec;
+use fluvio_sc_schema::partition::PartitionSpec;
 use fluvio_types::PartitionId;
 use fluvio_types::event::StickyEvent;
 
@@ -36,6 +43,7 @@ pub use fluvio_protocol::record::{RecordKey, RecordData};
 
 use crate::spu::SpuPool;
 use crate::spu::SpuSocketPool;
+use crate::sync::StoreContext;
 use crate::FluvioError;
 use crate::metrics::ClientMetrics;
 use crate::producer::accumulator::{RecordAccumulator, PushRecord};
@@ -234,6 +242,7 @@ where
     topic: String,
     spu_pool: Arc<S>,
     record_accumulator: Arc<RecordAccumulator>,
+    partition_tracker: Arc<PartitionAvailabilityTracker>,
     producer_pool: Arc<RwLock<ProducerPool>>,
     metrics: Arc<ClientMetrics>,
 }
@@ -249,15 +258,16 @@ where
     }
 
     async fn push_record(self: Arc<Self>, record: Record) -> Result<PushRecord> {
-        let topics = self.spu_pool.topics();
+        let partition_count = self.partition_tracker.partition_count();
+        let available_partitions = self.partition_tracker.available_partitions();
+        let available_partitions_lock = available_partitions.read().await;
 
-        let topic_spec = topics
-            .lookup_by_key(&self.topic)
-            .await?
-            .ok_or_else(|| FluvioError::TopicNotFound(self.topic.to_string()))?
-            .spec;
-        let partition_count = topic_spec.partitions();
-        let partition_config = PartitionerConfig { partition_count };
+        let partition_config = PartitionerConfig {
+            partition_count,
+            available_partitions: available_partitions_lock.clone(),
+        };
+
+        drop(available_partitions_lock);
 
         let key = record.key.as_ref().map(|k| k.as_ref());
         let value = record.value.as_ref();
@@ -402,6 +412,103 @@ cfg_if::cfg_if! {
     }
 }
 
+/// Tracks the availability of partitions for a given topic
+struct PartitionAvailabilityTracker {
+    topic_name: String,
+    available_partitions: Arc<RwLock<Vec<PartitionId>>>,
+    partition_count: AtomicU32,
+    partitions: StoreContext<PartitionSpec>,
+    topics: StoreContext<TopicSpec>,
+    terminate: Arc<Event>,
+}
+
+impl Drop for PartitionAvailabilityTracker {
+    fn drop(&mut self) {
+        self.terminate.notify(usize::MAX);
+    }
+}
+
+impl PartitionAvailabilityTracker {
+    // Spawn a task to update available partitions
+    fn start(
+        initial_partition_count: u32,
+        topic_name: String,
+        partitions: StoreContext<PartitionSpec>,
+        topics: StoreContext<TopicSpec>,
+    ) -> Arc<Self> {
+        let tracker = Arc::new(Self {
+            topic_name,
+            available_partitions: Arc::new(RwLock::new(vec![])),
+            partition_count: AtomicU32::new(initial_partition_count),
+            partitions: partitions.clone(),
+            topics: topics.clone(),
+            terminate: Arc::new(Event::new()),
+        });
+        let shared_tracker = tracker.clone();
+
+        spawn(async move {
+            loop {
+                select! {
+                    _ = async {
+                        shared_tracker.update_available_partitions().await;
+                        sleep(std::time::Duration::from_secs(1)).await;
+                    } => {}
+                    _ = shared_tracker.terminate.listen() => {
+                        break;
+                    },
+                }
+            }
+        });
+
+        tracker
+    }
+
+    async fn update_available_partitions(&self) {
+        let mut available_partitions = vec![];
+        if let Ok(Some(topic)) = self.topics.lookup_by_key(&self.topic_name).await {
+            let partition_count = topic.spec().partitions();
+            // Update the partition count if it has changed
+            if partition_count
+                != self
+                    .partition_count
+                    .load(std::sync::atomic::Ordering::Relaxed)
+            {
+                self.partition_count
+                    .store(partition_count, std::sync::atomic::Ordering::Relaxed);
+            }
+
+            for partition_id in 0..partition_count {
+                if let Ok(Some(partition)) = self
+                    .partitions
+                    .lookup_by_key(&ReplicaKey::new(&self.topic_name, partition_id))
+                    .await
+                {
+                    if partition.status.is_online() {
+                        available_partitions.push(partition_id);
+                    }
+                }
+            }
+            // Update the shared available partitions if they have changed
+            {
+                let read_lock = self.available_partitions.read().await;
+                if available_partitions == *read_lock {
+                    return; // No change needed
+                }
+            }
+            *self.available_partitions.write().await = available_partitions;
+        }
+    }
+
+    fn available_partitions(&self) -> Arc<RwLock<Vec<PartitionId>>> {
+        self.available_partitions.clone()
+    }
+
+    fn partition_count(&self) -> u32 {
+        self.partition_count
+            .load(std::sync::atomic::Ordering::Relaxed)
+    }
+}
+
 impl<S> TopicProducer<S>
 where
     S: SpuPool + Send + Sync + 'static,
@@ -412,12 +519,13 @@ where
         config: Arc<TopicProducerConfig>,
         metrics: Arc<ClientMetrics>,
     ) -> Result<Self> {
-        let topics = spu_pool.topics();
-        let topic_spec: fluvio_sc_schema::topic::TopicSpec = topics
+        let topic_store = spu_pool.topics();
+        let topic_spec = topic_store
            .lookup_by_key(&topic)
            .await?
            .ok_or_else(|| FluvioError::TopicNotFound(topic.to_string()))?
            .spec;
+
        let partition_count = topic_spec.partitions();
 
        cfg_if::cfg_if! {
@@ -435,6 +543,9 @@ where
            partition_count,
            compression,
        );
+
+        let partitions = spu_pool.partitions().clone();
+
        let producer_pool = ProducerPool::new(
            config.clone(),
            topic.clone(),
@@ -444,13 +555,21 @@ where
            config.callback.clone(),
        );
 
+        let partition_tracker = PartitionAvailabilityTracker::start(
+            partition_count,
+            topic.clone(),
+            partitions,
+            spu_pool.topics().clone(),
+        );
+
        Ok(Self {
            inner: Arc::new(InnerTopicProducer {
                config,
                topic,
                spu_pool,
                producer_pool: Arc::new(RwLock::new(producer_pool)),
                record_accumulator: Arc::new(record_accumulator),
+                partition_tracker,
                metrics: metrics.clone(),
            }),
            #[cfg(feature = "smartengine")]
@@ -641,11 +760,16 @@ mod tests {
    use std::sync::Arc;
 
    use async_trait::async_trait;
-    use fluvio_protocol::record::RecordKey;
-    use fluvio_sc_schema::{partition::PartitionSpec, store::MetadataStoreObject, topic::TopicSpec};
+    use fluvio_future::timer::sleep;
+    use fluvio_protocol::record::{RecordKey, ReplicaKey};
+    use fluvio_sc_schema::{
+        partition::{PartitionSpec},
+        store::MetadataStoreObject,
+        topic::TopicSpec,
+    };
    use fluvio_socket::{ClientConfig, SocketError, StreamSocket, VersionedSerialSocket};
    use fluvio_stream_dispatcher::metadata::local::LocalMetadataItem;
-    use fluvio_types::SpuId;
+    use fluvio_types::{PartitionId, SpuId};
 
    use crate::{
        metrics::ClientMetrics,
@@ -656,6 +780,7 @@ mod tests {
 
    struct SpuPoolMock {
        topics: StoreContext<TopicSpec>,
+        partitions: StoreContext<PartitionSpec>,
    }
 
    #[async_trait]
@@ -691,7 +816,7 @@ mod tests {
        }
 
        fn partitions(&self) -> &StoreContext<PartitionSpec> {
-            todo!()
+            &self.partitions
        }
    }
 
@@ -708,10 +833,21 @@ mod tests {
                (partitions_count, 2, false).into(), // 2 partitions, 2 replicas, not ignore rack
            ),
        ];
-
+        let partition_2 = vec![
+            MetadataStoreObject::<PartitionSpec, LocalMetadataItem>::with_spec(
+                ReplicaKey::new(topic.clone(), 0 as PartitionId),
+                vec![0, 1].into(),
+            ),
+            MetadataStoreObject::<PartitionSpec, LocalMetadataItem>::with_spec(
+                ReplicaKey::new(topic.clone(), 1 as PartitionId),
+                vec![0, 1].into(),
+            ),
+        ];
        let topics = StoreContext::<TopicSpec>::new();
-        let spu_pool = Arc::new(SpuPoolMock { topics });
+        let partitions = StoreContext::<PartitionSpec>::new();
+        let spu_pool = Arc::new(SpuPoolMock { topics, partitions });
        spu_pool.topics().store().sync_all(topic_2_partitions).await;
+        spu_pool.partitions().store().sync_all(partition_2).await;
        let producer = TopicProducer::new(topic.clone(), spu_pool.clone(), config, metrics)
            .await
            .expect("producer");
@@ -750,6 +886,8 @@ mod tests {
 
        spu_pool.topics.store().sync_all(topic_3_partitions).await;
 
+        sleep(std::time::Duration::from_secs(2)).await;
+
        let _ = producer
            .send(RecordKey::NULL, "789".to_string())
            .await
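
For context, the tracker added above is internal to the client: a producer obtained through the public Fluvio API picks it up automatically, so applications do not interact with it directly. A minimal usage sketch, assuming a reachable cluster and an existing topic; the topic name "my-topic" is illustrative:

use fluvio::{Fluvio, RecordKey};

async fn produce() -> anyhow::Result<()> {
    // Connect to the cluster and create a producer for the topic
    let fluvio = Fluvio::connect().await?;
    let producer = fluvio.topic_producer("my-topic").await?;

    // Send a record; with this change the partitioner config also carries
    // the list of partitions the background tracker currently reports as online
    producer.send(RecordKey::NULL, "hello").await?;
    producer.flush().await?;
    Ok(())
}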