@@ -396,6 +396,10 @@ static int qc_send_ppkts(struct buffer *buf, struct ssl_sock_ctx *ctx)
 			qc->path->ifae_pkts++;
 			if (qc->flags & QUIC_FL_CONN_IDLE_TIMER_RESTARTED_AFTER_READ)
 				qc_idle_timer_rearm(qc, 0, 0);
+			if (cc->algo->on_transmit)
+				cc->algo->on_transmit(cc);
+			if (cc->algo->drs_on_transmit)
+				cc->algo->drs_on_transmit(cc, pkt);
 		}
 		if (!(qc->flags & QUIC_FL_CONN_CLOSING) &&
 		    (pkt->flags & QUIC_FL_TX_PACKET_CC)) {
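
This hunk wires the congestion controller into the post-send path: each ack-eliciting packet now triggers two optional per-algorithm callbacks, and the NULL checks make both hooks opt-in. The drs_ prefix presumably stands for delivery rate sampling, which needs to stamp per-packet state, hence the extra pkt argument. From the call sites here and in the pacing hunk below, the callback table would gain members shaped roughly like this sketch (signatures inferred from the calls only; the quic_tx_packet type and the placement among the existing callbacks are assumptions):

    /* Sketch of the quic_cc_algo members implied by the new call sites.
     * Names and signatures are inferred, not copied from the patch. */
    struct quic_cc_algo {
    	/* ... pre-existing callbacks (init, on_ack_rcvd, on_pkt_lost, ...) ... */
    	void (*on_transmit)(struct quic_cc *cc);
    	void (*drs_on_transmit)(struct quic_cc *cc, struct quic_tx_packet *pkt);
    	ullong (*pacing_delay_ns)(struct quic_cc *cc, int *max_dgram);
    };
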
@@ -476,6 +480,7 @@ enum quic_tx_err qc_send_mux(struct quic_conn *qc, struct list *frms,
 	struct list send_list = LIST_HEAD_INIT(send_list);
 	enum quic_tx_err ret = QUIC_TX_ERR_NONE;
 	int max_dgram = 0, sent;
+	ullong ns_pkts;
 
 	TRACE_ENTER(QUIC_EV_CONN_TXPKT, qc);
 	BUG_ON(qc->mux_state != QC_MUX_READY); /* Only MUX can use this function so it must be ready. */
@@ -495,8 +500,9 @@ enum quic_tx_err qc_send_mux(struct quic_conn *qc, struct list *frms,
 	}
 
 	if (pacer) {
-		const ullong ns_pkts = quic_pacing_ns_pkt(pacer);
-		max_dgram = global.tune.quic_frontend_max_tx_burst * 1000000 / (ns_pkts + 1) + 1;
+		struct quic_cc *cc = &qc->path->cc;
+
+		ns_pkts = cc->algo->pacing_delay_ns(cc, &max_dgram);
 	}
 
 	TRACE_STATE("preparing data (from MUX)", QUIC_EV_CONN_TXPKT, qc);
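
Here the burst sizing moves out of qc_send_mux() and into the algorithm. The removed lines derived max_dgram from the global.tune.quic_frontend_max_tx_burst tunable, converted from milliseconds to nanoseconds by the x1000000 factor and divided by the pacer's inter-datagram delay, with the two +1 terms guarding against division by zero and guaranteeing at least one datagram per burst. Note that ns_pkts is hoisted to function scope in the previous hunk so the value survives until the sent-done call in the last hunk. An algorithm could implement the new callback along these lines (a hypothetical sketch: the container_of access and the quic_cc_path field names, cwnd, mtu and loss.srtt in milliseconds, are assumptions):

    /* Hypothetical pacing_delay_ns: spread the congestion window evenly
     * over one smoothed RTT, then size the burst exactly as the removed
     * lines did. */
    static ullong example_pacing_delay_ns(struct quic_cc *cc, int *max_dgram)
    {
    	struct quic_cc_path *path = container_of(cc, struct quic_cc_path, cc);
    	/* datagrams per window; +1 avoids a zero divisor */
    	ullong pkts = path->cwnd / path->mtu + 1;
    	/* inter-datagram delay in ns: one srtt (ms) spread over the window */
    	ullong ns_pkts = (ullong)path->loss.srtt * 1000000ULL / pkts;
 
    	/* same burst sizing as the removed lines: burst tunable (ms) in ns,
    	 * divided by the per-datagram delay */
    	*max_dgram = global.tune.quic_frontend_max_tx_burst * 1000000 / (ns_pkts + 1) + 1;
    	return ns_pkts;
    }
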
@@ -508,7 +514,7 @@ enum quic_tx_err qc_send_mux(struct quic_conn *qc, struct list *frms,
 	else if (pacer) {
 		if (max_dgram && max_dgram == sent && !LIST_ISEMPTY(frms))
 			ret = QUIC_TX_ERR_AGAIN;
-		quic_pacing_sent_done(pacer, sent);
+		quic_pacing_sent_done(pacer, sent, ns_pkts);
 	}
 
 	TRACE_LEAVE(QUIC_EV_CONN_TXPKT, qc);
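
Finally, quic_pacing_sent_done() grows a third argument: the delay already computed by pacing_delay_ns() is handed to the pacer when the burst completes, so the next emission is scheduled with the same value that sized the burst instead of being recomputed. A sketch of what the extended helper plausibly does (only the three-argument signature comes from the patch; the quic_pacer field name and the use of now_mono_time() are assumptions):

    /* Plausible body: push the pacer's next release time forward by one
     * inter-datagram delay per datagram just sent. */
    void quic_pacing_sent_done(struct quic_pacer *pacer, int sent, ullong ns_pkts)
    {
    	pacer->next = now_mono_time() + (ullong)sent * ns_pkts;
    }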