linera_core/updater.rs

// Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

use std::{
    collections::{BTreeMap, BTreeSet, HashMap},
    fmt,
    hash::Hash,
    mem,
    sync::Arc,
};

use futures::{
    stream::{FuturesUnordered, TryStreamExt},
    Future, StreamExt,
};
use linera_base::{
    crypto::ValidatorPublicKey,
    data_types::{BlockHeight, Round, TimeDelta},
    ensure,
    identifiers::{BlobId, BlobType, ChainId, StreamId},
    time::{timer::timeout, Duration, Instant},
};
use linera_chain::{
    data_types::{BlockProposal, LiteVote},
    manager::LockingBlock,
    types::{ConfirmedBlock, GenericCertificate, ValidatedBlock, ValidatedBlockCertificate},
};
use linera_execution::{committee::Committee, system::EPOCH_STREAM_NAME};
use linera_storage::{Clock, Storage};
use thiserror::Error;
use tokio::sync::mpsc;
use tracing::{instrument, Level};

use crate::{
    client::{chain_client, Client},
    data_types::{ChainInfo, ChainInfoQuery},
    environment::Environment,
    node::{CrossChainMessageDelivery, NodeError, ValidatorNode},
    remote_node::RemoteNode,
    LocalNodeError,
};

/// The default amount of time we wait for additional validators to contribute
/// to the result, as a fraction of how long it took to reach a quorum.
pub const DEFAULT_QUORUM_GRACE_PERIOD: f64 = 0.2;

/// A report of clock skew from a validator, sent before retrying due to `InvalidTimestamp`.
pub type ClockSkewReport = (ValidatorPublicKey, TimeDelta);
/// The maximum timeout for requests to a stake-weighted quorum if no quorum is reached.
const MAX_TIMEOUT: Duration = Duration::from_secs(60 * 60 * 24); // 1 day.

/// Used for `communicate_chain_action`.
#[derive(Clone)]
pub enum CommunicateAction {
    SubmitBlock {
        proposal: Box<BlockProposal>,
        blob_ids: Vec<BlobId>,
        /// Channel to report clock skew before sleeping, so the caller can aggregate reports.
        clock_skew_sender: mpsc::UnboundedSender<ClockSkewReport>,
    },
    FinalizeBlock {
        certificate: Box<ValidatedBlockCertificate>,
        delivery: CrossChainMessageDelivery,
    },
    RequestTimeout {
        chain_id: ChainId,
        height: BlockHeight,
        round: Round,
    },
}

impl CommunicateAction {
    /// The round to which this action pertains.
    pub fn round(&self) -> Round {
        match self {
            CommunicateAction::SubmitBlock { proposal, .. } => proposal.content.round,
            CommunicateAction::FinalizeBlock { certificate, .. } => certificate.round,
            CommunicateAction::RequestTimeout { round, .. } => *round,
        }
    }
}

pub struct ValidatorUpdater<Env>
where
    Env: Environment,
{
    pub remote_node: RemoteNode<Env::ValidatorNode>,
    pub client: Arc<Client<Env>>,
    pub admin_chain_id: ChainId,
}

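// Implemented manually: a derived `Clone` would add an unnecessary `Env: Clone` bound.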
impl<Env: Environment> Clone for ValidatorUpdater<Env> {
    fn clone(&self) -> Self {
        ValidatorUpdater {
            remote_node: self.remote_node.clone(),
            client: self.client.clone(),
            admin_chain_id: self.admin_chain_id,
        }
    }
}

/// An error result for requests to a stake-weighted quorum.
#[derive(Error, Debug)]
pub enum CommunicationError<E: fmt::Debug> {
    /// No consensus is possible since validators returned different possibilities
    /// for the next block.
    #[error(
        "No error but failed to find a consensus block. Consensus threshold: {0}, Proposals: {1:?}"
    )]
    NoConsensus(u64, Vec<(u64, usize)>),
    /// A single error that was returned by a sufficient number of nodes to be trusted as
    /// valid.
    #[error("Failed to communicate with a quorum of validators: {0}")]
    Trusted(E),
    /// No single error reached the validity threshold, so we're returning a sample of
    /// errors for debugging purposes, together with their weight.
    #[error("Failed to communicate with a quorum of validators:\n{:#?}", .0)]
    Sample(Vec<(E, u64)>),
}
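
// For intuition (illustrative, not the exact threshold formulas): with at most a third
// of the total stake Byzantine, a quorum requires more than two thirds of the stake,
// while the validity threshold is just over one third, so at least one honest validator
// must have contributed to any error that reaches it.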

/// Executes a sequence of actions in parallel for all validators.
///
/// Tries to stop early when a quorum is reached. If `quorum_grace_period` is specified, other
/// validators are given additional time to contribute to the result. The grace period is
/// calculated as a fraction (defaulting to `DEFAULT_QUORUM_GRACE_PERIOD`) of the time taken to
/// reach quorum.
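///
/// # Example
///
/// A minimal sketch of a call site; the validator list, committee, and `query` value are
/// assumptions for illustration, and `execute` runs once per validator:
///
/// ```ignore
/// let (height, responses) = communicate_with_quorum(
///     &validator_clients,
///     &committee,
///     |info| info.next_block_height, // group equivalent answers together
///     |remote_node| {
///         let query = query.clone();
///         async move { remote_node.handle_chain_info_query(query).await }
///     },
///     DEFAULT_QUORUM_GRACE_PERIOD,
/// )
/// .await?;
/// ```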
pub async fn communicate_with_quorum<'a, A, V, K, F, R, G>(
    validator_clients: &'a [RemoteNode<A>],
    committee: &Committee,
    group_by: G,
    execute: F,
    // Grace period as a fraction of time taken to reach quorum.
    quorum_grace_period: f64,
) -> Result<(K, Vec<(ValidatorPublicKey, V)>), CommunicationError<NodeError>>
where
    A: ValidatorNode + Clone + 'static,
    F: Clone + Fn(RemoteNode<A>) -> R,
    R: Future<Output = Result<V, chain_client::Error>> + 'a,
    G: Fn(&V) -> K,
    K: Hash + PartialEq + Eq + Clone + 'static,
    V: 'static,
{
    let mut responses: FuturesUnordered<_> = validator_clients
        .iter()
        .filter_map(|remote_node| {
            if committee.weight(&remote_node.public_key) == 0 {
                // This should not happen, but we guard against it anyway: certificates
                // are not allowed to include votes with weight 0.
                return None;
            }
            let execute = execute.clone();
            let remote_node = remote_node.clone();
            Some(async move { (remote_node.public_key, execute(remote_node).await) })
        })
        .collect();

    let start_time = Instant::now();
    let mut end_time: Option<Instant> = None;
    let mut remaining_votes = committee.total_votes();
    let mut highest_key_score = 0;
    let mut value_scores: HashMap<K, (u64, Vec<(ValidatorPublicKey, V)>)> = HashMap::new();
    let mut error_scores = HashMap::new();

    'vote_wait: while let Ok(Some((name, result))) = timeout(
        end_time.map_or(MAX_TIMEOUT, |t| t.saturating_duration_since(Instant::now())),
        responses.next(),
    )
    .await
    {
        remaining_votes -= committee.weight(&name);
        match result {
            Ok(value) => {
                let key = group_by(&value);
                let entry = value_scores.entry(key.clone()).or_insert((0, Vec::new()));
                entry.0 += committee.weight(&name);
                entry.1.push((name, value));
                highest_key_score = highest_key_score.max(entry.0);
            }
            Err(err) => {
                // TODO(#2857): Handle non-remote errors properly.
                let err = match err {
                    chain_client::Error::RemoteNodeError(err) => err,
                    err => NodeError::ResponseHandlingError {
                        error: err.to_string(),
                    },
                };
                let entry = error_scores.entry(err.clone()).or_insert(0);
                *entry += committee.weight(&name);
                if *entry >= committee.validity_threshold() {
                    // At least one honest node returned this error.
                    // No quorum can be reached, so return early.
                    return Err(CommunicationError::Trusted(err));
                }
            }
        }
        // If it becomes clear that no key can reach a quorum, break early.
        if highest_key_score + remaining_votes < committee.quorum_threshold() {
            break 'vote_wait;
        }

        // If a key reaches a quorum, wait for the grace period to collect more values
        // or error information and then stop.
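        // E.g. with the default factor of 0.2, a quorum reached after 10 seconds grants
        // the remaining validators 2 extra seconds to respond.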
        if end_time.is_none() && highest_key_score >= committee.quorum_threshold() {
            end_time = Some(Instant::now() + start_time.elapsed().mul_f64(quorum_grace_period));
        }
    }

    let scores = value_scores
        .values()
        .map(|(weight, values)| (*weight, values.len()))
        .collect();
    // If a key has a quorum, return it with its values.
    if let Some((key, (_, values))) = value_scores
        .into_iter()
        .find(|(_, (score, _))| *score >= committee.quorum_threshold())
    {
        return Ok((key, values));
    }

    if error_scores.is_empty() {
        return Err(CommunicationError::NoConsensus(
            committee.quorum_threshold(),
            scores,
        ));
    }

    // No specific error is available to report reliably.
    let mut sample = error_scores.into_iter().collect::<Vec<_>>();
    sample.sort_by_key(|(_, score)| std::cmp::Reverse(*score));
    sample.truncate(4);
    Err(CommunicationError::Sample(sample))
}

impl<Env> ValidatorUpdater<Env>
where
    Env: Environment + 'static,
{
    #[instrument(
        level = "trace", skip_all, err(level = Level::WARN),
        fields(chain_id = %certificate.block().header.chain_id)
    )]
    async fn send_confirmed_certificate(
        &mut self,
        certificate: GenericCertificate<ConfirmedBlock>,
        delivery: CrossChainMessageDelivery,
    ) -> Result<Box<ChainInfo>, chain_client::Error> {
        let mut result = self
            .remote_node
            .handle_optimized_confirmed_certificate(&certificate, delivery)
            .await;

        let mut sent_admin_chain = false;
        let mut sent_blobs = false;
        loop {
            match result {
                Err(NodeError::EventsNotFound(event_ids))
                    if !sent_admin_chain
                        && certificate.inner().chain_id() != self.admin_chain_id
                        && event_ids.iter().all(|event_id| {
                            event_id.stream_id == StreamId::system(EPOCH_STREAM_NAME)
                                && event_id.chain_id == self.admin_chain_id
                        }) =>
                {
                    // The validator doesn't have the committee that signed the certificate.
                    self.update_admin_chain().await?;
                    sent_admin_chain = true;
                }
                Err(NodeError::BlobsNotFound(blob_ids)) if !sent_blobs => {
                    // The validator is missing the blobs required by the certificate.
                    self.remote_node
                        .check_blobs_not_found(&certificate, &blob_ids)?;
                    // The certificate is confirmed, so the blobs must be in storage.
                    let maybe_blobs = self
                        .client
                        .local_node
                        .read_blobs_from_storage(&blob_ids)
                        .await?;
                    let blobs = maybe_blobs.ok_or(NodeError::BlobsNotFound(blob_ids))?;
                    self.remote_node.node.upload_blobs(blobs).await?;
                    sent_blobs = true;
                }
                result => return Ok(result?),
            }
            result = self
                .remote_node
                .handle_confirmed_certificate(certificate.clone(), delivery)
                .await;
        }
    }

    async fn send_validated_certificate(
        &mut self,
        certificate: GenericCertificate<ValidatedBlock>,
        delivery: CrossChainMessageDelivery,
    ) -> Result<Box<ChainInfo>, chain_client::Error> {
        let result = self
            .remote_node
            .handle_optimized_validated_certificate(&certificate, delivery)
            .await;

        let chain_id = certificate.inner().chain_id();
        match &result {
            Err(original_err @ NodeError::BlobsNotFound(blob_ids)) => {
                self.remote_node
                    .check_blobs_not_found(&certificate, blob_ids)?;
                // The certificate is for a validated block, i.e. for our locking block.
                // Take the missing blobs from our local chain manager.
                let blobs = self
                    .client
                    .local_node
                    .get_locking_blobs(blob_ids, chain_id)
                    .await?
                    .ok_or_else(|| original_err.clone())?;
                self.remote_node.send_pending_blobs(chain_id, blobs).await?;
            }
            Err(error) => {
                self.sync_if_needed(
                    chain_id,
                    certificate.round,
                    certificate.block().header.height,
                    error,
                )
                .await?;
            }
            _ => return Ok(result?),
        }
        Ok(self
            .remote_node
            .handle_validated_certificate(certificate)
            .await?)
    }

    /// Requests a vote for a timeout certificate for the given round from the remote node.
    ///
    /// If the remote node is not in that round or at that height yet, sends the chain information
    /// to update it.
    async fn request_timeout(
        &mut self,
        chain_id: ChainId,
        round: Round,
        height: BlockHeight,
    ) -> Result<Box<ChainInfo>, chain_client::Error> {
        let query = ChainInfoQuery::new(chain_id).with_timeout(height, round);
        let result = self
            .remote_node
            .handle_chain_info_query(query.clone())
            .await;
        if let Err(err) = &result {
            self.sync_if_needed(chain_id, round, height, err).await?;
        }
        Ok(result?)
    }

    /// Synchronizes either the local node or the remote node, if one of them is lagging behind.
    async fn sync_if_needed(
        &mut self,
        chain_id: ChainId,
        round: Round,
        height: BlockHeight,
        error: &NodeError,
    ) -> Result<(), chain_client::Error> {
        let address = &self.remote_node.address();
        match error {
            NodeError::WrongRound(validator_round) if *validator_round > round => {
                tracing::debug!(
                    address, %chain_id, %validator_round, %round,
                    "validator is at a higher round; synchronizing",
                );
                self.client
                    .synchronize_chain_state_from(&self.remote_node, chain_id)
                    .await?;
            }
            NodeError::UnexpectedBlockHeight {
                expected_block_height,
                found_block_height,
            } if expected_block_height > found_block_height => {
                tracing::debug!(
                    address,
                    %chain_id,
                    %expected_block_height,
                    %found_block_height,
                    "validator is at a higher height; synchronizing",
                );
                self.client
                    .synchronize_chain_state_from(&self.remote_node, chain_id)
                    .await?;
            }
            NodeError::WrongRound(validator_round) if *validator_round < round => {
                tracing::debug!(
                    address, %chain_id, %validator_round, %round,
                    "validator is at a lower round; sending chain info",
                );
                self.send_chain_information(
                    chain_id,
                    height,
                    CrossChainMessageDelivery::NonBlocking,
                    None,
                )
                .await?;
            }
            NodeError::UnexpectedBlockHeight {
                expected_block_height,
                found_block_height,
            } if expected_block_height < found_block_height => {
                tracing::debug!(
                    address,
                    %chain_id,
                    %expected_block_height,
                    %found_block_height,
                    "validator is at a lower height; sending chain info",
                );
                self.send_chain_information(
                    chain_id,
                    height,
                    CrossChainMessageDelivery::NonBlocking,
                    None,
                )
                .await?;
            }
            NodeError::InactiveChain(chain_id) => {
                tracing::debug!(
                    address,
                    %chain_id,
                    "validator has inactive chain; sending chain info",
                );
                self.send_chain_information(
                    *chain_id,
                    height,
                    CrossChainMessageDelivery::NonBlocking,
                    None,
                )
                .await?;
            }
            _ => {}
        }
        Ok(())
    }

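    /// Sends a block proposal to the remote node, repairing the validator's state and
    /// retrying as needed: it sends missing chain information, cross-chain updates,
    /// event publisher chains, or blobs, and waits out clock skew on `InvalidTimestamp`.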
    async fn send_block_proposal(
        &mut self,
        proposal: Box<BlockProposal>,
        mut blob_ids: Vec<BlobId>,
        clock_skew_sender: mpsc::UnboundedSender<ClockSkewReport>,
    ) -> Result<Box<ChainInfo>, chain_client::Error> {
        let chain_id = proposal.content.block.chain_id;
        let mut sent_cross_chain_updates = BTreeMap::new();
        let mut publisher_chain_ids_sent = BTreeSet::new();
        let storage = self.client.local_node.storage_client();
        loop {
            let local_time = storage.clock().current_time();
            match self
                .remote_node
                .handle_block_proposal(proposal.clone())
                .await
            {
                Ok(info) => return Ok(info),
                Err(NodeError::WrongRound(_round)) => {
                    // The proposal is for a different round, so we need to update the validator.
                    // TODO: this should probably be more specific as to which rounds are retried.
                    tracing::debug!(
                        remote_node = self.remote_node.address(),
                        %chain_id,
                        "wrong round; sending chain to validator",
                    );
                    self.send_chain_information(
                        chain_id,
                        proposal.content.block.height,
                        CrossChainMessageDelivery::NonBlocking,
                        None,
                    )
                    .await?;
                }
                Err(NodeError::UnexpectedBlockHeight {
                    expected_block_height,
                    found_block_height,
                }) if expected_block_height < found_block_height
                    && found_block_height == proposal.content.block.height =>
                {
                    tracing::debug!(
                        remote_node = self.remote_node.address(),
                        %chain_id,
                        "wrong height; sending chain to validator",
                    );
                    // The proposal is for a later block height, so we need to update the validator.
                    self.send_chain_information(
                        chain_id,
                        found_block_height,
                        CrossChainMessageDelivery::NonBlocking,
                        None,
                    )
                    .await?;
                }
                Err(NodeError::MissingCrossChainUpdate {
                    chain_id,
                    origin,
                    height,
                }) if chain_id == proposal.content.block.chain_id
                    && sent_cross_chain_updates
                        .get(&origin)
                        .is_none_or(|h| *h < height) =>
                {
                    tracing::debug!(
                        remote_node = %self.remote_node.address(),
                        chain_id = %origin,
                        "missing cross-chain update; sending chain to validator",
                    );
                    sent_cross_chain_updates.insert(origin, height);
                    // Some received certificates may be missing for this validator (e.g. those
                    // creating the chain or funding its balance), so we synchronize them now
                    // and retry.
                    self.send_chain_information(
                        origin,
                        height.try_add_one()?,
                        CrossChainMessageDelivery::Blocking,
                        None,
                    )
                    .await?;
                }
                Err(NodeError::EventsNotFound(event_ids)) => {
                    let mut publisher_heights = BTreeMap::new();
                    let chain_ids = event_ids
                        .iter()
                        .map(|event_id| event_id.chain_id)
                        .filter(|chain_id| !publisher_chain_ids_sent.contains(chain_id))
                        .collect::<BTreeSet<_>>();
                    tracing::debug!(
                        remote_node = self.remote_node.address(),
                        ?chain_ids,
                        "missing events; sending chains to validator",
                    );
                    ensure!(!chain_ids.is_empty(), NodeError::EventsNotFound(event_ids));
                    for chain_id in chain_ids {
                        let height = self
                            .client
                            .local_node
                            .get_next_height_to_preprocess(chain_id)
                            .await?;
                        publisher_heights.insert(chain_id, height);
                        publisher_chain_ids_sent.insert(chain_id);
                    }
                    self.send_chain_info_up_to_heights(
                        publisher_heights,
                        CrossChainMessageDelivery::NonBlocking,
                    )
                    .await?;
                }
                Err(NodeError::BlobsNotFound(_) | NodeError::InactiveChain(_))
                    if !blob_ids.is_empty() =>
                {
                    tracing::debug!("missing blobs");
                    // For `BlobsNotFound`, we assume the local node already has the needed
                    // blobs, so it is enough to send the validator the chain information for
                    // the certificates that last used those blobs.
                    let published_blob_ids =
                        BTreeSet::from_iter(proposal.content.block.published_blob_ids());
                    blob_ids.retain(|blob_id| !published_blob_ids.contains(blob_id));
                    let published_blobs = self
                        .client
                        .local_node
                        .get_proposed_blobs(chain_id, published_blob_ids.into_iter().collect())
                        .await?;
                    self.remote_node
                        .send_pending_blobs(chain_id, published_blobs)
                        .await?;
                    let missing_blob_ids = self
                        .remote_node
                        .node
                        .missing_blob_ids(mem::take(&mut blob_ids))
                        .await?;

                    tracing::debug!("sending chains for missing blobs");
                    self.send_chain_info_for_blobs(
                        &missing_blob_ids,
                        CrossChainMessageDelivery::NonBlocking,
                    )
                    .await?;
                }
                Err(NodeError::InvalidTimestamp {
                    block_timestamp,
                    local_time: validator_local_time,
                    ..
                }) => {
                    // The validator's clock is behind the block's timestamp. We need to
                    // wait for two things:
                    // 1. Our clock to reach block_timestamp (in case the block timestamp
                    //    is in the future from our perspective too).
                    // 2. The validator's clock to catch up (in case of clock skew between
                    //    us and the validator).
                    let clock_skew = local_time.delta_since(validator_local_time);
                    tracing::debug!(
                        remote_node = self.remote_node.address(),
                        %chain_id,
                        %block_timestamp,
                        ?clock_skew,
                        "validator's clock is behind; waiting and retrying",
                    );
                    // Report the clock skew before sleeping so the caller can aggregate.
                    // The receiver may have been dropped if the caller is no longer interested.
                    clock_skew_sender
                        .send((self.remote_node.public_key, clock_skew))
                        .ok();
                    storage
                        .clock()
                        .sleep_until(block_timestamp.saturating_add(clock_skew))
                        .await;
                }
                // Fail immediately on other errors.
                Err(err) => return Err(err.into()),
            }
        }
    }

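    /// Brings the validator up to date with the admin chain, e.g. so that it knows the
    /// epoch events (committees) needed to verify certificates.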
    async fn update_admin_chain(&mut self) -> Result<(), chain_client::Error> {
        let local_admin_info = self
            .client
            .local_node
            .chain_info(self.admin_chain_id)
            .await?;
        Box::pin(self.send_chain_information(
            self.admin_chain_id,
            local_admin_info.next_block_height,
            CrossChainMessageDelivery::NonBlocking,
            None,
        ))
        .await
    }

    /// Sends chain information to bring a validator up to date with a specific chain.
    ///
    /// This method performs a two-phase synchronization:
    /// 1. **Height synchronization**: Ensures the validator has all blocks up to `target_block_height`.
    /// 2. **Round synchronization**: If heights match, ensures the validator has proposals/certificates
    ///    for the current consensus round.
    ///
    /// # Height Sync Strategy
    /// - For existing chains (`target_block_height > 0`):
    ///   * Optimistically sends the last certificate first (often that's all that's missing).
    ///   * Falls back to a full chain query if the validator needs more context.
    ///   * Sends any additional missing certificates in order.
    /// - For new chains (`target_block_height == 0`):
    ///   * Sends the chain description and dependencies first.
    ///   * Then queries the validator's state.
    ///
    /// # Round Sync Strategy
    /// Once heights match, if the local node is at a higher round, sends the evidence
    /// (proposal, validated block, or timeout certificate) that proves the current round.
    ///
    /// # Parameters
    /// - `chain_id`: The chain to synchronize.
    /// - `target_block_height`: The height the validator should reach.
    /// - `delivery`: Message delivery mode (blocking or non-blocking).
    /// - `latest_certificate`: Optional certificate at `target_block_height - 1`, to avoid a storage lookup.
    ///
    /// # Returns
    /// - `Ok(())` if synchronization completed successfully or the validator is already up to date.
    /// - `Err` if there was a communication or storage error.
    #[instrument(level = "trace", skip_all)]
    pub async fn send_chain_information(
        &mut self,
        chain_id: ChainId,
        target_block_height: BlockHeight,
        delivery: CrossChainMessageDelivery,
        latest_certificate: Option<GenericCertificate<ConfirmedBlock>>,
    ) -> Result<(), chain_client::Error> {
        // Phase 1: Height synchronization
        let info = if target_block_height.0 > 0 {
            self.sync_chain_height(chain_id, target_block_height, delivery, latest_certificate)
                .await?
        } else {
            self.initialize_new_chain_on_validator(chain_id).await?
        };

        // Phase 2: Round synchronization (if needed)
        // Height synchronization is complete. Now check if we need to synchronize
        // the consensus round at this height.
        let (remote_height, remote_round) = (info.next_block_height, info.manager.current_round);
        let query = ChainInfoQuery::new(chain_id).with_manager_values();
        let local_info = match self.client.local_node.handle_chain_info_query(query).await {
            Ok(response) => response.info,
            // If we don't have the full chain description locally, we can't help the
            // validator with round synchronization. This is not an error; the validator
            // should retry later once the chain is fully initialized locally.
            Err(LocalNodeError::BlobsNotFound(_)) => {
                tracing::debug!("local chain description not fully available, skipping round sync");
                return Ok(());
            }
            Err(error) => return Err(error.into()),
        };

        let manager = local_info.manager;
        if local_info.next_block_height != remote_height || manager.current_round <= remote_round {
            return Ok(());
        }

        // The validator is at our height but behind on the consensus round.
        self.sync_consensus_round(remote_round, &manager).await
    }

    /// Synchronizes a validator to a specific block height by sending missing certificates.
    ///
    /// Uses an optimistic approach: sends the last certificate first, then fills in any gaps.
    ///
    /// Returns the [`ChainInfo`] from the validator after synchronization.
    async fn sync_chain_height(
        &mut self,
        chain_id: ChainId,
        target_block_height: BlockHeight,
        delivery: CrossChainMessageDelivery,
        latest_certificate: Option<GenericCertificate<ConfirmedBlock>>,
    ) -> Result<Box<ChainInfo>, chain_client::Error> {
        let height = target_block_height.try_sub_one()?;

        // Get the certificate for the last block we want to send.
        let certificate = if let Some(cert) = latest_certificate {
            cert
        } else {
            self.read_certificates_for_heights(chain_id, vec![height])
                .await?
                .into_iter()
                .next()
                .ok_or_else(|| {
                    chain_client::Error::InternalError(
                        "failed to read latest certificate for height sync",
                    )
                })?
        };

        // Optimistically try sending just the last certificate.
        let info = match self.send_confirmed_certificate(certificate, delivery).await {
            Ok(info) => info,
            Err(error) => {
                tracing::debug!(
                    address = self.remote_node.address(), %error,
                    "validator failed to handle confirmed certificate; sending whole chain",
                );
                let query = ChainInfoQuery::new(chain_id);
                self.remote_node.handle_chain_info_query(query).await?
            }
        };

        // Calculate which block heights the validator is still missing.
        let heights: Vec<_> = (info.next_block_height.0..target_block_height.0)
            .map(BlockHeight)
            .collect();

        if heights.is_empty() {
            return Ok(info);
        }

        // Send any additional missing certificates in order.
        let certificates = self
            .read_certificates_for_heights(chain_id, heights)
            .await?;

        for certificate in certificates {
            self.send_confirmed_certificate(certificate, delivery)
                .await?;
        }

        Ok(info)
    }

    /// Reads certificates for the given heights from storage.
    async fn read_certificates_for_heights(
        &self,
        chain_id: ChainId,
        heights: Vec<BlockHeight>,
    ) -> Result<Vec<GenericCertificate<ConfirmedBlock>>, chain_client::Error> {
        let storage = self.client.local_node.storage_client();

        let certificates_by_height = storage
            .read_certificates_by_heights(chain_id, &heights)
            .await?;

        Ok(certificates_by_height.into_iter().flatten().collect())
    }

    /// Initializes a new chain on the validator by sending the chain description and dependencies.
    ///
    /// This is called when the validator doesn't know about the chain yet.
    ///
    /// Returns the [`ChainInfo`] from the validator after initialization.
    async fn initialize_new_chain_on_validator(
        &mut self,
        chain_id: ChainId,
    ) -> Result<Box<ChainInfo>, chain_client::Error> {
        // Send the chain description and all dependency chains.
        self.send_chain_info_for_blobs(
            &[BlobId::new(chain_id.0, BlobType::ChainDescription)],
            CrossChainMessageDelivery::NonBlocking,
        )
        .await?;

        // Query the validator's state for this chain.
        let query = ChainInfoQuery::new(chain_id);
        let info = self.remote_node.handle_chain_info_query(query).await?;
        Ok(info)
    }

    /// Synchronizes the consensus round state with the validator.
    ///
    /// If the validator is at the same height but an earlier round, sends the evidence
    /// (proposal, validated block, or timeout certificate) that justifies the current round.
    ///
    /// This is a best-effort operation: failures are logged but don't fail the entire sync.
    async fn sync_consensus_round(
        &mut self,
        remote_round: Round,
        manager: &linera_chain::manager::ChainManagerInfo,
    ) -> Result<(), chain_client::Error> {
        // Try to send a proposal for the current round.
        for proposal in manager
            .requested_proposed
            .iter()
            .chain(manager.requested_signed_proposal.iter())
        {
            if proposal.content.round == manager.current_round {
                match self
                    .remote_node
                    .handle_block_proposal(proposal.clone())
                    .await
                {
                    Ok(_) => {
                        tracing::debug!("successfully sent block proposal for round sync");
                        return Ok(());
                    }
                    Err(error) => {
                        tracing::debug!(%error, "failed to send block proposal");
                    }
                }
            }
        }

        // Try to send a validated block for the current round.
        if let Some(LockingBlock::Regular(validated)) = manager.requested_locking.as_deref() {
            if validated.round == manager.current_round {
                match self
                    .remote_node
                    .handle_optimized_validated_certificate(
                        validated,
                        CrossChainMessageDelivery::NonBlocking,
                    )
                    .await
                {
                    Ok(_) => {
                        tracing::debug!("successfully sent validated block for round sync");
                        return Ok(());
                    }
                    Err(error) => {
                        tracing::debug!(%error, "failed to send validated block");
                    }
                }
            }
        }

        // Try to send a timeout certificate.
        if let Some(cert) = &manager.timeout {
            if cert.round >= remote_round {
                match self
                    .remote_node
                    .handle_timeout_certificate(cert.as_ref().clone())
                    .await
                {
                    Ok(_) => {
                        tracing::debug!(round = %cert.round, "successfully sent timeout certificate");
                        return Ok(());
                    }
                    Err(error) => {
                        tracing::debug!(%error, round = %cert.round, "failed to send timeout certificate");
                    }
                }
            }
        }

        // If we reach here, either we had no round sync data to send, or all attempts failed.
        // This is not a fatal error: height sync succeeded, which is the primary goal.
        tracing::debug!("round sync not performed: no applicable data or all attempts failed");
        Ok(())
    }

    /// Sends chain information for all chains referenced by the given blobs.
    ///
    /// Reads blob states from storage, determines the specific chain heights needed,
    /// and sends chain information for those heights. With sparse chains, this only
    /// sends the specific blocks containing the blobs, not all blocks up to those heights.
    async fn send_chain_info_for_blobs(
        &mut self,
        blob_ids: &[BlobId],
        delivery: CrossChainMessageDelivery,
    ) -> Result<(), chain_client::Error> {
        let blob_states = self
            .client
            .local_node
            .read_blob_states_from_storage(blob_ids)
            .await?;

        let mut chain_heights: BTreeMap<ChainId, BTreeSet<BlockHeight>> = BTreeMap::new();
        for blob_state in blob_states {
            let block_chain_id = blob_state.chain_id;
            let block_height = blob_state.block_height;
            chain_heights
                .entry(block_chain_id)
                .or_default()
                .insert(block_height);
        }

        self.send_chain_info_at_heights(chain_heights, delivery)
            .await
    }

    /// Sends chain information for specific heights on multiple chains.
    ///
    /// Unlike `send_chain_info_up_to_heights`, this method only sends the blocks at the
    /// specified heights, not all blocks up to those heights. This is more efficient for
    /// sparse chains where only specific blocks are needed.
    async fn send_chain_info_at_heights(
        &mut self,
        chain_heights: impl IntoIterator<Item = (ChainId, BTreeSet<BlockHeight>)>,
        delivery: CrossChainMessageDelivery,
    ) -> Result<(), chain_client::Error> {
        FuturesUnordered::from_iter(chain_heights.into_iter().map(|(chain_id, heights)| {
            let mut updater = self.clone();
            async move {
                // Read all certificates for this chain at the specified heights in one call.
                let heights_vec: Vec<_> = heights.into_iter().collect();
                let certificates = updater
                    .client
                    .local_node
                    .storage_client()
                    .read_certificates_by_heights(chain_id, &heights_vec)
                    .await?
                    .into_iter()
                    .flatten()
                    .collect::<Vec<_>>();

                // Send each certificate.
                for certificate in certificates {
                    updater
                        .send_confirmed_certificate(certificate, delivery)
                        .await?;
                }

                Ok::<_, chain_client::Error>(())
            }
        }))
        .try_collect::<Vec<_>>()
        .await?;
        Ok(())
    }

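    /// Sends chain information for multiple chains in parallel, bringing the validator
    /// up to the given block height on each chain.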
    async fn send_chain_info_up_to_heights(
        &mut self,
        chain_heights: impl IntoIterator<Item = (ChainId, BlockHeight)>,
        delivery: CrossChainMessageDelivery,
    ) -> Result<(), chain_client::Error> {
        FuturesUnordered::from_iter(chain_heights.into_iter().map(|(chain_id, height)| {
            let mut updater = self.clone();
            async move {
                updater
                    .send_chain_information(chain_id, height, delivery, None)
                    .await
            }
        }))
        .try_collect::<Vec<_>>()
        .await?;
        Ok(())
    }

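    /// Carries out one `CommunicateAction` against the remote node and returns the
    /// validator's vote, after checking its signature.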
    pub async fn send_chain_update(
        &mut self,
        action: CommunicateAction,
    ) -> Result<LiteVote, chain_client::Error> {
        let chain_id = match &action {
            CommunicateAction::SubmitBlock { proposal, .. } => proposal.content.block.chain_id,
            CommunicateAction::FinalizeBlock { certificate, .. } => {
                certificate.inner().block().header.chain_id
            }
            CommunicateAction::RequestTimeout { chain_id, .. } => *chain_id,
        };
        // Send the block proposal, certificate, or timeout request and return a vote.
        let vote = match action {
            CommunicateAction::SubmitBlock {
                proposal,
                blob_ids,
                clock_skew_sender,
            } => {
                let info = self
                    .send_block_proposal(proposal, blob_ids, clock_skew_sender)
                    .await?;
                info.manager.pending.ok_or_else(|| {
                    NodeError::MissingVoteInValidatorResponse("submit a block proposal".into())
                })?
            }
            CommunicateAction::FinalizeBlock {
                certificate,
                delivery,
            } => {
                let info = self
                    .send_validated_certificate(*certificate, delivery)
                    .await?;
                info.manager.pending.ok_or_else(|| {
                    NodeError::MissingVoteInValidatorResponse("finalize a block".into())
                })?
            }
            CommunicateAction::RequestTimeout { round, height, .. } => {
                let info = self.request_timeout(chain_id, round, height).await?;
                info.manager.timeout_vote.ok_or_else(|| {
                    NodeError::MissingVoteInValidatorResponse("request a timeout".into())
                })?
            }
        };
        vote.check(self.remote_node.public_key)?;
        Ok(vote)
    }
}