Mirror of https://github.com/0glabs/0g-storage-node.git (synced 2025-11-04 00:27:39 +00:00)
	fix: admin_getFileLocation; test: sync test (#141)
* feat: add all_shards in admin_getFileLocation
* fix: admin_getFileLocation
* test: improve sync test
* fix: lint
parent f0c3f2cfd0
commit 533bacb234
@@ -190,6 +190,8 @@ impl RpcServer for RpcServerImpl {
         tx_seq: u64,
+        all_shards: bool,
     ) -> RpcResult<Option<Vec<LocationInfo>>> {
         info!("admin_getFileLocation()");
+
         let tx = match self.ctx.log_store.get_tx_by_seq_number(tx_seq).await? {
             Some(tx) => tx,
             None => {
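The hunk above adds `all_shards` as the second positional parameter of the `admin_getFileLocation` JSON-RPC method; the test helper further down passes it as `[tx_seq, all_shards]`. A minimal sketch of calling the method directly over HTTP, assuming the node's RPC endpoint is reachable at http://127.0.0.1:5678 (the endpoint, port, and use of `requests` are illustrative, not part of this commit):

    import requests

    # Hypothetical endpoint; the real port comes from the node's RPC configuration.
    RPC_URL = "http://127.0.0.1:5678"

    def admin_get_file_location(tx_seq, all_shards=True):
        # Positional params mirror the test helper: [tx_seq, all_shards].
        payload = {
            "jsonrpc": "2.0",
            "id": 1,
            "method": "admin_getFileLocation",
            "params": [tx_seq, all_shards],
        }
        resp = requests.post(RPC_URL, json=payload, timeout=10)
        resp.raise_for_status()
        # "result" is a list of LocationInfo entries, or null when all_shards is
        # requested and the known peers do not cover every shard.
        return resp.json().get("result")

    print(admin_get_file_location(0, all_shards=True))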
@@ -225,7 +227,9 @@ impl RpcServer for RpcServerImpl {
                 shard_config: shard_config.unwrap(),
             })
             .collect();
-        if all_shards && all_shards_available(info.iter().map(|info| info.shard_config).collect()) {
+
+        if !all_shards || all_shards_available(info.iter().map(|info| info.shard_config).collect())
+        {
             Ok(Some(info))
         } else {
             Ok(None)
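With the rewritten guard, partial results are acceptable unless the caller explicitly asked for all shards: when `all_shards` is false the known locations are returned as-is, and when it is true they are returned only if the peers' shard configs jointly cover every shard. A rough Python sketch of that coverage idea, assuming each shard config is a `(shard_id, num_shard)` pair with power-of-two shard counts; this illustrates the intent of the check, not the node's actual `all_shards_available` implementation:

    def all_shards_available(shard_configs):
        # Illustrative only: do the (shard_id, num_shard) pairs jointly cover
        # every shard position at the finest granularity seen among the peers?
        if not shard_configs:
            return False
        finest = max(num for _, num in shard_configs)
        covered = set()
        for shard_id, num_shard in shard_configs:
            # A peer holding shard shard_id of num_shard covers every position
            # congruent to shard_id modulo num_shard; num_shard divides finest
            # when shard counts are powers of two.
            covered.update(range(shard_id % num_shard, finest, num_shard))
        return len(covered) == finest

    def get_file_location(info, all_shards):
        # Mirrors the fixed guard: return the locations unless the caller asked
        # for full coverage and the peers do not provide it.
        if not all_shards or all_shards_available([i["shard_config"] for i in info]):
            return info
        return None

For example, peers with configs (0, 2) and (1, 2) cover both shards, so the strict call returns the locations, while a single peer with (0, 2) makes the strict call return None.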
@@ -29,6 +29,9 @@ class SyncTest(TestFramework):
         client1 = self.nodes[0]
         client2 = self.nodes[1]
 
+        # stop client2, preventing it from receiving AnnounceFile
+        client2.shutdown()
+
         # Create submission
         chunk_data = random.randbytes(256 * 1024)
         data_root = self.__create_submission(chunk_data)
@@ -41,16 +44,22 @@ class SyncTest(TestFramework):
         segments = submit_data(client1, chunk_data)
         self.log.info("segments: %s", [(s["root"], s["index"], s["proof"]) for s in segments])
         wait_until(lambda: client1.zgs_get_file_info(data_root)["finalized"])
 
-        # File should not be auto sync on node 2
-
+        # restart client2
+        client2.start()
+        client2.wait_for_rpc_connection()
+
+        # File should not be auto sync on node 2 and there is no cached file locations
         wait_until(lambda: client2.zgs_get_file_info(data_root) is not None)
         time.sleep(3)
         assert_equal(client2.zgs_get_file_info(data_root)["finalized"], False)
+        assert(client2.admin_get_file_location(0) is None)
 
         # Trigger file sync by rpc
         assert(client2.admin_start_sync_file(0) is None)
         wait_until(lambda: client2.sync_status_is_completed_or_unknown(0))
         wait_until(lambda: client2.zgs_get_file_info(data_root)["finalized"])
+        assert(client2.admin_get_file_location(0) is not None)
+
         # Validate data
         assert_equal(
@@ -113,6 +113,9 @@ class ZgsNode(TestNode):
     def sync_status_is_completed_or_unknown(self, tx_seq):
        status = self.rpc.admin_getSyncStatus([tx_seq])
        return status == "Completed" or status == "unknown"
+
+    def admin_get_file_location(self, tx_seq, all_shards = True):
+        return self.rpc.admin_getFileLocation([tx_seq, all_shards])
 
     def clean_data(self):
         shutil.rmtree(os.path.join(self.data_dir, "db"))
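A short usage sketch for the new helper, assuming `node` is a running `ZgsNode` from the test framework; the default `all_shards = True` matches the strict check used in the sync test, while `all_shards=False` accepts whatever locations are already known:

    # Sketch only: `node` stands for a ZgsNode instance started by a test.
    tx_seq = 0

    # Strict: non-None only once the known peers cover every shard of the file.
    locations = node.admin_get_file_location(tx_seq)

    # Relaxed: returns the locations discovered so far, even if incomplete.
    partial = node.admin_get_file_location(tx_seq, all_shards=False)

    if locations is None and partial:
        print("file is only partially covered by known peers:", partial)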