Commit e95a823

added migrations for key fix (#2226)
# Goal
The goal of this PR is <!-- insert goal here -->

Closes #2227

# Details
- The mainnet will be migrated like all other migrations, since the data set is small enough to fit easily into a single block via
  ```rust
  pallet_stateful_storage::migration::v1::MigrateToV1<Runtime>
  ```
- For testnet we will use the same single-block path only to mark the
  ```rust
  Pallet::<T>::on_chain_storage_version();
  ```
  but the actual multi-block migration uses the `on_initialize` hook and a new storage item called `MigrationPageIndex` that stores the progress.

# Verification
- There are issues with try-runtime that prevent running it against mainnet or testnet. Added some tests to verify the changes.

# Checklist
- [x] Unit Tests added?
- [x] Spec version incremented?
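As a rough illustration of the testnet path described above, the standalone sketch below simulates the `on_initialize` paging: a persisted page index selects one chunk of MSA ids per block until the list is exhausted. The function name `migrate_one_page`, the page size of 20, and the 172 ids are illustrative assumptions only; the real `MIGRATION_PAGE_SIZE` constant and id list live in the pallet code, not in this description.

```rust
// Standalone sketch (not pallet code): simulates how a stored page index can
// drive a multi-block migration, one chunk of MSA ids per block.
fn migrate_one_page(ids: &[u64], page_size: usize, page_index: usize) -> bool {
    // Returns true while there is still a chunk to process at `page_index`.
    match ids.chunks(page_size).nth(page_index) {
        Some(page) => {
            println!("block {}: migrating {} ids", page_index, page.len());
            true
        },
        None => false,
    }
}

fn main() {
    // Assumed values for illustration only.
    let ids: Vec<u64> = (1..=172).collect();
    let page_size = 20;

    // Stand-in for the `MigrationPageIndex` storage item: persisted between blocks.
    let mut page_index = 0usize;
    while migrate_one_page(&ids, page_size, page_index) {
        page_index += 1; // the pallet does this with saturating_add(1)
    }
    println!("done after {} blocks", page_index);
}
```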
1 parent f70489c commit e95a823

File tree

12 files changed (+430, -10 lines)


.github/workflows/verify-pr-commit.yml

+2-1
@@ -181,9 +181,10 @@ jobs:
     steps:
       - name: Check Out Repo
         uses: actions/checkout@v4
+      # using older version of cargo deny since the new one requires rustc version >= 1.81
       - name: Set Up Cargo Deny
         run: |
-          cargo install --force --locked cargo-deny
+          cargo install --force --locked cargo-deny@0.16.1
           cargo generate-lockfile
       - name: Run Cargo Deny
        run: cargo deny check --hide-inclusion-graph -c deny.toml

Cargo.lock

+1
Some generated files are not rendered by default.

pallets/capacity/src/migration/provider_boost_init.rs

+2
@@ -3,6 +3,8 @@ use frame_support::{
 	pallet_prelude::Weight,
 	traits::{Get, OnRuntimeUpgrade},
 };
+#[cfg(feature = "try-runtime")]
+use sp_runtime::TryRuntimeError;
 
 #[cfg(feature = "try-runtime")]
 use sp_std::vec::Vec;

pallets/stateful-storage/Cargo.toml

+2-1
@@ -33,6 +33,7 @@ common-runtime = { path = "../../runtime/common", default-features = false }
 env_logger = { workspace = true }
 pretty_assertions = { workspace = true }
 sp-keystore = { workspace = true }
+hex = { workspace = true, default-features = false, features = ["alloc"] }
 
 [features]
 default = ['std']
@@ -57,4 +58,4 @@ std = [
 	"common-runtime/std",
 ]
 try-runtime = ['frame-support/try-runtime']
-test = []
+test = []

pallets/stateful-storage/src/lib.rs

+30-1
@@ -36,7 +36,8 @@ mod tests;
 #[cfg(feature = "runtime-benchmarks")]
 use common_primitives::benchmarks::{MsaBenchmarkHelper, SchemaBenchmarkHelper};
 use sp_std::prelude::*;
-
+/// storage migrations
+pub mod migration;
 mod stateful_child_tree;
 pub mod types;
 pub mod weights;
@@ -60,6 +61,8 @@ use sp_core::{bounded::BoundedVec, crypto::AccountId32};
 use sp_runtime::{traits::Convert, DispatchError, MultiSignature};
 pub use weights::*;
 
+const LOG_TARGET: &str = "runtime::stateful-storage";
+
 #[frame_support::pallet]
 pub mod pallet {
 	use super::*;
@@ -124,8 +127,13 @@ pub mod pallet {
 	// Simple declaration of the `Pallet` type. It is placeholder we use to implement traits and
 	// method.
 	#[pallet::pallet]
+	#[pallet::storage_version(STATEFUL_STORAGE_VERSION)]
 	pub struct Pallet<T>(_);
 
+	/// A temporary storage for migration
+	#[pallet::storage]
+	pub(super) type MigrationPageIndex<T: Config> = StorageValue<_, u32, ValueQuery>;
+
 	#[pallet::error]
 	pub enum Error<T> {
 		/// Page would exceed the highest allowable PageId
@@ -220,6 +228,27 @@ pub mod pallet {
 		},
 	}
 
+	#[pallet::hooks]
+	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
+		fn on_initialize(_current: BlockNumberFor<T>) -> Weight {
+			// this should get removed after rolling out to testnet
+			#[cfg(any(feature = "frequency-testnet", test))]
+			{
+				let page_index = <MigrationPageIndex<T>>::get();
+				let (weight, continue_migration) = migration::v1::paginated_migration_testnet::<T>(
+					MIGRATION_PAGE_SIZE,
+					page_index,
+				);
+				if continue_migration {
+					<MigrationPageIndex<T>>::set(page_index.saturating_add(1));
+				}
+				T::DbWeight::get().reads_writes(1, 1).saturating_add(weight)
+			}
+			#[cfg(not(any(feature = "frequency-testnet", test)))]
+			Weight::zero()
+		}
+	}
+
 	#[pallet::call]
 	impl<T: Config> Pallet<T> {
 		/// Applies the Add or Delete Actions on the requested Itemized page.
New file

+2

@@ -0,0 +1,2 @@
/// migrations to v1
pub mod v1;
New file

+258

@@ -0,0 +1,258 @@
#[cfg(feature = "try-runtime")]
use crate::types::STATEFUL_STORAGE_VERSION;
use crate::{
	stateful_child_tree::StatefulChildTree,
	types::{
		ItemAction, ItemizedKey, ItemizedOperations, ItemizedPage, Page, ITEMIZED_STORAGE_PREFIX,
		PALLET_STORAGE_PREFIX,
	},
	Config, Pallet, LOG_TARGET,
};
use common_primitives::{
	msa::MessageSourceId,
	utils::{get_chain_type_by_genesis_hash, DetectedChainType},
};
use frame_support::{pallet_prelude::*, traits::OnRuntimeUpgrade, weights::Weight};
use frame_system::pallet_prelude::BlockNumberFor;
use log;
#[cfg(feature = "try-runtime")]
use sp_core::hexdisplay::HexDisplay;
use sp_runtime::Saturating;
#[cfg(feature = "try-runtime")]
use sp_runtime::TryRuntimeError;
use sp_std::{vec, vec::Vec};

/// testnet specific msa ids for migration
#[cfg(any(feature = "frequency-testnet", test))]
pub fn get_testnet_msa_ids() -> Vec<MessageSourceId> {
	vec![
		8004, 8009, 8816, 8817, 8818, 8819, 8820, 8822, 8823, 8824, 8825, 8826, 9384, 9753, 9919,
		9992, 9994, 9996, 9997, 10009, 10010, 10012, 10013, 10014, 10015, 10019, 10020, 10021,
		10022, 10023, 10024, 10025, 10026, 10027, 10028, 10029, 10030, 10031, 10032, 10033, 10034,
		10035, 10036, 10037, 10038, 10039, 10040, 10041, 10042, 10043, 10044, 10045, 10046, 10047,
		10048, 10049, 10050, 10051, 10052, 10053, 10054, 10055, 10056, 10057, 10058, 10059, 10061,
		10062, 10064, 10067, 10068, 10069, 10070, 10071, 10072, 10075, 10076, 10077, 10078, 10079,
		10138, 10139, 10140, 10206, 10207, 10209, 10212, 10218, 10219, 10220, 10221, 10222, 10223,
		10224, 10231, 10232, 10233, 10234, 10235, 10236, 10237, 10238, 10239, 10240, 10241, 10242,
		10243, 10247, 10248, 10251, 10253, 10254, 10255, 10256, 10257, 10258, 10259, 10260, 10261,
		10262, 10263, 10264, 10265, 10266, 10267, 10268, 10269, 10270, 10271, 10272, 10273, 10274,
		10275, 10287, 10288, 10289, 10290, 10291, 10292, 10293, 10294, 10295, 10296, 10297, 10298,
		10299, 10300, 10301, 10302, 10303, 10304, 10305, 10306, 10307, 10308, 10309, 10311, 10312,
		10313, 10314, 10315, 10316, 10317, 10318, 10319, 10320, 10321, 10322, 10323, 10324, 10325,
		10326, 10327, 10328, 10329,
	]
}

/// returns the chain type from genesis hash
pub fn get_chain_type<T: Config>() -> DetectedChainType {
	let genesis_block: BlockNumberFor<T> = 0u32.into();
	let genesis = <frame_system::Pallet<T>>::block_hash(genesis_block);
	get_chain_type_by_genesis_hash(&genesis.encode()[..])
}

/// get the msa ids with key migrations
pub fn get_msa_ids<T: Config>() -> Vec<MessageSourceId> {
	let chain_type = get_chain_type::<T>();
	if let DetectedChainType::FrequencyMainNet = chain_type {
		vec![
			227, 542, 1249820, 1287729, 1288925, 1309067, 1309241, 1309258, 1309367, 1309397,
			1329112, 1329535, 1330067,
		]
	} else {
		if cfg!(test) {
			// this allows to test the mainnet path
			vec![1]
		} else {
			// we are going to use hooks for this multi-block migration so this is empty to only flag that
			// it as done for consistency
			vec![]
		}
	}
}

/// migration to v1 implementation
pub struct MigrateToV1<T>(PhantomData<T>);

impl<T: Config> OnRuntimeUpgrade for MigrateToV1<T> {
	fn on_runtime_upgrade() -> Weight {
		migrate_to_v1::<T>()
	}

	#[cfg(feature = "try-runtime")]
	fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
		log::info!(target: LOG_TARGET, "Running pre_upgrade...");
		let on_chain_version = Pallet::<T>::on_chain_storage_version();
		let genesis_block: BlockNumberFor<T> = 0u32.into();
		let genesis = <frame_system::Pallet<T>>::block_hash(genesis_block);
		if on_chain_version >= 1 {
			return Ok(Vec::new())
		}
		log::info!(target: LOG_TARGET, "Found genesis... {:?}", genesis);
		let detected_chain = get_chain_type_by_genesis_hash(&genesis.encode()[..]);
		log::info!(target: LOG_TARGET,"Detected Chain is {:?}", detected_chain);
		Ok(Vec::new())
	}

	#[cfg(feature = "try-runtime")]
	fn post_upgrade(_: Vec<u8>) -> Result<(), TryRuntimeError> {
		log::info!(target: LOG_TARGET, "Running post_upgrade...");
		let on_chain_version = Pallet::<T>::on_chain_storage_version();
		if on_chain_version > 1 {
			return Ok(())
		}
		let onchain_version = Pallet::<T>::on_chain_storage_version();
		assert_eq!(onchain_version, STATEFUL_STORAGE_VERSION);
		// check to ensure updates took place
		let schema_id = get_schema_id();
		let msa_ids = get_msa_ids::<T>();
		for msa_id in msa_ids.into_iter() {
			let itemized: ItemizedPage<T> = Pallet::<T>::get_itemized_page_for(msa_id, schema_id)
				.map_err(|_| TryRuntimeError::Other("can not get storage"))?
				.ok_or(TryRuntimeError::Other("no storage"))?;
			let (_, val) = <Page<T::MaxItemizedPageSizeBytes> as ItemizedOperations<T>>::try_parse(
				&itemized, false,
			)
			.map_err(|_| TryRuntimeError::Other("can not parse storage"))?
			.items
			.clone()
			.into_iter()
			.next()
			.ok_or(TryRuntimeError::Other("no item"))?;

			assert_eq!(val.len(), 33);
			log::info!(target: LOG_TARGET, "{:?}", HexDisplay::from(&val));
		}
		log::info!(target: LOG_TARGET, "Finished post_upgrade");
		Ok(())
	}
}

/// migrating to v1
pub fn migrate_to_v1<T: Config>() -> Weight {
	log::info!(target: LOG_TARGET, "Running storage migration...");
	let onchain_version = Pallet::<T>::on_chain_storage_version();
	let current_version = Pallet::<T>::in_code_storage_version();
	log::info!(target: LOG_TARGET, "onchain_version= {:?}, current_version={:?}", onchain_version, current_version);
	if onchain_version < 1 {
		let msa_ids = get_msa_ids::<T>();
		let weights = migrate_msa_ids::<T>(&msa_ids[..]);
		// Set storage version to `1`.
		StorageVersion::new(1).put::<Pallet<T>>();
		let total_weight = T::DbWeight::get().writes(1).saturating_add(weights);
		log::info!(target: LOG_TARGET, "Migration Calculated weights={:?}",total_weight);
		total_weight
	} else {
		log::info!(
			target: LOG_TARGET,
			"Migration did not execute. This probably should be removed onchain:{:?}, current:{:?}",
			onchain_version,
			current_version
		);
		T::DbWeight::get().reads(1)
	}
}

/// migrating all msa_ids
pub fn migrate_msa_ids<T: Config>(msa_ids: &[MessageSourceId]) -> Weight {
	let schema_id = get_schema_id();
	let key: ItemizedKey = (schema_id,);
	let each_layer_access: u64 = 33 * 16;
	let mut reads = 1u64;
	let mut writes = 0u64;
	let mut bytes = 0u64;

	for msa_id in msa_ids.iter() {
		reads.saturating_inc();
		// get the itemized storages
		let itemized_result: Result<Option<ItemizedPage<T>>, _> =
			Pallet::<T>::get_itemized_page_for(*msa_id, schema_id);
		match itemized_result {
			Ok(Some(existing_page)) => {
				bytes = bytes.saturating_add(existing_page.encode().len() as u64);
				bytes = bytes.saturating_add(each_layer_access * 3); // three layers in merkle tree

				match <Page<T::MaxItemizedPageSizeBytes> as ItemizedOperations<T>>::try_parse(
					&existing_page,
					false,
				) {
					Ok(parsed_page) => match parsed_page.items.clone().into_iter().next() {
						Some((_, existing_value)) => match existing_value.len() {
							32usize => {
								// 64 is decimal value for 0x40
								let mut prefixed = vec![64u8];
								prefixed.extend_from_slice(existing_value);
								let bounded: BoundedVec<u8, T::MaxItemizedBlobSizeBytes> =
									prefixed.try_into().unwrap_or_default();

								let empty_page = ItemizedPage::<T>::default();
								match <Page<<T as Config>::MaxItemizedPageSizeBytes> as ItemizedOperations<T>>::apply_item_actions(&empty_page,&vec![ItemAction::Add {
									data: bounded,
								}]) {
									Ok(mut updated_page) => {
										updated_page.nonce = existing_page.nonce;
										StatefulChildTree::<T::KeyHasher>::write(
											msa_id,
											PALLET_STORAGE_PREFIX,
											ITEMIZED_STORAGE_PREFIX,
											&key,
											&updated_page,
										);
										bytes = bytes.saturating_add(updated_page.encode().len() as u64);
										writes.saturating_inc();
									},
									Err(e) =>
										log::error!(target: LOG_TARGET, "Error appending prefixed value {:?} and schema_id {:?} with {:?}", msa_id, schema_id, e),
								}
							},
							33usize =>
								log::warn!(target: LOG_TARGET, "Itemized page item for msa_id {:?} and schema_id {:?} has correct size", msa_id, schema_id),
							_ =>
								log::warn!(target: LOG_TARGET, "Itemized page item for msa_id {:?} and schema_id {:?} has invalid size {:?}", msa_id, schema_id, existing_value.len()),
						},
						None =>
							log::warn!(target: LOG_TARGET, "Itemized page was empty for msa_id {:?} and schema_id {:?}", msa_id, schema_id),
					},
					Err(e) =>
						log::error!(target: LOG_TARGET, "Error parsing page for msa_id {:?} and schema_id {:?} with {:?}", msa_id, schema_id, e),
				}
			},
			Ok(None) =>
				log::warn!(target: LOG_TARGET, "No page found for msa_id {:?} and schema_id {:?}", msa_id, schema_id),
			Err(e) =>
				log::error!(target: LOG_TARGET, "Error getting the page for msa_id {:?} and schema_id {:?} with {:?}", msa_id, schema_id, e),
		}
	}
	log::info!(target: LOG_TARGET, "Storage migrated to version 1 read={:?}, write={:?}, bytes={:?}", reads, writes, bytes);
	let weights = T::DbWeight::get().reads_writes(reads, writes).add_proof_size(bytes);
	log::info!(target: LOG_TARGET, "migrate_msa_ids weights={:?}",weights);
	weights
}

/// paginated migration for testnet
#[cfg(any(feature = "frequency-testnet", test))]
pub fn paginated_migration_testnet<T: Config>(page_size: u32, page_index: u32) -> (Weight, bool) {
	let msa_ids: Vec<MessageSourceId> = get_testnet_msa_ids();
	let mut chunks = msa_ids.chunks(page_size as usize);
	let chunk_len = chunks.len() as u32;
	let mut current = 0u32;
	while current < page_index && current < chunk_len {
		let _ = chunks.next();
		current += 1;
	}
	match chunks.next() {
		Some(page) => {
			let weight = migrate_msa_ids::<T>(page);
			(weight, true)
		},
		None => (Weight::zero(), false),
	}
}

fn get_schema_id() -> u16 {
	if cfg!(test) {
		// Supported ITEMIZED_APPEND_ONLY_SCHEMA for tests
		103
	} else {
		7
	}
}
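The value rewrite performed by `migrate_msa_ids` boils down to prepending the byte `0x40` (decimal 64) to each existing 32-byte item so that it becomes the 33-byte form the `post_upgrade` check expects. Below is a minimal standalone sketch of just that transformation; `prefix_key` and the zeroed placeholder bytes are illustrative, not taken from the pallet.

```rust
// Standalone sketch of the length change applied by the migration:
// a 32-byte value gains a leading 0x40 byte and becomes 33 bytes.
fn prefix_key(existing_value: &[u8]) -> Vec<u8> {
    let mut prefixed = vec![0x40u8]; // 64 decimal, as in the migration
    prefixed.extend_from_slice(existing_value);
    prefixed
}

fn main() {
    let old_value = [0u8; 32]; // placeholder bytes, not real key material
    let new_value = prefix_key(&old_value);
    assert_eq!(new_value.len(), 33); // matches the post_upgrade length check
    assert_eq!(new_value[0], 0x40);
    println!("migrated item is {} bytes", new_value.len());
}
```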
