-
Notifications
You must be signed in to change notification settings - Fork 23
Paginate by creation time instead of key order #96
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -53,6 +53,8 @@ macro_rules! define_kv_store_tests { | |
| create_test!(list_should_honour_page_size_and_key_prefix_if_provided); | ||
| create_test!(list_should_return_zero_global_version_when_global_versioning_not_enabled); | ||
| create_test!(list_should_limit_max_page_size); | ||
| create_test!(list_should_return_results_ordered_by_creation_time); | ||
| create_test!(list_should_paginate_by_creation_time_with_prefix); | ||
| }; | ||
| } | ||
|
|
||
|
|
@@ -393,12 +395,11 @@ pub trait KvStoreTestSuite { | |
| }, | ||
| }; | ||
|
|
||
| if current_page.key_versions.is_empty() { | ||
| break; | ||
| } | ||
|
|
||
| all_key_versions.extend(current_page.key_versions); | ||
| next_page_token = current_page.next_page_token; | ||
| match current_page.next_page_token { | ||
| Some(token) if !token.is_empty() => next_page_token = Some(token), | ||
| _ => break, | ||
| } | ||
| } | ||
|
|
||
| if let Some(k1_response) = all_key_versions.iter().find(|kv| kv.key == "k1") { | ||
|
|
@@ -444,13 +445,12 @@ pub trait KvStoreTestSuite { | |
| }, | ||
| }; | ||
|
|
||
| if current_page.key_versions.is_empty() { | ||
| break; | ||
| } | ||
|
|
||
| assert!(current_page.key_versions.len() <= page_size as usize); | ||
| all_key_versions.extend(current_page.key_versions); | ||
| next_page_token = current_page.next_page_token; | ||
| match current_page.next_page_token { | ||
| Some(token) if !token.is_empty() => next_page_token = Some(token), | ||
| _ => break, | ||
| } | ||
| } | ||
|
|
||
| let unique_keys: std::collections::HashSet<String> = | ||
|
|
@@ -490,12 +490,11 @@ pub trait KvStoreTestSuite { | |
| Some(next_page_token) => ctx.list(Some(next_page_token), None, None).await?, | ||
| }; | ||
|
|
||
| if current_page.key_versions.is_empty() { | ||
| break; | ||
| } | ||
|
|
||
| all_key_versions.extend(current_page.key_versions); | ||
| next_page_token = current_page.next_page_token; | ||
| match current_page.next_page_token { | ||
| Some(token) if !token.is_empty() => next_page_token = Some(token), | ||
| _ => break, | ||
| } | ||
| } | ||
|
|
||
| let unique_keys: std::collections::HashSet<String> = | ||
|
|
@@ -506,6 +505,66 @@ pub trait KvStoreTestSuite { | |
| Ok(()) | ||
| } | ||
|
|
||
| async fn list_should_return_results_ordered_by_creation_time() -> Result<(), VssError> { | ||
| let kv_store = Self::create_store().await; | ||
| let ctx = TestContext::new(&kv_store); | ||
|
|
||
| ctx.put_objects(Some(0), vec![kv("z_first", "v1", 0)]).await?; | ||
| ctx.put_objects(Some(1), vec![kv("a_third", "v1", 0)]).await?; | ||
| ctx.put_objects(Some(2), vec![kv("m_second", "v1", 0)]).await?; | ||
|
|
||
| let page = ctx.list(None, None, None).await?; | ||
| assert_eq!(page.global_version, Some(3)); | ||
| let keys: Vec<&str> = page.key_versions.iter().map(|kv| kv.key.as_str()).collect(); | ||
|
|
||
| // Results should be in reverse creation order (newest first), not alphabetical. | ||
| assert_eq!(keys, vec!["m_second", "a_third", "z_first"]); | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Since this is for VSS API tests, you can't assume you'll get all the keys back in a single go. It's the server's decision how many keys you get per page.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. i mean, can we at least assume the page size is >=3 for this test lol?
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. sounds good haha it's just a test |
||
|
|
||
| Ok(()) | ||
| } | ||
|
|
||
| async fn list_should_paginate_by_creation_time_with_prefix() -> Result<(), VssError> { | ||
| let kv_store = Self::create_store().await; | ||
| let ctx = TestContext::new(&kv_store); | ||
|
|
||
| // Insert prefixed keys in reverse-alphabetical order with a page_size of 1 | ||
| // to force multiple pages and verify cross-page ordering. | ||
| ctx.put_objects(Some(0), vec![kv("pfx_z", "v1", 0)]).await?; | ||
| ctx.put_objects(Some(1), vec![kv("pfx_a", "v1", 0)]).await?; | ||
| ctx.put_objects(Some(2), vec![kv("other", "v1", 0)]).await?; | ||
| ctx.put_objects(Some(3), vec![kv("pfx_m", "v1", 0)]).await?; | ||
|
|
||
| let mut next_page_token: Option<String> = None; | ||
| let mut all_keys: Vec<String> = Vec::new(); | ||
|
|
||
| loop { | ||
| let current_page = match next_page_token.take() { | ||
| None => { | ||
| let page = ctx.list(None, Some(1), Some("pfx_".to_string())).await?; | ||
| assert_eq!(page.global_version, Some(4)); | ||
| page | ||
| }, | ||
| Some(token) => { | ||
| let page = ctx.list(Some(token), Some(1), Some("pfx_".to_string())).await?; | ||
| assert!(page.global_version.is_none()); | ||
| page | ||
| }, | ||
| }; | ||
|
|
||
| assert!(current_page.key_versions.len() <= 1); | ||
| all_keys.extend(current_page.key_versions.into_iter().map(|kv| kv.key)); | ||
| match current_page.next_page_token { | ||
| Some(token) if !token.is_empty() => next_page_token = Some(token), | ||
benthecarman marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| _ => break, | ||
| } | ||
| } | ||
|
|
||
| // Should get prefixed keys in reverse creation order (newest first), excluding "other". | ||
| assert_eq!(all_keys, vec!["pfx_m", "pfx_a", "pfx_z"]); | ||
|
|
||
| Ok(()) | ||
| } | ||
|
|
||
| async fn list_should_limit_max_page_size() -> Result<(), VssError> { | ||
| let kv_store = Self::create_store().await; | ||
| let ctx = TestContext::new(&kv_store); | ||
|
|
@@ -524,16 +583,15 @@ pub trait KvStoreTestSuite { | |
| None => ctx.list(None, None, None).await?, | ||
| Some(next_page_token) => ctx.list(Some(next_page_token), None, None).await?, | ||
| }; | ||
| if current_page.key_versions.is_empty() { | ||
| break; | ||
| } | ||
|
|
||
| assert!( | ||
| current_page.key_versions.len() < vss_arbitrary_page_size_max as usize, | ||
| "Page size exceeds the maximum allowed size" | ||
| ); | ||
| all_key_versions.extend(current_page.key_versions); | ||
| next_page_token = current_page.next_page_token; | ||
| match current_page.next_page_token { | ||
| Some(token) if !token.is_empty() => next_page_token = Some(token), | ||
| _ => break, | ||
| } | ||
| } | ||
|
|
||
| assert_eq!(all_key_versions.len(), total_kv_objects as usize); | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -212,14 +212,16 @@ pub struct ListKeyVersionsRequest { | |
| #[allow(clippy::derive_partial_eq_without_eq)] | ||
| #[derive(Clone, PartialEq, ::prost::Message)] | ||
| pub struct ListKeyVersionsResponse { | ||
| /// Fetched keys and versions. | ||
| /// Fetched keys and versions, ordered by creation time (newest first), with ties broken by key (alphabetically). | ||
benthecarman marked this conversation as resolved.
Show resolved
Hide resolved
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Let's remove this "ties broken by key (alphabetically)" too |
||
| /// Even though this API reuses the `KeyValue` struct, the `value` sub-field will not be set by the server. | ||
| #[prost(message, repeated, tag = "1")] | ||
| pub key_versions: ::prost::alloc::vec::Vec<KeyValue>, | ||
| /// `next_page_token` is a pagination token, used to retrieve the next page of results. | ||
| /// Use this value to query for next-page of paginated `ListKeyVersions` operation, by specifying | ||
| /// this value as the `page_token` in the next request. | ||
| /// | ||
| /// Following AIP-158 (<https://google.aip.dev/158>): | ||
| /// | ||
| /// If `next_page_token` is empty (""), then the "last page" of results has been processed and | ||
| /// there is no more data to be retrieved. | ||
| /// | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -35,6 +35,9 @@ pub(crate) const MIGRATIONS: &[&str] = &[ | |
| PRIMARY KEY (user_token, store_id, key) | ||
| );", | ||
| "ALTER TABLE vss_db DROP CONSTRAINT IF EXISTS vss_db_store_id_check;", | ||
| "UPDATE vss_db SET created_at = COALESCE(last_updated_at, NOW()) WHERE created_at IS NULL;", | ||
| "ALTER TABLE vss_db ALTER COLUMN created_at SET NOT NULL;", | ||
tankyleo marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| "CREATE INDEX idx_vss_db_created_at ON vss_db (user_token, store_id, created_at, key);", | ||
benthecarman marked this conversation as resolved.
Show resolved
Hide resolved
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Looks like we could append an |
||
| ]; | ||
| #[cfg(test)] | ||
| pub(crate) const DUMMY_MIGRATION: &str = "SELECT 1 WHERE FALSE;"; | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment.
The reason will be displayed to describe this comment to others. Learn more.
No, I think we were intentionally following protobuf's/Google's best practices here. https://google.aip.dev/158 states:
IMO would be good to revert and include this context in the docs.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Okay updated