Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Nhse d32 nhskv.i17 #19

Merged
merged 19 commits into from
Mar 28, 2024
Merged
Show file tree
Hide file tree
Changes from 9 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 24 additions & 1 deletion priv/riak_kv.schema
Original file line number Diff line number Diff line change
Expand Up @@ -1517,11 +1517,34 @@
{default, disabled}
]}.

%% @doc Default Secondary Index Timeout (milliseconds)
%% Set the default timeout for running secondary index queries in milliseconds.
%% This is the time to run the query, and collate the results - the encoding of
%% those results for dispatch back to the client is outside of the scope of
%% this timeout.  Setting to 0 (the default) implies no timeout.
%% On hitting the timeout a 503 error will be returned on the HTTP API.
{mapping, "secondary_index_timeout", "riak_kv.secondary_index_timeout", [
{datatype, integer},
{default, 0}
]}.

%% @doc JSON library for 2i results
%% Prior to Riak KV 3.2.1 the mochijson2 library was used to JSON-encode 2i
%% results.  For larger results, the performance of this library is slower
%% when compared to alternatives (e.g. thoas).  The thoas library is now the
%% default - but it may return keys in a different order (i.e. the
%% continuation key may now appear before the results key).  To revert to the
%% previous mochijson behaviour, set this to mochijson.
{mapping, "secondary_index_json", "riak_kv.secondary_index_json", [
{datatype, {enum, [thoas, mochijson]}},
{default, thoas}
]}.

%% @doc For $key index queries, should keys which are tombstones be returned.
%% This setting only makes a difference with the leveled backend; it is
%% ignored on other backends.  Disable to change the default behaviour and
%% stop returning the keys of tombstones in $key queries.
{mapping, "dollarkey_readtombs", "riak_kv.dollarkey_readtombs", [
{datatype, {flag, enabled, disabled}},
{default, enabled}
]}.
]}.
3 changes: 2 additions & 1 deletion rebar.config
Original file line number Diff line number Diff line change
Expand Up @@ -53,5 +53,6 @@
{riak_api, {git, "https://github.com/nhs-riak/riak_api.git", {branch, "nhse-develop"}}},
{hyper, {git, "https://github.com/nhs-riak/hyper", {branch, "nhse-develop"}}},
{kv_index_tictactree, {git, "https://github.com/nhs-riak/kv_index_tictactree.git", {branch, "nhse-develop"}}},
{rhc, {git, "https://github.com/nhs-riak/riak-erlang-http-client", {branch, "nhse-develop"}}}
{rhc, {git, "https://github.com/nhs-riak/riak-erlang-http-client", {branch, "nhse-develop"}}},
{thoas, {git, "https://github.com/nhs-riak/thoas", {branch, "nhse-d32-dialyzer2"}}}
]}.
9 changes: 6 additions & 3 deletions src/riak_client.erl
Original file line number Diff line number Diff line change
Expand Up @@ -1187,8 +1187,10 @@ wait_for_query_results(ReqId, Timeout) ->
%% @doc Collect streamed query result batches tagged with ReqId until a
%% done marker, an error, or the Timeout (milliseconds or infinity) is hit.
%% Batches are accumulated in reverse order and flattened on completion.
wait_for_query_results(ReqId, Timeout, Acc) ->
    receive
        {ReqId, done} ->
            {ok, lists:flatten(lists:reverse(Acc))};
        {ReqId, {results, Res}} ->
            wait_for_query_results(ReqId, Timeout, [Res | Acc]);
        {ReqId, {error, Error}} ->
            {error, Error};
        {ReqId, UnexpectedMsg} ->
            %% Any other message tagged with this ReqId is unexpected -
            %% surface it to the caller as an error rather than looping.
            {error, UnexpectedMsg}
    after Timeout ->
        {error, timeout}
    end.
Expand All @@ -1200,7 +1202,8 @@ wait_for_query_results(ReqId, Timeout, Acc) ->
%% @doc Wait for a single {results, Results} message tagged with ReqId,
%% returning {ok, Results}, a tagged error, or {error, timeout} after
%% Timeout milliseconds.  Unexpected ReqId-tagged messages are returned
%% to the caller as errors rather than being silently discarded.
wait_for_fold_results(ReqId, Timeout) ->
    receive
        {ReqId, {results, Results}} ->
            {ok, Results};
        {ReqId, {error, Error}} ->
            {error, Error};
        {ReqId, UnexpectedMsg} ->
            {error, UnexpectedMsg}
    after Timeout ->
        {error, timeout}
    end.
Expand Down
8 changes: 5 additions & 3 deletions src/riak_index.erl
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@

%% See GH610, this default is for backwards compat, so 2i behaves as
%% it did before the FSM timeout bug was "fixed"
%% Default for riak_kv.secondary_index_timeout when unset.  NOTE(review):
%% a value of 0 is mapped to an infinity query timeout by add_timeout_opt/2.
-define(DEFAULT_TIMEOUT, 0).

%% @type data_type_defs() = [data_type_def()].
%% @type data_type_def() = {MatchFunction :: function(), ParseFunction :: function()}.
Expand Down Expand Up @@ -512,8 +512,10 @@ decode_continuation(Bin) ->
%% We use `infinity' as the default to
%% match the behavior pre 1.4
add_timeout_opt(undefined, Opts) ->
martinsumner marked this conversation as resolved.
Show resolved Hide resolved
Timeout = app_helper:get_env(riak_kv, secondary_index_timeout, ?DEFAULT_TIMEOUT),
[{timeout, Timeout} | Opts];
Timeout =
app_helper:get_env(
riak_kv, secondary_index_timeout, ?DEFAULT_TIMEOUT),
add_timeout_opt(Timeout, Opts);
add_timeout_opt(0, Opts) ->
[{timeout, infinity} | Opts];
add_timeout_opt(Timeout, Opts) ->
Expand Down
1 change: 1 addition & 0 deletions src/riak_kv.app.src
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
recon,
riakc,
rhc,
thoas,
bitcask
]},
{registered, []},
Expand Down
100 changes: 56 additions & 44 deletions src/riak_kv_clusteraae_fsm.erl
Original file line number Diff line number Diff line change
Expand Up @@ -556,50 +556,62 @@ finish(clean, State=#state{from={raw, ReqId, ClientPid}}) ->
%% External functions
%% ===================================================================

-spec json_encode_results(
    query_types()|dispatched_count, query_return()) -> iolist().
%% @doc
%% Encode the results of a query in JSON.
%% Expected this will be called from the webmachine module that needs to
%% generate the response.  Encoding is via thoas, so key order in the
%% output may differ from the historic mochijson2-based encoding.
json_encode_results(merge_root_nval, Root) ->
    Keys = #{<<"root">> => base64:encode_to_string(Root)},
    thoas:encode_to_iodata(Keys);
json_encode_results(merge_branch_nval, Branches) ->
    Keys =
        #{<<"branches">> =>
            lists:map(
                fun({BranchId, BranchBin}) ->
                    #{<<"branch-id">> => BranchId,
                        <<"branch">> => base64:encode_to_string(BranchBin)}
                end,
                Branches)},
    thoas:encode_to_iodata(Keys);
json_encode_results(fetch_clocks_nval, KeysNClocks) ->
    encode_keys_and_clocks(KeysNClocks);
json_encode_results(merge_tree_range, Tree) ->
    %% export_tree/1 returns a mochijson-style {struct, ...} term; unpack
    %% both levels and re-encode as maps for thoas.
    {struct,
        [{<<"level1">>, EncodedL1},
            {<<"level2">>, {struct, EncodedL2}}]} =
        leveled_tictac:export_tree(Tree),
    thoas:encode_to_iodata(
        #{<<"tree">> =>
            #{<<"level1">> => EncodedL1, <<"level2">> => EncodedL2}});
json_encode_results(fetch_clocks_range, KeysNClocks) ->
    encode_keys_and_clocks(KeysNClocks);
json_encode_results(dispatched_count, Count) ->
    %% Shared encoder for all queries whose result is a count of
    %% dispatched operations.
    thoas:encode_to_iodata(#{<<"dispatched_count">> => Count});
json_encode_results(repl_keys_range, ReplResult) ->
    json_encode_results(dispatched_count, element(2, ReplResult));
json_encode_results(repair_keys_range, ReplResult) ->
    json_encode_results(repl_keys_range, ReplResult);
json_encode_results(find_keys, Result) ->
    Keys =
        #{<<"results">> =>
            lists:map(
                fun({_B, Key, Int}) -> encode_find_key(Key, Int) end,
                Result)},
    thoas:encode_to_iodata(Keys);
json_encode_results(find_tombs, Result) ->
    json_encode_results(find_keys, Result);
json_encode_results(reap_tombs, Count) ->
    json_encode_results(dispatched_count, Count);
json_encode_results(erase_keys, Count) ->
    json_encode_results(dispatched_count, Count);
json_encode_results(object_stats, Stats) ->
    thoas:encode_to_iodata(Stats);
json_encode_results(list_buckets, BucketList) ->
    EncodedList = lists:map(fun encode_bucket/1, BucketList),
    thoas:encode_to_iodata(#{<<"results">> => EncodedList}).


-spec pb_encode_results(query_types(), query_definition(), query_return())
Expand Down Expand Up @@ -737,30 +749,30 @@ convert_level2_element({Index, Bin}) ->

-spec encode_keys_and_clocks(keys_clocks()) -> iolist().
%% @doc Encode a list of {Bucket, Key, Clock} tuples as a JSON object with
%% a single <<"keys-clocks">> array, one map per key (see
%% encode_key_and_clock/1).
encode_keys_and_clocks(KeysNClocks) ->
    Keys =
        #{<<"keys-clocks">> =>
            lists:map(fun encode_key_and_clock/1, KeysNClocks)},
    thoas:encode_to_iodata(Keys).

%% @doc Represent a found key and its associated value as a map ready for
%% JSON encoding.
encode_find_key(Key, Value) ->
    #{<<"key">> => Key, <<"value">> => Value}.

%% @doc Represent a bucket as a map ready for JSON encoding; typed buckets
%% carry both the type and the bucket name, untyped buckets only the name.
encode_bucket({Type, Bucket}) ->
    #{<<"bucket-type">> => Type, <<"bucket">> => Bucket};
encode_bucket(Bucket) ->
    #{<<"bucket">> => Bucket}.

%% @doc Represent a {Bucket, Key, Clock} tuple as a map ready for JSON
%% encoding; the vector clock is encoded and then base64'd.  A typed
%% bucket additionally yields a <<"bucket-type">> entry.
encode_key_and_clock({{Type, Bucket}, Key, Clock}) ->
    EncodedClock =
        base64:encode_to_string(riak_object:encode_vclock(Clock)),
    #{<<"bucket-type">> => Type,
        <<"bucket">> => Bucket,
        <<"key">> => Key,
        <<"clock">> => EncodedClock};
encode_key_and_clock({Bucket, Key, Clock}) ->
    EncodedClock =
        base64:encode_to_string(riak_object:encode_vclock(Clock)),
    #{<<"bucket">> => Bucket,
        <<"key">> => Key,
        <<"clock">> => EncodedClock}.

-spec hash_function(hash_method()) ->
pre_hash|fun((vclock:vclock()) -> non_neg_integer()).
Expand Down
Loading
Loading