Commit

Minor modification to build with g++13/spdlog-1.12
songweijia committed Jul 12, 2024
1 parent 323f068 commit e387b22
Showing 5 changed files with 39 additions and 27 deletions.
scripts/prerequisites/install-json.sh (4 changes: 2 additions & 2 deletions)
@@ -12,8 +12,8 @@ WORKPATH=`mktemp -d`
cd ${WORKPATH}
git clone https://github.com/nlohmann/json.git
cd json
-git checkout v3.11.1
+git checkout v3.11.3
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} .
make -j `lscpu | grep "^CPU(" | awk '{print $2}'`
make install
-rm -rf ${WORKPATH}
+rm -rf ${WORKPATH}
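The only functional change in this script is pinning nlohmann/json to v3.11.3 instead of v3.11.1, presumably for compatibility with newer toolchains such as g++13. As a sanity check (not part of the commit), a small program like the following prints the version that actually got installed, assuming the library landed in a prefix your compiler can see; the version macros come from nlohmann/json itself:

#include <iostream>
#include <nlohmann/json.hpp>

// Expect this to print 3.11.3 once install-json.sh has run with the new tag.
int main() {
    std::cout << NLOHMANN_JSON_VERSION_MAJOR << '.'
              << NLOHMANN_JSON_VERSION_MINOR << '.'
              << NLOHMANN_JSON_VERSION_PATCH << std::endl;
    return 0;
}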
src/applications/demos/simple_replicated_objects_overlap.cpp (4 changes: 2 additions & 2 deletions)
@@ -51,7 +51,7 @@ int main(int argc, char** argv) {
#endif//NOLOG
// Replicated<Foo>& foo_rpc_handle = group.get_subgroup<Foo>();
dbg_default_crit("Here is FOO {}!", rank_in_foo);
dbg_default_crit("I see members of my shard: {}", foo_members);
dbg_default_crit("I see members of my shard: {}", fmt::join(foo_members,"|"));
}
auto find_in_bar_results = std::find(bar_members.begin(), bar_members.end(), my_id);
if(find_in_bar_results != bar_members.end()) {
@@ -60,7 +60,7 @@ int main(int argc, char** argv) {
#endif//NOLOG
// Replicated<Bar>& bar_rpc_handle = group.get_subgroup<Bar>();
dbg_default_crit("Here is BAR {}!", rank_in_bar);
dbg_default_crit("I see members of my shard: {}", bar_members);
dbg_default_crit("I see members of my shard: {}", fmt::join(bar_members,"|"));
}

cout << "Reached end of main(), entering infinite loop so program doesn't exit" << std::endl;
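The change in this demo (repeated throughout the test files below) wraps containers in fmt::join with an explicit separator instead of passing them straight into the logging macros, which sidesteps differences in how the fmt versions used with spdlog 1.12 handle ranges. A minimal sketch of the idiom, using plain spdlog calls rather than Derecho's dbg_default_* macros; the header names assume spdlog's usual wrapper headers and its bundled fmt:

#include <cstdint>
#include <set>
#include <vector>

#include <spdlog/spdlog.h>
#include <spdlog/fmt/ranges.h>  // makes fmt::join (and range formatting) available

int main() {
    std::vector<uint32_t> foo_members{1, 2, 3};
    std::set<int> ranks_to_fail{1, 3, 17, 38, 40};

    // fmt::join formats each element separated by the given string, so this
    // logs "I see members of my shard: 1|2|3" on old and new fmt alike.
    spdlog::info("I see members of my shard: {}", fmt::join(foo_members, "|"));
    spdlog::info("Failing some nodes that are in subgroups: {}", fmt::join(ranks_to_fail, ","));
    return 0;
}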
src/applications/tests/unit_tests/client_callback_mockup.cpp (5 changes: 3 additions & 2 deletions)
@@ -147,7 +147,7 @@ std::pair<persistent::version_t, uint64_t> InternalClientNode::submit_update(uin
}

void InternalClientNode::receive_callback(const ClientCallbackType& callback_type, persistent::version_t version, derecho::subgroup_id_t sending_subgroup) const {
dbg_default_debug("Got a callback of type {} for version {}", callback_type, version);
dbg_default_debug("Got a callback of type {} for version {}", static_cast<int>(callback_type), version);
//Send the callback event to the callback thread then return, so the P2P receive thread doesn't block for too long
std::unique_lock<std::mutex> lock(event_queue_mutex);
callback_event_queue.emplace(CallbackEvent{callback_type, version, sending_subgroup});
@@ -263,7 +263,8 @@ void StorageNode::callback_thread_function() {
for(auto requests_iter = requests_by_version.begin();
requests_iter != requests_by_version.end();) {
persistent::version_t requested_version = requests_iter->first;
dbg_default_debug("Callback thread checking a callback of type {} for version {}", requests_iter->second.callback_type, requested_version);
dbg_default_debug("Callback thread checking a callback of type {} for version {}",
static_cast<int>(requests_iter->second.callback_type), requested_version);
bool requested_event_happened = false;
{
std::unique_lock<std::mutex> query_results_lock(callback_thread_mutex);
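The second pattern in this commit shows up here: enum values that were previously passed directly to the format string are now cast to int, so the log call no longer relies on fmt's enum handling, which has changed across the fmt releases spdlog 1.12 can be paired with. A self-contained sketch of the same idiom; the enum definition below is illustrative, not Derecho's actual ClientCallbackType declaration:

#include <spdlog/spdlog.h>

// Illustrative stand-in for Derecho's ClientCallbackType.
enum class ClientCallbackType { LOCAL_PERSISTENCE, GLOBAL_PERSISTENCE, SIGNATURE };

int main() {
    spdlog::set_level(spdlog::level::debug);  // make debug-level output visible
    ClientCallbackType callback_type = ClientCallbackType::GLOBAL_PERSISTENCE;
    // Casting to the underlying integer makes the argument a plain int,
    // which every fmt version can format without a custom formatter.
    spdlog::debug("Got a callback of type {} for version {}",
                  static_cast<int>(callback_type), 42);
    return 0;
}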
src/applications/tests/unit_tests/subgroup_function_tester.cpp (18 changes: 10 additions & 8 deletions)
@@ -70,14 +70,15 @@ void test_fixed_allocation_functions() {
derecho::test_provision_subgroups(test_fixed_subgroups, nullptr, *curr_view);

std::set<int> ranks_to_fail{1, 3, 17, 38, 40};
rls_default_info("TEST 2: Failing some nodes that are in subgroups: {}", ranks_to_fail);
rls_default_info("TEST 2: Failing some nodes that are in subgroups: {}", fmt::join(ranks_to_fail,","));
std::unique_ptr<derecho::View> prev_view(std::move(curr_view));
curr_view = derecho::make_next_view(*prev_view, ranks_to_fail, {}, {});

derecho::test_provision_subgroups(test_fixed_subgroups, prev_view, *curr_view);

std::set<int> more_ranks_to_fail{13, 20, 59, 78, 89};
rls_default_info("TEST 3: Failing nodes both before and after the pointer. Ranks are {}", more_ranks_to_fail);
rls_default_info("TEST 3: Failing nodes both before and after the pointer. Ranks are {}",
fmt::join(more_ranks_to_fail,","));
prev_view.swap(curr_view);
curr_view = derecho::make_next_view(*prev_view, more_ranks_to_fail, {}, {});

@@ -144,13 +145,14 @@ void test_flexible_allocation_functions() {
derecho::test_provision_subgroups(test_flexible_subgroups, nullptr, *curr_view);

std::set<int> flexible_ranks_to_fail{3, 6, 31, 45, 57};
rls_default_info("TEST 7: Failing some nodes that are in subgroups: {}", flexible_ranks_to_fail);
rls_default_info("TEST 7: Failing some nodes that are in subgroups: {}", fmt::join(flexible_ranks_to_fail,","));
std::unique_ptr<derecho::View> prev_view(std::move(curr_view));
curr_view = derecho::make_next_view(*prev_view, flexible_ranks_to_fail, {}, {});
derecho::test_provision_subgroups(test_flexible_subgroups, prev_view, *curr_view);

std::set<int> flexible_ranks_to_fail_2{7, 8, 17, 18, 40, 41, 51, 61, 62};
rls_default_info("TEST 8: Failing more nodes so that shards must shrink. Ranks are: {}", flexible_ranks_to_fail_2);
rls_default_info("TEST 8: Failing more nodes so that shards must shrink. Ranks are: {}",
fmt::join(flexible_ranks_to_fail_2,","));
prev_view.swap(curr_view);
curr_view = derecho::make_next_view(*prev_view, flexible_ranks_to_fail_2, {}, {});
derecho::test_provision_subgroups(test_flexible_subgroups, prev_view, *curr_view);
@@ -227,7 +229,7 @@ void test_json_layout() {
rls_default_info("TEST 12: Nodes 0 and 2 fail; 2 is in both reserved node lists");
prev_view.swap(curr_view);
curr_view = derecho::make_next_view(*prev_view, ranks_to_fail, {}, {});
dbg_default_debug("New view has members: {}", curr_view->members);
dbg_default_debug("New view has members: {}", fmt::join(curr_view->members,"|"));
derecho::test_provision_subgroups(test_json_overlapping, prev_view, *curr_view);

std::vector<node_id_t> new_members_outside_reservation{5, 6};
@@ -236,7 +238,7 @@
rls_default_info("TEST 13: Nodes 5 and 6 join");
prev_view.swap(curr_view);
curr_view = derecho::make_next_view(*prev_view, {}, new_members_outside_reservation, ips_and_ports_2);
dbg_default_debug("New view has members: {}", curr_view->members);
dbg_default_debug("New view has members: {}", fmt::join(curr_view->members,"|"));
derecho::test_provision_subgroups(test_json_overlapping, prev_view, *curr_view);

std::set<int> ranks_to_fail_2{1, 3};
@@ -245,7 +247,7 @@
rls_default_info("TEST 14: Nodes 3 and 5 fail, node 2 rejoins");
prev_view.swap(curr_view);
curr_view = derecho::make_next_view(*prev_view, ranks_to_fail_2, node_rejoined, node_2_ip);
dbg_default_debug("New view has members: {}", curr_view->members);
dbg_default_debug("New view has members: {}", fmt::join(curr_view->members,"|"));
derecho::test_provision_subgroups(test_json_overlapping, prev_view, *curr_view);
}

@@ -288,7 +290,7 @@ void test_provision_subgroups(const SubgroupInfo& subgroup_info,
int32_t initial_next_unassigned_rank = curr_view.next_unassigned_rank;
curr_view.subgroup_shard_views.clear();
curr_view.subgroup_ids_by_type_id.clear();
rls_default_info("View has there members: {}", curr_view.members);
rls_default_info("View has there members: {}", fmt::join(curr_view.members,"|"));
std::map<std::type_index, subgroup_shard_layout_t> subgroup_allocations;
try {
auto temp = subgroup_info.subgroup_membership_function(curr_view.subgroup_type_order,
src/core/subgroup_functions.cpp (35 changes: 22 additions & 13 deletions)
@@ -199,7 +199,7 @@ void DefaultSubgroupAllocator::compute_standard_memberships(
surviving_member_set.begin(), surviving_member_set.end(),
active_reserved_node_id_set.begin(), active_reserved_node_id_set.end(),
std::inserter(curr_members, curr_members.end()));
dbg_default_trace("With inherent nodes, curr_members is: {}", curr_members);
dbg_default_trace("With inherent nodes, curr_members is: {}", fmt::join(curr_members,"|"));

curr_view.next_unassigned_rank = curr_members.size();
dbg_default_trace("After rearranging inherent node_ids, curr_view.next_unassigned_rank is {}", curr_view.next_unassigned_rank);
@@ -208,7 +208,7 @@ void DefaultSubgroupAllocator::compute_standard_memberships(
added_member_set.begin(), added_member_set.end(),
all_reserved_node_ids.begin(), all_reserved_node_ids.end(),
std::inserter(curr_members, curr_members.end()));
dbg_default_trace("Adding newly added non-reserved nodes, curr_members is: {}", curr_members);
dbg_default_trace("Adding newly added non-reserved nodes, curr_members is: {}", fmt::join(curr_members,"|"));
}

for(uint32_t subgroup_type_id = 0; subgroup_type_id < subgroup_type_order.size();
@@ -247,7 +247,7 @@ DefaultSubgroupAllocator::compute_standard_shard_sizes(
all_reserved_node_ids.begin(), all_reserved_node_ids.end(),
std::inserter(all_active_reserved_node_id_set, all_active_reserved_node_id_set.begin()));

dbg_default_trace("Parsing all_active_reserved_node_id_set: {}", all_active_reserved_node_id_set);
dbg_default_trace("Parsing all_active_reserved_node_id_set: {}", fmt::join(all_active_reserved_node_id_set,","));

nodes_needed = all_active_reserved_node_id_set.size();
dbg_default_trace("After counting all_active_reserved_node_id_set, nodes_needed is {}", nodes_needed);
@@ -312,7 +312,8 @@ DefaultSubgroupAllocator::compute_standard_shard_sizes(
dbg_default_trace("There is no reserved node_id configured.");
}

dbg_default_trace("The active_reserved_node_id_set for current shard is: {}", active_reserved_node_id_set);
dbg_default_trace("The active_reserved_node_id_set for current shard is: {}",
fmt::join(active_reserved_node_id_set,","));

/* The inherent_node_id_set holds node_ids that are "inherent" or "intrinsic"
* to the this shard, for the node_ids are either surviving nodes from "the same shard"
@@ -323,7 +324,8 @@ DefaultSubgroupAllocator::compute_standard_shard_sizes(
survived_node_set.begin(), survived_node_set.end(),
active_reserved_node_id_set.begin(), active_reserved_node_id_set.end(),
std::inserter(inherent_node_id_set, inherent_node_id_set.end()));
dbg_default_trace("The inherent_node_id_set for current shard is: {}", inherent_node_id_set);
dbg_default_trace("The inherent_node_id_set for current shard is: {}",
fmt::join(inherent_node_id_set,","));
// All active reserved nodes just count once.
nodes_needed += inherent_node_id_set.size() - active_reserved_node_id_set.size();

@@ -426,7 +428,8 @@ subgroup_shard_layout_t DefaultSubgroupAllocator::allocate_standard_subgroup_typ
curr_member_set.begin(), curr_member_set.end(),
std::inserter(desired_nodes, desired_nodes.end()));
shard_size -= desired_nodes.size();
dbg_default_trace("Assigning shard {} active reserved nodes: {}", desired_nodes.size(), desired_nodes);
dbg_default_trace("Assigning shard {} active reserved nodes: {}", desired_nodes.size(),
fmt::join(desired_nodes,"|"));
}
} else {
dbg_default_trace("There is no reserved node_id configured.");
@@ -440,7 +443,9 @@
// unassigned normal nodes, which I (Lichen) think is just OK and not in conflict with its definition.
curr_view.next_unassigned_rank += shard_size;

dbg_default_trace("Assigning shard {} nodes in total, with curr_view.next_unassigned_rank {}: {}", desired_nodes.size(), curr_view.next_unassigned_rank, desired_nodes);
dbg_default_trace("Assigning shard {} nodes in total, with curr_view.next_unassigned_rank {}: {}",
desired_nodes.size(),
curr_view.next_unassigned_rank, fmt::join(desired_nodes,"|"));

// Figure out the sender list
std::vector<int> is_sender;
@@ -489,8 +494,8 @@ subgroup_shard_layout_t DefaultSubgroupAllocator::update_standard_subgroup_type(
const subgroup_id_t previous_assignment_offset = prev_view->subgroup_ids_by_type_id.at(subgroup_type_id)[0];
subgroup_shard_layout_t next_assignment(shard_sizes.at(subgroup_type).size());

dbg_default_trace("The surviving_member_set is: {}", surviving_member_set);
dbg_default_trace("The added_member_set is: {}", added_member_set);
dbg_default_trace("The surviving_member_set is: {}", fmt::join(surviving_member_set,","));
dbg_default_trace("The added_member_set is: {}", fmt::join(added_member_set,","));

const SubgroupAllocationPolicy& subgroup_type_policy
= std::get<SubgroupAllocationPolicy>(policies.at(subgroup_type));
@@ -515,7 +520,8 @@ subgroup_shard_layout_t DefaultSubgroupAllocator::update_standard_subgroup_type(
next_shard_members.push_back(previous_shard_assignment.members[rank]);
next_is_sender.push_back(previous_shard_assignment.is_sender[rank]);
}
dbg_default_trace("After assigning surviving nodes, next_shard_members is: {}", next_shard_members);
dbg_default_trace("After assigning surviving nodes, next_shard_members is: {}",
fmt::join(next_shard_members,"|"));

const ShardAllocationPolicy& sharding_policy
= subgroup_type_policy.identical_subgroups
@@ -531,7 +537,8 @@ subgroup_shard_layout_t DefaultSubgroupAllocator::update_standard_subgroup_type(
sharding_policy.reserved_node_ids_by_shard[shard_num].end(),
std::inserter(added_reserved_node_id_set, added_reserved_node_id_set.end()));
if(added_reserved_node_id_set.size() > 0) {
dbg_default_trace("The added_reserved_node_id_set is not empty: {}", added_reserved_node_id_set);
dbg_default_trace("The added_reserved_node_id_set is not empty: {}",
fmt::join(added_reserved_node_id_set,"|"));

for(auto node_id : added_reserved_node_id_set) {
next_shard_members.push_back(node_id);
@@ -547,7 +554,7 @@ subgroup_shard_layout_t DefaultSubgroupAllocator::update_standard_subgroup_type(
}
}
}
dbg_default_trace("After assigning newly added reserved nodes, we get {} inherent node_id(s) assigned, next_shard_members is: {}", next_shard_members.size(), next_shard_members);
dbg_default_trace("After assigning newly added reserved nodes, we get {} inherent node_id(s) assigned, next_shard_members is: {}", next_shard_members.size(), fmt::join(next_shard_members,"|"));
} else {
dbg_default_trace("There is no reserved node_id configured.");
}
@@ -561,7 +568,9 @@ subgroup_shard_layout_t DefaultSubgroupAllocator::update_standard_subgroup_type(
//If senders are not specified, all nodes are senders; otherwise, additional members are not senders.
next_is_sender.push_back(sharding_policy.reserved_sender_ids_by_shard.empty() ? true : sharding_policy.reserved_sender_ids_by_shard[shard_num].empty());
}
dbg_default_trace("Assigned shard {} nodes in total, with curr_view.next_unassigned_rank {}: {}", next_shard_members.size(), curr_view.next_unassigned_rank, next_shard_members);
dbg_default_trace("Assigned shard {} nodes in total, with curr_view.next_unassigned_rank {}: {}",
next_shard_members.size(),
curr_view.next_unassigned_rank, fmt::join(next_shard_members,"|"));

next_assignment[subgroup_num].emplace_back(curr_view.make_subview(next_shard_members,
previous_shard_assignment.mode,
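A hedged aside on the design choice: an alternative to wrapping every container in fmt::join would be to enable fmt's range formatting (for example via spdlog/fmt/ranges.h, assuming spdlog's bundled fmt) and keep passing containers directly, which prints them as "[1, 2, 3]". The fmt::join approach used throughout this commit keeps the separator explicit and the change local to each call site, for example:

#include <vector>

#include <spdlog/spdlog.h>
#include <spdlog/fmt/ranges.h>

int main() {
    std::vector<int> members{1, 2, 3};
    spdlog::info("members = {}", members);                  // "[1, 2, 3]" via range formatting
    spdlog::info("members = {}", fmt::join(members, "|"));  // "1|2|3", as in the diff above
    return 0;
}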
