feat: concurrent lookups back to the primary table; remove the record-based index range code #240

Closed · wants to merge 1 commit
4 changes: 2 additions & 2 deletions CMakeLists.txt
@@ -432,8 +432,8 @@ if (NOT DEBUG)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DNDEBUG")
endif ()

SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 ${DEBUG_SYMBOL} -O2 -pipe -m64 -fopenmp -Wall -W -fPIC -Wno-unused-parameter -Wno-strict-aliasing -Wno-parentheses -fno-omit-frame-pointer ")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99 ${DEBUG_SYMBOL} -O2 -pipe -m64 -fopenmp -Wall -W -fPIC -Wno-unused-parameter -Wno-strict-aliasing -Wno-parentheses -fno-omit-frame-pointer")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 ${DEBUG_SYMBOL} -O2 -pipe -m64 -fopenmp -Wall -g -W -fPIC -Wno-unused-parameter -Wno-strict-aliasing -Wno-parentheses -fno-omit-frame-pointer ")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99 ${DEBUG_SYMBOL} -O2 -pipe -m64 -fopenmp -Wall -g -W -fPIC -Wno-unused-parameter -Wno-strict-aliasing -Wno-parentheses -fno-omit-frame-pointer")

add_definitions(
-D_GNU_SOURCE
8 changes: 8 additions & 0 deletions include/common/schema_factory.h
@@ -60,6 +60,7 @@ static const std::string TABLE_BINLOG_BACKUP_DAYS = "binlog_backup_days"; //

struct UserInfo;
class TableRecord;
class MutTableKey;
typedef std::shared_ptr<TableRecord> SmartRecord;
typedef std::map<std::string, int64_t> StrInt64Map;

@@ -975,6 +976,13 @@ typedef ::google::protobuf::RepeatedPtrField<pb::Statistics> StatisticsVec;
const std::vector<SmartRecord>& records,
std::vector<int64_t>& region_ids);

int get_region_by_primary_key(int64_t main_table_id,
IndexInfo& index,
MutTableKey &primary_key,
int partition_id,
pb::RegionInfo &region_info
);

bool exist_tableid(int64_t table_id);
void get_all_table_by_db(const std::string& namespace_, const std::string& db_name, std::vector<SmartTable>& table_ptrs);
void get_all_table_version(std::unordered_map<int64_t, int64_t>& table_id_version);
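
The new get_region_by_primary_key maps one encoded primary key directly to the region that owns it, which is what a concurrent lookup back to the primary table needs after a secondary-index scan. A hedged sketch of a possible caller follows (the routing function, the grouping map, and the 0-on-success convention are illustrative assumptions, not part of this PR):

```cpp
#include <map>
#include <vector>
#include "common/schema_factory.h"  // assumed include path for SchemaFactory / MutTableKey

// Hedged sketch: group recovered primary keys by the region that owns them,
// so one lookup request per region can be issued later.
int route_primary_keys(int64_t main_table_id,
                       IndexInfo& pk_index,
                       int partition_id,
                       std::vector<MutTableKey>& primary_keys,
                       std::map<int64_t, std::vector<MutTableKey*>>& region_keys) {
    auto* factory = SchemaFactory::get_instance();
    for (auto& key : primary_keys) {
        pb::RegionInfo region_info;
        // assumed convention: returns 0 when a covering region is found
        if (factory->get_region_by_primary_key(main_table_id, pk_index, key,
                                               partition_id, region_info) != 0) {
            return -1;
        }
        region_keys[region_info.region_id()].emplace_back(&key);
    }
    return 0;
}
```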
21 changes: 0 additions & 21 deletions include/engine/table_iterator.h
@@ -57,27 +57,6 @@ struct IndexRange {

IndexRange() {}

IndexRange(TableRecord* _left,
TableRecord* _right,
IndexInfo* _index_info,
IndexInfo* _pri_info,
pb::RegionInfo* _region_info,
int left_cnt,
int right_cnt,
bool _l_open,
bool _r_open,
bool _like_prefix) :
left(_left),
right(_right),
index_info(_index_info),
pri_info(_pri_info),
region_info(_region_info),
left_field_cnt(left_cnt),
right_field_cnt(right_cnt),
left_open(_l_open),
right_open(_r_open),
like_prefix(_like_prefix) {}
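
With the record-based constructor above removed, ranges are described by pre-encoded MutTableKey bounds, as in the constructor kept below. A minimal sketch of that style, assuming MutTableKey exposes append_* encoding helpers (the helper names and the field layout are for illustration only):

```cpp
#include <string>

// Hedged sketch: encode the range bounds once into keys instead of carrying
// decoded TableRecord objects through the scan. append_i64/append_string are
// assumed MutTableKey helpers; the index layout (an i64 prefix plus a string
// field) is made up for the example.
void encode_range_keys(int64_t user_id,
                       const std::string& start_date,
                       const std::string& end_date,
                       MutTableKey& left_key,
                       MutTableKey& right_key) {
    left_key.append_i64(user_id);        // shared prefix of the index
    left_key.append_string(start_date);  // lower bound of the next field
    right_key.append_i64(user_id);
    right_key.append_string(end_date);   // upper bound of the next field
    // the encoded keys, together with field counts and open flags, then feed
    // the MutTableKey-based IndexRange constructor kept below
}
```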

IndexRange(const MutTableKey& _left,
const MutTableKey& _right,
IndexInfo* _index_info,
128 changes: 118 additions & 10 deletions include/exec/fetcher_store.h
@@ -61,7 +61,11 @@ class OnRPCDone: public google::protobuf::Closure {
void retry_times_inc() {
_retry_times++;
}


void set_primary_indexes(std::string *primary_indexes) {
_primary_indexes = primary_indexes;
}

void set_rpc_ctrl(RPCCtrl* ctrl) {
_rpc_ctrl = ctrl;
}
@@ -139,6 +143,7 @@ class OnRPCDone: public google::protobuf::Closure {
static bvar::LatencyRecorder total_send_request;
static bvar::LatencyRecorder add_backup_send_request;
static bvar::LatencyRecorder has_backup_send_request;
std::string *_primary_indexes = nullptr;
};

// RPCCtrl only controls asynchronous sending and concurrency of RPCs; whether each RPC succeeded is collected by fetcher_store
@@ -153,12 +158,11 @@ class RPCCtrl {
std::unique_lock<bthread::Mutex> lck(_mutex);
while(true) {
// done
if (_todo_cnt == 0 && _doing_cnt == 0) {
return 1;
}
// if (_todo_cnt == 0 && _doing_cnt == 0) {
// return 1;
// }

// fetch tasks
tasks.clear();
if (_todo_cnt > 0) {
for (auto& iter : _ip_task_group_map) {
auto task_group = iter.second;
@@ -178,7 +182,11 @@

if (tasks.empty()) {
// no tasks fetched; wait to be woken up
_cv.wait(lck);
if ((!_is_pipeline || _add_task_finish) && _todo_cnt == 0 && _doing_cnt == 0) {
return 1;
} else {
_cv.wait(lck);
}
} else {
// tasks fetched successfully
return 0;
@@ -200,6 +208,7 @@
}
task->set_rpc_ctrl(this);
_todo_cnt++;
_cv.notify_one();
}

void task_finish(OnRPCDone* task) {
@@ -227,21 +236,41 @@
_cv.notify_one();
}


void execute() {
while (true) {
std::vector<OnRPCDone*> tasks;
int ret = fetch_task(tasks);
if (ret == 1) {
return;
}

for (OnRPCDone* task : tasks) {
task->send_request();
}
}
}

void set_pipeline() {
_is_pipeline = true;
if (!_bthread_started) {
_bthread_started = true;
_bth.run([this](){execute();});
}
}

void wait_finish() {
{
std::unique_lock<bthread::Mutex> lck(_mutex);
_add_task_finish = true;
_cv.notify_one();
}
_bth.join();
_bthread_started = false;
}

void set_task_concurrency_per_group(int concurrency) {
_task_concurrency_per_group = concurrency;
}

private:
struct TaskGroup {
TaskGroup() { }
@@ -274,9 +303,13 @@
int _todo_cnt = 0;
int _done_cnt = 0;
int _doing_cnt = 0;
bool _add_task_finish = false;
bool _is_pipeline = false;
std::map<std::string, std::shared_ptr<TaskGroup>> _ip_task_group_map;
bthread::ConditionVariable _cv;
bthread::Mutex _mutex;
Bthread _bth;
bool _bthread_started = false;
};

// used when the global secondary index is degraded: primary and backup requests are sent to different clusters
@@ -327,10 +360,20 @@ class FetcherStore {
FetcherStore() {
}
virtual ~FetcherStore() {
SAFE_DELETE(_rpc_control);
}


void init_rpc_control(RuntimeState * state, pb::OpType opType) {
if (_rpc_control == nullptr) {
_rpc_control = new RPCCtrl(state->calc_single_store_concurrency(opType));
} else {
_rpc_control->set_task_concurrency_per_group(state->calc_single_store_concurrency(opType));
}
}

void clear() {
region_batch.clear();
region_batch_list.clear();
index_records.clear();
start_key_sort.clear();
error = E_OK;
@@ -399,13 +442,74 @@
}

int run_not_set_state(RuntimeState* state,
std::map<int64_t, pb::RegionInfo>& region_infos,
std::map<int64_t, pb::RegionInfo>& region_info,
ExecNode* store_request,
int start_seq_id,
int current_seq_id,
pb::OpType op_type,
GlobalBackupType backup_type);

int fetcher_select_with_region_primary(RuntimeState* state,
pb::RegionInfo *region_info,
std::string *region_primary,
ExecNode* store_request,
int start_seq_id,
int current_seq_id){

uint64_t log_id = state->log_id();
pb::OpType op_type = pb::OP_SELECT;
if (!_is_pipeline) {
set_pipeline_mode(state, op_type);
}
auto task = new OnRPCDone(this, state, store_request, region_info,
region_info->region_id(), region_info->region_id(), start_seq_id, current_seq_id, op_type);
task->set_primary_indexes(region_primary);
_rpc_control->add_new_task(task);
_traces.insert(task->get_trace());
return 0;
}

void clear_result(RuntimeState *state) {
region_batch.clear();
region_batch_list.clear();
index_records.clear();
start_key_sort.clear();
no_copy_cache_plan_set.clear();
error = E_OK;
skip_region_set.clear();
callids.clear();
primary_timestamp_updated = false;
affected_rows = 0;
scan_rows = 0;
filter_rows = 0;
row_cnt = 0;
client_conn = state->client_conn();
_traces.clear();
}

void set_pipeline_mode(RuntimeState *state, pb::OpType op_type ) {
_is_pipeline = true;
if(_rpc_control == nullptr) {
init_rpc_control(state, op_type);
_rpc_control->set_pipeline();
}
}

bool is_pipeline() {
return _is_pipeline;
}

void wait_finish() {
if (_rpc_control != nullptr) {
_rpc_control->wait_finish();
}
}


std::set<std::shared_ptr<pb::TraceNode>> & get_traces() {
return _traces;
}

int run(RuntimeState* state,
std::map<int64_t, pb::RegionInfo>& region_infos,
ExecNode* store_request,
@@ -563,6 +667,7 @@ class FetcherStore {
std::map<int64_t, std::vector<std::string>> return_str_records;
std::map<int64_t, std::vector<std::string>> return_str_old_records;
std::map<int64_t, std::shared_ptr<RowBatch>> region_batch;
std::map<int64_t, std::vector<std::shared_ptr<RowBatch>>> region_batch_list;
//std::map<int64_t, std::shared_ptr<RowBatch>> split_region_batch;
std::map<int64_t, std::vector<int64_t>> region_id_ttl_timestamp_batch;

@@ -592,6 +697,9 @@
bool need_send_rollback = true;
WriteBinlogParam write_binlog_param;
GlobalBackupType global_backup_type = GBT_INIT;
RPCCtrl *_rpc_control = nullptr;
bool _is_pipeline = false;
std::set<std::shared_ptr<pb::TraceNode>> _traces;
};
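
Taken together, these additions turn FetcherStore into a small producer/consumer pipeline: the caller enqueues one SELECT task per region while the background bthread inside RPCCtrl drains and sends them, and wait_finish() marks the producer side done and joins that worker. A hedged sketch of the intended call pattern (the driver function, the seq-id values, and the per-region primary-index strings are illustrative assumptions, not code from this PR):

```cpp
// Hedged sketch; error handling and consumption of region_batch_list are omitted.
int concurrent_primary_lookup(RuntimeState* state,
                              FetcherStore& fetcher_store,
                              ExecNode* store_request,
                              std::map<int64_t, pb::RegionInfo>& region_infos,
                              std::map<int64_t, std::string>& region_primary) {
    // creates the RPCCtrl and starts the bthread that sends tasks as they arrive
    fetcher_store.set_pipeline_mode(state, pb::OP_SELECT);
    for (auto& [region_id, info] : region_infos) {
        // producer side: each call only enqueues an OnRPCDone task; RPCCtrl's
        // execute() loop picks it up and sends it under the per-store concurrency limit
        fetcher_store.fetcher_select_with_region_primary(
                state, &info, &region_primary[region_id], store_request,
                /*start_seq_id=*/1, /*current_seq_id=*/1);
    }
    // no more tasks will be added; block until every in-flight request completes
    fetcher_store.wait_finish();
    return fetcher_store.error == E_OK ? 0 : -1;
}
```

Since fetcher_select_with_region_primary switches to pipeline mode lazily on its first call, the explicit set_pipeline_mode call is optional and is shown here only to make the lifecycle visible.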

template<typename Repeated>
12 changes: 6 additions & 6 deletions include/exec/rocksdb_scan_node.h
@@ -209,14 +209,14 @@ class RocksdbScanNode : public ScanNode {
BatchTableKey _scan_range_keys;
BatchRecord _multiget_records;
RowBatch _multiget_row_batch;
std::vector<int> _left_field_cnts;
std::vector<int> _right_field_cnts;
std::vector<bool> _left_opens;
std::vector<bool> _right_opens;
std::vector<bool> _like_prefixs;
int _left_field_cnt = 0;
int _right_field_cnt = 0;
bool _left_open = false;
bool _right_open = false;
bool _like_prefix = false;
bool _is_eq = false;
std::vector<pb::SlotDescriptor> _update_slots;
std::vector<ExprNode*> _update_exprs;
bool _use_encoded_key = false;
bool _range_key_sorted = false;
// used for tracing
int _scan_rows = 0;
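
The single set of bound scalars becomes one entry per scan range: the five vectors are parallel to _scan_range_keys, so every encoded key range carries its own field counts and open/like-prefix flags. A hedged sketch of how such parallel vectors are typically consumed (the helper and the callback are illustrative, not from this PR):

```cpp
#include <cstddef>
#include <vector>

// Hedged sketch: walk parallel per-range vectors; index i describes range i.
template <typename BuildRangeFn>
void for_each_scan_range(const std::vector<int>& left_field_cnts,
                         const std::vector<int>& right_field_cnts,
                         const std::vector<bool>& left_opens,
                         const std::vector<bool>& right_opens,
                         const std::vector<bool>& like_prefixs,
                         BuildRangeFn build_range) {
    for (size_t i = 0; i < left_field_cnts.size(); ++i) {
        // field counts say how many index fields the encoded bound covers;
        // the bool flags say whether each bound is open and whether the range
        // is a LIKE-prefix match
        build_range(i, left_field_cnts[i], right_field_cnts[i],
                    left_opens[i], right_opens[i], like_prefixs[i]);
    }
}
```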
29 changes: 18 additions & 11 deletions include/exec/scan_node.h
@@ -269,6 +269,17 @@ class ScanNode : public ExecNode {
index.use_for = use_for;
_scan_indexs.emplace_back(std::move(index));
bool has_index = false;
if (pos_index.has_sort_index()) {
_has_index = true;
return;
}
if (pos_index.left_field_cnt() > 0) {
_has_index = true;
return;
}
if (pos_index.right_field_cnt() > 0) {
return;
}
for (auto& range : pos_index.ranges()) {
if (range.left_field_cnt() > 0) {
has_index = true;
@@ -278,10 +289,6 @@
has_index = true;
break;
}
if (pos_index.has_sort_index()) {
has_index = true;
break;
}
}
_has_index = _has_index || has_index;
}
@@ -343,23 +350,23 @@
}

void set_index_useage_and_lock(bool use_global_backup) {
_current_index_mutex.lock();
// only take the lock when a global backup index exists
for (auto& scan_index_info : _scan_indexs) {
if (scan_index_info.use_for == ScanIndexInfo::U_GLOBAL_LEARNER) {
_current_index_mutex.lock();
_current_global_backup = use_global_backup;
break;
}
}
}

void current_index_unlock() {
for (auto& scan_index_info : _scan_indexs) {
if (scan_index_info.use_for == ScanIndexInfo::U_GLOBAL_LEARNER) {
_current_index_mutex.unlock();
break;
}
}
_current_index_mutex.unlock();
// for (auto& scan_index_info : _scan_indexs) {
// if (scan_index_info.use_for == ScanIndexInfo::U_GLOBAL_LEARNER) {
// break;
// }
// }
}
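
Before this change the mutex was locked and unlocked only when a U_GLOBAL_LEARNER index was found; if the two loops ever disagreed, the unlock could run on a mutex that was never locked. Locking and unlocking unconditionally keeps the pair balanced. A hedged sketch of the same guarantee expressed with RAII (std::mutex stands in for the repo's mutex type, and the function name is hypothetical):

```cpp
#include <mutex>

// Hedged sketch: because the lock is taken in one call and released later in
// another, handing the caller a movable std::unique_lock keeps the lock/unlock
// pair balanced even on early returns or exceptions.
std::unique_lock<std::mutex> set_index_usage_locked(std::mutex& current_index_mutex,
                                                    bool& current_global_backup,
                                                    bool use_global_backup) {
    std::unique_lock<std::mutex> lck(current_index_mutex);
    current_global_backup = use_global_backup;  // written only while the lock is held
    return lck;  // the caller holds the lock until this object is destroyed
}
```

The caller would keep the returned lock alive for the duration of the scan and let its destructor release the mutex instead of calling a separate unlock method.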

bool current_use_global_backup() const {