diff --git a/cpp/include/cuvs/neighbors/ivf_pq.h b/cpp/include/cuvs/neighbors/ivf_pq.h
index 5a975a72c..d588fec41 100644
--- a/cpp/include/cuvs/neighbors/ivf_pq.h
+++ b/cpp/include/cuvs/neighbors/ivf_pq.h
@@ -116,6 +116,15 @@ struct cuvsIvfPqIndexParams {
    * flag to `true` if you prefer to use as little GPU memory for the database as possible.
    */
   bool conservative_memory_allocation;
+
+  /**
+   * The max number of data points to use per PQ code during PQ codebook training. Using more data
+   * points per PQ code may increase the quality of the PQ codebook but may also increase the build
+   * time. The parameter is applied to both PQ codebook generation methods, i.e., PER_SUBSPACE and
+   * PER_CLUSTER. In both cases, we will use `pq_book_size * max_train_points_per_pq_code` training
+   * points to train each codebook.
+   */
+  uint32_t max_train_points_per_pq_code;
 };
 
 typedef struct cuvsIvfPqIndexParams* cuvsIvfPqIndexParams_t;
diff --git a/cpp/include/cuvs/neighbors/ivf_pq.hpp b/cpp/include/cuvs/neighbors/ivf_pq.hpp
index 70f56bf58..b2db96686 100644
--- a/cpp/include/cuvs/neighbors/ivf_pq.hpp
+++ b/cpp/include/cuvs/neighbors/ivf_pq.hpp
@@ -107,6 +107,15 @@ struct index_params : cuvs::neighbors::index_params {
    */
   bool add_data_on_build = true;
 
+  /**
+   * The max number of data points to use per PQ code during PQ codebook training. Using more data
+   * points per PQ code may increase the quality of the PQ codebook but may also increase the build
+   * time. The parameter is applied to both PQ codebook generation methods, i.e., PER_SUBSPACE and
+   * PER_CLUSTER. In both cases, we will use `pq_book_size * max_train_points_per_pq_code` training
+   * points to train each codebook.
+   */
+  uint32_t max_train_points_per_pq_code = 256;
+
   /**
    * Creates index_params based on shape of the input dataset.
    * Usage example:
diff --git a/cpp/src/neighbors/ivf_pq/ivf_pq_build.cuh b/cpp/src/neighbors/ivf_pq/ivf_pq_build.cuh
index d35394c99..c65ea8108 100644
--- a/cpp/src/neighbors/ivf_pq/ivf_pq_build.cuh
+++ b/cpp/src/neighbors/ivf_pq/ivf_pq_build.cuh
@@ -44,6 +44,7 @@
 #include
 #include
 #include
+#include <raft/matrix/sample_rows.cuh>
 #include
 #include
 #include
@@ -69,51 +70,6 @@ using namespace cuvs::spatial::knn::detail;  // NOLINT
 
 using internal_extents_t = int64_t;  // The default mdspan extent type used internally.
 
-template <uint32_t BlockDim, typename T, typename S>
-__launch_bounds__(BlockDim) static __global__ void copy_warped_kernel(
-  T* out, uint32_t ld_out, const S* in, uint32_t ld_in, uint32_t n_cols, size_t n_rows)
-{
-  using warp    = raft::Pow2<raft::WarpSize>;
-  size_t row_ix = warp::div(size_t(threadIdx.x) + size_t(BlockDim) * size_t(blockIdx.x));
-  uint32_t i    = warp::mod(threadIdx.x);
-  if (row_ix >= n_rows) return;
-  out += row_ix * ld_out;
-  in += row_ix * ld_in;
-  auto f = utils::mapping<T>{};
-  for (uint32_t col_ix = i; col_ix < n_cols; col_ix += warp::Value) {
-    auto x = f(in[col_ix]);
-    __syncwarp();
-    out[col_ix] = x;
-  }
-}
-
-/**
- * raft::copy the data one warp-per-row:
- *
- * 1. load the data per-warp
- * 2. apply the `utils::mapping{}`
- * 3. sync within warp
- * 4. store the data.
- *
- * Assuming sizeof(T) >= sizeof(S) and the data is properly aligned (see the usage in `build`), this
- * allows to re-structure the data within rows in-place.
- */
-template <typename T, typename S>
-void copy_warped(T* out,
-                 uint32_t ld_out,
-                 const S* in,
-                 uint32_t ld_in,
-                 uint32_t n_cols,
-                 size_t n_rows,
-                 rmm::cuda_stream_view stream)
-{
-  constexpr uint32_t kBlockDim = 128;
-  dim3 threads(kBlockDim, 1, 1);
-  dim3 blocks(raft::div_rounding_up_safe(n_rows, kBlockDim / raft::WarpSize), 1, 1);
-  copy_warped_kernel<kBlockDim, T, S>
-    <<<blocks, threads, 0, stream>>>(out, ld_out, in, ld_in, n_cols, n_rows);
-}
-
 /**
  * @brief Compute residual vectors from the source dataset given by selected indices.
  *
@@ -358,14 +314,19 @@ void train_per_subset(raft::resources const& handle,
                       size_t n_rows,
                       const float* trainset,   // [n_rows, dim]
                       const uint32_t* labels,  // [n_rows]
-                      uint32_t kmeans_n_iters)
+                      uint32_t kmeans_n_iters,
+                      uint32_t max_train_points_per_pq_code)
 {
   auto stream        = raft::resource::get_cuda_stream(handle);
   auto device_memory = raft::resource::get_workspace_resource(handle);
 
   rmm::device_uvector<float> pq_centers_tmp(index.pq_centers().size(), stream, device_memory);
-  rmm::device_uvector<float> sub_trainset(n_rows * size_t(index.pq_len()), stream, device_memory);
-  rmm::device_uvector<uint32_t> sub_labels(n_rows, stream, device_memory);
+  // Subsampling the train set for codebook generation based on max_train_points_per_pq_code.
+  size_t big_enough = max_train_points_per_pq_code * size_t(index.pq_book_size());
+  auto pq_n_rows    = uint32_t(std::min(big_enough, n_rows));
+  rmm::device_uvector<float> sub_trainset(
+    pq_n_rows * size_t(index.pq_len()), stream, device_memory);
+  rmm::device_uvector<uint32_t> sub_labels(pq_n_rows, stream, device_memory);
 
   rmm::device_uvector<uint32_t> pq_cluster_sizes(index.pq_book_size(), stream, device_memory);
 
@@ -376,7 +337,7 @@ void train_per_subset(raft::resources const& handle,
     // Get the rotated cluster centers for each training vector.
     // This will be subtracted from the input vectors afterwards.
     utils::copy_selected(
-      n_rows,
+      pq_n_rows,
      index.pq_len(),
      index.centers_rot().data_handle() + index.pq_len() * j,
      labels,
@@ -392,7 +353,7 @@ void train_per_subset(raft::resources const& handle,
                          true,
                          false,
                          index.pq_len(),
-                         n_rows,
+                         pq_n_rows,
                          index.dim(),
                          &alpha,
                          index.rotation_matrix().data_handle() + index.dim() * index.pq_len() * j,
@@ -406,13 +367,13 @@ void train_per_subset(raft::resources const& handle,
     // train PQ codebook for this subspace
     auto sub_trainset_view = raft::make_device_matrix_view<const float, internal_extents_t>(
-      sub_trainset.data(), n_rows, index.pq_len());
+      sub_trainset.data(), pq_n_rows, index.pq_len());
     auto centers_tmp_view = raft::make_device_matrix_view<float, internal_extents_t>(
       pq_centers_tmp.data() + index.pq_book_size() * index.pq_len() * j,
       index.pq_book_size(),
       index.pq_len());
     auto sub_labels_view =
-      raft::make_device_vector_view<const uint32_t, internal_extents_t>(sub_labels.data(), n_rows);
+      raft::make_device_vector_view<const uint32_t, internal_extents_t>(sub_labels.data(), pq_n_rows);
     auto cluster_sizes_view = raft::make_device_vector_view<uint32_t, internal_extents_t>(
       pq_cluster_sizes.data(), index.pq_book_size());
     cuvs::cluster::kmeans::balanced_params kmeans_params;
@@ -435,7 +396,8 @@ void train_per_cluster(raft::resources const& handle,
                        size_t n_rows,
                        const float* trainset,   // [n_rows, dim]
                        const uint32_t* labels,  // [n_rows]
-                       uint32_t kmeans_n_iters)
+                       uint32_t kmeans_n_iters,
+                       uint32_t max_train_points_per_pq_code)
 {
   auto stream        = raft::resource::get_cuda_stream(handle);
   auto device_memory = raft::resource::get_workspace_resource(handle);
@@ -485,9 +447,11 @@ void train_per_cluster(raft::resources const& handle,
                          indices + cluster_offsets[l],
                          device_memory);
 
-    // limit the cluster size to bound the training time.
+    // limit the cluster size to bound the training time based on max_train_points_per_pq_code.
+    // If pq_book_size is less than pq_dim, use max_train_points_per_pq_code per pq_dim instead.
     // [sic] we interpret the data as pq_len-dimensional
-    size_t big_enough = 256ul * std::max(index.pq_book_size(), index.pq_dim());
+    size_t big_enough =
+      max_train_points_per_pq_code * std::max(index.pq_book_size(), index.pq_dim());
     size_t available_rows = size_t(cluster_size) * size_t(index.pq_dim());
     auto pq_n_rows        = uint32_t(std::min(big_enough, available_rows));
     // train PQ codebook for this cluster
@@ -1730,6 +1694,7 @@ auto build(raft::resources const& handle,
   utils::memzero(index.inds_ptrs().data_handle(), index.inds_ptrs().size(), stream);
 
   {
+    raft::random::RngState random_state{137};
     auto trainset_ratio = std::max<size_t>(
       1,
       size_t(n_rows) / std::max<size_t>(params.kmeans_trainset_fraction * n_rows, index.n_lists()));
@@ -1749,9 +1714,11 @@ auto build(raft::resources const& handle,
 
     // Besides just sampling, we transform the input dataset into floats to make it easier
     // to use gemm operations from cublas.
-    rmm::device_uvector<float> trainset(0, stream, big_memory_resource);
+    auto trainset = raft::make_device_mdarray<float>(
+      handle, big_memory_resource, raft::make_extents<internal_extents_t>(0, 0));
     try {
-      trainset.resize(n_rows_train * index.dim(), stream);
+      trainset = raft::make_device_mdarray<float>(
+        handle, big_memory_resource, raft::make_extents<internal_extents_t>(n_rows_train, dim));
     } catch (raft::logic_error& e) {
       RAFT_LOG_ERROR(
         "Insufficient memory for kmeans training set allocation. Please decrease "
@@ -1760,54 +1727,19 @@ auto build(raft::resources const& handle,
     }
     // TODO: a proper sampling
     if constexpr (std::is_same_v<T, float>) {
-      RAFT_CUDA_TRY(cudaMemcpy2DAsync(trainset.data(),
-                                      sizeof(T) * index.dim(),
-                                      dataset.data_handle(),
-                                      sizeof(T) * index.dim() * trainset_ratio,
-                                      sizeof(T) * index.dim(),
-                                      n_rows_train,
-                                      cudaMemcpyDefault,
-                                      stream));
+      raft::matrix::sample_rows(handle, random_state, dataset, trainset.view());
     } else {
-      size_t dim = index.dim();
-      cudaPointerAttributes dataset_attr;
-      RAFT_CUDA_TRY(cudaPointerGetAttributes(&dataset_attr, dataset.data_handle()));
-      if (dataset_attr.devicePointer != nullptr) {
-        // data is available on device: just run the kernel to raft::copy and map the data
-        auto p = reinterpret_cast<const T*>(dataset_attr.devicePointer);
-        auto trainset_view =
-          raft::make_device_vector_view<float, size_t>(trainset.data(), dim * n_rows_train);
-        raft::linalg::map_offset(
-          handle, trainset_view, [p, trainset_ratio, dim] __device__(size_t i) {
-            auto col = i % dim;
-            return utils::mapping<float>{}(p[(i - col) * size_t(trainset_ratio) + col]);
-          });
-      } else {
-        // data is not available: first raft::copy, then map inplace
-        auto trainset_tmp = reinterpret_cast<T*>(reinterpret_cast<uint8_t*>(trainset.data()) +
-                                                 (sizeof(float) - sizeof(T)) * index.dim());
-        // We raft::copy the data in strides, one row at a time, and place the smaller rows of type
-        // T at the end of float rows.
-        RAFT_CUDA_TRY(cudaMemcpy2DAsync(trainset_tmp,
-                                        sizeof(float) * index.dim(),
-                                        dataset.data_handle(),
-                                        sizeof(T) * index.dim() * trainset_ratio,
-                                        sizeof(T) * index.dim(),
-                                        n_rows_train,
-                                        cudaMemcpyDefault,
-                                        stream));
-        // Transform the input `{T -> float}`, one row per warp.
-        // The threads in each warp raft::copy the data synchronously; this and the layout of the
-        // data (content is aligned to the end of the rows) together allow doing the transform
-        // in-place.
-        copy_warped(trainset.data(),
-                    index.dim(),
-                    trainset_tmp,
-                    index.dim() * sizeof(float) / sizeof(T),
-                    index.dim(),
-                    n_rows_train,
-                    stream);
-      }
+      // TODO(tfeher): Enable codebook generation with any type T, and then remove trainset tmp.
+      auto trainset_tmp = raft::make_device_mdarray<T>(
+        handle, big_memory_resource, raft::make_extents<internal_extents_t>(n_rows_train, dim));
+
+      raft::matrix::sample_rows(handle, random_state, dataset, trainset_tmp.view());
+
+      raft::linalg::unaryOp(trainset.data_handle(),
+                            trainset_tmp.data_handle(),
+                            trainset.size(),
+                            utils::mapping<float>{},
+                            raft::resource::get_cuda_stream(handle));
     }
 
     // NB: here cluster_centers is used as if it is [n_clusters, data_dim] not [n_clusters,
@@ -1817,9 +1749,8 @@ auto build(raft::resources const& handle,
     auto cluster_centers = cluster_centers_buf.data();
 
     // Train balanced hierarchical kmeans clustering
-    auto trainset_const_view = raft::make_device_matrix_view<const float, internal_extents_t>(
-      trainset.data(), n_rows_train, index.dim());
-    auto centers_view = raft::make_device_matrix_view<float, internal_extents_t>(
+    auto trainset_const_view = raft::make_const_mdspan(trainset.view());
+    auto centers_view        = raft::make_device_matrix_view<float, internal_extents_t>(
       cluster_centers, index.n_lists(), index.dim());
     cuvs::cluster::kmeans::balanced_params kmeans_params;
     kmeans_params.n_iters = params.kmeans_n_iters;
@@ -1848,12 +1779,22 @@ auto build(raft::resources const& handle,
     // Train PQ codebooks
     switch (index.codebook_kind()) {
       case codebook_gen::PER_SUBSPACE:
-        train_per_subset(
-          handle, index, n_rows_train, trainset.data(), labels.data(), params.kmeans_n_iters);
+        train_per_subset(handle,
+                         index,
+                         n_rows_train,
+                         trainset.data_handle(),
+                         labels.data(),
+                         params.kmeans_n_iters,
+                         params.max_train_points_per_pq_code);
         break;
       case codebook_gen::PER_CLUSTER:
-        train_per_cluster(
-          handle, index, n_rows_train, trainset.data(), labels.data(), params.kmeans_n_iters);
+        train_per_cluster(handle,
+                          index,
+                          n_rows_train,
+                          trainset.data_handle(),
+                          labels.data(),
+                          params.kmeans_n_iters,
+                          params.max_train_points_per_pq_code);
         break;
       default: RAFT_FAIL("Unreachable code");
     }
diff --git a/cpp/src/neighbors/ivf_pq_c.cpp b/cpp/src/neighbors/ivf_pq_c.cpp
index e2578345b..256d760e6 100644
--- a/cpp/src/neighbors/ivf_pq_c.cpp
+++ b/cpp/src/neighbors/ivf_pq_c.cpp
@@ -48,6 +48,7 @@ void* _build(cuvsResources_t res, cuvsIvfPqIndexParams params, DLManagedTensor*
     static_cast<cuvs::neighbors::ivf_pq::codebook_gen>((int)params.codebook_kind);
   build_params.force_random_rotation = params.force_random_rotation;
   build_params.conservative_memory_allocation = params.conservative_memory_allocation;
+  build_params.max_train_points_per_pq_code = params.max_train_points_per_pq_code;
 
   auto dataset = dataset_tensor->dl_tensor;
   auto dim = dataset.shape[0];
@@ -214,7 +215,8 @@ extern "C" cuvsError_t cuvsIvfPqIndexParamsCreate(cuvsIvfPqIndexParams_t* params
                                    .pq_dim = 0,
                                    .codebook_kind = codebook_gen::PER_SUBSPACE,
                                    .force_random_rotation = false,
-                                   .conservative_memory_allocation = false};
+                                   .conservative_memory_allocation = false,
+                                   .max_train_points_per_pq_code = 256};
   });
 }
diff --git a/python/cuvs/cuvs/neighbors/ivf_pq/ivf_pq.pxd b/python/cuvs/cuvs/neighbors/ivf_pq/ivf_pq.pxd
index c6eb85e35..e732cd7d2 100644
--- a/python/cuvs/cuvs/neighbors/ivf_pq/ivf_pq.pxd
+++ b/python/cuvs/cuvs/neighbors/ivf_pq/ivf_pq.pxd
@@ -50,6 +50,7 @@ cdef extern from "cuvs/neighbors/ivf_pq.h" nogil:
         codebook_gen codebook_kind
         bool force_random_rotation
         bool conservative_memory_allocation
+        uint32_t max_train_points_per_pq_code
 
     ctypedef cuvsIvfPqIndexParams* cuvsIvfPqIndexParams_t
diff --git a/python/cuvs/cuvs/neighbors/ivf_pq/ivf_pq.pyx b/python/cuvs/cuvs/neighbors/ivf_pq/ivf_pq.pyx
index de52cb2da..3add1df75 100644
--- a/python/cuvs/cuvs/neighbors/ivf_pq/ivf_pq.pyx
+++ b/python/cuvs/cuvs/neighbors/ivf_pq/ivf_pq.pyx
@@ -97,11 +97,27 @@ cdef class IndexParams:
         default, if `dim == rot_dim`, the rotation transform is initialized
         with the identity matrix. When `force_random_rotation == True`, a
         random orthogonal transform
+        matrix is generated regardless of the values of `dim` and `pq_dim`.
     add_data_on_build : bool, default = True
         After training the coarse and fine quantizers, we will populate
         the index with the dataset if add_data_on_build == True, otherwise
         the index is left empty, and the extend method can be used
         to add new vectors to the index.
+    conservative_memory_allocation : bool, default = False
+        By default, the algorithm allocates more space than necessary for
+        individual clusters (`list_data`). This amortizes the cost of
+        memory allocation and reduces the number of data copies during
+        repeated calls to `extend` (extending the database).
+        To disable this behavior and use as little GPU memory for the
+        database as possible, set this flag to `True`.
+    max_train_points_per_pq_code : int, default = 256
+        The max number of data points to use per PQ code during PQ codebook
+        training. Using more data points per PQ code may increase the
+        quality of the PQ codebook but may also increase the build time.
+        The parameter is applied to both PQ codebook generation methods,
+        i.e., PER_SUBSPACE and PER_CLUSTER. In both cases, we will use
+        pq_book_size * max_train_points_per_pq_code training points to
+        train each codebook.
     """
 
     cdef cuvsIvfPqIndexParams* params
@@ -124,7 +140,8 @@ cdef class IndexParams:
                  codebook_kind="subspace",
                  force_random_rotation=False,
                  add_data_on_build=True,
-                 conservative_memory_allocation=False):
+                 conservative_memory_allocation=False,
+                 max_train_points_per_pq_code=256):
         self.params.n_lists = n_lists
         self._metric = metric
         self.params.metric = DISTANCE_TYPES[metric]
@@ -143,6 +160,8 @@ cdef class IndexParams:
         self.params.add_data_on_build = add_data_on_build
         self.params.conservative_memory_allocation = \
             conservative_memory_allocation
+        self.params.max_train_points_per_pq_code = \
+            max_train_points_per_pq_code
 
     @property
     def metric(self):
@@ -192,6 +211,9 @@ cdef class IndexParams:
     def conservative_memory_allocation(self):
         return self.params.conservative_memory_allocation
 
+    @property
+    def max_train_points_per_pq_code(self):
+        return self.params.max_train_points_per_pq_code
 
 cdef class Index:
     """
diff --git a/rust/cuvs/src/ivf_pq/index_params.rs b/rust/cuvs/src/ivf_pq/index_params.rs
index e38c0945f..321821bc3 100644
--- a/rust/cuvs/src/ivf_pq/index_params.rs
+++ b/rust/cuvs/src/ivf_pq/index_params.rs
@@ -124,6 +124,18 @@ impl IndexParams {
         self
     }
 
+    /// The max number of data points to use per PQ code during PQ codebook training. Using more
+    /// data points per PQ code may increase the quality of the PQ codebook but may also increase
+    /// the build time. The parameter is applied to both PQ codebook generation methods, i.e.,
+    /// PER_SUBSPACE and PER_CLUSTER. In both cases, we will use
+    /// `pq_book_size * max_train_points_per_pq_code` training points to train each codebook.
+    pub fn set_max_train_points_per_pq_code(self, max_pq_points: u32) -> IndexParams {
+        unsafe {
+            (*self.0).max_train_points_per_pq_code = max_pq_points;
+        }
+        self
+    }
+
     /// After training the coarse and fine quantizers, we will populate
     /// the index with the dataset if add_data_on_build == true, otherwise
     /// the index is left empty, and the extend method can be used
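
For reference, a minimal usage sketch of the new knob through the Python bindings added above. This is not part of the patch: the dataset shape, `n_lists=100`, and the value 512 are illustrative, and it assumes the `cuvs` and `cupy` packages with a CUDA-capable GPU.

    import cupy as cp
    from cuvs.neighbors import ivf_pq

    # Synthetic data; shapes are illustrative only.
    dataset = cp.random.random_sample((10000, 64), dtype=cp.float32)

    # Assuming the default pq_bits = 8, the codebook has 2**8 = 256 entries,
    # so each codebook is trained on at most
    # pq_book_size * max_train_points_per_pq_code = 256 * 512 sampled points
    # (the default is 256 points per code). Raising the value trades longer
    # codebook training for potentially better PQ codebook quality.
    params = ivf_pq.IndexParams(n_lists=100, max_train_points_per_pq_code=512)
    index = ivf_pq.build(params, dataset)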