diff --git a/faiss/IndexFastScan.cpp b/faiss/IndexFastScan.cpp index 8d10ecc9d9..b18d15bc17 100644 --- a/faiss/IndexFastScan.cpp +++ b/faiss/IndexFastScan.cpp @@ -33,22 +33,22 @@ inline size_t roundup(size_t a, size_t b) { void IndexFastScan::init_fastscan( int d, - size_t M_2, - size_t nbits_2, + size_t M_init, + size_t nbits_init, MetricType metric, int bbs) { - FAISS_THROW_IF_NOT(nbits_2 == 4); + FAISS_THROW_IF_NOT(nbits_init == 4); FAISS_THROW_IF_NOT(bbs % 32 == 0); this->d = d; - this->M = M_2; - this->nbits = nbits_2; + this->M = M_init; + this->nbits = nbits_init; this->metric_type = metric; this->bbs = bbs; - ksub = (1 << nbits_2); + ksub = (1 << nbits_init); - code_size = (M_2 * nbits_2 + 7) / 8; + code_size = (M_init * nbits_init + 7) / 8; ntotal = ntotal2 = 0; - M2 = roundup(M_2, 2); + M2 = roundup(M_init, 2); is_trained = false; } diff --git a/faiss/impl/HNSW.cpp b/faiss/impl/HNSW.cpp index 4f1ac7c3ba..642bf7c532 100644 --- a/faiss/impl/HNSW.cpp +++ b/faiss/impl/HNSW.cpp @@ -493,17 +493,17 @@ void HNSW::add_links_starting_from( ::faiss::shrink_neighbor_list(ptdis, link_targets, M, keep_max_size_level0); - std::vector<storage_idx_t> neighbors_2; - neighbors_2.reserve(link_targets.size()); + std::vector<storage_idx_t> neighbors_to_add; + neighbors_to_add.reserve(link_targets.size()); while (!link_targets.empty()) { storage_idx_t other_id = link_targets.top().id; add_link(*this, ptdis, pt_id, other_id, level, keep_max_size_level0); - neighbors_2.push_back(other_id); + neighbors_to_add.push_back(other_id); link_targets.pop(); } omp_unset_lock(&locks[pt_id]); - for (storage_idx_t other_id : neighbors_2) { + for (storage_idx_t other_id : neighbors_to_add) { omp_set_lock(&locks[other_id]); add_link(*this, ptdis, other_id, pt_id, level, keep_max_size_level0); omp_unset_lock(&locks[other_id]); diff --git a/faiss/utils/NeuralNet.cpp b/faiss/utils/NeuralNet.cpp index e26779f682..90d06c150e 100644 --- a/faiss/utils/NeuralNet.cpp +++ b/faiss/utils/NeuralNet.cpp @@ -212,19 +212,23 @@ 
nn::Int32Tensor2D QINCoStep::encode( // repeated codebook Tensor2D zqs_r(n * K, d); // size n, K, d Tensor2D cc(n * K, d * 2); // size n, K, d * 2 - size_t d_2 = this->d; - auto copy_row = [d_2](Tensor2D& t, size_t i, size_t j, const float* data) { - assert(i <= t.shape[0] && j <= t.shape[1]); - memcpy(t.data() + i * t.shape[1] + j, data, sizeof(float) * d_2); - }; + size_t local_d = this->d; + + auto copy_row = + [local_d](Tensor2D& t, size_t i, size_t j, const float* data) { + assert(i <= t.shape[0] && j <= t.shape[1]); + memcpy(t.data() + i * t.shape[1] + j, + data, + sizeof(float) * local_d); + }; // manual broadcasting for (size_t i = 0; i < n; i++) { for (size_t j = 0; j < K; j++) { - copy_row(zqs_r, i * K + j, 0, codebook.data() + j * d_2); - copy_row(cc, i * K + j, 0, codebook.data() + j * d_2); - copy_row(cc, i * K + j, d_2, xhat.data() + i * d_2); + copy_row(zqs_r, i * K + j, 0, codebook.data() + j * d); + copy_row(cc, i * K + j, 0, codebook.data() + j * d); + copy_row(cc, i * K + j, d, xhat.data() + i * d); } } @@ -237,13 +241,13 @@ nn::Int32Tensor2D QINCoStep::encode( // add the xhat for (size_t i = 0; i < n; i++) { - float* zqs_r_row = zqs_r.data() + i * K * d_2; - const float* xhat_row = xhat.data() + i * d_2; + float* zqs_r_row = zqs_r.data() + i * K * d; + const float* xhat_row = xhat.data() + i * d; for (size_t l = 0; l < K; l++) { - for (size_t j = 0; j < d_2; j++) { + for (size_t j = 0; j < d; j++) { zqs_r_row[j] += xhat_row[j]; } - zqs_r_row += d_2; + zqs_r_row += d; } } @@ -252,31 +256,31 @@ nn::Int32Tensor2D QINCoStep::encode( float* res = nullptr; if (residuals) { FAISS_THROW_IF_NOT( - residuals->shape[0] == n && residuals->shape[1] == d_2); + residuals->shape[0] == n && residuals->shape[1] == d); res = residuals->data(); } for (size_t i = 0; i < n; i++) { - const float* q = x.data() + i * d_2; - const float* db = zqs_r.data() + i * K * d_2; + const float* q = x.data() + i * d; + const float* db = zqs_r.data() + i * K * d; float dis_min = 
HUGE_VALF; int64_t idx = -1; for (size_t j = 0; j < K; j++) { - float dis = fvec_L2sqr(q, db, d_2); + float dis = fvec_L2sqr(q, db, d); if (dis < dis_min) { dis_min = dis; idx = j; } - db += d_2; + db += d; } codes.v[i] = idx; if (res) { - const float* xhat_row = xhat.data() + i * d_2; - const float* xhat_next_row = zqs_r.data() + (i * K + idx) * d_2; - for (size_t j = 0; j < d_2; j++) { + const float* xhat_row = xhat.data() + i * d; + const float* xhat_next_row = zqs_r.data() + (i * K + idx) * d; + for (size_t j = 0; j < d; j++) { res[j] = xhat_next_row[j] - xhat_row[j]; } - res += d_2; + res += d; } } return codes; diff --git a/faiss/utils/utils.cpp b/faiss/utils/utils.cpp index 99ece90cd6..85bf1348f2 100644 --- a/faiss/utils/utils.cpp +++ b/faiss/utils/utils.cpp @@ -589,9 +589,9 @@ int64_t count_gt(int64_t n, const T* row, T threshold) { } // namespace template <typename T> -void CombinerRangeKNN<T>::compute_sizes(int64_t* L_res_2) { - this->L_res = L_res_2; - L_res_2[0] = 0; +void CombinerRangeKNN<T>::compute_sizes(int64_t* L_res_init) { + this->L_res = L_res_init; + L_res_init[0] = 0; int64_t j = 0; for (int64_t i = 0; i < nq; i++) { int64_t n_in; @@ -602,11 +602,11 @@ void CombinerRangeKNN<T>::compute_sizes(int64_t* L_res_2) { n_in = lim_remain[j + 1] - lim_remain[j]; j++; } - L_res_2[i + 1] = n_in; // L_res_2[i] + n_in; + L_res_init[i + 1] = n_in; // L_res_init[i] + n_in; } // cumsum for (int64_t i = 0; i < nq; i++) { - L_res_2[i + 1] += L_res_2[i]; + L_res_init[i + 1] += L_res_init[i]; } }