fix: fixes v2
dranikpg committed Oct 29, 2023
1 parent 7755b92 commit 8fe00de
Showing 3 changed files with 8 additions and 11 deletions.
2 changes: 2 additions & 0 deletions src/core/search/search.cc
@@ -415,6 +415,7 @@ struct BasicSearch {
profile_builder_ ? make_optional(profile_builder_->Take()) : nullopt;

size_t total = result.Size();

return SearchResult{total,
max(total, preagg_total_),
result.Take(limit_),
@@ -423,6 +424,7 @@ struct BasicSearch {
std::move(error_)};
}

private:
const FieldIndices* indices_;
size_t limit_;

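The first hunk shows how BasicSearch assembles its result: the id list handed back is trimmed to limit_, while the reported total is clamped from below by preagg_total_ (by its name, the match count seen before aggregation), so a cut-off search still advertises how many documents matched. A minimal self-contained sketch of that invariant (SketchResult and its fields are illustrative stand-ins, not the real search::SearchResult):

#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

struct SketchResult {
  size_t total;          // survives the cutoff: never shrinks because ids were trimmed
  std::vector<int> ids;  // at most `limit` entries are actually returned
};

SketchResult Finish(std::vector<int> matched, size_t limit, size_t preagg_total) {
  size_t total = matched.size();
  if (matched.size() > limit)
    matched.resize(limit);                                     // result.Take(limit_)
  return {std::max(total, preagg_total), std::move(matched)};  // max(total, preagg_total_)
}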
7 changes: 4 additions & 3 deletions src/server/search/doc_index.cc
@@ -224,11 +224,12 @@ bool ShardDocIndex::Matches(string_view key, unsigned obj_code) const {

io::Result<SearchResult, facade::ErrorReply> ShardDocIndex::Search(
const OpArgs& op_args, const SearchParams& params, search::SearchAlgorithm* search_algo) const {
auto search_results = search_algo->Search(&indices_);
size_t requested_count = params.limit_offset + params.limit_total;

auto search_results = search_algo->Search(&indices_, requested_count);
if (!search_results.error.empty())
return nonstd::make_unexpected(facade::ErrorReply(std::move(search_results.error)));

size_t requested_count = params.limit_offset + params.limit_total;
size_t return_count = min(requested_count, search_results.ids.size());

// Probabilistic optimization: If we are about 99% sure that all shards in total fetch more
@@ -252,7 +253,7 @@ io::Result<SearchResult, facade::ErrorReply> ShardDocIndex::Search(

Serialize(op_args, params, absl::MakeSpan(out));

return SearchResult{write_epoch_, search_results.ids.size(), std::move(out),
return SearchResult{write_epoch_, search_results.total, std::move(out),
std::move(search_results.profile)};
}

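The reshuffle in ShardDocIndex::Search pairs with the search.cc change above: requested_count (offset + limit) is computed before the search and handed to search_algo->Search, so the algorithm knows how many hits this shard can possibly need, and the reply now carries search_results.total rather than the size of the possibly cut-off id list. A compressed stand-alone illustration of that contract (ShardHits and SearchWithCutoff are invented names for the example, not Dragonfly's API):

#include <algorithm>
#include <cstddef>
#include <vector>

struct ShardHits {
  size_t total;          // how many documents matched on this shard
  std::vector<int> ids;  // may be cut off at the requested count
};

// Stand-in for search_algo->Search(&indices_, requested_count).
ShardHits SearchWithCutoff(const std::vector<int>& matches, size_t requested_count) {
  size_t n = std::min(requested_count, matches.size());
  return {matches.size(), std::vector<int>(matches.begin(), matches.begin() + n)};
}

// Mirrors the serialization step: clamp what is sent back to the coordinator,
// but keep reporting the full total.
size_t ReturnCount(const ShardHits& hits, size_t requested_count) {
  return std::min(requested_count, hits.ids.size());
}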
10 changes: 2 additions & 8 deletions src/server/search/search_family.cc
@@ -246,7 +246,7 @@ struct MultishardSearch {
return Reply();
}

VLOG(0) << "Failed completness check, refilling";
VLOG(1) << "Failed completness check, refilling";

// Otherwise, some results made it into the result set but were not serialized.
// Try refilling the requested values. If no reordering occured, reply immediately, otherwise
@@ -265,7 +265,7 @@ struct MultishardSearch {
return Reply();
}

VLOG(0) << "Failed refill and rebuild, re-searching";
VLOG(1) << "Failed refill and rebuild, re-searching";

// At this step all optimizations failed. Run search without any cutoffs.
{
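The only functional change in the two hunks above is demoting these progress messages from VLOG(0) to VLOG(1). Assuming the usual glog VLOG semantics (the logging header itself is not part of this diff), level 0 is emitted by default while level 1 stays silent unless verbosity is raised, e.g. with --v=1:

#include <glog/logging.h>

int main(int, char* argv[]) {
  google::InitGoogleLogging(argv[0]);
  FLAGS_logtostderr = true;  // print to stderr instead of log files
  // FLAGS_v defaults to 0, so:
  VLOG(0) << "emitted by default (what the old debug lines did)";
  VLOG(1) << "emitted only with --v=1 or higher (what the demoted lines do now)";
  return 0;
}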
@@ -413,10 +413,6 @@ struct MultishardSearch {
void BuildLinearOrder() {
size_t required = params_.limit_offset + params_.limit_total;

VLOG(0) << "Linear order";
for (auto& shard_result : sharded_results_)
VLOG(0) << "|->source " << shard_result.docs.size();

for (size_t idx = 0;; idx++) {
bool added = false;
for (auto& shard_result : sharded_results_) {
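Only the head of BuildLinearOrder is visible in the hunk above, but its skeleton (an unbounded idx loop, an added flag, and required = offset + limit) suggests a round-robin merge: take the idx-th document from every shard in turn until enough documents are collected or every shard is exhausted. A hedged reconstruction of that idea with simplified types (the real method works on DocResult entries, not ints):

#include <cstddef>
#include <vector>

std::vector<int> LinearOrder(const std::vector<std::vector<int>>& shard_docs, size_t required) {
  std::vector<int> ordered;
  for (size_t idx = 0;; idx++) {
    bool added = false;
    for (const auto& docs : shard_docs) {
      if (idx < docs.size() && ordered.size() < required) {
        ordered.push_back(docs[idx]);
        added = true;
      }
    }
    if (!added)  // either every shard ran out of documents or `required` was reached
      break;
  }
  return ordered;
}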
@@ -449,15 +445,13 @@ struct MultishardSearch {
}

absl::flat_hash_set<ShardId> VerifyOrderCompletness() {
VLOG(0) << "Verifying order completness of " << ordered_docs_.size();
absl::flat_hash_set<ShardId> incomplete_shards;
for (auto* doc : ordered_docs_) {
if (auto* ref = get_if<DocResult::DocReference>(&doc->value); ref) {
incomplete_shards.insert(ref->shard_id);
ref->requested = true;
}
}
VLOG(0) << "Num incomplete shards " << incomplete_shards.size();
return incomplete_shards;
}

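Taken together, the search_family.cc changes leave the multishard flow itself untouched and only quiet its logging: serialize what each shard returned, check whether every ordered document was actually serialized, refill just the incomplete shards if not, and fall back to a full re-search without cutoffs only when refilling reordered the results. A schematic rendering of that ladder with placeholder steps (none of these helper names exist in MultishardSearch; they only mirror the comments in the diff):

#include <set>

using ShardId = unsigned;  // stand-in for Dragonfly's ShardId

// Placeholder steps, defined trivially so the sketch is self-contained.
std::set<ShardId> IncompleteShards() { return {}; }  // shards whose docs are still bare references
bool RefillKeptOrder(const std::set<ShardId>&) { return true; }
void FullSearchWithoutCutoffs() {}
void Reply() {}

void RunWithCutoffs() {
  auto incomplete = IncompleteShards();
  if (incomplete.empty())            // completeness check passed
    return Reply();

  // Some results made it into the set but were not serialized: refill them.
  if (RefillKeptOrder(incomplete))   // no reordering occurred, reply immediately
    return Reply();

  // All optimizations failed: run the search again without any cutoffs.
  FullSearchWithoutCutoffs();
  Reply();
}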
