<use_staged_search>true</use_staged_search>
<!--
+ For staged search, we estimate hits based on inclusion or exclusion.
+
+ Valid settings:
+ inclusion - visible ratio on superpage
+ exclusion - excluded ratio on superpage
+ delete_adjusted_inclusion - included ratio on superpage, ratio adjusted by deleted count
+ delete_adjusted_exclusion - excluded ratio on superpage, ratio adjusted by deleted count
+
+ Under normal circumstances, inclusion is the best strategy, and both delete_adjusted variants
+ will return the same value +/- 1. The exclusion strategy is the original, and works well
+ when there are few deleted or excluded records, in other words, when the superpage is not
+ sparsely populated with visible records.
+ -->
+ <estimation_strategy>inclusion</estimation_strategy>
+
+ <!--
Evergreen uses a cover density algorithm for calculating relative ranking of matches. There
are several tuning parameters and options available. By default, no document length normalization
is applied. From the Postgres documentation on ts_rank_cd() (the function used by Evergreen):
-->
<default_preferred_language_weight>5</default_preferred_language_weight>
- <!-- How many search results to return. Defaults to superpage_size * max_superpages, if they are defined and it isn't. -->
- <max_search_results>10000</max_search_results>
+ <!-- Baseline number of records to check for hit estimation. -->
+ <superpage_size>1000</superpage_size>
+
+ <!-- How many superpages to consider for searching overall. -->
+ <max_superpages>10</max_superpages>
<!-- zip code database file -->
<!--<zips_file>LOCALSTATEDIR/data/zips.txt</zips_file>-->
my $cache;
my $cache_timeout;
-my $max_search_results;
+my $superpage_size;
+my $max_superpages;
sub initialize {
$cache = OpenSRF::Utils::Cache->new('global');
$cache_timeout = $sclient->config_value(
"apps", "open-ils.search", "app_settings", "cache_timeout" ) || 300;
- my $superpage_size = $sclient->config_value(
+ $superpage_size = $sclient->config_value(
"apps", "open-ils.search", "app_settings", "superpage_size" ) || 500;
- my $max_superpages = $sclient->config_value(
+ $max_superpages = $sclient->config_value(
"apps", "open-ils.search", "app_settings", "max_superpages" ) || 20;
- $max_search_results = $sclient->config_value(
- "apps", "open-ils.search", "app_settings", "max_search_results" ) || ($superpage_size * $max_superpages);
-
$logger->info("Search cache timeout is $cache_timeout, ".
- " max_search_results is $max_search_results");
+ " superpage_size is $superpage_size, max_superpages is $max_superpages");
}
$user_offset = ($user_offset >= 0) ? $user_offset : 0;
$user_limit = ($user_limit >= 0) ? $user_limit : 10;
- # restrict DB query to our max results
- $search_hash->{core_limit} = $max_search_results;
+
+ # we're grabbing results on a per-superpage basis, which means the
+ # limit and offset should coincide with superpage boundaries
+ $search_hash->{offset} = 0;
+ $search_hash->{limit} = $superpage_size;
+
+ # force a well-known check_limit
+ $search_hash->{check_limit} = $superpage_size;
+ # restrict total tested to superpage size * number of superpages
+ $search_hash->{core_limit} = $superpage_size * $max_superpages;
+
+ # Set the configured estimation strategy, defaults to 'inclusion'.
+ my $estimation_strategy = OpenSRF::Utils::SettingsClient
+ ->new
+ ->config_value(
+ apps => 'open-ils.search', app_settings => 'estimation_strategy'
+ ) || 'inclusion';
+ $search_hash->{estimation_strategy} = $estimation_strategy;
# pull any existing results from the cache
my $key = search_cache_key($method, $search_hash);
# keep retrieving results until we find enough to
# fulfill the user-specified limit and offset
my $all_results = [];
-
- my $results;
- my $summary;
+ my $page; # current superpage
+ my $est_hit_count = 0;
+ my $current_page_summary = {};
+ my $global_summary = {checked => 0, visible => 0, excluded => 0, deleted => 0, total => 0};
+ my $is_real_hit_count = 0;
my $new_ids = [];
- if($cache_data->{summary}) {
- # this window of results is already cached
- $logger->debug("staged search: found cached results");
- $summary = $cache_data->{summary};
- $results = $cache_data->{results};
+ for($page = 0; $page < $max_superpages; $page++) {
- } else {
- # retrieve the window of results from the database
- $logger->debug("staged search: fetching results from the database");
- my $start = time;
- $results = $U->storagereq($method, %$search_hash);
- $search_duration = time - $start;
- $summary = shift(@$results) if $results;
-
- unless($summary) {
- $logger->info("search timed out: duration=$search_duration: params=".
- OpenSRF::Utils::JSON->perl2JSON($search_hash));
- return {count => 0};
- }
+ my $data = $cache_data->{$page};
+ my $results;
+ my $summary;
- $logger->info("staged search: DB call took $search_duration seconds and returned ".scalar(@$results)." rows, including summary");
+ $logger->debug("staged search: analyzing superpage $page");
- my $hc = $summary->{visible};
- if($hc == 0) {
- $logger->info("search returned 0 results: duration=$search_duration: params=".
- OpenSRF::Utils::JSON->perl2JSON($search_hash));
- }
+ if($data) {
+ # this window of results is already cached
+ $logger->debug("staged search: found cached results");
+ $summary = $data->{summary};
+ $results = $data->{results};
- # Create backwards-compatible result structures
- if($IAmMetabib) {
- $results = [map {[$_->{id}, $_->{rel}, $_->{record}]} @$results];
} else {
- $results = [map {[$_->{id}]} @$results];
+ # retrieve the window of results from the database
+ $logger->debug("staged search: fetching results from the database");
+ $search_hash->{skip_check} = $page * $superpage_size;
+ my $start = time;
+ $results = $U->storagereq($method, %$search_hash);
+ $search_duration = time - $start;
+ $summary = shift(@$results) if $results;
+
+ unless($summary) {
+ $logger->info("search timed out: duration=$search_duration: params=".
+ OpenSRF::Utils::JSON->perl2JSON($search_hash));
+ return {count => 0};
+ }
+
+ $logger->info("staged search: DB call took $search_duration seconds and returned ".scalar(@$results)." rows, including summary");
+
+ my $hc = $summary->{estimated_hit_count} || $summary->{visible};
+ if($hc == 0) {
+ $logger->info("search returned 0 results: duration=$search_duration: params=".
+ OpenSRF::Utils::JSON->perl2JSON($search_hash));
+ }
+
+ # Create backwards-compatible result structures
+ if($IAmMetabib) {
+ $results = [map {[$_->{id}, $_->{rel}, $_->{record}]} @$results];
+ } else {
+ $results = [map {[$_->{id}]} @$results];
+ }
+
+ push @$new_ids, grep {defined($_)} map {$_->[0]} @$results;
+ $results = [grep {defined $_->[0]} @$results];
+ cache_staged_search_page($key, $page, $summary, $results) if $docache;
}
- push @$new_ids, grep {defined($_)} map {$_->[0]} @$results;
- $results = [grep {defined $_->[0]} @$results];
- cache_staged_search($key, $summary, $results) if $docache;
- }
+ tag_circulated_records($search_hash->{authtoken}, $results, $IAmMetabib)
+ if $search_hash->{tag_circulated_records} and $search_hash->{authtoken};
+
+ $current_page_summary = $summary;
+
+ # add the new set of results to the set under construction
+ push(@$all_results, @$results);
+
+ my $current_count = scalar(@$all_results);
- tag_circulated_records($search_hash->{authtoken}, $results, $IAmMetabib)
- if $search_hash->{tag_circulated_records} and $search_hash->{authtoken};
+ $est_hit_count = $summary->{estimated_hit_count} || $summary->{visible}
+ if $page == 0;
+
+ $logger->debug("staged search: located $current_count, with estimated hits=".
+ $summary->{estimated_hit_count}." : visible=".$summary->{visible}.", checked=".$summary->{checked});
+
+ if (defined($summary->{estimated_hit_count})) {
+ foreach (qw/ checked visible excluded deleted /) {
+ $global_summary->{$_} += $summary->{$_};
+ }
+ $global_summary->{total} = $summary->{total};
+ }
- # add the new set of results to the set under construction
- push(@$all_results, @$results);
+ # we've found all the possible hits
+ last if $current_count == $summary->{visible}
+ and not defined $summary->{estimated_hit_count};
- my $current_count = scalar(@$all_results);
+ # we've found enough results to satisfy the requested limit/offset
+ last if $current_count >= ($user_limit + $user_offset);
- $logger->debug("staged search: located $current_count, visible=".$summary->{visible});
+ # we've scanned all possible hits
+ if($summary->{checked} < $superpage_size) {
+ $est_hit_count = scalar(@$all_results);
+ # we have all possible results in hand, so we know the final hit count
+ $is_real_hit_count = 1;
+ last;
+ }
+ }
my @results = grep {defined $_} @$all_results[$user_offset..($user_offset + $user_limit - 1)];
+ # refine the estimate if we have more than one superpage
+ if ($page > 0 and not $is_real_hit_count) {
+ if ($global_summary->{checked} >= $global_summary->{total}) {
+ $est_hit_count = $global_summary->{visible};
+ } else {
+ my $updated_hit_count = $U->storagereq(
+ 'open-ils.storage.fts_paging_estimate',
+ $global_summary->{checked},
+ $global_summary->{visible},
+ $global_summary->{excluded},
+ $global_summary->{deleted},
+ $global_summary->{total}
+ );
+ $est_hit_count = $updated_hit_count->{$estimation_strategy};
+ }
+ }
+
$conn->respond_complete(
{
- count => $summary->{visible},
+ count => $est_hit_count,
core_limit => $search_hash->{core_limit},
+ superpage_size => $search_hash->{check_limit},
+ superpage_summary => $current_page_summary,
facet_key => $facet_key,
ids => \@results
}
$cache->put_cache($key, $data, $cache_timeout);
}
-sub cache_staged_search {
+sub cache_staged_search_page {
+# Cache a single superpage of staged-search results.  The cache entry for
+# $key is a hash keyed by superpage number, so each page stores its own
+# {summary, results} pair and later pages can be added without clobbering
+# earlier ones.  Args: cache key, superpage number, summary row, result rows.
# puts this set of results into the cache
- my($key, $summary, $results) = @_;
- my $data = {
+ my($key, $page, $summary, $results) = @_;
+# Fetch whatever pages are already cached under this key and merge this
+# page into the existing hash (empty hash when this is the first page).
+ my $data = $cache->get_cache($key);
+ $data ||= {};
+ $data->{$page} = {
summary => $summary,
results => $results
};
- $logger->info("staged search: cached with key=$key, visible=".$summary->{visible});
+# NOTE(review): $summary->{estimated_hit_count} may be undef (the caller
+# falls back to {visible} in that case), which triggers an
+# uninitialized-value warning when interpolated here -- confirm intended.
+ $logger->info("staged search: cached with key=$key, superpage=$page, estimated=".
+ $summary->{estimated_hit_count}.", visible=".$summary->{visible});
$cache->put_cache($key, $data, $cache_timeout);
}
);
}
-sub location_groups_callback {
- my ($invocant, $self, $struct, $filter, $params, $negate) = @_;
-
- return sprintf(' %slocations(%s)',
- $negate ? '-' : '',
- join(
- ',',
- map {
- $_->location
- } @{
- OpenILS::Utils::CStoreEditor
- ->new
- ->search_asset_copy_location_group_map({ lgroup => $params })
- }
- )
- );
-}
-
sub format_callback {
my ($invocant, $self, $struct, $filter, $params, $negate) = @_;
use OpenSRF::Utils qw/:datetime/;
use Data::Dumper;
use OpenILS::Application::AppUtils;
-use OpenILS::Utils::CStoreEditor;
my $apputils = "OpenILS::Application::AppUtils";
-my $editor = OpenILS::Utils::CStoreEditor->new;
+
sub toSQL {
my $self = shift;
$filters{$col} = $filter->args->[0];
}
}
- $self->new_filter( statuses => [0,7,12] ) if ($self->find_modifier('available'));
$self->QueryParser->superpage($filters{superpage}) if ($filters{superpage});
$self->QueryParser->superpage_size($filters{superpage_size}) if ($filters{superpage_size});
}
$rel = "1.0/($rel)::NUMERIC";
+ my $mra_join = 'INNER JOIN metabib.record_attr mrd ON m.source = mrd.id';
+
my $rank = $rel;
my $desc = 'ASC';
if ($flat_where ne '') {
$flat_where = "AND (\n" . ${spc} x 5 . $flat_where . "\n" . ${spc} x 4 . ")";
}
+ my $with = $$flat_plan{with};
+ $with= "\nWITH $with" if $with;
- my $site = $self->find_filter('site');
- if ($site && $site->args) {
- $site = $site->args->[0];
- if ($site && $site !~ /^(-)?\d+$/) {
- my $search = $editor->search_actor_org_unit({ shortname => $site });
- $site = @$search[0]->id if($search && @$search);
- $site = undef unless ($search);
- }
- } else {
- $site = undef;
- }
- my $lasso = $self->find_filter('lasso');
- if ($lasso && $lasso->args) {
- $lasso = $lasso->args->[0];
- if ($lasso && $lasso !~ /^\d+$/) {
- my $search = $editor->search_actor_org_lasso({ name => $lasso });
- $lasso = @$search[0]->id if($search && @$search);
- $lasso = undef unless ($search);
- }
- } else {
- $lasso = undef;
- }
- my $depth = $self->find_filter('depth');
- if ($depth && $depth->args) {
- $depth = $depth->args->[0];
- if ($depth && $depth !~ /^\d+$/) {
- # This *is* what metabib.pm has been doing....but it makes no sense to me. :/
- # Should this be looking up the depth of the OU type on the OU in question?
- my $search = $editor->search_actor_org_unit([{ name => $depth },{ opac_label => $depth }]);
- $depth = @$search[0]->id if($search && @$search);
- $depth = undef unless($search);
- }
- } else {
- $depth = undef;
- }
- my $pref_ou = $self->find_filter('pref_ou');
- if ($pref_ou && $pref_ou->args) {
- $pref_ou = $pref_ou->args->[0];
- if ($pref_ou && $pref_ou !~ /^(-)?\d+$/) {
- my $search = $editor->search_actor_org_unit({ shortname => $pref_ou });
- $pref_ou = @$search[0]->id if($search && @$search);
- $pref_ou = undef unless ($search);
- }
- } else {
- $pref_ou = undef;
- }
-
- # Supposedly at some point a site of 0 and a depth will equal user lasso id.
- # We need OU buckets before that happens. 'my_lasso' is, I believe, the target filter for it.
-
- $site = -$lasso if ($lasso);
-
- # Default to the top of the org tree if we have nothing else. This would need tweaking for the user lasso bit.
- if (!$site) {
- my $search = $editor->search_actor_org_unit({ parent_ou => undef });
- $site = @$search[0]->id if ($search);
- }
-
- my $depth_check = '';
- $depth_check = ", $depth" if ($depth);
-
- my $with = '';
- $with .= " search_org_list AS (\n";
- if ($site < 0) {
- # Lasso!
- $lasso = -$site;
- $with .= " SELECT DISTINCT org_unit from actor.org_lasso_map WHERE lasso = $lasso\n";
- } elsif ($site > 0) {
- $with .= " SELECT DISTINCT id FROM actor.org_unit_descendants($site$depth_check)\n";
- } else {
- # Placeholder for user lasso stuff.
- }
- $with .= " ),\n";
- $with .= " luri_org_list AS (\n";
- if ($site < 0) {
- # We can re-use the lasso var, we already updated it above.
- $with .= " SELECT DISTINCT (actor.org_unit_ancestors(org_unit)).id from actor.org_lasso_map WHERE lasso = $lasso\n";
- } elsif ($site > 0) {
- $with .= " SELECT DISTINCT id FROM actor.org_unit_ancestors($site)\n";
- } else {
- # Placeholder for user lasso stuff.
- }
- if ($pref_ou) {
- $with .= " UNION\n";
- $with .= " SELECT DISTINCT id FROM actor.org_unit_ancestors($pref_ou)\n";
- }
- $with .= " )";
- $with .= ",\n " . $$flat_plan{with} if ($$flat_plan{with});
-
- # Limit stuff
- my $limit_where = <<" SQL";
--- Filter records based on visibility
- AND NOT bre.deleted
- AND (
- cbs.transcendant IS TRUE
- OR
- SQL
-
- if ($self->find_modifier('deleted')) {
- $limit_where = <<" SQL";
- AND bre.deleted
- SQL
- } elsif ($self->find_modifier('staff')) {
- $limit_where .= <<" SQL";
- EXISTS(
- SELECT 1 FROM asset.call_number cn
- JOIN asset.copy cp ON (cp.call_number = cn.id)
- WHERE NOT cn.deleted
- AND NOT cp.deleted
- AND cp.circ_lib IN ( SELECT * FROM search_org_list )
- AND cn.record = m.source
- LIMIT 1
- )
- OR
- EXISTS(
- SELECT 1 FROM biblio.peer_bib_copy_map pr
- JOIN asset.copy cp ON (cp.id = pr.target_copy)
- WHERE NOT cp.deleted
- AND cp.circ_lib IN ( SELECT * FROM search_org_list )
- AND pr.peer_record = m.source
- LIMIT 1
- )
- OR (
- NOT EXISTS(
- SELECT 1 FROM asset.call_number cn
- JOIN asset.copy cp ON (cp.call_number = cn.id)
- WHERE cn.record = m.source
- AND NOT cp.deleted
- LIMIT 1
- )
- AND
- NOT EXISTS(
- SELECT 1 FROM biblio.peer_bib_copy_map pr
- JOIN asset.copy cp ON (cp.id = pr.target_copy)
- WHERE NOT cp.deleted
- AND pr.peer_record = m.source
- LIMIT 1
- )
- AND
- NOT EXISTS(
- SELECT 1 FROM asset.call_number acn
- JOIN asset.uri_call_number_map aucnm ON acn.id = aucnm.call_number
- JOIN asset.uri uri ON aucnm.uri = uri.id
- WHERE NOT acn.deleted
- AND uri.active
- AND acn.record = m.source
- LIMIT 1
- )
- )
- OR
- EXISTS(
- SELECT 1 FROM asset.call_number acn
- JOIN asset.uri_call_number_map aucnm ON acn.id = aucnm.call_number
- JOIN asset.uri uri ON aucnm.uri = uri.id
- WHERE NOT acn.deleted AND uri.active AND acn.record = m.source AND acn.owning_lib IN (
- SELECT * FROM luri_org_list
- )
- LIMIT 1
- )
- )
- SQL
- } else {
- $limit_where .= <<" SQL";
- EXISTS(
- SELECT 1 FROM asset.opac_visible_copies
- WHERE circ_lib IN ( SELECT * FROM search_org_list )
- AND record = m.source
- LIMIT 1
- )
- OR
- EXISTS(
- SELECT 1 FROM biblio.peer_bib_copy_map pr
- JOIN asset.opac_visible_copies cp ON (cp.copy_id = pr.target_copy)
- WHERE cp.circ_lib IN ( SELECT * FROM search_org_list )
- AND pr.peer_record = m.source
- LIMIT 1
- )
- OR
- EXISTS(
- SELECT 1 FROM asset.call_number acn
- JOIN asset.uri_call_number_map aucnm ON acn.id = aucnm.call_number
- JOIN asset.uri uri ON aucnm.uri = uri.id
- WHERE NOT acn.deleted AND uri.active AND acn.record = m.source AND acn.owning_lib IN (
- SELECT * FROM luri_org_list
- )
- LIMIT 1
- )
- )
- SQL
- }
- # For single records we want the record id
- # For metarecords we want NULL or the only record ID.
- my $agg_record = 'm.source AS record';
+ # Need an array for query parser db function; this gives a better plan
+ # than the ARRAY_AGG(DISTINCT m.source) option as of PostgreSQL 9.1
+ my $agg_records = 'ARRAY[m.source] AS records';
if ($key =~ /metarecord/) {
- $agg_record = 'CASE WHEN COUNT(DISTINCT m.source) = 1 THEN FIRST(m.source) ELSE NULL END AS record';
+ # metarecord searches still require the ARRAY_AGG approach
+ $agg_records = 'ARRAY_AGG(DISTINCT m.source) AS records';
}
my $sql = <<SQL;
-WITH
$with
SELECT $key AS id,
- $agg_record,
+ $agg_records,
$rel AS rel,
$rank AS rank,
FIRST(mrd.attrs->'date1') AS tie_break
FROM metabib.metarecord_source_map m
$$flat_plan{from}
- INNER JOIN metabib.record_attr mrd ON m.source = mrd.id
- INNER JOIN biblio.record_entry bre ON m.source = bre.id
- LEFT JOIN config.bib_source cbs ON bre.source = cbs.id
+ $mra_join
WHERE 1=1
$flat_where
- $limit_where
GROUP BY 1
ORDER BY 4 $desc $nullpos, 5 DESC $nullpos, 3 DESC
LIMIT $core_limit
. ${spc} x 2 ."AND ${talias}.field IN (". join(',', @field_ids) . ")\n"
. "${spc})";
# Keep the string comparison: $join_type holds the string 'INNER', and the
# numeric operator != would coerce both operands to 0 (always-false test,
# plus "isn't numeric" warnings).  'ne' is the correct Perl string operator.
    if ($join_type ne 'INNER') {
my $NOT = $node->negate ? '' : ' NOT';
$where .= "${talias}.id IS$NOT NULL";
} elsif ($where ne '') {
$where .= "bre.$datefilter BETWEEN \$_$$\$$cstart\$_$$\$ AND \$_$$\$$cend\$_$$\$";
}
}
- } elsif ($filter->name eq 'locations') {
- if (@{$filter->args} > 0) {
- my $spcdepth = $self->plan_level + 5;
- $where .= $joiner if $where ne '';
- $where .= "(\n"
- . ${spc} x ($spcdepth + 1) . "${NOT}EXISTS(\n"
- . ${spc} x ($spcdepth + 2) . "SELECT 1 FROM asset.call_number acn\n"
- . ${spc} x ($spcdepth + 5) . "JOIN asset.copy acp ON acn.id = acp.call_number\n"
- . ${spc} x ($spcdepth + 2) . "WHERE m.source = acn.record\n"
- . ${spc} x ($spcdepth + 5) . "AND acp.circ_lib IN (SELECT * FROM search_org_list)\n"
- . ${spc} x ($spcdepth + 5) . "AND NOT acn.deleted\n"
- . ${spc} x ($spcdepth + 5) . "AND NOT acp.deleted\n"
- . ${spc} x ($spcdepth + 5) . "AND acp.location IN (" . join(',', map { $self->QueryParser->quote_value($_) } @{ $filter->args }) . ")\n"
- . ${spc} x ($spcdepth + 2) . "LIMIT 1\n"
- . ${spc} x ($spcdepth + 1) . ")\n"
- . ${spc} x ($spcdepth + 1) . ($filter->negate ? 'AND' : 'OR') . "\n"
- . ${spc} x ($spcdepth + 1) . "${NOT}EXISTS(\n"
- . ${spc} x ($spcdepth + 2) . "SELECT 1 FROM biblio.peer_bib_copy_map pr\n"
- . ${spc} x ($spcdepth + 5) . "JOIN asset.copy acp ON pr.target_copy = acp.id\n"
- . ${spc} x ($spcdepth + 2) . "WHERE m.source = pr.peer_record\n"
- . ${spc} x ($spcdepth + 5) . "AND acp.circ_lib IN (SELECT * FROM search_org_list)\n"
- . ${spc} x ($spcdepth + 5) . "AND NOT acp.deleted\n"
- . ${spc} x ($spcdepth + 5) . "AND acp.location IN (" . join(',', map { $self->QueryParser->quote_value($_) } @{ $filter->args }) . ")\n"
- . ${spc} x ($spcdepth + 2) . "LIMIT 1\n"
- . ${spc} x ($spcdepth + 1) . ")\n"
- . ${spc} x $spcdepth . ")";
- }
- } elsif ($filter->name eq 'statuses') {
- if (@{$filter->args} > 0) {
- my $spcdepth = $self->plan_level + 5;
- $where .= $joiner if $where ne '';
- $where .= "(\n"
- . ${spc} x ($spcdepth + 1) . "${NOT}EXISTS(\n"
- . ${spc} x ($spcdepth + 2) . "SELECT 1 FROM asset.call_number acn\n"
- . ${spc} x ($spcdepth + 5) . "JOIN asset.copy acp ON acn.id = acp.call_number\n"
- . ${spc} x ($spcdepth + 2) . "WHERE m.source = acn.record\n"
- . ${spc} x ($spcdepth + 5) . "AND acp.circ_lib IN (SELECT * FROM search_org_list)\n"
- . ${spc} x ($spcdepth + 5) . "AND NOT acn.deleted\n"
- . ${spc} x ($spcdepth + 5) . "AND NOT acp.deleted\n"
- . ${spc} x ($spcdepth + 5) . "AND acp.status IN (" . join(',', map { $self->QueryParser->quote_value($_) } @{ $filter->args }) . ")\n"
- . ${spc} x ($spcdepth + 2) . "LIMIT 1\n"
- . ${spc} x ($spcdepth + 1) . ")\n"
- . ${spc} x ($spcdepth + 1) . ($filter->negate ? 'AND' : 'OR') . "\n"
- . ${spc} x ($spcdepth + 1) . "${NOT}EXISTS(\n"
- . ${spc} x ($spcdepth + 2) . "SELECT 1 FROM biblio.peer_bib_copy_map pr\n"
- . ${spc} x ($spcdepth + 5) . "JOIN asset.copy acp ON pr.target_copy = acp.id\n"
- . ${spc} x ($spcdepth + 2) . "WHERE m.source = pr.peer_record\n"
- . ${spc} x ($spcdepth + 5) . "AND acp.circ_lib IN (SELECT * FROM search_org_list)\n"
- . ${spc} x ($spcdepth + 5) . "AND NOT acp.deleted\n"
- . ${spc} x ($spcdepth + 5) . "AND acp.status IN (" . join(',', map { $self->QueryParser->quote_value($_) } @{ $filter->args }) . ")\n"
- . ${spc} x ($spcdepth + 2) . "LIMIT 1\n"
- . ${spc} x ($spcdepth + 1) . ")\n"
- . ${spc} x $spcdepth . ")";
- }
} elsif ($filter->name eq 'bib_source') {
if (@{$filter->args} > 0) {
$where .= $joiner if $where ne '';
# parse the query and supply any query-level %arg-based defaults
- # we expect, and make use of, query, debug and core_limit args
+ # we expect, and make use of, query, superpage, superpage_size, debug and core_limit args
my $query = $parser->new( %args )->parse;
my $config = OpenSRF::Utils::SettingsClient->new();
}
}
- my $sth = metabib::metarecord_source_map->db_Main->prepare($query->parse_tree->toSQL);
+ # gather the site, if one is specified, defaulting to the in-query version
+ my $ou = $args{org_unit};
+ if (my ($filter) = $query->parse_tree->find_filter('site')) {
+ $ou = $filter->args->[0] if (@{$filter->args});
+ }
+ $ou = actor::org_unit->search( { shortname => $ou } )->next->id if ($ou and $ou !~ /^(-)?\d+$/);
+
+ # gather lasso, as with $ou
+ my $lasso = $args{lasso};
+ if (my ($filter) = $query->parse_tree->find_filter('lasso')) {
+ $lasso = $filter->args->[0] if (@{$filter->args});
+ }
+ $lasso = actor::org_lasso->search( { name => $lasso } )->next->id if ($lasso and $lasso !~ /^\d+$/);
+ $lasso = -$lasso if ($lasso);
+
+
+# # XXX once we have org_unit containers, we can make user-defined lassos .. WHEEE
+# # gather user lasso, as with $ou and lasso
+# my $mylasso = $args{my_lasso};
+# if (my ($filter) = $query->parse_tree->find_filter('my_lasso')) {
+# $mylasso = $filter->args->[0] if (@{$filter->args});
+# }
+# $mylasso = actor::org_unit->search( { name => $mylasso } )->next->id if ($mylasso and $mylasso !~ /^\d+$/);
+
+
+ # if we have a lasso, go with that, otherwise ... ou
+ $ou = $lasso if ($lasso);
+
+ # gather the preferred OU, if one is specified, as with $ou
+ my $pref_ou = $args{pref_ou};
+ $log->info("pref_ou = $pref_ou");
+ if (my ($filter) = $query->parse_tree->find_filter('pref_ou')) {
+ $pref_ou = $filter->args->[0] if (@{$filter->args});
+ }
+ $pref_ou = actor::org_unit->search( { shortname => $pref_ou } )->next->id if ($pref_ou and $pref_ou !~ /^(-)?\d+$/);
+
+ # get the default $ou if we have nothing
+ # NOTE: $mylasso is only declared inside the commented-out user-lasso
+ # block above, so referencing it here dies under "use strict"; drop it
+ # from the check until user lassos are actually implemented.
+ $ou = actor::org_unit->search( { parent_ou => undef } )->next->id if (!$ou and !$lasso);
+
+
+ # XXX when user lassos are here, check to make sure we don't have one -- it'll be passed in the depth, with an ou of 0
+ # gather the depth, if one is specified, defaulting to the in-query version
+ my $depth = $args{depth};
+ if (my ($filter) = $query->parse_tree->find_filter('depth')) {
+ $depth = $filter->args->[0] if (@{$filter->args});
+ }
+ $depth = actor::org_unit->search_where( [{ name => $depth },{ opac_label => $depth }], {limit => 1} )->next->id if ($depth and $depth !~ /^\d+$/);
+
+
+ # gather the limit or default to 10
+ my $limit = $args{check_limit} || 'NULL';
+ if (my ($filter) = $query->parse_tree->find_filter('limit')) {
+ $limit = $filter->args->[0] if (@{$filter->args});
+ }
+ if (my ($filter) = $query->parse_tree->find_filter('check_limit')) {
+ $limit = $filter->args->[0] if (@{$filter->args});
+ }
+
+
+ # gather the offset or default to 0
+ my $offset = $args{skip_check} || $args{offset} || 0;
+ if (my ($filter) = $query->parse_tree->find_filter('offset')) {
+ $offset = $filter->args->[0] if (@{$filter->args});
+ }
+ if (my ($filter) = $query->parse_tree->find_filter('skip_check')) {
+ $offset = $filter->args->[0] if (@{$filter->args});
+ }
+
+
+ # gather the estimation strategy or default to inclusion
+ my $estimation_strategy = $args{estimation_strategy} || 'inclusion';
+ if (my ($filter) = $query->parse_tree->find_filter('estimation_strategy')) {
+ $estimation_strategy = $filter->args->[0] if (@{$filter->args});
+ }
+
+
+ # gather the core limit, if one is specified
+ my $core_limit = $args{core_limit};
+ if (my ($filter) = $query->parse_tree->find_filter('core_limit')) {
+ $core_limit = $filter->args->[0] if (@{$filter->args});
+ }
+
+
+ # gather statuses, and then forget those if we have an #available modifier
+ my @statuses;
+ if (my ($filter) = $query->parse_tree->find_filter('statuses')) {
+ @statuses = @{$filter->args} if (@{$filter->args});
+ }
+ @statuses = (0,7,12) if ($query->parse_tree->find_modifier('available'));
+
+
+ # gather locations
+ my @location;
+ if (my ($filter) = $query->parse_tree->find_filter('locations')) {
+ @location = @{$filter->args} if (@{$filter->args});
+ }
+
+ # gather location_groups
+ if (my ($filter) = $query->parse_tree->find_filter('location_groups')) {
+ # Unconditional list assignment: an empty args list simply yields an
+ # empty @loc_groups.  ("my EXPR if COND" has undefined behavior per
+ # perlsyn and must not be used.)
+ my @loc_groups = @{$filter->args};
+
+ # collect the mapped locations and add them to the locations() filter
+ if (@loc_groups) {
+
+ my $cstore = OpenSRF::AppSession->create( 'open-ils.cstore' );
+ my $maps = $cstore->request(
+ 'open-ils.cstore.direct.asset.copy_location_group_map.search.atomic',
+ {lgroup => \@loc_groups})->gather(1);
+
+ push(@location, $_->location) for @$maps;
+ }
+ }
+
+
+ my $param_check = $limit || $query->superpage_size || 'NULL';
+ my $param_offset = $offset || 'NULL';
+ my $param_limit = $core_limit || 'NULL';
+
+ my $sp = $query->superpage || 1;
+ if ($sp > 1) {
+ # NOTE(review): $sp_size is not declared anywhere in this hunk --
+ # presumably this should be $query->superpage_size (cf. $param_check
+ # above); confirm it is in scope or this will die under "use strict".
+ $param_offset = ($sp - 1) * $sp_size;
+ }
+
+ my $param_search_ou = $ou;
+ my $param_depth = $depth; $param_depth = 'NULL' unless (defined($depth) and length($depth) > 0 );
+ my $param_core_query = "\$core_query_$$\$" . $query->parse_tree->toSQL . "\$core_query_$$\$";
+ my $param_statuses = '$${' . join(',', map { s/\$//go; "\"$_\""} @statuses) . '}$$';
+ my $param_locations = '$${' . join(',', map { s/\$//go; "\"$_\""} @location) . '}$$';
+ my $staff = ($self->api_name =~ /staff/ or $query->parse_tree->find_modifier('staff')) ? "'t'" : "'f'";
+ my $metarecord = ($self->api_name =~ /metabib/ or $query->parse_tree->find_modifier('metabib') or $query->parse_tree->find_modifier('metarecord')) ? "'t'" : "'f'";
+ my $param_pref_ou = $pref_ou || 'NULL';
+
+ my $sth = metabib::metarecord_source_map->db_Main->prepare(<<" SQL");
+ SELECT * -- bib search: $args{query}
+ FROM search.query_parser_fts(
+ $param_search_ou\:\:INT,
+ $param_depth\:\:INT,
+ $param_core_query\:\:TEXT,
+ $param_statuses\:\:INT[],
+ $param_locations\:\:INT[],
+ $param_offset\:\:INT,
+ $param_check\:\:INT,
+ $param_limit\:\:INT,
+ $metarecord\:\:BOOL,
+ $staff\:\:BOOL,
+ $param_pref_ou\:\:INT
+ );
+ SQL
+
$sth->execute;
my $recs = $sth->fetchall_arrayref({});
- $log->debug("Search yielded ".scalar(@$recs)." checked, visible results.",DEBUG);
+ my $summary_row = pop @$recs;
- $client->respond({visible => scalar(@$recs)});
+ my $total = $$summary_row{total};
+ my $checked = $$summary_row{checked};
+ my $visible = $$summary_row{visible};
+ my $deleted = $$summary_row{deleted};
+ my $excluded = $$summary_row{excluded};
+
+ my $estimate = $visible;
+ if ( $total > $checked && $checked ) {
+
+ $$summary_row{hit_estimate} = FTS_paging_estimate($self, $client, $checked, $visible, $excluded, $deleted, $total);
+ $estimate = $$summary_row{estimated_hit_count} = $$summary_row{hit_estimate}{$estimation_strategy};
+
+ }
+
+ delete $$summary_row{id};
+ delete $$summary_row{rel};
+ delete $$summary_row{record};
+
+ if (defined($simple_plan)) {
+ $$summary_row{complex_query} = $simple_plan ? 0 : 1;
+ } else {
+ $$summary_row{complex_query} = $query->simple_plan ? 0 : 1;
+ }
+
+ $client->respond( $summary_row );
+
+ $log->debug("Search yielded ".scalar(@$recs)." checked, visible results with an approximate visible total of $estimate.",DEBUG);
for my $rec (@$recs) {
+ delete $$rec{checked};
+ delete $$rec{visible};
+ delete $$rec{excluded};
+ delete $$rec{deleted};
+ delete $$rec{total};
$$rec{rel} = sprintf('%0.3f',$$rec{rel});
$client->respond( $rec );