JBAS-2098 Migrate browse data back to stock tables
authorBill Erickson <berickxx@gmail.com>
Thu, 30 Aug 2018 21:34:45 +0000 (17:34 -0400)
committerBill Erickson <berickxx@gmail.com>
Thu, 21 Mar 2019 19:46:23 +0000 (15:46 -0400)
Move data from the various
metabib.browse_[title|author|subject|series|call_number]_entry_* tables into
the stock metabib.browse_entry_* tables.

The data migration step handles de-duplication and creation of entry,
def_map, and simple_heading_map rows.

UI code is updated to refer to the stock tables as well.

Some local additions were made to improve stock browse speed and to
resolve index-size issues (the combo_sort_value field, the
metabib_fields_cache field, and substr-based indexes).

Includes updates to the new headings report to reference the new tables,
including a materialized view to speed up the initial report collection.

Signed-off-by: Bill Erickson <berickxx@gmail.com>
19 files changed:
KCLS/openils/var/templates_kcls/opac/browse_results.tt2
KCLS/openils/var/templates_kcls/opac/parts/qtype_selector.tt2
KCLS/sql/schema/deploy/stock-browse-cleanup.sql [new file with mode: 0644]
KCLS/sql/schema/deploy/stock-browse-headings-report.sql [new file with mode: 0644]
KCLS/sql/schema/deploy/stock-browse-schema.sql [new file with mode: 0644]
KCLS/sql/schema/revert/stock-browse-cleanup.sql [new file with mode: 0644]
KCLS/sql/schema/revert/stock-browse-headings-report.sql [new file with mode: 0644]
KCLS/sql/schema/revert/stock-browse-schema.sql [new file with mode: 0644]
KCLS/sql/schema/sqitch.plan
KCLS/utility-scripts/CRONTAB
KCLS/utility-scripts/headings_report/refresh_mat_view.sh [new file with mode: 0755]
Open-ILS/examples/fm_IDL.xml
Open-ILS/src/perlmods/lib/OpenILS/Application/Search.pm
Open-ILS/src/perlmods/lib/OpenILS/Application/Storage/Driver/Pg/QueryParser.pm
Open-ILS/src/perlmods/lib/OpenILS/WWW/EGCatLoader/Browse.pm
Open-ILS/web/js/dojo/openils/CGI.js
Open-ILS/web/js/ui/default/cat/authority/new_headings.js
Open-ILS/web/js/ui/default/opac/browse_set_navigation.js
recover-stock-browse-db.sql [new file with mode: 0644]

index c06410a..026b530 100644 (file)
@@ -80,7 +80,7 @@
                         [% formatted_qtype = "Subject"; %]
                     [% CASE "series" %]
                         [% formatted_qtype = "Series Title"; %]
-                    [% CASE "id|bibcn" %]
+                    [% CASE "identifier" %]
                         [% formatted_qtype = "Bib Call Number"; %]
                     [% END %]
                     <div class="browse-pager">
                                         [% IF ctx.is_staff %] 
                                             <a class="staff-client-link" url="[% mkurl(
                                                 ctx.opac_root _ '/browse_items', {
-                                                    'fi:has_browse_entry' => (CGI.param('qtype') _ ',' _ result.browse_entry _ ',' _ result.fields)                                                                                                     
+                                                    'fi:has_browse_entry' => (result.browse_entry _ ',' _ result.fields)
                                                 }) %]" 
                                                 onclick = "new_tab( this )" 
                                                 >
                                         [% ELSE %]
                                             <a href="[% mkurl(
                                                 ctx.opac_root _ '/browse_items', {
-                                                    'fi:has_browse_entry' => (CGI.param('qtype') _ ',' _ result.browse_entry _ ',' _ result.fields)
+                                                    'fi:has_browse_entry' => (result.browse_entry _ ',' _ result.fields)
                                                 }) %]">
                                                 [% result.value %]
                                             </a>
             <div class="common-full-pad">
             </div>
             [% # Display a link to do a contains phrase search for the same bib call number browse search term. %]
-            [% IF current_qtype == "id|bibcn" %]
+            [% IF current_qtype == "identifier" %]
                 <div class="contains-phrase-container">
                     [% IF ctx.is_staff %]
-                        <a class="staff-client-link" url="/eg/opac/results?qtype=id|bibcn&query=[% CGI.param('bterm') %]&page=0&x=0&y=0&fi%3Amattype=&loc=1" onclick = "new_tab( this )">
+                        <a class="staff-client-link" url="/eg/opac/results?qtype=identifier&query=[% CGI.param('bterm') %]&page=0&x=0&y=0&fi%3Amattype=&loc=1" onclick = "new_tab( this )">
                             * Perform a Bib Call Number contains "[% CGI.param('bterm') %]" search.
                         </a>
                     [% ELSE %]
-                        <a href="results?qtype=id|bibcn&query=[% CGI.param('bterm') %]&page=0&x=0&y=0&fi%3Amattype=&loc=1">
+                        <a href="results?qtype=identifier&query=[% CGI.param('bterm') %]&page=0&x=0&y=0&fi%3Amattype=&loc=1">
                             * Perform a Bib Call Number contains <span class="bold">[% CGI.param('bterm') %]</span> search.
                         </a>
                      [% END %]
index 9fbb7b6..ca67299 100644 (file)
@@ -4,7 +4,9 @@
     {value => "author", label => l("Author"), plural_label => l("Authors (Last, First)"), browse => 1},
     {value => "subject", label => l("Subject"), plural_label => l("Subjects"), browse => 1},
     {value => "series", label => l("Series"), plural_label => l("Series Titles"), browse => 1},
-    {value => "id|bibcn", label => l("Bib Call Number"), plural_label => l("Bib Call Numbers"), browse => 1}
+    # "bibcn" is the only identifier browse field
+    {value => browse_only ? 'identifier' : "id|bibcn", 
+      label => l("Bib Call Number"), plural_label => l("Bib Call Numbers"), browse => 1}
     {value => "identifier", label => l("ISBN/ISSN/Other")}
     {value => "keyword|publisher", label => l("Publisher")}
 ] %]
diff --git a/KCLS/sql/schema/deploy/stock-browse-cleanup.sql b/KCLS/sql/schema/deploy/stock-browse-cleanup.sql
new file mode 100644 (file)
index 0000000..c7af1d1
--- /dev/null
@@ -0,0 +1,28 @@
+-- Deploy kcls-evergreen:stock-browse-cleanup to pg
+-- requires: stock-browse-schema
+
+BEGIN;
+
+-- Remove all of the legacy browse data
+
+DROP TABLE metabib.browse_title_entry_def_map;
+DROP TABLE metabib.browse_title_entry_simple_heading_map;
+DROP TABLE metabib.browse_title_entry;
+
+DROP TABLE metabib.browse_author_entry_def_map;
+DROP TABLE metabib.browse_author_entry_simple_heading_map;
+DROP TABLE metabib.browse_author_entry;
+
+DROP TABLE metabib.browse_subject_entry_def_map;
+DROP TABLE metabib.browse_subject_entry_simple_heading_map;
+DROP TABLE metabib.browse_subject_entry;
+
+DROP TABLE metabib.browse_series_entry_def_map;
+DROP TABLE metabib.browse_series_entry_simple_heading_map;
+DROP TABLE metabib.browse_series_entry;
+
+DROP TABLE metabib.browse_call_number_entry_def_map;
+DROP TABLE metabib.browse_call_number_entry;
+
+COMMIT;
+
diff --git a/KCLS/sql/schema/deploy/stock-browse-headings-report.sql b/KCLS/sql/schema/deploy/stock-browse-headings-report.sql
new file mode 100644 (file)
index 0000000..85527a2
--- /dev/null
@@ -0,0 +1,150 @@
+-- Deploy kcls-evergreen:stock-browse-headings-report to pg
+-- requires: stock-browse-schema
+
+BEGIN;
+
+SET STATEMENT_TIMEOUT = 0;
+
+-- Returns a browse entry summary object of the selected type directly
+-- preceding or following the provided browse entry sort value.
+-- If 'is_next' is true, return the next entry; otherwise the preceding one.
+CREATE OR REPLACE FUNCTION
+    reporter.metabib_browse_entry_window_part
+    (browse_type TEXT, pivot_sort_value TEXT, is_next BOOLEAN)
+    RETURNS TABLE (
+        entry BIGINT,
+        entry_value TEXT,
+        entry_sort_value TEXT,
+        entry_create_date TIMESTAMPTZ,
+        bib_record BIGINT,
+        bib_editor INTEGER,
+        bib_edit_date TIMESTAMPTZ,
+        bib_create_date TIMESTAMPTZ,
+        bib_cataloging_date TIMESTAMPTZ,
+        field_def INTEGER,
+        field_label TEXT,
+        auth_tag CHARACTER(3)
+    ) AS $FUNK$
+DECLARE
+    pivot_where TEXT; -- comparison operator applied to combo_sort_value: '<' or '>'
+    pivot_sort TEXT; -- ORDER BY direction paired with the operator above
+BEGIN
+
+    pivot_where := '<'; -- default: entry directly before the pivot
+    pivot_sort := 'DESC';
+
+    IF is_next THEN -- walk forward from the pivot instead of backward
+        pivot_where := '>';
+        pivot_sort := 'ASC';
+    END IF;
+
+    RETURN QUERY EXECUTE $$
+        WITH target_entry AS (
+            SELECT * FROM metabib.browse_entry mbe
+            WHERE metabib_fields_cache && (
+                SELECT ARRAY_AGG(id) 
+                FROM config.metabib_field 
+                WHERE field_class = $$ || QUOTE_LITERAL(browse_type) || $$
+            )
+            AND mbe.combo_sort_value $$ || pivot_where || ' ' 
+                || QUOTE_LITERAL(pivot_sort_value) || $$
+            ORDER BY mbe.combo_sort_value $$ || pivot_sort || $$
+            LIMIT 1
+        )
+        SELECT 
+            entry.id, entry.value, entry.sort_value, entry.create_date,
+            bre.id, bre.editor, bre.edit_date, bre.create_date,
+            bre.cataloging_date, map.def, cmf.label, field.tag
+            FROM target_entry entry
+                LEFT JOIN metabib.browse_entry_def_map map
+                    ON (map.entry = entry.id)
+                LEFT JOIN biblio.record_entry bre
+                    ON (bre.id = map.source AND NOT bre.deleted)
+                LEFT JOIN config.metabib_field cmf ON (cmf.id = map.def)
+                LEFT JOIN metabib.browse_entry_simple_heading_map hmap
+                    ON (map.source IS NULL AND hmap.entry = entry.id)
+                LEFT JOIN authority.simple_heading ash ON
+                    (ash.id = hmap.simple_heading)
+                LEFT JOIN authority.control_set_authority_field field
+                    ON (field.id = ash.atag)
+    $$;
+END;
+$FUNK$ LANGUAGE PLPGSQL;
+
+
+DROP VIEW IF EXISTS reporter.cataloged_browse_entry;
+DROP VIEW IF EXISTS reporter.cataloged_browse_entry_combined;
+DROP FUNCTION IF EXISTS reporter.cataloged_browse_entries(TEXT);
+DROP MATERIALIZED VIEW IF EXISTS reporter.cataloged_browse_entries;
+
+SELECT CLOCK_TIMESTAMP(), 'Creating materialized browse entries view';
+
+CREATE MATERIALIZED VIEW reporter.materialized_cataloged_browse_entry AS
+    WITH cataloged_entries AS (
+        SELECT
+            entry.id AS entry,
+            entry.value AS entry_value,
+            entry.sort_value AS entry_sort_value,
+            entry.create_date AS entry_create_date,
+            map.source AS bib_record,
+            map.def AS field_def,
+            bre.cataloging_date AS bib_cataloging_date,
+            bre.editor AS bib_editor,
+            bre.edit_date AS bib_edit_date,
+            bre.create_date AS bib_create_date,
+            cmf.field_class AS browse_axis,
+            cmf.label AS field_label,
+            ccvm.code AS mattype,
+            GREATEST(bre.cataloging_date, entry.create_date) AS heading_date,
+            RANK() OVER (
+                PARTITION BY entry.id
+                ORDER BY
+                    bre.cataloging_date,
+                    map.id -- tie breaker
+            ) AS rank
+        FROM metabib.browse_entry entry
+            JOIN metabib.browse_entry_def_map map ON (map.entry = entry.id)
+            JOIN config.metabib_field cmf ON (cmf.id = map.def)
+            JOIN biblio.record_entry bre ON (bre.id = map.source)
+            JOIN metabib.record_attr_vector_list vec ON (vec.source = bre.id)
+            JOIN config.coded_value_map ccvm
+                ON (ccvm.ctype = 'mattype' AND ccvm.id = ANY (vec.vlist))
+        -- presence of a def_map means the bib is not deleted
+        WHERE bre.cataloging_date IS NOT NULL AND
+              cmf.field_class IN ('author', 'subject', 'series')
+    ) 
+    SELECT all_entries.*
+    FROM cataloged_entries all_entries
+    -- we only want the most relevant linked bib record
+    WHERE all_entries.rank = 1;
+
+DROP INDEX IF EXISTS reporter.rmcbe_heading_date_idx;
+CREATE INDEX rmcbe_heading_date_idx 
+    ON reporter.materialized_cataloged_browse_entry(heading_date);
+
+SELECT CLOCK_TIMESTAMP(), 'Done creating materialized browse entries view';
+
+CREATE OR REPLACE VIEW reporter.cataloged_browse_entry_detail AS
+SELECT entry.*,
+    usr.usrname AS bib_editor_usrname,
+    evergreen.marc_tag_to_string('1', bre.marc) AS bib_marc_1xx,
+    evergreen.marc_tag_to_string('245', bre.marc) AS bib_marc_245,
+    (reporter.metabib_browse_entry_window_prev( -- NOTE(review): only _window_part is created above; confirm the _prev/_next wrappers exist (prior deploy?)
+        entry.browse_axis, entry.entry_sort_value)).*,
+    (reporter.metabib_browse_entry_window_next(
+        entry.browse_axis, entry.entry_sort_value)).*
+FROM reporter.materialized_cataloged_browse_entry entry
+    JOIN biblio.record_entry bre ON (bre.id = entry.bib_record)
+    JOIN actor.usr usr ON (usr.id = bre.editor)
+ORDER BY 
+    CASE entry.browse_axis 
+        WHEN 'author' THEN 1 
+        WHEN 'subject' THEN 2 
+        ELSE 3 
+    END, 
+    entry.entry_sort_value
+;
+
+COMMIT;
+
+--ROLLBACK;
diff --git a/KCLS/sql/schema/deploy/stock-browse-schema.sql b/KCLS/sql/schema/deploy/stock-browse-schema.sql
new file mode 100644 (file)
index 0000000..d73c21f
--- /dev/null
@@ -0,0 +1,1032 @@
+-- Deploy kcls-evergreen:stock-browse-schema to pg
+-- requires: ecard-notice-validator
+
+-- TODO: mattype CCVM filter
+BEGIN;
+
+SET STATEMENT_TIMEOUT = 0;
+
+-- Support call_number browse by ensuring bibcn is the only 'identifier'
+-- browse field.
+UPDATE config.metabib_field SET browse_field = FALSE
+WHERE browse_field AND field_class = 'identifier' AND name <> 'bibcn';
+
+-- Add custom create_date column and update indexes
+-- FOR STOCK ADD combo_sort_value AND add index
+ALTER TABLE metabib.browse_entry
+    ADD COLUMN create_date 
+        TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
+    ADD COLUMN metabib_fields_cache INT[],
+    -- remove this column post-migration.
+    ALTER COLUMN truncated_sort_value DROP NOT NULL,
+    ADD COLUMN combo_sort_value TEXT,
+    DROP CONSTRAINT IF EXISTS browse_entry_sort_value_value_key;
+
+-- prevent dupes
+CREATE UNIQUE INDEX browse_entry_sort_value_value_key
+    ON metabib.browse_entry USING btree (DIGEST((value || sort_value), 'sha1'));
+
+-- KCLS has this index already
+-- drop now for speed, add back later.
+DROP INDEX IF EXISTS metabib.browse_entry_combo_sort_value_idx;
+
+-- substr the indexes to avoid insert failures on excessively long values
+DROP INDEX IF EXISTS metabib.browse_entry_sort_value_idx;
+CREATE INDEX browse_entry_sort_value_idx 
+    ON metabib.browse_entry (SUBSTR(sort_value, 1, 2048));
+
+DROP INDEX IF EXISTS metabib.browse_entry_value_idx;
+CREATE INDEX browse_entry_value_idx 
+    ON metabib.browse_entry (SUBSTR(value, 1, 2048));
+
+-- KCLS custom
+CREATE OR REPLACE FUNCTION
+    metabib.maintain_browse_metabib_fields_cache()
+    RETURNS TRIGGER AS
+$FUNK$
+DECLARE
+    entry_id BIGINT; -- browse entry referenced by the inserted/deleted map row
+BEGIN
+
+    SELECT INTO entry_id
+        CASE WHEN TG_OP = 'DELETE' THEN OLD.entry ELSE NEW.entry END;
+    
+    -- distinct list of config.metabib_field IDs which link 
+    -- to a given metabib.browse_entry via bib or auth maps.
+    UPDATE metabib.browse_entry
+    SET metabib_fields_cache = ARRAY(
+        SELECT DISTINCT(x.def_id) FROM (
+            SELECT mbedm.def AS def_id
+                FROM metabib.browse_entry_def_map mbedm
+                WHERE mbedm.entry = entry_id
+            UNION
+            SELECT map.metabib_field AS def_id
+                FROM metabib.browse_entry_simple_heading_map mbeshm
+                JOIN authority.simple_heading ash ON (mbeshm.simple_heading = ash.id)
+                JOIN authority.control_set_auth_field_metabib_field_map_refs map
+                    ON (ash.atag = map.authority_field)
+                WHERE mbeshm.entry = entry_id
+        )x
+    )
+    WHERE id = entry_id;
+
+    IF TG_OP = 'DELETE' THEN
+        -- remove browse entries that link to neither a bib nor auth field
+        PERFORM TRUE FROM metabib.browse_entry
+        WHERE id = entry_id AND ARRAY_LENGTH(metabib_fields_cache, 1) > 0;
+
+        IF NOT FOUND THEN
+            DELETE FROM metabib.browse_entry WHERE id = entry_id;
+        END IF;
+    END IF;
+
+    RETURN NEW; -- return value ignored for AFTER triggers; NEW is NULL on DELETE
+END;
+$FUNK$ LANGUAGE PLPGSQL;
+
+CREATE TRIGGER maintain_browse_metabib_fields_cache_bib_tgr 
+    AFTER INSERT OR DELETE ON metabib.browse_entry_def_map 
+    FOR EACH ROW EXECUTE PROCEDURE metabib.maintain_browse_metabib_fields_cache();
+
+CREATE TRIGGER maintain_browse_metabib_fields_cache_auth_tgr 
+    AFTER INSERT OR DELETE ON metabib.browse_entry_simple_heading_map 
+    FOR EACH ROW EXECUTE PROCEDURE metabib.maintain_browse_metabib_fields_cache();
+
+CREATE OR REPLACE FUNCTION metabib.browse_pivot(
+    search_field INT[], browse_term TEXT
+) RETURNS BIGINT AS $p$
+    SELECT id FROM metabib.browse_entry
+    WHERE metabib_fields_cache && $1 -- entry linked to at least one requested field
+    AND combo_sort_value >= public.naco_normalize($2)
+    ORDER BY combo_sort_value LIMIT 1; -- first entry at or after the normalized term
+$p$ LANGUAGE SQL STABLE;
+
+-- Modify KCLS custom function.  Called from Browse.pm
+-- Returns the authority record MARC for a given browse entry.
+CREATE OR REPLACE FUNCTION 
+    metabib.get_browse_entry_marc_record(browse_entry BIGINT, search_class TEXT)
+    RETURNS TEXT AS
+$FUNK$
+    SELECT are.marc
+    FROM metabib.browse_entry mbe
+    JOIN metabib.browse_entry_simple_heading_map mbeshm 
+        ON (mbeshm.entry = mbe.id)
+    JOIN authority.simple_heading ash ON (ash.id = mbeshm.simple_heading)
+    JOIN authority.control_set_authority_field acsaf ON (acsaf.id = ash.atag)
+    JOIN authority.record_entry are ON (are.id = ash.record)
+    JOIN authority.control_set_auth_field_metabib_field_map_refs map 
+        ON (map.authority_field = ash.atag)
+    JOIN config.metabib_field cmf ON (cmf.id = map.metabib_field)
+    WHERE   mbe.id = browse_entry
+        AND acsaf.main_entry IS NULL -- main-entry auth fields only (main_entry link unset)
+        AND cmf.field_class = search_class; -- e.g. 'author', 'subject', 'series'
+$FUNK$ LANGUAGE SQL STABLE;
+
+-- DROP LOCAL FUNCS TO ENSURE NONE STICK AROUND w/ DIFFERENT SIGNATURES
+
+DROP FUNCTION IF EXISTS metabib.browse(TEXT, TEXT, INT, INT, BOOLEAN, BIGINT, INT);
+DROP FUNCTION IF EXISTS metabib.browse(TEXT, TEXT, INT, INT, BOOLEAN, BIGINT, INT, TEXT);
+DROP FUNCTION IF EXISTS metabib.staged_browse(TEXT, INT[], INT, INT[], BOOLEAN, INT, BOOLEAN, INT, INT);
+DROP FUNCTION IF EXISTS metabib.staged_browse(TEXT, INT[], INT, INT[], BOOLEAN, INT, BOOLEAN, INT, INT, TEXT);
+DROP FUNCTION IF EXISTS metabib.staged_browse(TEXT, INT[], INT, INT[], BOOLEAN, INT, BOOLEAN, INT, INT, TEXT, TEXT);
+
+-- RECOVER STOCK FUNCTIONS
+
+CREATE OR REPLACE FUNCTION metabib.reingest_metabib_field_entries( bib_id BIGINT, skip_facet BOOL DEFAULT FALSE, skip_browse BOOL DEFAULT FALSE, skip_search BOOL DEFAULT FALSE ) RETURNS VOID AS $func$
+DECLARE
+    fclass          RECORD;
+    ind_data        metabib.field_entry_template%ROWTYPE;
+    mbe_row         metabib.browse_entry%ROWTYPE;
+    mbe_id          BIGINT;
+    b_skip_facet    BOOL;
+    b_skip_browse   BOOL;
+    b_skip_search   BOOL;
+    value_prepped   TEXT;
+    combo_sort_val TEXT; -- sort_value || value, truncated for the combo index
+BEGIN
+
+    SELECT COALESCE(NULLIF(skip_facet, FALSE), EXISTS (SELECT enabled FROM config.internal_flag WHERE name =  'ingest.skip_facet_indexing' AND enabled)) INTO b_skip_facet;
+    SELECT COALESCE(NULLIF(skip_browse, FALSE), EXISTS (SELECT enabled FROM config.internal_flag WHERE name =  'ingest.skip_browse_indexing' AND enabled)) INTO b_skip_browse;
+    SELECT COALESCE(NULLIF(skip_search, FALSE), EXISTS (SELECT enabled FROM config.internal_flag WHERE name =  'ingest.skip_search_indexing' AND enabled)) INTO b_skip_search;
+
+    PERFORM * FROM config.internal_flag WHERE name = 'ingest.assume_inserts_only' AND enabled;
+    IF NOT FOUND THEN
+        IF NOT b_skip_search THEN
+            FOR fclass IN SELECT * FROM config.metabib_class LOOP
+                -- RAISE NOTICE 'Emptying out %', fclass.name;
+                EXECUTE $$DELETE FROM metabib.$$ || fclass.name || $$_field_entry WHERE source = $$ || bib_id;
+            END LOOP;
+        END IF;
+        IF NOT b_skip_facet THEN
+            DELETE FROM metabib.facet_entry WHERE source = bib_id;
+        END IF;
+        IF NOT b_skip_browse THEN
+            DELETE FROM metabib.browse_entry_def_map WHERE source = bib_id;
+        END IF;
+    END IF;
+
+    FOR ind_data IN SELECT * FROM biblio.extract_metabib_field_entry( bib_id ) LOOP
+
+        -- don't store what has been normalized away
+        CONTINUE WHEN ind_data.value IS NULL;
+
+        IF ind_data.field < 0 THEN
+            ind_data.field = -1 * ind_data.field;
+        END IF;
+
+        IF ind_data.facet_field AND NOT b_skip_facet THEN
+            INSERT INTO metabib.facet_entry (field, source, value)
+                VALUES (ind_data.field, ind_data.source, ind_data.value);
+        END IF;
+
+        IF ind_data.browse_field AND NOT b_skip_browse THEN
+            -- A caveat about this SELECT: this should take care of replacing
+            -- old mbe rows when data changes, but not if normalization (by
+            -- which I mean specifically the output of
+            -- evergreen.oils_tsearch2()) changes.  It may or may not be
+            -- expensive to add a comparison of index_vector to index_vector
+            -- to the WHERE clause below.
+
+            CONTINUE WHEN ind_data.sort_value IS NULL;
+
+            value_prepped := metabib.browse_normalize(ind_data.value, ind_data.field);
+
+            combo_sort_val := 
+                SUBSTR((ind_data.sort_value || value_prepped), 1, 2048); -- cap length to avoid btree index-size failures
+
+            SELECT INTO mbe_row * FROM metabib.browse_entry -- dedupe: reuse matching entry
+                WHERE value = value_prepped AND sort_value = ind_data.sort_value;
+
+            IF FOUND THEN
+                mbe_id := mbe_row.id;
+            ELSE
+                INSERT INTO metabib.browse_entry
+                    ( value, sort_value, combo_sort_value, metabib_fields_cache) 
+                    VALUES
+                    ( value_prepped, ind_data.sort_value, combo_sort_val, '{}' );
+
+                mbe_id := CURRVAL('metabib.browse_entry_id_seq'::REGCLASS);
+            END IF;
+
+            INSERT INTO metabib.browse_entry_def_map (entry, def, source, authority)
+                VALUES (mbe_id, ind_data.field, ind_data.source, ind_data.authority);
+        END IF;
+
+        IF ind_data.search_field AND NOT b_skip_search THEN
+            -- Avoid inserting duplicate rows
+            EXECUTE 'SELECT 1 FROM metabib.' || ind_data.field_class ||
+                '_field_entry WHERE field = $1 AND source = $2 AND value = $3'
+                INTO mbe_id USING ind_data.field, ind_data.source, ind_data.value;
+                -- RAISE NOTICE 'Search for an already matching row returned %', mbe_id;
+            IF mbe_id IS NULL THEN
+                EXECUTE $$
+                INSERT INTO metabib.$$ || ind_data.field_class || $$_field_entry (field, source, value)
+                    VALUES ($$ ||
+                        quote_literal(ind_data.field) || $$, $$ ||
+                        quote_literal(ind_data.source) || $$, $$ ||
+                        quote_literal(ind_data.value) ||
+                    $$);$$;
+            END IF;
+        END IF;
+
+    END LOOP;
+
+    IF NOT b_skip_search THEN
+        PERFORM metabib.update_combined_index_vectors(bib_id);
+    END IF;
+
+    RETURN;
+END;
+$func$ LANGUAGE PLPGSQL;
+
+CREATE OR REPLACE FUNCTION metabib.staged_browse(
+    query                   TEXT,
+    fields                  INT[],
+    context_org             INT,
+    context_locations       INT[],
+    staff                   BOOL,
+    browse_superpage_size   INT,
+    count_up_from_zero      BOOL,   -- if false, count down from -1
+    result_limit            INT,
+    next_pivot_pos          INT,
+    ccvm_ctype              TEXT DEFAULT NULL,
+    ccvm_code               TEXT DEFAULT NULL
+) RETURNS SETOF metabib.flat_browse_entry_appearance AS $p$
+DECLARE
+    curs                    REFCURSOR;
+    rec                     RECORD;
+    qpfts_query             TEXT;
+    aqpfts_query            TEXT;
+    afields                 INT[];
+    bfields                 INT[];
+    result_row              metabib.flat_browse_entry_appearance%ROWTYPE;
+    results_skipped         INT := 0;
+    row_counter             INT := 0;
+    row_number              INT;
+    slice_start             INT;
+    slice_end               INT;
+    full_end                INT;
+    all_records             BIGINT[];
+    all_brecords             BIGINT[];
+    all_arecords            BIGINT[];
+    superpage_of_records    BIGINT[];
+    superpage_size          INT;
+BEGIN
+    IF count_up_from_zero THEN
+        row_number := 0;
+    ELSE
+        row_number := -1;
+    END IF;
+
+    OPEN curs FOR EXECUTE query;
+
+    LOOP
+        FETCH curs INTO rec;
+        IF NOT FOUND THEN
+            IF result_row.pivot_point IS NOT NULL THEN
+                RETURN NEXT result_row;
+            END IF;
+            RETURN;
+        END IF;
+
+
+        -- Gather aggregate data based on the MBE row we're looking at now, authority axis
+        SELECT INTO all_arecords, result_row.sees, afields
+                ARRAY_AGG(DISTINCT abl.bib), -- bibs to check for visibility
+                STRING_AGG(DISTINCT aal.source::TEXT, $$,$$), -- authority record ids
+                ARRAY_AGG(DISTINCT map.metabib_field) -- authority-tag-linked CMF rows
+
+          FROM  metabib.browse_entry_simple_heading_map mbeshm
+                JOIN authority.simple_heading ash ON ( mbeshm.simple_heading = ash.id )
+                JOIN authority.authority_linking aal ON ( ash.record = aal.source )
+                JOIN authority.bib_linking abl ON ( aal.target = abl.authority )
+                JOIN authority.control_set_auth_field_metabib_field_map_refs map ON (
+                    ash.atag = map.authority_field
+                    AND map.metabib_field = ANY(fields)
+                )
+          WHERE mbeshm.entry = rec.id;
+
+
+        -- Gather aggregate data based on the MBE row we're looking at now, bib axis
+        SELECT INTO all_brecords, result_row.authorities, bfields
+                ARRAY_AGG(DISTINCT source),
+                STRING_AGG(DISTINCT authority::TEXT, $$,$$),
+                ARRAY_AGG(DISTINCT def)
+          FROM  metabib.browse_entry_def_map
+          WHERE entry = rec.id
+                AND def = ANY(fields);
+
+        SELECT INTO result_row.fields STRING_AGG(DISTINCT x::TEXT, $$,$$) FROM UNNEST(afields || bfields) x;
+
+        result_row.sources := 0;
+        result_row.asources := 0;
+
+        /** Remove refs to linked bib records that do not 
+            match the requested ccvm filter */
+        IF ccvm_ctype IS NOT NULL THEN
+
+            -- bib-linked bibs
+            IF ARRAY_UPPER(all_brecords, 1) IS NOT NULL THEN
+                SELECT INTO all_brecords ARRAY_AGG(DISTINCT vec.source)
+                FROM metabib.record_attr_vector_list vec
+                JOIN config.coded_value_map ccvm
+                    ON (
+                        ccvm.ctype = ccvm_ctype
+                        AND ccvm.code = ccvm_code
+                        AND ccvm.id = ANY (vec.vlist)
+                    )
+                WHERE vec.source = ANY(all_brecords);
+            END IF;
+
+            -- auth-linked bibs
+            IF ARRAY_UPPER(all_arecords, 1) IS NOT NULL THEN
+                SELECT INTO all_arecords ARRAY_AGG(DISTINCT vec.source)
+                FROM metabib.record_attr_vector_list vec
+                JOIN config.coded_value_map ccvm
+                    ON (
+                        ccvm.ctype = ccvm_ctype
+                        AND ccvm.code = ccvm_code
+                        AND ccvm.id = ANY (vec.vlist)
+                    )
+                WHERE vec.source = ANY(all_arecords);
+            END IF;
+        END IF;
+
+        -- Bib-linked vis checking
+        IF ARRAY_UPPER(all_brecords,1) IS NOT NULL THEN
+
+            full_end := ARRAY_LENGTH(all_brecords, 1);
+            superpage_size := COALESCE(browse_superpage_size, full_end);
+            slice_start := 1;
+            slice_end := superpage_size;
+
+            WHILE result_row.sources = 0 AND slice_start <= full_end LOOP
+                superpage_of_records := all_brecords[slice_start:slice_end];
+                qpfts_query :=
+                    'SELECT NULL::BIGINT AS id, ARRAY[r] AS records, ' ||
+                    'NULL AS badges, NULL::NUMERIC AS popularity, ' ||
+                    '1::NUMERIC AS rel FROM (SELECT UNNEST(' ||
+                    quote_literal(superpage_of_records) || '::BIGINT[]) AS r) rr';
+
+                -- We use search.query_parser_fts() for visibility testing.
+                -- We're calling it once per browse-superpage worth of records
+                -- out of the set of records related to a given mbe, until we've
+                -- either exhausted that set of records or found at least 1
+                -- visible record.
+
+                SELECT INTO result_row.sources visible
+                    FROM search.query_parser_fts(
+                        context_org, NULL, qpfts_query, NULL,
+                        context_locations, 0, NULL, NULL, FALSE, staff, FALSE
+                    ) qpfts
+                    WHERE qpfts.rel IS NULL;
+
+                slice_start := slice_start + superpage_size;
+                slice_end := slice_end + superpage_size;
+            END LOOP;
+
+            -- Accurate?  Well, probably.
+            result_row.accurate := browse_superpage_size IS NULL OR
+                browse_superpage_size >= full_end;
+
+        END IF;
+
+        -- Authority-linked vis checking
+        IF ARRAY_UPPER(all_arecords,1) IS NOT NULL THEN
+
+            full_end := ARRAY_LENGTH(all_arecords, 1);
+            superpage_size := COALESCE(browse_superpage_size, full_end);
+            slice_start := 1;
+            slice_end := superpage_size;
+
+            WHILE result_row.asources = 0 AND slice_start <= full_end LOOP
+                superpage_of_records := all_arecords[slice_start:slice_end];
+                qpfts_query :=
+                    'SELECT NULL::BIGINT AS id, ARRAY[r] AS records, ' ||
+                    'NULL AS badges, NULL::NUMERIC AS popularity, ' ||
+                    '1::NUMERIC AS rel FROM (SELECT UNNEST(' ||
+                    quote_literal(superpage_of_records) || '::BIGINT[]) AS r) rr';
+
+                -- We use search.query_parser_fts() for visibility testing.
+                -- We're calling it once per browse-superpage worth of records
+                -- out of the set of records related to a given mbe, via
+                -- authority until we've either exhausted that set of records
+                -- or found at least 1 visible record.
+
+                SELECT INTO result_row.asources visible
+                    FROM search.query_parser_fts(
+                        context_org, NULL, qpfts_query, NULL,
+                        context_locations, 0, NULL, NULL, FALSE, staff, FALSE
+                    ) qpfts
+                    WHERE qpfts.rel IS NULL;
+
+                slice_start := slice_start + superpage_size;
+                slice_end := slice_end + superpage_size;
+            END LOOP;
+
+
+            -- Accurate?  Well, probably.
+            result_row.aaccurate := browse_superpage_size IS NULL OR
+                browse_superpage_size >= full_end;
+
+        END IF;
+
+        IF result_row.sources > 0 OR result_row.asources > 0 THEN
+
+            -- The function that calls this function needs row_number in order
+            -- to correctly order results from two different runs of this
+            -- functions.
+            result_row.row_number := row_number;
+
+            -- Now, if row_counter is still less than limit, return a row.  If
+            -- not, but it is less than next_pivot_pos, continue on without
+            -- returning actual result rows until we find
+            -- that next pivot, and return it.
+
+            IF row_counter < result_limit THEN
+                result_row.browse_entry := rec.id;
+                result_row.value := rec.value;
+
+                RETURN NEXT result_row;
+            ELSE
+                result_row.browse_entry := NULL;
+                result_row.authorities := NULL;
+                result_row.fields := NULL;
+                result_row.value := NULL;
+                result_row.sources := NULL;
+                result_row.sees := NULL;
+                result_row.accurate := NULL;
+                result_row.aaccurate := NULL;
+                result_row.pivot_point := rec.id;
+
+                IF row_counter >= next_pivot_pos THEN
+                    RETURN NEXT result_row;
+                    RETURN;
+                END IF;
+            END IF;
+
+            IF count_up_from_zero THEN
+                row_number := row_number + 1;
+            ELSE
+                row_number := row_number - 1;
+            END IF;
+
+            -- row_counter is different from row_number.
+            -- It simply counts up from zero so that we know when
+            -- we've reached our limit.
+            row_counter := row_counter + 1;
+        END IF;
+    END LOOP;
+END;
+$p$ LANGUAGE PLPGSQL;
+
+
+-- Stock-style browse driver.  Given a set of config.metabib_field IDs and
+-- a browse term (or an explicit pivot entry ID), returns one "page" of
+-- browse entries centered on the pivot by scanning backward and forward
+-- over metabib.browse_entry.combo_sort_value.
+--
+-- ccvm_ctype / ccvm_code optionally restrict results to entries whose
+-- source records carry the given coded-value-map attribute; both must be
+-- supplied for the filter to take effect.
+CREATE OR REPLACE FUNCTION metabib.browse(
+    search_field            INT[],
+    browse_term             TEXT,
+    context_org             INT DEFAULT NULL,
+    context_loc_group       INT DEFAULT NULL,
+    staff                   BOOL DEFAULT FALSE,
+    pivot_id                BIGINT DEFAULT NULL,
+    result_limit            INT DEFAULT 10,
+    ccvm_ctype              TEXT DEFAULT NULL,
+    ccvm_code               TEXT DEFAULT NULL
+) RETURNS SETOF metabib.flat_browse_entry_appearance AS $p$
+DECLARE
+    core_query              TEXT;
+    back_query              TEXT;
+    forward_query           TEXT;
+    pivot_sort_value        TEXT;
+    context_locations       INT[];
+    browse_superpage_size   INT;
+    back_limit              INT;
+    back_to_pivot           INT;
+    forward_limit           INT;
+    forward_to_pivot        INT;
+    ccvm_filter_join        TEXT := '';
+BEGIN
+    -- First, find the pivot if we were given a browse term but not a pivot.
+    IF pivot_id IS NULL THEN
+        pivot_id := metabib.browse_pivot(search_field, browse_term);
+    END IF;
+
+    SELECT INTO pivot_sort_value combo_sort_value
+        FROM metabib.browse_entry WHERE id = pivot_id;
+
+    -- Bail if we couldn't find a pivot.
+    IF pivot_sort_value IS NULL THEN
+        RETURN;
+    END IF;
+
+    -- Transform the context_loc_group argument (if any) (logc at the
+    -- TPAC layer) into a form we'll be able to use.
+    IF context_loc_group IS NOT NULL THEN
+        SELECT INTO context_locations ARRAY_AGG(location)
+            FROM asset.copy_location_group_map
+            WHERE lgroup = context_loc_group;
+    END IF;
+
+    -- Get the configured size of browse superpages.
+    SELECT INTO browse_superpage_size value     -- NULL ok
+        FROM config.global_flag
+        WHERE enabled AND name = 'opac.browse.holdings_visibility_test_limit';
+
+    -- First we're going to search backward from the pivot, then we're going
+    -- to search forward.  In each direction, we need two limits.  At the
+    -- lesser of the two limits, we delineate the edge of the result set
+    -- we're going to return.  At the greater of the two limits, we find the
+    -- pivot value that would represent an offset from the current pivot
+    -- at a distance of one "page" in either direction, where a "page" is a
+    -- result set of the size specified in the "result_limit" argument.
+    --
+    -- The two limits in each direction make four derived values in total,
+    -- and we calculate them now.
+    back_limit := CEIL(result_limit::FLOAT / 2);
+    back_to_pivot := result_limit;
+    forward_limit := result_limit / 2;
+    forward_to_pivot := result_limit - 1;
+
+    -- Both values are required: QUOTE_LITERAL(NULL) yields NULL, which
+    -- would propagate through the string concatenation below and nullify
+    -- the entire core query.  Treat a half-specified filter as no filter.
+    IF ccvm_ctype IS NOT NULL AND ccvm_code IS NOT NULL THEN
+        ccvm_filter_join := $x$
+            JOIN metabib.browse_entry_def_map mbedm ON (mbedm.entry = mbe.id)
+            JOIN metabib.record_attr_vector_list vec ON (vec.source = mbedm.source)
+            JOIN config.coded_value_map ccvm ON (
+                ccvm.ctype = $x$ || QUOTE_LITERAL(ccvm_ctype) || $x$ 
+                AND ccvm.id = ANY (vec.vlist)
+                AND ccvm.code = $x$ || QUOTE_LITERAL(ccvm_code) || $x$
+            )
+        $x$;
+    END IF;
+
+    -- This is the meat of the SQL query that finds browse entries.  We'll
+    -- pass this to a function which uses it with a cursor, so that individual
+    -- rows may be fetched in a loop until some condition is satisfied, without
+    -- waiting for a result set of fixed size to be collected all at once.
+    core_query := '
+SELECT  mbe.id,
+        mbe.value,
+        mbe.sort_value
+  FROM  metabib.browse_entry mbe
+        ' || ccvm_filter_join || '
+  WHERE mbe.metabib_fields_cache && ' || quote_literal(search_field) ||
+      ' AND ';
+
+    -- This is the variant of the query for browsing backward.
+    back_query := core_query ||
+        ' mbe.combo_sort_value <= ' || quote_literal(pivot_sort_value) ||
+    ' ORDER BY mbe.combo_sort_value DESC ';
+
+    -- This variant browses forward.
+    forward_query := core_query ||
+        ' mbe.combo_sort_value > ' || quote_literal(pivot_sort_value) ||
+    ' ORDER BY mbe.combo_sort_value ';
+
+    -- We now call the function which applies a cursor to the provided
+    -- queries, stopping at the appropriate limits and also giving us
+    -- the next page's pivot.
+    --
+    -- The two scans cover disjoint combo_sort_value ranges (<= vs. >), so
+    -- UNION ALL is safe and avoids the sort/dedup a plain UNION implies.
+    RETURN QUERY
+        SELECT * FROM metabib.staged_browse(
+            back_query, search_field, context_org, context_locations,
+            staff, browse_superpage_size, TRUE, back_limit, back_to_pivot,
+            ccvm_ctype, ccvm_code
+        ) UNION ALL
+        SELECT * FROM metabib.staged_browse(
+            forward_query, search_field, context_org, context_locations,
+            staff, browse_superpage_size, FALSE, forward_limit, forward_to_pivot,
+            ccvm_ctype, ccvm_code
+        ) ORDER BY row_number DESC;
+
+END;
+$p$ LANGUAGE PLPGSQL;
+
+-- Convenience overload: resolve a search class name (title/author/...)
+-- into its set of config.metabib_field IDs, then delegate to the
+-- array-based metabib.browse() above.  All other arguments pass through
+-- unchanged.
+CREATE OR REPLACE FUNCTION metabib.browse(
+    search_class        TEXT,
+    browse_term         TEXT,
+    context_org         INT DEFAULT NULL,
+    context_loc_group   INT DEFAULT NULL,
+    staff               BOOL DEFAULT FALSE,
+    pivot_id            BIGINT DEFAULT NULL,
+    result_limit        INT DEFAULT 10,
+    ccvm_ctype          TEXT DEFAULT NULL,
+    ccvm_code           TEXT DEFAULT NULL
+) RETURNS SETOF metabib.flat_browse_entry_appearance AS $p$
+DECLARE
+    class_fields INT[];
+BEGIN
+    -- An unknown class yields an empty (not NULL) array, matching the
+    -- COALESCE behavior of the original inline subquery.
+    SELECT COALESCE(ARRAY_AGG(id), ARRAY[]::INT[]) INTO class_fields
+        FROM config.metabib_field
+        WHERE field_class = search_class;
+
+    RETURN QUERY SELECT * FROM metabib.browse(
+        class_fields,
+        browse_term,
+        context_org,
+        context_loc_group,
+        staff,
+        pivot_id,
+        result_limit,
+        ccvm_ctype,
+        ccvm_code
+    );
+END;
+$p$ LANGUAGE PLPGSQL;
+
+-- AFTER UPDATE OR INSERT trigger for authority.record_entry
+-- Maintains derived authority data (simple headings, full_rec rows,
+-- authority-to-authority links) and the shared metabib.browse_entry /
+-- simple-heading map rows whenever an authority record changes.
+CREATE OR REPLACE FUNCTION authority.indexing_ingest_or_delete () RETURNS TRIGGER AS $func$
+DECLARE
+    ashs    authority.simple_heading%ROWTYPE;
+    mbe_row metabib.browse_entry%ROWTYPE;
+    mbe_id  BIGINT;
+    ash_id  BIGINT;
+    combo_sort_val TEXT;
+BEGIN
+
+    IF NEW.deleted IS TRUE THEN -- If this authority is deleted
+        DELETE FROM authority.bib_linking WHERE authority = NEW.id; -- Avoid updating fields in bibs that are no longer visible
+        DELETE FROM authority.full_rec WHERE record = NEW.id; -- Avoid validating fields against deleted authority records
+        DELETE FROM authority.simple_heading WHERE record = NEW.id;
+          -- Should remove matching $0 from controlled fields at the same time?
+
+        -- XXX What do we about the actual linking subfields present in
+        -- authority records that target this one when this happens?
+        DELETE FROM authority.authority_linking
+            WHERE source = NEW.id OR target = NEW.id;
+
+        RETURN NEW; -- and we're done
+    END IF;
+
+    IF TG_OP = 'UPDATE' THEN -- re-ingest?
+        PERFORM * FROM config.internal_flag WHERE name = 'ingest.reingest.force_on_same_marc' AND enabled;
+
+        IF NOT FOUND AND OLD.marc = NEW.marc THEN -- don't do anything if the MARC didn't change
+            RETURN NEW;
+        END IF;
+
+        -- Unless there's a setting stopping us, propagate these updates to any linked bib records when the heading changes
+        PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_authority_auto_update' AND enabled;
+
+        IF NOT FOUND AND NEW.heading <> OLD.heading THEN
+            PERFORM authority.propagate_changes(NEW.id);
+        END IF;
+    
+        -- Clear heading/link rows for this record; they are rebuilt below.
+        -- Note the browse_entry rows themselves are left in place.
+        DELETE FROM authority.simple_heading WHERE record = NEW.id;
+        DELETE FROM authority.authority_linking WHERE source = NEW.id;
+    END IF;
+
+    INSERT INTO authority.authority_linking (source, target, field)
+        SELECT source, target, field FROM authority.calculate_authority_linking(
+            NEW.id, NEW.control_set, NEW.marc::XML
+        );
+
+    -- Rebuild simple headings from the MARC and tie each one to a
+    -- (possibly pre-existing) shared browse entry.
+    FOR ashs IN SELECT * FROM authority.simple_heading_set(NEW.marc) LOOP
+
+        INSERT INTO authority.simple_heading (record,atag,value,sort_value,thesaurus)
+        VALUES (ashs.record, ashs.atag, ashs.value, ashs.sort_value, ashs.thesaurus);
+
+        -- CURRVAL is safe here: it reflects the INSERT just above in this session.
+        ash_id := CURRVAL('authority.simple_heading_id_seq'::REGCLASS);
+
+        -- combo_sort_value is capped at 2048 chars; KCLS-local field used
+        -- for browse ordering and to keep its index within size limits.
+        combo_sort_val := SUBSTR((ashs.sort_value || ashs.value), 1, 2048);
+
+        SELECT INTO mbe_row * FROM metabib.browse_entry
+            WHERE value = ashs.value AND sort_value = ashs.sort_value;
+
+        IF FOUND THEN
+            mbe_id := mbe_row.id;
+        ELSE
+            -- metabib_fields_cache starts empty; presumably filled in by
+            -- bib-side ingest when records use this entry -- confirm.
+            INSERT INTO metabib.browse_entry
+                ( value, sort_value, combo_sort_value, metabib_fields_cache ) VALUES
+                ( ashs.value, ashs.sort_value, combo_sort_val, '{}' );
+
+            mbe_id := CURRVAL('metabib.browse_entry_id_seq'::REGCLASS);
+        END IF;
+
+        INSERT INTO metabib.browse_entry_simple_heading_map (entry,simple_heading) VALUES (mbe_id,ash_id);
+
+    END LOOP;
+
+    -- Flatten and insert the afr data
+    PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_authority_full_rec' AND enabled;
+    IF NOT FOUND THEN
+        PERFORM authority.reingest_authority_full_rec(NEW.id);
+        -- rec_descriptor depends on full_rec, hence the nested check.
+        PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_authority_rec_descriptor' AND enabled;
+        IF NOT FOUND THEN
+            PERFORM authority.reingest_authority_rec_descriptor(NEW.id);
+        END IF;
+    END IF;
+
+    RETURN NEW;
+END;
+$func$ LANGUAGE PLPGSQL;
+
+
+-- DROP UNNEEDED LOCAL FUNCTIONS
+-- These per-class pivot helpers were KCLS-local; the stock
+-- metabib.browse_pivot() path replaces them.  IF EXISTS keeps the
+-- migration idempotent on databases where some never existed.
+DROP FUNCTION IF EXISTS metabib.browse_title_authority_refs_pivot(INTEGER[], TEXT);
+DROP FUNCTION IF EXISTS metabib.browse_title_bib_pivot(INTEGER[], TEXT);
+DROP FUNCTION IF EXISTS metabib.browse_title_pivot(INTEGER[], TEXT);
+
+DROP FUNCTION IF EXISTS metabib.browse_author_authority_refs_pivot(INTEGER[], TEXT);
+DROP FUNCTION IF EXISTS metabib.browse_author_bib_pivot(INTEGER[], TEXT);
+DROP FUNCTION IF EXISTS metabib.browse_author_pivot(INTEGER[], TEXT);
+
+DROP FUNCTION IF EXISTS metabib.browse_subject_authority_refs_pivot(INTEGER[], TEXT);
+DROP FUNCTION IF EXISTS metabib.browse_subject_bib_pivot(INTEGER[], TEXT);
+DROP FUNCTION IF EXISTS metabib.browse_subject_pivot(INTEGER[], TEXT);
+
+DROP FUNCTION IF EXISTS metabib.browse_series_authority_refs_pivot(INTEGER[], TEXT);
+DROP FUNCTION IF EXISTS metabib.browse_series_bib_pivot(INTEGER[], TEXT);
+DROP FUNCTION IF EXISTS metabib.browse_series_pivot(INTEGER[], TEXT);
+
+-- call_number pivots take only the term; there is no field-ID variant.
+DROP FUNCTION IF EXISTS metabib.browse_call_number_authority_refs_pivot(TEXT);
+DROP FUNCTION IF EXISTS metabib.browse_call_number_bib_pivot(TEXT);
+DROP FUNCTION IF EXISTS metabib.browse_call_number_pivot(TEXT);
+
+-- One-shot migration helper (dropped at the end of this deploy script).
+-- Copies a single row from a legacy class-specific browse table
+-- (metabib.browse_<class>_entry) into the stock metabib.browse_entry
+-- table, de-duplicating on (value, sort_value), then migrates the
+-- matching def-map and simple-heading-map rows.
+--
+-- legacy_entry: the legacy row, coerced to the stock rowtype by the caller
+-- field_class:  one of title/author/subject/series/call_number
+-- force:        TRUE means the caller guarantees no duplicates exist,
+--               so the (value, sort_value) lookup is skipped.
+CREATE OR REPLACE FUNCTION metabib.migrate_legacy_browse_entry(
+    legacy_entry metabib.browse_entry, field_class TEXT, force BOOLEAN DEFAULT FALSE)
+    RETURNS VOID AS
+$FUNK$
+DECLARE
+    new_entry_id BIGINT;
+    existing_entry metabib.browse_entry%ROWTYPE;
+BEGIN
+
+    -- See if we have an existing entry to merge into.
+    existing_entry.id := NULL;
+    new_entry_id := NULL;
+
+    IF NOT force THEN
+        SELECT INTO existing_entry * 
+        FROM metabib.browse_entry entry
+        WHERE   entry.value = legacy_entry.value 
+            AND entry.sort_value = legacy_entry.sort_value;
+    END IF;
+
+    IF existing_entry.id IS NOT NULL THEN
+        new_entry_id := existing_entry.id;
+
+        -- When merging, always keep the oldest create_date.
+        IF legacy_entry.create_date < existing_entry.create_date THEN
+            UPDATE metabib.browse_entry 
+            SET create_date = legacy_entry.create_date
+            WHERE id = existing_entry.id;
+        END IF;
+
+    ELSE
+        -- No match (or force): insert a fresh row.  Legacy entries lack the
+        -- truncated combo sort value (sort_value || value, capped at 2048
+        -- chars for index-size reasons); compute it here.
+        INSERT INTO metabib.browse_entry 
+            (value, index_vector, sort_value, combo_sort_value)
+        SELECT 
+            legacy_entry.value, 
+            legacy_entry.index_vector, 
+            legacy_entry.sort_value,
+            SUBSTR((legacy_entry.sort_value || legacy_entry.value), 1, 2048)
+        RETURNING id INTO new_entry_id;
+
+    END IF;
+
+    -- Now migrate the def-map and simple-heading-map rows.  The legacy
+    -- table name is derived from field_class, which is whitelisted below
+    -- before being spliced into dynamic SQL.
+    IF field_class IN ('title', 'author', 'subject', 'series', 'call_number') THEN
+
+        EXECUTE FORMAT(
+            'INSERT INTO metabib.browse_entry_def_map
+                (entry, def, source, authority)
+             SELECT $1, legacy.def, legacy.source, legacy.authority
+             FROM metabib.browse_%s_entry_def_map legacy
+             WHERE legacy.entry = $2', field_class)
+        USING new_entry_id, legacy_entry.id;
+
+        -- call_number has no legacy simple-heading map table.
+        IF field_class <> 'call_number' THEN
+            EXECUTE FORMAT(
+                'INSERT INTO metabib.browse_entry_simple_heading_map
+                    (entry, simple_heading)
+                 SELECT $1, legacy.simple_heading
+                 FROM metabib.browse_%s_entry_simple_heading_map legacy
+                 WHERE legacy.entry = $2', field_class)
+            USING new_entry_id, legacy_entry.id;
+        END IF;
+
+    END IF;
+
+END $FUNK$ LANGUAGE PLPGSQL;
+
+-- AFTER UPDATE OR INSERT trigger for biblio.record_entry
+-- Rebuilds all derived bib data (full_rec, record attributes, field
+-- entries, located URIs, metarecord mapping) on ingest, and tears the
+-- derived data down on delete.  Each step can be disabled via the
+-- corresponding config.internal_flag row.
+CREATE OR REPLACE FUNCTION biblio.indexing_ingest_or_delete () RETURNS TRIGGER AS $func$
+DECLARE
+    tmp_bool BOOL;
+BEGIN
+
+    IF NEW.deleted THEN -- If this bib is deleted
+
+        PERFORM * FROM config.internal_flag WHERE
+            name = 'ingest.metarecord_mapping.preserve_on_delete' AND enabled;
+
+        tmp_bool := FOUND; -- Just in case this is changed by some other statement
+
+        PERFORM metabib.remap_metarecord_for_bib( NEW.id, NEW.fingerprint, TRUE, tmp_bool );
+
+        IF NOT tmp_bool THEN
+            -- One needs to keep these around to support searches
+            -- with the #deleted modifier, so one should turn on the named
+            -- internal flag for that functionality.
+            DELETE FROM metabib.record_attr_vector_list WHERE source = NEW.id;
+        END IF;
+
+        DELETE FROM authority.bib_linking WHERE bib = NEW.id; -- Avoid updating fields in bibs that are no longer visible
+        DELETE FROM biblio.peer_bib_copy_map WHERE peer_record = NEW.id; -- Separate any multi-homed items
+        DELETE FROM metabib.browse_entry_def_map WHERE source = NEW.id; -- Don't auto-suggest deleted bibs
+        RETURN NEW; -- and we're done
+    END IF;
+
+    IF TG_OP = 'UPDATE' THEN -- re-ingest?
+        PERFORM * FROM config.internal_flag WHERE name = 'ingest.reingest.force_on_same_marc' AND enabled;
+
+        IF NOT FOUND AND OLD.marc = NEW.marc THEN -- don't do anything if the MARC didn't change
+            RETURN NEW;
+        END IF;
+    END IF;
+
+    -- Record authority linking
+    PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_authority_linking' AND enabled;
+    IF NOT FOUND THEN
+        PERFORM biblio.map_authority_linking( NEW.id, NEW.marc );
+    END IF;
+
+    -- Flatten and insert the mfr data
+    PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_metabib_full_rec' AND enabled;
+    IF NOT FOUND THEN
+        PERFORM metabib.reingest_metabib_full_rec(NEW.id);
+
+        -- Now we pull out attribute data, which is dependent on the mfr for all but XPath-based fields
+        PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_metabib_rec_descriptor' AND enabled;
+        IF NOT FOUND THEN
+            -- NOTE(review): OLD.deleted is referenced on the INSERT path
+            -- here, relying on TG_OP = 'INSERT' short-circuiting first --
+            -- this matches upstream stock code, but confirm on upgrades.
+            PERFORM metabib.reingest_record_attributes(NEW.id, NULL, NEW.marc, TG_OP = 'INSERT' OR OLD.deleted);
+        END IF;
+    END IF;
+
+    -- Gather and insert the field entry data
+    PERFORM metabib.reingest_metabib_field_entries(NEW.id);
+
+    -- Located URI magic.  The INSERT and UPDATE paths were previously
+    -- duplicated verbatim; a single flag-guarded call is equivalent.
+    PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_located_uri' AND enabled;
+    IF NOT FOUND THEN
+        PERFORM biblio.extract_located_uris( NEW.id, NEW.marc, NEW.editor );
+    END IF;
+
+    -- (re)map metarecord-bib linking
+    IF TG_OP = 'INSERT' THEN -- if not deleted and performing an insert, check for the flag
+        PERFORM * FROM config.internal_flag WHERE name = 'ingest.metarecord_mapping.skip_on_insert' AND enabled;
+        IF NOT FOUND THEN
+            PERFORM metabib.remap_metarecord_for_bib( NEW.id, NEW.fingerprint );
+        END IF;
+    ELSE -- we're doing an update, and we're not deleted, remap
+        PERFORM * FROM config.internal_flag WHERE name = 'ingest.metarecord_mapping.skip_on_update' AND enabled;
+        IF NOT FOUND THEN
+            PERFORM metabib.remap_metarecord_for_bib( NEW.id, NEW.fingerprint );
+        END IF;
+    END IF;
+
+    RETURN NEW;
+END;
+$func$ LANGUAGE PLPGSQL;
+
+SELECT CLOCK_TIMESTAMP(), 'Migrating legacy entries';
+
+-- Walk every legacy class-specific browse table and migrate each row
+-- into the stock tables via metabib.migrate_legacy_browse_entry().
+-- The table name is built from the class, so one loop covers all five
+-- previously copy-pasted migration loops.
+DO $MIGRATE$
+DECLARE
+    legacy_entry metabib.browse_entry%ROWTYPE;
+    field_class TEXT;
+    counter BIGINT;
+BEGIN
+
+    FOREACH field_class IN ARRAY
+        ARRAY['title', 'author', 'subject', 'series', 'call_number'] LOOP
+
+        RAISE NOTICE '% Migrating % entries', CLOCK_TIMESTAMP(), field_class;
+
+        counter := 0;
+        FOR legacy_entry IN EXECUTE
+            FORMAT('SELECT * FROM metabib.browse_%s_entry', field_class) LOOP
+
+            -- Title entries are migrated first and are known to contain
+            -- no duplicates, so they skip the dedup lookup (force = TRUE).
+            PERFORM metabib.migrate_legacy_browse_entry(
+                legacy_entry, field_class, field_class = 'title');
+
+            counter := counter + 1;
+            IF (counter % 50000) = 0 THEN
+                RAISE NOTICE 'Migrated % entries.', counter;
+            END IF;
+        END LOOP;
+
+    END LOOP;
+
+END $MIGRATE$;
+
+-- function is unused
+-- NOTE(review): these DROPs (unlike the pivot drops above) omit IF
+-- EXISTS, so a missing function aborts the whole transaction -- verify
+-- that fail-fast is the intent for this deploy.
+DROP FUNCTION authority.unlinked_bibs_to_given_auth_text(TEXT, TEXT);
+
+-- drop legacy functions
+DROP FUNCTION metabib.get_browse_author_entry_marc_record(BIGINT);
+DROP FUNCTION metabib.get_browse_series_entry_marc_record(BIGINT);
+DROP FUNCTION metabib.get_browse_subject_entry_marc_record(BIGINT);
+DROP FUNCTION metabib.browse_table_bounds(TEXT, TEXT, INTEGER); 
+
+-- truncated_sort_value is superseded by combo_sort_value.
+ALTER TABLE metabib.browse_entry DROP COLUMN truncated_sort_value;
+
+-- Index the new combined sort key used by metabib.browse()'s
+-- backward/forward range scans.
+CREATE INDEX browse_entry_combo_sort_value_idx 
+    ON metabib.browse_entry USING btree(combo_sort_value);
+
+-- The migration helper is one-shot; drop it now that the data is moved.
+DROP FUNCTION metabib.migrate_legacy_browse_entry(
+    metabib.browse_entry, TEXT, BOOLEAN);
+
+COMMIT;
+
+--ROLLBACK;
+
diff --git a/KCLS/sql/schema/revert/stock-browse-cleanup.sql b/KCLS/sql/schema/revert/stock-browse-cleanup.sql
new file mode 100644 (file)
index 0000000..7cc4baa
--- /dev/null
@@ -0,0 +1,7 @@
+-- Revert kcls-evergreen:stock-browse-cleanup from pg
+
+BEGIN;
+
+-- XXX Add DDLs here.
+
+COMMIT;
diff --git a/KCLS/sql/schema/revert/stock-browse-headings-report.sql b/KCLS/sql/schema/revert/stock-browse-headings-report.sql
new file mode 100644 (file)
index 0000000..784cd34
--- /dev/null
@@ -0,0 +1,7 @@
+-- Revert kcls-evergreen:stock-browse-headings-report from pg
+
+BEGIN;
+
+-- XXX Add DDLs here.
+
+COMMIT;
diff --git a/KCLS/sql/schema/revert/stock-browse-schema.sql b/KCLS/sql/schema/revert/stock-browse-schema.sql
new file mode 100644 (file)
index 0000000..7ecadd3
--- /dev/null
@@ -0,0 +1,5 @@
+-- Revert kcls-evergreen:stock-browse-schema from pg
+
+BEGIN;
+
+COMMIT;
index a71d6a2..c8143ad 100644 (file)
@@ -72,3 +72,6 @@ acq-inv-close-fields [search-index-keep-periods] 2018-04-12T19:03:29Z Bill Erick
 ecard-data [2.10-to-2.12-upgrade] 2018-01-03T21:55:03Z Bill Erickson,,, <berick@kcls-dev-local> # Ecard lib settings, policy data, etc.
 remove-gender [ecard-data] 2018-06-06T14:44:36Z Bill Erickson,,, <berick@kcls-dev-local> # Remove gender field/data
 ecard-notice-validator [remove-gender] 2018-07-26T14:33:57Z Bill Erickson,,, <berick@kcls-dev-local> # eCard UMS notice validator
+stock-browse-schema [ecard-notice-validator] 2018-08-31T15:22:58Z Bill Erickson,,, <berick@kcls-dev-local> # Recover stock browse data tables, etc.
+stock-browse-headings-report [stock-browse-schema] 2018-10-04T15:56:18Z Bill Erickson,,, <berick@kcls-dev> # New heading report updates for stock browse
+stock-browse-cleanup [stock-browse-schema] 2018-10-03T18:05:49Z Bill Erickson,,, <berick@kcls-dev> # Delete old browse data
index 87121ae..7de991e 100644 (file)
@@ -58,6 +58,10 @@ BACKSTAGE_PASSWORD = BSPASS
 # batch job to remove inacitve patron cards per HS#18605 ESI/Galen Charlton 2012-03-27
 45 0 * * *  . ~/.bashrc &&  cd $SCRIPT_DIR/inactive_cards && ./remove_inactive_patron_cards.sh
 
+# Refresh the new headings report materialized view nightly.
+# This is relatively fast, so it could be run more often if necessary.
+30 4 * * * . ~/.bashrc && cd $SCRIPT_DIR/headings_report && ./refresh_mat_view.sh
+
 # Update copy status for BC
 #0 22 * * * . ~/.bashrc && cd $SCRIPT_DIR/bc_missing/ && ./bc_missing.sh
 
diff --git a/KCLS/utility-scripts/headings_report/refresh_mat_view.sh b/KCLS/utility-scripts/headings_report/refresh_mat_view.sh
new file mode 100755 (executable)
index 0000000..aef5855
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Refresh the new-headings report materialized view
+# (reporter.materialized_cataloged_browse_entry).
+# Set PGHOST, PGPASSWORD, PGUSER environment variables!
+set -eu
+PSQL="psql"
+
+echo -n "Refreshing materialized headings view at "
+date +"%F %T" 
+
+# STATEMENT_TIMEOUT is disabled because the refresh can exceed the default.
+echo "SET STATEMENT_TIMEOUT = 0; REFRESH MATERIALIZED VIEW reporter.materialized_cataloged_browse_entry;" | $PSQL;
+
+echo -n "Refreshing materialized headings view complete at "
+date +"%F %T" 
index dadc96d..52f3f66 100644 (file)
@@ -4112,36 +4112,6 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA
                        <link field="def_maps" reltype="has_many" key="entry" map="" class="mbedm"/>
                </links>
        </class>
-       <class id="mbae" controller="open-ils.cstore open-ils.pcrud" oils_obj:fieldmapper="metabib::browse_author_entry" oils_persist:tablename="metabib.browse_author_entry" reporter:label="Combined Author Browse Entry" oils_persist:readonly="true">
-               <fields oils_persist:primary="id" oils_persist:sequence="metabib.browse_author_entry_id_seq">
-                       <field name="id" reporter:datatype="id" />
-                       <field name="value" reporter:datatype="text"/>
-                       <field name="def_maps" oils_persist:virtual="true" reporter:datatype="link"/>
-               </fields>
-               <links>
-                       <link field="def_maps" reltype="has_many" key="entry" map="" class="mbaedm"/>
-               </links>
-       </class>
-       <class id="mbse" controller="open-ils.cstore open-ils.pcrud" oils_obj:fieldmapper="metabib::browse_subject_entry" oils_persist:tablename="metabib.browse_subject_entry" reporter:label="Combined Subject Browse Entry" oils_persist:readonly="true">
-               <fields oils_persist:primary="id" oils_persist:sequence="metabib.browse_subject_entry_id_seq">
-                       <field name="id" reporter:datatype="id" />
-                       <field name="value" reporter:datatype="text"/>
-                       <field name="def_maps" oils_persist:virtual="true" reporter:datatype="link"/>
-               </fields>
-               <links>
-                       <link field="def_maps" reltype="has_many" key="entry" map="" class="mbsedm"/>
-               </links>
-       </class>
-       <class id="mbsee" controller="open-ils.cstore open-ils.pcrud" oils_obj:fieldmapper="metabib::browse_series_entry" oils_persist:tablename="metabib.browse_series_entry" reporter:label="Combined Series Browse Entry" oils_persist:readonly="true">
-               <fields oils_persist:primary="id" oils_persist:sequence="metabib.browse_series_entry_id_seq">
-                       <field name="id" reporter:datatype="id" />
-                       <field name="value" reporter:datatype="text"/>
-                       <field name="def_maps" oils_persist:virtual="true" reporter:datatype="link"/>
-               </fields>
-               <links>
-                       <link field="def_maps" reltype="has_many" key="entry" map="" class="mbseedm"/>
-               </links>
-       </class>
        <class id="mbedm" controller="open-ils.cstore open-ils.pcrud" oils_obj:fieldmapper="metabib::browse_entry_def_map" oils_persist:tablename="metabib.browse_entry_def_map" reporter:label="Combined Browse Entry Definition Map" oils_persist:readonly="true">
                <fields oils_persist:primary="id" oils_persist:sequence="metabib.browse_entry_def_map_id_seq">
                        <field name="id" reporter:datatype="id" />
@@ -4166,46 +4136,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA
                        <link field="simple_heading" reltype="has_a" key="id" map="" class="ash"/>
                </links>
        </class>
-       <class id="mbaedm" controller="open-ils.cstore open-ils.pcrud" oils_obj:fieldmapper="metabib::browse_author_entry_def_map" oils_persist:tablename="metabib.browse_author_entry_def_map" reporter:label="Combined Author Browse Entry Definition Map" oils_persist:readonly="true">
-               <fields oils_persist:primary="id" oils_persist:sequence="metabib.browse_author_entry_def_map_id_seq">
-                       <field name="id" reporter:datatype="id" />
-                       <field name="entry" reporter:datatype="link"/>
-                       <field name="def" reporter:datatype="link"/>
-                       <field name="source" reporter:datatype="link"/>
-               </fields>
-               <links>
-                       <link field="entry" reltype="has_a" key="id" map="" class="mbae"/>
-                       <link field="def" reltype="has_a" key="id" map="" class="cmf"/>
-                       <link field="source" reltype="has_a" key="id" map="" class="bre"/>
-               </links>
-       </class>
-       <class id="mbsedm" controller="open-ils.cstore open-ils.pcrud" oils_obj:fieldmapper="metabib::browse_subject_entry_def_map" oils_persist:tablename="metabib.browse_subject_entry_def_map" reporter:label="Combined Subject Browse Entry Definition Map" oils_persist:readonly="true">
-               <fields oils_persist:primary="id" oils_persist:sequence="metabib.browse_subject_entry_def_map_id_seq">
-                       <field name="id" reporter:datatype="id" />
-                       <field name="entry" reporter:datatype="link"/>
-                       <field name="def" reporter:datatype="link"/>
-                       <field name="source" reporter:datatype="link"/>
-               </fields>
-               <links>
-                       <link field="entry" reltype="has_a" key="id" map="" class="mbse"/>
-                       <link field="def" reltype="has_a" key="id" map="" class="cmf"/>
-                       <link field="source" reltype="has_a" key="id" map="" class="bre"/>
-               </links>
-       </class>
-       <class id="mbseedm" controller="open-ils.cstore open-ils.pcrud" oils_obj:fieldmapper="metabib::browse_series_entry_def_map" oils_persist:tablename="metabib.browse_series_entry_def_map" reporter:label="Combined Series Browse Entry Definition Map" oils_persist:readonly="true">
-               <fields oils_persist:primary="id" oils_persist:sequence="metabib.browse_series_entry_def_map_id_seq">
-                       <field name="id" reporter:datatype="id" />
-                       <field name="entry" reporter:datatype="link"/>
-                       <field name="def" reporter:datatype="link"/>
-                       <field name="source" reporter:datatype="link"/>
-               </fields>
-               <links>
-                       <link field="entry" reltype="has_a" key="id" map="" class="mbsee"/>
-                       <link field="def" reltype="has_a" key="id" map="" class="cmf"/>
-                       <link field="source" reltype="has_a" key="id" map="" class="bre"/>
-               </links>
-       </class>
-    <class id="mbeshm" controller="open-ils.cstore open-ils.pcrud" oils_obj:fieldmapper="metabib::browse_entry_simple_heading_map" oils_persist:tablename="metabib.browse_entry_simple_heading_map" reporter:label="Combined Browse Entry Simple Authority Heading Map" oils_persist:readonly="true">
+  <class id="mbeshm" controller="open-ils.cstore open-ils.pcrud" oils_obj:fieldmapper="metabib::browse_entry_simple_heading_map" oils_persist:tablename="metabib.browse_entry_simple_heading_map" reporter:label="Combined Browse Entry Simple Authority Heading Map" oils_persist:readonly="true">
         <fields oils_persist:primary="id" oils_persist:sequence="metabib.browse_entry_simple_heading_map_id_seq">
             <field name="id" reporter:datatype="id" />
             <field name="entry" reporter:datatype="link"/>
@@ -4216,43 +4147,10 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA
             <link field="simple_heading" reltype="has_a" key="id" map="" class="ash"/>
         </links>
     </class>
-       <class id="mbaeshm" controller="open-ils.cstore open-ils.pcrud" oils_obj:fieldmapper="metabib::browse_author_entry_simple_heading_map" oils_persist:tablename="metabib.browse_author_entry_simple_heading_map" reporter:label="Combined Author Browse Entry Simple Authority Heading Map" oils_persist:readonly="true">
-        <fields oils_persist:primary="id" oils_persist:sequence="metabib.browse_author_entry_simple_heading_map_id_seq">
-            <field name="id" reporter:datatype="id" />
-            <field name="entry" reporter:datatype="link"/>
-            <field name="simple_heading" reporter:datatype="link"/>
-        </fields>
-        <links>
-            <link field="entry" reltype="has_a" key="id" map="" class="mbae"/>
-            <link field="simple_heading" reltype="has_a" key="id" map="" class="ash"/>
-        </links>
-    </class>
-       <class id="mbseshm" controller="open-ils.cstore open-ils.pcrud" oils_obj:fieldmapper="metabib::browse_subject_entry_simple_heading_map" oils_persist:tablename="metabib.browse_subject_entry_simple_heading_map" reporter:label="Combined Subject Browse Entry Simple Authority Heading Map" oils_persist:readonly="true">
-        <fields oils_persist:primary="id" oils_persist:sequence="metabib.browse_subject_entry_simple_heading_map_id_seq">
-            <field name="id" reporter:datatype="id" />
-            <field name="entry" reporter:datatype="link"/>
-            <field name="simple_heading" reporter:datatype="link"/>
-        </fields>
-        <links>
-            <link field="entry" reltype="has_a" key="id" map="" class="mbse"/>
-            <link field="simple_heading" reltype="has_a" key="id" map="" class="ash"/>
-        </links>
-    </class>
-    <class id="mbseeshm" controller="open-ils.cstore open-ils.pcrud" oils_obj:fieldmapper="metabib::browse_series_entry_simple_heading_map" oils_persist:tablename="metabib.browse_series_entry_simple_heading_map" reporter:label="Combined Series Browse Entry Simple Authority Heading Map" oils_persist:readonly="true">
-        <fields oils_persist:primary="id" oils_persist:sequence="metabib.browse_series_entry_simple_heading_map_id_seq">
-            <field name="id" reporter:datatype="id" />
-            <field name="entry" reporter:datatype="link"/>
-            <field name="simple_heading" reporter:datatype="link"/>
-        </fields>
-        <links>
-            <link field="entry" reltype="has_a" key="id" map="" class="mbsee"/>
-            <link field="simple_heading" reltype="has_a" key="id" map="" class="ash"/>
-        </links>
-    </class>
-    <class id="rcbe" 
+    <class id="rcbed" 
       controller="open-ils.cstore open-ils.pcrud" 
-      oils_obj:fieldmapper="reporter::cataloged_browse_entry" 
-      oils_persist:tablename="reporter.cataloged_browse_entry"
+      oils_obj:fieldmapper="reporter::cataloged_browse_entry_detail"
+      oils_persist:tablename="reporter.cataloged_browse_entry_detail"
       reporter:label="Cataloged Headings" 
       reporter:core="true" oils_persist:readonly="true">
       <fields>
index eee4db3..a4a314d 100644 (file)
@@ -65,7 +65,7 @@ sub browseSetNav {
 
     my $results = $e->json_query({
         from => [ "metabib.browse", $searchClass, $searchTerm, 
-            $locg, undef, $isStaffClient, $browseEntry, '3', $mattype ]
+            $locg, undef, $isStaffClient, $browseEntry, '3' ]
     });
 
     my $navResults = {};
index b0d653c..2a16058 100644 (file)
@@ -1350,18 +1350,13 @@ sub flatten {
                 $where .= "$NOT(" . $talias . ".id IS NOT NULL";
                 if (@{$node->phrases}) {
                     $where .= ' AND ' . join(' AND ', map {
-                        "${talias}.value ~* xml_encode_special_chars(search_normalize(".$self->QueryParser->quote_phrase_value($_, 1)."))"
+                        "${talias}.value ~* ".$self->QueryParser->quote_phrase_value($_, 1)
                     } @{$node->phrases});
-                } elsif (((@{$node->only_real_atoms}[0])->{content}) =~ /^\^/) { # matches exactly
-                    $where .= " AND ${talias}.value ilike xml_encode_special_chars(search_normalize(";
-                    my $phrase_list;
+                } else {
                     for my $atom (@{$node->only_real_atoms}) {
-                        my $content = ref($atom) ? $atom->{content} : $atom;
-                        $content =~ s/^\^//;
-                        $content =~ s/\$$//;
-                        $phrase_list .= "$content ";
+                        next unless $atom->{content} && $atom->{content} =~ /(^\^|\$$)/;
+                        $where .= " AND ${talias}.value ~* ".$self->QueryParser->quote_phrase_value($atom->{content});
                     }
-                    $where .= $self->QueryParser->quote_phrase_value($phrase_list).'))';
                 }
                 $where .= ')';
 
@@ -1665,20 +1660,10 @@ sub flatten {
             # Needs sub-casesses for title, author, subject, series, call number(id:bibcn)
 
             } elsif ($filter->name eq 'has_browse_entry') {
-                if (@{$filter->args} >= 3) {
-                    my $browseType = shift @{$filter->args};
+                if (@{$filter->args} >= 2) {
                     my $entry = int(shift @{$filter->args});
                     my $fields = join(",", map(int, @{$filter->args}));
-
-                    $browseType ||= "title";
-                    $browseType = "call_number"
-                        if $browseType eq "identifier#id|bibcn";
-
-                    $browseType = "title" unless
-                        $browseType =~ /title|author|subject|series|call_number/;
-
-                    my $defMap = "browse_${browseType}_entry_def_map";
-                    $from .= "\n" . $spc x 3 . sprintf("INNER JOIN metabib.$defMap mbedm ON (mbedm.source = m.source AND mbedm.entry = %d AND mbedm.def IN (%s))", $entry, $fields);
+                    $from .= "\n" . $spc x 3 . sprintf("INNER JOIN metabib.browse_entry_def_map mbedm ON (mbedm.source = m.source AND mbedm.entry = %d AND mbedm.def IN (%s))", $entry, $fields);
                 }
             } elsif ($filter->name eq 'edit_date' or $filter->name eq 'create_date') {
                 # bre.create_date and bre.edit_date filtering
@@ -2000,9 +1985,9 @@ sub only_real_atoms {
     my $atoms = $self->query_atoms;
     my @only_real_atoms;
     for my $a (@$atoms) {
-        push(@only_real_atoms, $a) if ((ref($a) && $a->isa('QueryParser::query_plan::node::atom') && !($a->{dummy})) ||
-                                        $self->plan->QueryParser->query =~ '^.*&.*$');
+        push(@only_real_atoms, $a) if (ref($a) && $a->isa('QueryParser::query_plan::node::atom') && !($a->{dummy}));
     }
+
     return \@only_real_atoms;
 }
 
index c3f0a59..b9fa7c3 100644 (file)
@@ -69,11 +69,14 @@ sub prepare_browse_parameters {
         int(
             $self->cgi->param('blimit') ||
             $self->ctx->{opac_hits_per_page} || 10
-        ),
-        # KCLS JBAS-1929
-        $self->cgi->param('fi:mattype') || undef
+        )
     );
 
+    # Append a mattype CCVM filter to the browse call.
+    if (my $mattype = $self->cgi->param('fi:mattype')) {
+        push(@params, 'mattype', $mattype);
+    }
+
     return (
         "oils_browse_" . md5_hex(OpenSRF::Utils::JSON->perl2JSON(\@params)),
         @params
@@ -670,93 +673,40 @@ sub get_browse_entry_marc {
 # record.  This is useful to know so we can filter out See Also references
 # for non-authoritative entries.
 sub is_not_authoritative {
-       my $self = shift;
-       my $id = shift;
+    my $self = shift;
+    my $id = shift;
                
-       my $result;
-       
-       if($self->cgi->param('qtype') eq 'author') {
-               $result = $self->editor->json_query({
-                       select => {
-                               acsaf => ["tag"],
-                               ash => ["record"]
-                       },
-                       from => {
-                               acsaf => {
-                                       ash => {
-                                               fkey => "id", field => "atag",
-                                               join => {
-                                                       mbaeshm => {
-                                                               fkey => "id", field => "simple_heading",
-                                                               join => {
-                                                                       mbae => {
-                                                                               fkey => "entry", field => "id"
-                                                                       }
-                                                               }
-                                                       }
-                                               }
-                                       }
-                               }
-                       },
-                       where => {"+mbae" => {id => $id}}
-               });
-       } elsif($self->cgi->param('qtype') eq 'subject') {
-               $result = $self->editor->json_query({
-                       select => {
-                               acsaf => ["tag"],
-                               ash => ["record"]
-                       },
-                       from => {
-                               acsaf => {
-                                       ash => {
-                                               fkey => "id", field => "atag",
-                                               join => {
-                                                       mbseshm => {
-                                                               fkey => "id", field => "simple_heading",
-                                                               join => {
-                                                                       mbse => {
-                                                                               fkey => "entry", field => "id"
-                                                                       }
-                                                               }
-                                                       }
-                                               }
-                                       }
-                               }
-                       },
-                       where => {"+mbse" => {id => $id}}
-               });
-       } elsif($self->cgi->param('qtype') eq 'series') {
-               $result = $self->editor->json_query({
-                       select => {
-                               acsaf => ["tag"],
-                               ash => ["record"]
-                       },
-                       from => {
-                               acsaf => {
-                                       ash => {
-                                               fkey => "id", field => "atag",
-                                               join => {
-                                                       mbseeshm => {
-                                                               fkey => "id", field => "simple_heading",
-                                                               join => {
-                                                                       mbsee => {
-                                                                               fkey => "entry", field => "id"
-                                                                       }
-                                                               }
-                                                       }
-                                               }
-                                       }
-                               }
-                       },
-                       where => {"+mbsee" => {id => $id}}
-               });
-       }
+    my $result = $self->editor->json_query({
+        select => {
+            acsaf => ["tag"],
+            ash => ["record"]
+        },
+        from => {
+            acsaf => {
+                ash => {
+                    fkey => "id", field => "atag",
+                    join => {
+                        mbeshm => {
+                            fkey => "id", field => "simple_heading",
+                            join => {
+                                mbe => {
+                                    fkey => "entry", field => "id"
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        },
+        where => {"+mbe" => {id => $id}}
+    });
 
        # If the result tag begins with a 4 we have an unauthorized heading so return true.
-       if($result->[0]{tag} =~ /^4/) {
-               return $result->[0]{record};
-       } 
-       return 0;
+    if ($result->[0] && $result->[0]{tag} =~ /^4/) {
+        return $result->[0]{record};
+    } 
+
+    return 0;
 }
 
 1;
index e353e4e..f18795f 100644 (file)
@@ -75,6 +75,7 @@ if(!dojo._hasResource["openils.CGI"]) {
             }
 
             if (key.length) {
+                key = decodeURIComponent(key);
                 if( ! this.data[key] ) this.data[key] = [];
                 this.data[key].push(decodeURIComponent(value));
                 this._keys.push(key);
index 47ce3c7..2df3b8f 100644 (file)
@@ -149,7 +149,7 @@ function load_headings(is_new, new_page) {
     /* cache 4 pages, plus 1 heading at a time */
     var fetch_count = page_size * 4 + 1;
 
-    pcrud.search('rcbe', compile_query_filter(), {
+    pcrud.search('rcbed', compile_query_filter(), {
         offset : page_offset,
         limit : fetch_count, 
         async : true,
index d335e9d..0ed4452 100644 (file)
@@ -4,43 +4,25 @@ function set_navigator () {
        dojo.require("fieldmapper.dojoData");
        dojo.require("fieldmapper.OrgUtils");
        dojo.require('openils.PermaCrud');
-       
-       var matches
-       var url = document.URL;
-       //get the following values to run metabib.browse
-       // - browse entry
-       var browseEntryRegex = /\%2C(\d+?)\%2C/;
-       matches = browseEntryRegex.exec(url);
-       var browseEntry = matches[1];
-       // - serch class
-       var searchClassRegex = /qtype\=([a-zA-Z]+?)\;/;
-       matches = searchClassRegex.exec(url);
+    dojo.require('openils.CGI');
 
-    // The URL includes the  pipe code, not the pipe actual and the
-    // regex won't match on it.  So we do the following...
-    var searchClass = 'id|bibcn';
+       var url = document.URL;
+    var cgi = new openils.CGI();
 
-    if (matches != null && matches.length > 1){
+    // metabib.browse_entry.id,config.metabib_field
+    var browseEntry = cgi.param('fi:has_browse_entry').split(',')[0];
 
-        searchClass = matches[1];
-    }
+       // - serch class
+    var searchClass = cgi.param('qtype');
 
     // JBAS-1929
-    var mattype = null;
-    var mattypeRegex = /fi%3Amattype=(\w)/; // %3A => :
-    var mattypeMatches = mattypeRegex.exec(url);
-    if (mattypeMatches && mattypeMatches.length > 1) {
-        mattype = mattypeMatches[1];
-    }
+    var mattype = cgi.param('fi:mattype');
 
        // - browse term
-       var searchTermRegex = /bterm\=(.+?)\;/;
-       matches = searchTermRegex.exec(url);
-       var searchTerm = matches[1];
+    var searchTerm = cgi.param('bterm');
+
        // - context_org (locg)
-       var locgRegex = /locg\=(\d+?)\;/;
-       matches = locgRegex.exec(url);
-       var locg = matches[1];
+    var locg = cgi.param('locg');
 
        var retrieve = ['open-ils.search', 'open-ils.search.metabib.browse.setnav'];
        var params = [ browseEntry, searchClass, searchTerm, locg, mattype ];
@@ -68,13 +50,26 @@ function set_navigator () {
                 //var previousUrl;
                 //var nextUrl;
 
+                /*
                 var previousUrl = url.replace(/(.*[a-z]+%2C)[0-9]+%2C[0-9]+(.*)/,
                         "$1" + results.previous_browse + '%2C'
                         + results.previous_field + "$2");
+                */
+
+                cgi.param('fi:has_browse_entry', 
+                    results.previous_browse + ',' + results.previous_field);
+                var previousUrl = cgi.url();
 
+                /*
                 var nextUrl = url.replace(/(.*[a-z]+%2C)[0-9]+%2C[0-9]+(.*)/,
                         "$1" + results.next_browse + '%2C'
                         + results.next_field + "$2");
+                */
+
+                cgi.param('fi:has_browse_entry', 
+                    results.next_browse + ',' + results.next_field);
+                var nextUrl = cgi.url();
+
 
                 //if (/.*qtype=id%7Cbibcn;.*/.test(url)) {
 
diff --git a/recover-stock-browse-db.sql b/recover-stock-browse-db.sql
new file mode 100644 (file)
index 0000000..bff1ed7
--- /dev/null
@@ -0,0 +1,741 @@
+
+BEGIN;
+
+ALTER TABLE metabib.browse_entry
+    ADD COLUMN create_date TIMESTAMP WITH TIME ZONE
+    NOT NULL DEFAULT NOW();
+
+-- function is unused
+DROP FUNCTION authority.unlinked_bibs_to_given_auth_text(TEXT, TEXT);
+
+DROP FUNCTION metabib.get_browse_author_entry_marc_record(BIGINT);
+DROP FUNCTION metabib.get_browse_series_entry_marc_record(BIGINT);
+DROP FUNCTION metabib.get_browse_subject_entry_marc_record(BIGINT);
+
+-- Retain custom function for backwards compat until we reimplement the
+-- browse UI in Angular.  Called from Browse.pm
+CREATE OR REPLACE FUNCTION 
+    metabib.get_browse_entry_marc_record(browse_entry BIGINT, search_class TEXT)
+    RETURNS TEXT AS
+$FUNK$
+    SELECT are.marc
+    FROM authority.record_entry are
+    JOIN authority.simple_heading ash ON (are.id = ash.record)
+    JOIN metabib.browse_entry_simple_heading_map mbeshm 
+        ON (ash.id = mbeshm.simple_heading)
+    JOIN metabib.browse_entry mbe ON (mbeshm.entry = mbe.id)
+    JOIN authority.control_set_authority_field acsaf ON (ash.atag = acsaf.id)
+    WHERE mbe.id = browse_entry AND acsaf.tag ILIKE '1__';
+$FUNK$ LANGUAGE SQL STABLE;
+
+-- DROP CLASS-SPECIFIC FUNCTIONS
+
+DROP TABLE metabib.browse_title_entry_def_map;
+DROP TABLE metabib.browse_title_entry_simple_heading_map;
+DROP TABLE metabib.browse_title_entry;
+
+DROP TABLE metabib.browse_author_entry_def_map;
+DROP TABLE metabib.browse_author_entry_simple_heading_map;
+DROP TABLE metabib.browse_author_entry;
+
+DROP TABLE metabib.browse_subject_entry_def_map;
+DROP TABLE metabib.browse_subject_entry_simple_heading_map;
+DROP TABLE metabib.browse_subject_entry;
+
+DROP TABLE metabib.browse_series_entry_def_map;
+DROP TABLE metabib.browse_series_entry_simple_heading_map;
+DROP TABLE metabib.browse_series_entry;
+
+DROP TABLE metabib.browse_call_number_entry_def_map;
+DROP TABLE metabib.browse_call_number_entry;
+
+-- DROP LOCAL FUNCS TO ENSURE NONE STICK AROUND w/ DIFFERENT SIGNATURES
+
+DROP FUNCTION metabib.browse(TEXT, TEXT, INT, INT, BOOLEAN, BIGINT, INT);
+DROP FUNCTION metabib.browse(TEXT, TEXT, INT, INT, BOOLEAN, BIGINT, INT, TEXT);
+DROP FUNCTION metabib.staged_browse(TEXT, INT[], INT, INT[], BOOLEAN, INT, BOOLEAN, INT, INT);
+DROP FUNCTION metabib.staged_browse(TEXT, INT[], INT, INT[], BOOLEAN, INT, BOOLEAN, INT, INT, TEXT);
+DROP FUNCTION metabib.staged_browse(TEXT, INT[], INT, INT[], BOOLEAN, INT, BOOLEAN, INT, INT, TEXT, TEXT);
+
+-- RECOVER STOCK FUNCTIONS
+
+-- TODO add cmf for field_class=call_number search & browse
+
+CREATE OR REPLACE FUNCTION metabib.reingest_metabib_field_entries( bib_id BIGINT, skip_facet BOOL DEFAULT FALSE, skip_browse BOOL DEFAULT FALSE, skip_search BOOL DEFAULT FALSE ) RETURNS VOID AS $func$
+DECLARE
+    fclass          RECORD;
+    ind_data        metabib.field_entry_template%ROWTYPE;
+    mbe_row         metabib.browse_entry%ROWTYPE;
+    mbe_id          BIGINT;
+    b_skip_facet    BOOL;
+    b_skip_browse   BOOL;
+    b_skip_search   BOOL;
+    value_prepped   TEXT;
+BEGIN
+
+    SELECT COALESCE(NULLIF(skip_facet, FALSE), EXISTS (SELECT enabled FROM config.internal_flag WHERE name =  'ingest.skip_facet_indexing' AND enabled)) INTO b_skip_facet;
+    SELECT COALESCE(NULLIF(skip_browse, FALSE), EXISTS (SELECT enabled FROM config.internal_flag WHERE name =  'ingest.skip_browse_indexing' AND enabled)) INTO b_skip_browse;
+    SELECT COALESCE(NULLIF(skip_search, FALSE), EXISTS (SELECT enabled FROM config.internal_flag WHERE name =  'ingest.skip_search_indexing' AND enabled)) INTO b_skip_search;
+
+    PERFORM * FROM config.internal_flag WHERE name = 'ingest.assume_inserts_only' AND enabled;
+    IF NOT FOUND THEN
+        IF NOT b_skip_search THEN
+            FOR fclass IN SELECT * FROM config.metabib_class LOOP
+                -- RAISE NOTICE 'Emptying out %', fclass.name;
+                EXECUTE $$DELETE FROM metabib.$$ || fclass.name || $$_field_entry WHERE source = $$ || bib_id;
+            END LOOP;
+        END IF;
+        IF NOT b_skip_facet THEN
+            DELETE FROM metabib.facet_entry WHERE source = bib_id;
+        END IF;
+        IF NOT b_skip_browse THEN
+            DELETE FROM metabib.browse_entry_def_map WHERE source = bib_id;
+        END IF;
+    END IF;
+
+    FOR ind_data IN SELECT * FROM biblio.extract_metabib_field_entry( bib_id ) LOOP
+
+       -- don't store what has been normalized away
+        CONTINUE WHEN ind_data.value IS NULL;
+
+        IF ind_data.field < 0 THEN
+            ind_data.field = -1 * ind_data.field;
+        END IF;
+
+        IF ind_data.facet_field AND NOT b_skip_facet THEN
+            INSERT INTO metabib.facet_entry (field, source, value)
+                VALUES (ind_data.field, ind_data.source, ind_data.value);
+        END IF;
+
+        IF ind_data.browse_field AND NOT b_skip_browse THEN
+            -- A caveat about this SELECT: this should take care of replacing
+            -- old mbe rows when data changes, but not if normalization (by
+            -- which I mean specifically the output of
+            -- evergreen.oils_tsearch2()) changes.  It may or may not be
+            -- expensive to add a comparison of index_vector to index_vector
+            -- to the WHERE clause below.
+
+            CONTINUE WHEN ind_data.sort_value IS NULL;
+
+            value_prepped := metabib.browse_normalize(ind_data.value, ind_data.field);
+            SELECT INTO mbe_row * FROM metabib.browse_entry
+                WHERE value = value_prepped AND sort_value = ind_data.sort_value;
+
+            IF FOUND THEN
+                mbe_id := mbe_row.id;
+            ELSE
+                INSERT INTO metabib.browse_entry
+                    ( value, sort_value ) VALUES
+                    ( value_prepped, ind_data.sort_value );
+
+                mbe_id := CURRVAL('metabib.browse_entry_id_seq'::REGCLASS);
+            END IF;
+
+            INSERT INTO metabib.browse_entry_def_map (entry, def, source, authority)
+                VALUES (mbe_id, ind_data.field, ind_data.source, ind_data.authority);
+        END IF;
+
+        IF ind_data.search_field AND NOT b_skip_search THEN
+            -- Avoid inserting duplicate rows
+            EXECUTE 'SELECT 1 FROM metabib.' || ind_data.field_class ||
+                '_field_entry WHERE field = $1 AND source = $2 AND value = $3'
+                INTO mbe_id USING ind_data.field, ind_data.source, ind_data.value;
+                -- RAISE NOTICE 'Search for an already matching row returned %', mbe_id;
+            IF mbe_id IS NULL THEN
+                EXECUTE $$
+                INSERT INTO metabib.$$ || ind_data.field_class || $$_field_entry (field, source, value)
+                    VALUES ($$ ||
+                        quote_literal(ind_data.field) || $$, $$ ||
+                        quote_literal(ind_data.source) || $$, $$ ||
+                        quote_literal(ind_data.value) ||
+                    $$);$$;
+            END IF;
+        END IF;
+
+    END LOOP;
+
+    IF NOT b_skip_search THEN
+        PERFORM metabib.update_combined_index_vectors(bib_id);
+    END IF;
+
+    RETURN;
+END;
+$func$ LANGUAGE PLPGSQL;
+
+CREATE OR REPLACE FUNCTION metabib.staged_browse(
+    query                   TEXT,
+    fields                  INT[],
+    context_org             INT,
+    context_locations       INT[],
+    staff                   BOOL,
+    browse_superpage_size   INT,
+    count_up_from_zero      BOOL,   -- if false, count down from -1
+    result_limit            INT,
+    next_pivot_pos          INT
+) RETURNS SETOF metabib.flat_browse_entry_appearance AS $p$
+DECLARE
+    curs                    REFCURSOR;
+    rec                     RECORD;
+    qpfts_query             TEXT;
+    aqpfts_query            TEXT;
+    afields                 INT[];
+    bfields                 INT[];
+    result_row              metabib.flat_browse_entry_appearance%ROWTYPE;
+    results_skipped         INT := 0;
+    row_counter             INT := 0;
+    row_number              INT;
+    slice_start             INT;
+    slice_end               INT;
+    full_end                INT;
+    all_records             BIGINT[];
+    all_brecords             BIGINT[];
+    all_arecords            BIGINT[];
+    superpage_of_records    BIGINT[];
+    superpage_size          INT;
+BEGIN
+    IF count_up_from_zero THEN
+        row_number := 0;
+    ELSE
+        row_number := -1;
+    END IF;
+
+    OPEN curs FOR EXECUTE query;
+
+    LOOP
+        FETCH curs INTO rec;
+        IF NOT FOUND THEN
+            IF result_row.pivot_point IS NOT NULL THEN
+                RETURN NEXT result_row;
+            END IF;
+            RETURN;
+        END IF;
+
+
+        -- Gather aggregate data based on the MBE row we're looking at now, authority axis
+        SELECT INTO all_arecords, result_row.sees, afields
+                ARRAY_AGG(DISTINCT abl.bib), -- bibs to check for visibility
+                STRING_AGG(DISTINCT aal.source::TEXT, $$,$$), -- authority record ids
+                ARRAY_AGG(DISTINCT map.metabib_field) -- authority-tag-linked CMF rows
+
+          FROM  metabib.browse_entry_simple_heading_map mbeshm
+                JOIN authority.simple_heading ash ON ( mbeshm.simple_heading = ash.id )
+                JOIN authority.authority_linking aal ON ( ash.record = aal.source )
+                JOIN authority.bib_linking abl ON ( aal.target = abl.authority )
+                JOIN authority.control_set_auth_field_metabib_field_map_refs map ON (
+                    ash.atag = map.authority_field
+                    AND map.metabib_field = ANY(fields)
+                )
+          WHERE mbeshm.entry = rec.id;
+
+
+        -- Gather aggregate data based on the MBE row we're looking at now, bib axis
+        SELECT INTO all_brecords, result_row.authorities, bfields
+                ARRAY_AGG(DISTINCT source),
+                STRING_AGG(DISTINCT authority::TEXT, $$,$$),
+                ARRAY_AGG(DISTINCT def)
+          FROM  metabib.browse_entry_def_map
+          WHERE entry = rec.id
+                AND def = ANY(fields);
+
+        SELECT INTO result_row.fields STRING_AGG(DISTINCT x::TEXT, $$,$$) FROM UNNEST(afields || bfields) x;
+
+        result_row.sources := 0;
+        result_row.asources := 0;
+
+        -- Bib-linked vis checking
+        IF ARRAY_UPPER(all_brecords,1) IS NOT NULL THEN
+
+            full_end := ARRAY_LENGTH(all_brecords, 1);
+            superpage_size := COALESCE(browse_superpage_size, full_end);
+            slice_start := 1;
+            slice_end := superpage_size;
+
+            WHILE result_row.sources = 0 AND slice_start <= full_end LOOP
+                superpage_of_records := all_brecords[slice_start:slice_end];
+                qpfts_query :=
+                    'SELECT NULL::BIGINT AS id, ARRAY[r] AS records, ' ||
+                    'NULL AS badges, NULL::NUMERIC AS popularity, ' ||
+                    '1::NUMERIC AS rel FROM (SELECT UNNEST(' ||
+                    quote_literal(superpage_of_records) || '::BIGINT[]) AS r) rr';
+
+                -- We use search.query_parser_fts() for visibility testing.
+                -- We're calling it once per browse-superpage worth of records
+                -- out of the set of records related to a given mbe, until we've
+                -- either exhausted that set of records or found at least 1
+                -- visible record.
+
+                SELECT INTO result_row.sources visible
+                    FROM search.query_parser_fts(
+                        context_org, NULL, qpfts_query, NULL,
+                        context_locations, 0, NULL, NULL, FALSE, staff, FALSE
+                    ) qpfts
+                    WHERE qpfts.rel IS NULL;
+
+                slice_start := slice_start + superpage_size;
+                slice_end := slice_end + superpage_size;
+            END LOOP;
+
+            -- Accurate?  Well, probably.
+            result_row.accurate := browse_superpage_size IS NULL OR
+                browse_superpage_size >= full_end;
+
+        END IF;
+
+        -- Authority-linked vis checking
+        IF ARRAY_UPPER(all_arecords,1) IS NOT NULL THEN
+
+            full_end := ARRAY_LENGTH(all_arecords, 1);
+            superpage_size := COALESCE(browse_superpage_size, full_end);
+            slice_start := 1;
+            slice_end := superpage_size;
+
+            WHILE result_row.asources = 0 AND slice_start <= full_end LOOP
+                superpage_of_records := all_arecords[slice_start:slice_end];
+                qpfts_query :=
+                    'SELECT NULL::BIGINT AS id, ARRAY[r] AS records, ' ||
+                    'NULL AS badges, NULL::NUMERIC AS popularity, ' ||
+                    '1::NUMERIC AS rel FROM (SELECT UNNEST(' ||
+                    quote_literal(superpage_of_records) || '::BIGINT[]) AS r) rr';
+
+                -- We use search.query_parser_fts() for visibility testing.
+                -- We're calling it once per browse-superpage worth of records
+                -- out of the set of records related to a given mbe, via
+                -- authority until we've either exhausted that set of records
+                -- or found at least 1 visible record.
+
+                SELECT INTO result_row.asources visible
+                    FROM search.query_parser_fts(
+                        context_org, NULL, qpfts_query, NULL,
+                        context_locations, 0, NULL, NULL, FALSE, staff, FALSE
+                    ) qpfts
+                    WHERE qpfts.rel IS NULL;
+
+                slice_start := slice_start + superpage_size;
+                slice_end := slice_end + superpage_size;
+            END LOOP;
+
+
+            -- Accurate?  Well, probably.
+            result_row.aaccurate := browse_superpage_size IS NULL OR
+                browse_superpage_size >= full_end;
+
+        END IF;
+
+        IF result_row.sources > 0 OR result_row.asources > 0 THEN
+
+            -- The function that calls this function needs row_number in order
+            -- to correctly order results from two different runs of this
+            -- functions.
+            result_row.row_number := row_number;
+
+            -- Now, if row_counter is still less than limit, return a row.  If
+            -- not, but it is less than next_pivot_pos, continue on without
+            -- returning actual result rows until we find
+            -- that next pivot, and return it.
+
+            IF row_counter < result_limit THEN
+                result_row.browse_entry := rec.id;
+                result_row.value := rec.value;
+
+                RETURN NEXT result_row;
+            ELSE
+                result_row.browse_entry := NULL;
+                result_row.authorities := NULL;
+                result_row.fields := NULL;
+                result_row.value := NULL;
+                result_row.sources := NULL;
+                result_row.sees := NULL;
+                result_row.accurate := NULL;
+                result_row.aaccurate := NULL;
+                result_row.pivot_point := rec.id;
+
+                IF row_counter >= next_pivot_pos THEN
+                    RETURN NEXT result_row;
+                    RETURN;
+                END IF;
+            END IF;
+
+            IF count_up_from_zero THEN
+                row_number := row_number + 1;
+            ELSE
+                row_number := row_number - 1;
+            END IF;
+
+            -- row_counter is different from row_number.
+            -- It simply counts up from zero so that we know when
+            -- we've reached our limit.
+            row_counter := row_counter + 1;
+        END IF;
+    END LOOP;
+END;
+$p$ LANGUAGE PLPGSQL;
+
+
+-- Core browse entry point: given a set of metabib field IDs and either a
+-- browse term or an explicit pivot entry ID, return one "page" of browse
+-- entries (roughly half before and half after the pivot), delegating the
+-- cursor-driven row collection and visibility testing to
+-- metabib.staged_browse().
+--
+-- Parameters:
+--   search_field      - config.metabib_field IDs to browse across
+--   browse_term       - user-supplied term; used only to locate a pivot
+--                       when pivot_id is NULL
+--   context_org       - org unit scoping for visibility tests
+--   context_loc_group - copy location group (logc) scoping, or NULL
+--   staff             - staff-mode visibility when TRUE
+--   pivot_id          - explicit metabib.browse_entry ID to center on
+--   result_limit      - total page size across both directions
+CREATE OR REPLACE FUNCTION metabib.browse(
+    search_field            INT[],
+    browse_term             TEXT,
+    context_org             INT DEFAULT NULL,
+    context_loc_group       INT DEFAULT NULL,
+    staff                   BOOL DEFAULT FALSE,
+    pivot_id                BIGINT DEFAULT NULL,
+    result_limit            INT DEFAULT 10
+) RETURNS SETOF metabib.flat_browse_entry_appearance AS $p$
+DECLARE
+    core_query              TEXT;
+    back_query              TEXT;
+    forward_query           TEXT;
+    pivot_sort_value        TEXT;
+    pivot_sort_fallback     TEXT;   -- populated below but not otherwise used here
+    context_locations       INT[];
+    browse_superpage_size   INT;
+    back_limit              INT;
+    back_to_pivot           INT;
+    forward_limit           INT;
+    forward_to_pivot        INT;
+BEGIN
+    -- First, find the pivot if we were given a browse term but not a pivot.
+    IF pivot_id IS NULL THEN
+        pivot_id := metabib.browse_pivot(search_field, browse_term);
+    END IF;
+
+    SELECT INTO pivot_sort_value, pivot_sort_fallback
+        sort_value, value FROM metabib.browse_entry WHERE id = pivot_id;
+
+    -- Bail if we couldn't find a pivot.
+    IF pivot_sort_value IS NULL THEN
+        RETURN;
+    END IF;
+
+    -- Transform the context_loc_group argument (if any) (logc at the
+    -- TPAC layer) into a form we'll be able to use.
+    IF context_loc_group IS NOT NULL THEN
+        SELECT INTO context_locations ARRAY_AGG(location)
+            FROM asset.copy_location_group_map
+            WHERE lgroup = context_loc_group;
+    END IF;
+
+    -- Get the configured size of browse superpages.
+    SELECT INTO browse_superpage_size value     -- NULL ok
+        FROM config.global_flag
+        WHERE enabled AND name = 'opac.browse.holdings_visibility_test_limit';
+
+    -- First we're going to search backward from the pivot, then we're going
+    -- to search forward.  In each direction, we need two limits.  At the
+    -- lesser of the two limits, we delineate the edge of the result set
+    -- we're going to return.  At the greater of the two limits, we find the
+    -- pivot value that would represent an offset from the current pivot
+    -- at a distance of one "page" in either direction, where a "page" is a
+    -- result set of the size specified in the "result_limit" argument.
+    --
+    -- The two limits in each direction make four derived values in total,
+    -- and we calculate them now.
+    back_limit := CEIL(result_limit::FLOAT / 2);
+    back_to_pivot := result_limit;
+    forward_limit := result_limit / 2;
+    forward_to_pivot := result_limit - 1;
+
+    -- This is the meat of the SQL query that finds browse entries.  We'll
+    -- pass this to a function which uses it with a cursor, so that individual
+    -- rows may be fetched in a loop until some condition is satisfied, without
+    -- waiting for a result set of fixed size to be collected all at once.
+    core_query := '
+SELECT  mbe.id,
+        mbe.value,
+        mbe.sort_value
+  FROM  metabib.browse_entry mbe
+  WHERE (
+            EXISTS ( -- are there any bibs using this mbe via the requested fields?
+                SELECT  1
+                  FROM  metabib.browse_entry_def_map mbedm
+                  WHERE mbedm.entry = mbe.id AND mbedm.def = ANY(' || quote_literal(search_field) || ')
+                  LIMIT 1
+            ) OR EXISTS ( -- are there any authorities using this mbe via the requested fields?
+                SELECT  1
+                  FROM  metabib.browse_entry_simple_heading_map mbeshm
+                        JOIN authority.simple_heading ash ON ( mbeshm.simple_heading = ash.id )
+                        JOIN authority.control_set_auth_field_metabib_field_map_refs map ON (
+                            ash.atag = map.authority_field
+                            AND map.metabib_field = ANY(' || quote_literal(search_field) || ')
+                        )
+                  WHERE mbeshm.entry = mbe.id
+            )
+        ) AND ';
+
+    -- This is the variant of the query for browsing backward.
+    back_query := core_query ||
+        ' mbe.sort_value <= ' || quote_literal(pivot_sort_value) ||
+    ' ORDER BY mbe.sort_value DESC, mbe.value DESC ';
+
+    -- This variant browses forward.
+    forward_query := core_query ||
+        ' mbe.sort_value > ' || quote_literal(pivot_sort_value) ||
+    ' ORDER BY mbe.sort_value, mbe.value ';
+
+    -- We now call the function which applies a cursor to the provided
+    -- queries, stopping at the appropriate limits and also giving us
+    -- the next page's pivot.  UNION (not UNION ALL) is intentional: the
+    -- backward query's pivot row could also surface in the forward set.
+    RETURN QUERY
+        SELECT * FROM metabib.staged_browse(
+            back_query, search_field, context_org, context_locations,
+            staff, browse_superpage_size, TRUE, back_limit, back_to_pivot
+        ) UNION
+        SELECT * FROM metabib.staged_browse(
+            forward_query, search_field, context_org, context_locations,
+            staff, browse_superpage_size, FALSE, forward_limit, forward_to_pivot
+        ) ORDER BY row_number DESC;
+
+END;
+$p$ LANGUAGE PLPGSQL;
+
+-- Convenience overload of metabib.browse() that accepts a search class
+-- name (e.g. 'title', 'author') instead of an explicit array of
+-- config.metabib_field IDs.
+CREATE OR REPLACE FUNCTION metabib.browse(
+    search_class        TEXT,
+    browse_term         TEXT,
+    context_org         INT DEFAULT NULL,
+    context_loc_group   INT DEFAULT NULL,
+    staff               BOOL DEFAULT FALSE,
+    pivot_id            BIGINT DEFAULT NULL,
+    result_limit        INT DEFAULT 10
+) RETURNS SETOF metabib.flat_browse_entry_appearance AS $p$
+DECLARE
+    class_fields INT[];
+BEGIN
+    -- Resolve the class name to its set of field IDs; COALESCE guarantees
+    -- a (possibly empty) array rather than NULL for unknown classes.
+    SELECT COALESCE(ARRAY_AGG(id), ARRAY[]::INT[]) INTO class_fields
+        FROM config.metabib_field
+        WHERE field_class = search_class;
+
+    -- Delegate to the INT[] variant with all other arguments passed through.
+    RETURN QUERY SELECT * FROM metabib.browse(
+        class_fields,
+        browse_term,
+        context_org,
+        context_loc_group,
+        staff,
+        pivot_id,
+        result_limit
+    );
+END;
+$p$ LANGUAGE PLPGSQL;
+
+
+-- Maintain the metarecord (MR) grouping for a single bib record after its
+-- fingerprint changes or the bib is deleted.  Ensures the bib maps to
+-- exactly one MR matching its fingerprint (creating one if needed), prunes
+-- MRs left with no sources, and retargets metarecord ('M') holds from any
+-- deleted MRs.  Returns the surviving/target MR id (may be NULL in the
+-- deleted-bib path -- callers appear to ignore the return there; confirm).
+CREATE OR REPLACE FUNCTION metabib.remap_metarecord_for_bib( bib_id BIGINT, fp TEXT, bib_is_deleted BOOL DEFAULT FALSE, retain_deleted BOOL DEFAULT FALSE ) RETURNS BIGINT AS $func$
+DECLARE
+    new_mapping     BOOL := TRUE;       -- TRUE until we find an existing map for this bib
+    source_count    INT;                -- remaining sources on an MR being considered for deletion
+    old_mr          BIGINT;             -- the MR this bib ends up attached to
+    tmp_mr          metabib.metarecord%ROWTYPE;
+    deleted_mrs     BIGINT[];           -- MRs removed here, so their holds can be retargeted
+BEGIN
+
+    -- We need to make sure we're not a deleted master record of an MR
+    IF bib_is_deleted THEN
+        FOR old_mr IN SELECT id FROM metabib.metarecord WHERE master_record = bib_id LOOP
+
+            IF NOT retain_deleted THEN -- Go away for any MR that we're master of, unless retained
+                DELETE FROM metabib.metarecord_source_map WHERE source = bib_id;
+            END IF;
+
+            -- Now, are there any more sources on this MR?
+            SELECT COUNT(*) INTO source_count FROM metabib.metarecord_source_map WHERE metarecord = old_mr;
+
+            IF source_count = 0 AND NOT retain_deleted THEN -- No other records
+                deleted_mrs := ARRAY_APPEND(deleted_mrs, old_mr); -- Just in case...
+                DELETE FROM metabib.metarecord WHERE id = old_mr;
+
+            ELSE -- indeed there are. Update it with a null cache and recalculated master record
+                UPDATE  metabib.metarecord
+                  SET   mods = NULL,
+                        master_record = ( SELECT id FROM biblio.record_entry WHERE fingerprint = fp AND NOT deleted ORDER BY quality DESC LIMIT 1)
+                  WHERE id = old_mr;
+            END IF;
+        END LOOP;
+
+    ELSE -- insert or update
+
+        -- Walk every MR this bib is currently mapped to: keep the first one
+        -- whose fingerprint still matches; unmap from the rest, deleting
+        -- any MR that loses its last source.
+        FOR tmp_mr IN SELECT m.* FROM metabib.metarecord m JOIN metabib.metarecord_source_map s ON (s.metarecord = m.id) WHERE s.source = bib_id LOOP
+
+            -- Find the first fingerprint-matching
+            IF old_mr IS NULL AND fp = tmp_mr.fingerprint THEN
+                old_mr := tmp_mr.id;
+                new_mapping := FALSE;
+
+            ELSE -- Our fingerprint changed ... maybe remove the old MR
+                DELETE FROM metabib.metarecord_source_map WHERE metarecord = tmp_mr.id AND source = bib_id; -- remove the old source mapping
+                SELECT COUNT(*) INTO source_count FROM metabib.metarecord_source_map WHERE metarecord = tmp_mr.id;
+                IF source_count = 0 THEN -- No other records
+                    deleted_mrs := ARRAY_APPEND(deleted_mrs, tmp_mr.id);
+                    DELETE FROM metabib.metarecord WHERE id = tmp_mr.id;
+                END IF;
+            END IF;
+
+        END LOOP;
+
+        -- we found no suitable, preexisting MR based on old source maps
+        IF old_mr IS NULL THEN
+            SELECT id INTO old_mr FROM metabib.metarecord WHERE fingerprint = fp; -- is there one for our current fingerprint?
+
+            IF old_mr IS NULL THEN -- nope, create one and grab its id
+                INSERT INTO metabib.metarecord ( fingerprint, master_record ) VALUES ( fp, bib_id );
+                SELECT id INTO old_mr FROM metabib.metarecord WHERE fingerprint = fp;
+
+            ELSE -- indeed there is. update it with a null cache and recalculated master record
+                UPDATE  metabib.metarecord
+                  SET   mods = NULL,
+                        master_record = ( SELECT id FROM biblio.record_entry WHERE fingerprint = fp AND NOT deleted ORDER BY quality DESC LIMIT 1)
+                  WHERE id = old_mr;
+            END IF;
+
+        ELSE -- there was one we already attached to, update its mods cache and master_record
+            UPDATE  metabib.metarecord
+              SET   mods = NULL,
+                    master_record = ( SELECT id FROM biblio.record_entry WHERE fingerprint = fp AND NOT deleted ORDER BY quality DESC LIMIT 1)
+              WHERE id = old_mr;
+        END IF;
+
+        IF new_mapping THEN
+            INSERT INTO metabib.metarecord_source_map (metarecord, source) VALUES (old_mr, bib_id); -- new source mapping
+        END IF;
+
+    END IF;
+
+    -- Retarget metarecord holds from any MRs deleted above onto the
+    -- surviving MR.
+    IF ARRAY_UPPER(deleted_mrs,1) > 0 THEN
+        UPDATE action.hold_request SET target = old_mr WHERE target IN ( SELECT unnest(deleted_mrs) ) AND hold_type = 'M'; -- if we had to delete any MRs above, make sure their holds are moved
+    END IF;
+
+    RETURN old_mr;
+
+END;
+$func$ LANGUAGE PLPGSQL;
+
+
+
+-- AFTER UPDATE OR INSERT trigger for authority.record_entry
+CREATE OR REPLACE FUNCTION authority.indexing_ingest_or_delete () RETURNS TRIGGER AS $func$
+DECLARE
+    ashs    authority.simple_heading%ROWTYPE;
+    mbe_row metabib.browse_entry%ROWTYPE;
+    mbe_id  BIGINT;
+    ash_id  BIGINT;
+BEGIN
+
+    IF NEW.deleted IS TRUE THEN -- If this authority is deleted
+        DELETE FROM authority.bib_linking WHERE authority = NEW.id; -- Avoid updating fields in bibs that are no longer visible
+        DELETE FROM authority.full_rec WHERE record = NEW.id; -- Avoid validating fields against deleted authority records
+        DELETE FROM authority.simple_heading WHERE record = NEW.id;
+          -- Should remove matching $0 from controlled fields at the same time?
+
+        -- XXX What do we about the actual linking subfields present in
+        -- authority records that target this one when this happens?
+        DELETE FROM authority.authority_linking
+            WHERE source = NEW.id OR target = NEW.id;
+
+        RETURN NEW; -- and we're done
+    END IF;
+
+    IF TG_OP = 'UPDATE' THEN -- re-ingest?
+        PERFORM * FROM config.internal_flag WHERE name = 'ingest.reingest.force_on_same_marc' AND enabled;
+
+        IF NOT FOUND AND OLD.marc = NEW.marc THEN -- don't do anything if the MARC didn't change
+            RETURN NEW;
+        END IF;
+
+        -- Unless there's a setting stopping us, propagate these updates to any linked bib records when the heading changes
+        PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_authority_auto_update' AND enabled;
+
+        IF NOT FOUND AND NEW.heading <> OLD.heading THEN
+            PERFORM authority.propagate_changes(NEW.id);
+        END IF;
+       
+        DELETE FROM authority.simple_heading WHERE record = NEW.id;
+        DELETE FROM authority.authority_linking WHERE source = NEW.id;
+    END IF;
+
+    INSERT INTO authority.authority_linking (source, target, field)
+        SELECT source, target, field FROM authority.calculate_authority_linking(
+            NEW.id, NEW.control_set, NEW.marc::XML
+        );
+
+    FOR ashs IN SELECT * FROM authority.simple_heading_set(NEW.marc) LOOP
+
+        INSERT INTO authority.simple_heading (record,atag,value,sort_value,thesaurus)
+            VALUES (ashs.record, ashs.atag, ashs.value, ashs.sort_value, ashs.thesaurus);
+            ash_id := CURRVAL('authority.simple_heading_id_seq'::REGCLASS);
+
+        SELECT INTO mbe_row * FROM metabib.browse_entry
+            WHERE value = ashs.value AND sort_value = ashs.sort_value;
+
+        IF FOUND THEN
+            mbe_id := mbe_row.id;
+        ELSE
+            INSERT INTO metabib.browse_entry
+                ( value, sort_value ) VALUES
+                ( ashs.value, ashs.sort_value );
+
+            mbe_id := CURRVAL('metabib.browse_entry_id_seq'::REGCLASS);
+        END IF;
+
+        INSERT INTO metabib.browse_entry_simple_heading_map (entry,simple_heading) VALUES (mbe_id,ash_id);
+
+    END LOOP;
+
+    -- Flatten and insert the afr data
+    PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_authority_full_rec' AND enabled;
+    IF NOT FOUND THEN
+        PERFORM authority.reingest_authority_full_rec(NEW.id);
+        PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_authority_rec_descriptor' AND enabled;
+        IF NOT FOUND THEN
+            PERFORM authority.reingest_authority_rec_descriptor(NEW.id);
+        END IF;
+    END IF;
+
+    RETURN NEW;
+END;
+$func$ LANGUAGE PLPGSQL;
+
+
+-- DROP UNNEEDED LOCAL FUNCTIONS
+-- These per-class KCLS browse pivot helpers are no longer needed now that
+-- browsing goes through the stock metabib.browse()/browse_pivot() path
+-- defined above.
+DROP FUNCTION IF EXISTS metabib.browse_title_authority_refs_pivot(INTEGER[], TEXT);
+DROP FUNCTION IF EXISTS metabib.browse_title_bib_pivot(INTEGER[], TEXT);
+DROP FUNCTION IF EXISTS metabib.browse_title_pivot(INTEGER[], TEXT);
+
+DROP FUNCTION IF EXISTS metabib.browse_author_authority_refs_pivot(INTEGER[], TEXT);
+DROP FUNCTION IF EXISTS metabib.browse_author_bib_pivot(INTEGER[], TEXT);
+DROP FUNCTION IF EXISTS metabib.browse_author_pivot(INTEGER[], TEXT);
+
+DROP FUNCTION IF EXISTS metabib.browse_subject_authority_refs_pivot(INTEGER[], TEXT);
+DROP FUNCTION IF EXISTS metabib.browse_subject_bib_pivot(INTEGER[], TEXT);
+DROP FUNCTION IF EXISTS metabib.browse_subject_pivot(INTEGER[], TEXT);
+
+DROP FUNCTION IF EXISTS metabib.browse_series_authority_refs_pivot(INTEGER[], TEXT);
+DROP FUNCTION IF EXISTS metabib.browse_series_bib_pivot(INTEGER[], TEXT);
+DROP FUNCTION IF EXISTS metabib.browse_series_pivot(INTEGER[], TEXT);
+
+-- Swap the two lines below for a dry run of the deploy transaction.
+--ROLLBACK;
+COMMIT;
+
+-- Authority record partial re-ingest
+--
+-- Toggle ingest flags so the blanket UPDATE below forces a re-ingest of
+-- every authority record without propagating heading changes to bib
+-- records and without rebuilding authority.full_rec data; then restore
+-- the flags to their normal states.
+
+BEGIN;
+
+UPDATE config.internal_flag SET enabled = TRUE
+    WHERE name = 'ingest.reingest.force_on_same_marc';
+
+UPDATE config.internal_flag SET enabled = FALSE
+    WHERE name = 'ingest.disable_authority_auto_update';
+
+-- Upsert instead of a bare INSERT so a pre-existing flag row (e.g. left
+-- over from an earlier run) doesn't abort the deploy with a unique
+-- violation.  config.internal_flag is keyed on name.
+INSERT INTO config.internal_flag (name, enabled)
+    VALUES ('ingest.disable_authority_full_rec', TRUE)
+    ON CONFLICT (name) DO UPDATE SET enabled = TRUE;
+
+COMMIT;
+
+-- Whole-table re-ingest via the authority ingest trigger; the no-op
+-- assignment is intentional.  Run outside a wrapping transaction to
+-- avoid holding one open for the entire (long) pass.
+UPDATE authority.record_entry SET marc = marc;
+
+BEGIN;
+
+UPDATE config.internal_flag SET enabled = FALSE
+    WHERE name = 'ingest.reingest.force_on_same_marc';
+
+UPDATE config.internal_flag SET enabled = TRUE
+    WHERE name = 'ingest.disable_authority_auto_update';
+
+DELETE FROM config.internal_flag
+    WHERE name = 'ingest.disable_authority_full_rec';
+
+COMMIT;
+
+