+++ /dev/null
-
-BEGIN;
-
-ALTER TABLE metabib.browse_entry
- ADD COLUMN create_date TIMESTAMP WITH TIME ZONE
- NOT NULL DEFAULT NOW();
-
--- This function is unused; drop it.
-DROP FUNCTION authority.unlinked_bibs_to_given_auth_text(TEXT, TEXT);
-
-DROP FUNCTION metabib.get_browse_author_entry_marc_record(BIGINT);
-DROP FUNCTION metabib.get_browse_series_entry_marc_record(BIGINT);
-DROP FUNCTION metabib.get_browse_subject_entry_marc_record(BIGINT);
-
--- Retain a custom function for backwards compatibility until we reimplement the
--- browse UI in Angular. Called from Browse.pm
-CREATE OR REPLACE FUNCTION
- metabib.get_browse_entry_marc_record(browse_entry BIGINT, search_class TEXT)
- RETURNS TEXT AS
-$FUNK$
- SELECT are.marc
- FROM authority.record_entry are
- JOIN authority.simple_heading ash ON (are.id = ash.record)
- JOIN metabib.browse_entry_simple_heading_map mbeshm
- ON (ash.id = mbeshm.simple_heading)
- JOIN metabib.browse_entry mbe ON (mbeshm.entry = mbe.id)
- JOIN authority.control_set_authority_field acsaf ON (ash.atag = acsaf.id)
- WHERE mbe.id = browse_entry AND acsaf.tag ILIKE '1__';
-$FUNK$ LANGUAGE SQL STABLE;
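-
--- A minimal usage sketch (commented out; the browse entry id is hypothetical,
--- not part of this upgrade). Note that search_class is accepted only for
--- compatibility with the old class-specific functions and is not referenced
--- in the query above.
---
---   SELECT metabib.get_browse_entry_marc_record(12345, 'author');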
-
--- DROP CLASS-SPECIFIC BROWSE TABLES
-
-DROP TABLE metabib.browse_title_entry_def_map;
-DROP TABLE metabib.browse_title_entry_simple_heading_map;
-DROP TABLE metabib.browse_title_entry;
-
-DROP TABLE metabib.browse_author_entry_def_map;
-DROP TABLE metabib.browse_author_entry_simple_heading_map;
-DROP TABLE metabib.browse_author_entry;
-
-DROP TABLE metabib.browse_subject_entry_def_map;
-DROP TABLE metabib.browse_subject_entry_simple_heading_map;
-DROP TABLE metabib.browse_subject_entry;
-
-DROP TABLE metabib.browse_series_entry_def_map;
-DROP TABLE metabib.browse_series_entry_simple_heading_map;
-DROP TABLE metabib.browse_series_entry;
-
-DROP TABLE metabib.browse_call_number_entry_def_map;
-DROP TABLE metabib.browse_call_number_entry;
-
--- DROP LOCAL FUNCS TO ENSURE NONE STICK AROUND w/ DIFFERENT SIGNATURES
-
-DROP FUNCTION metabib.browse(TEXT, TEXT, INT, INT, BOOLEAN, BIGINT, INT);
-DROP FUNCTION metabib.browse(TEXT, TEXT, INT, INT, BOOLEAN, BIGINT, INT, TEXT);
-DROP FUNCTION metabib.staged_browse(TEXT, INT[], INT, INT[], BOOLEAN, INT, BOOLEAN, INT, INT);
-DROP FUNCTION metabib.staged_browse(TEXT, INT[], INT, INT[], BOOLEAN, INT, BOOLEAN, INT, INT, TEXT);
-DROP FUNCTION metabib.staged_browse(TEXT, INT[], INT, INT[], BOOLEAN, INT, BOOLEAN, INT, INT, TEXT, TEXT);
-
--- RECOVER STOCK FUNCTIONS
-
--- TODO add cmf for field_class=call_number search & browse
-
-CREATE OR REPLACE FUNCTION metabib.reingest_metabib_field_entries( bib_id BIGINT, skip_facet BOOL DEFAULT FALSE, skip_browse BOOL DEFAULT FALSE, skip_search BOOL DEFAULT FALSE ) RETURNS VOID AS $func$
-DECLARE
- fclass RECORD;
- ind_data metabib.field_entry_template%ROWTYPE;
- mbe_row metabib.browse_entry%ROWTYPE;
- mbe_id BIGINT;
- b_skip_facet BOOL;
- b_skip_browse BOOL;
- b_skip_search BOOL;
- value_prepped TEXT;
-BEGIN
-
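-    -- Each b_skip_* flag below ends up TRUE when the caller passed TRUE or,
-    -- failing that (NULLIF turns a FALSE argument into NULL), when the
-    -- corresponding ingest.skip_*_indexing internal flag is enabled.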
- SELECT COALESCE(NULLIF(skip_facet, FALSE), EXISTS (SELECT enabled FROM config.internal_flag WHERE name = 'ingest.skip_facet_indexing' AND enabled)) INTO b_skip_facet;
- SELECT COALESCE(NULLIF(skip_browse, FALSE), EXISTS (SELECT enabled FROM config.internal_flag WHERE name = 'ingest.skip_browse_indexing' AND enabled)) INTO b_skip_browse;
- SELECT COALESCE(NULLIF(skip_search, FALSE), EXISTS (SELECT enabled FROM config.internal_flag WHERE name = 'ingest.skip_search_indexing' AND enabled)) INTO b_skip_search;
-
- PERFORM * FROM config.internal_flag WHERE name = 'ingest.assume_inserts_only' AND enabled;
- IF NOT FOUND THEN
- IF NOT b_skip_search THEN
- FOR fclass IN SELECT * FROM config.metabib_class LOOP
- -- RAISE NOTICE 'Emptying out %', fclass.name;
- EXECUTE $$DELETE FROM metabib.$$ || fclass.name || $$_field_entry WHERE source = $$ || bib_id;
- END LOOP;
- END IF;
- IF NOT b_skip_facet THEN
- DELETE FROM metabib.facet_entry WHERE source = bib_id;
- END IF;
- IF NOT b_skip_browse THEN
- DELETE FROM metabib.browse_entry_def_map WHERE source = bib_id;
- END IF;
- END IF;
-
- FOR ind_data IN SELECT * FROM biblio.extract_metabib_field_entry( bib_id ) LOOP
-
- -- don't store what has been normalized away
- CONTINUE WHEN ind_data.value IS NULL;
-
- IF ind_data.field < 0 THEN
-            ind_data.field := -1 * ind_data.field;
- END IF;
-
- IF ind_data.facet_field AND NOT b_skip_facet THEN
- INSERT INTO metabib.facet_entry (field, source, value)
- VALUES (ind_data.field, ind_data.source, ind_data.value);
- END IF;
-
- IF ind_data.browse_field AND NOT b_skip_browse THEN
- -- A caveat about this SELECT: this should take care of replacing
- -- old mbe rows when data changes, but not if normalization (by
- -- which I mean specifically the output of
- -- evergreen.oils_tsearch2()) changes. It may or may not be
- -- expensive to add a comparison of index_vector to index_vector
- -- to the WHERE clause below.
-
- CONTINUE WHEN ind_data.sort_value IS NULL;
-
- value_prepped := metabib.browse_normalize(ind_data.value, ind_data.field);
- SELECT INTO mbe_row * FROM metabib.browse_entry
- WHERE value = value_prepped AND sort_value = ind_data.sort_value;
-
- IF FOUND THEN
- mbe_id := mbe_row.id;
- ELSE
- INSERT INTO metabib.browse_entry
- ( value, sort_value ) VALUES
- ( value_prepped, ind_data.sort_value );
-
- mbe_id := CURRVAL('metabib.browse_entry_id_seq'::REGCLASS);
- END IF;
-
- INSERT INTO metabib.browse_entry_def_map (entry, def, source, authority)
- VALUES (mbe_id, ind_data.field, ind_data.source, ind_data.authority);
- END IF;
-
- IF ind_data.search_field AND NOT b_skip_search THEN
- -- Avoid inserting duplicate rows
- EXECUTE 'SELECT 1 FROM metabib.' || ind_data.field_class ||
- '_field_entry WHERE field = $1 AND source = $2 AND value = $3'
- INTO mbe_id USING ind_data.field, ind_data.source, ind_data.value;
- -- RAISE NOTICE 'Search for an already matching row returned %', mbe_id;
- IF mbe_id IS NULL THEN
- EXECUTE $$
- INSERT INTO metabib.$$ || ind_data.field_class || $$_field_entry (field, source, value)
- VALUES ($$ ||
- quote_literal(ind_data.field) || $$, $$ ||
- quote_literal(ind_data.source) || $$, $$ ||
- quote_literal(ind_data.value) ||
- $$);$$;
- END IF;
- END IF;
-
- END LOOP;
-
- IF NOT b_skip_search THEN
- PERFORM metabib.update_combined_index_vectors(bib_id);
- END IF;
-
- RETURN;
-END;
-$func$ LANGUAGE PLPGSQL;
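-
--- A usage sketch (commented out; the bib id is hypothetical): re-ingest the
--- browse entries for a single bib record while skipping facet and search
--- indexing.
---
---   SELECT metabib.reingest_metabib_field_entries(12345, TRUE, FALSE, TRUE);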
-
-CREATE OR REPLACE FUNCTION metabib.staged_browse(
- query TEXT,
- fields INT[],
- context_org INT,
- context_locations INT[],
- staff BOOL,
- browse_superpage_size INT,
- count_up_from_zero BOOL, -- if false, count down from -1
- result_limit INT,
- next_pivot_pos INT
-) RETURNS SETOF metabib.flat_browse_entry_appearance AS $p$
-DECLARE
- curs REFCURSOR;
- rec RECORD;
- qpfts_query TEXT;
- aqpfts_query TEXT;
- afields INT[];
- bfields INT[];
- result_row metabib.flat_browse_entry_appearance%ROWTYPE;
- results_skipped INT := 0;
- row_counter INT := 0;
- row_number INT;
- slice_start INT;
- slice_end INT;
- full_end INT;
- all_records BIGINT[];
- all_brecords BIGINT[];
- all_arecords BIGINT[];
- superpage_of_records BIGINT[];
- superpage_size INT;
-BEGIN
- IF count_up_from_zero THEN
- row_number := 0;
- ELSE
- row_number := -1;
- END IF;
-
- OPEN curs FOR EXECUTE query;
-
- LOOP
- FETCH curs INTO rec;
- IF NOT FOUND THEN
- IF result_row.pivot_point IS NOT NULL THEN
- RETURN NEXT result_row;
- END IF;
- RETURN;
- END IF;
-
-
- -- Gather aggregate data based on the MBE row we're looking at now, authority axis
- SELECT INTO all_arecords, result_row.sees, afields
- ARRAY_AGG(DISTINCT abl.bib), -- bibs to check for visibility
- STRING_AGG(DISTINCT aal.source::TEXT, $$,$$), -- authority record ids
- ARRAY_AGG(DISTINCT map.metabib_field) -- authority-tag-linked CMF rows
-
- FROM metabib.browse_entry_simple_heading_map mbeshm
- JOIN authority.simple_heading ash ON ( mbeshm.simple_heading = ash.id )
- JOIN authority.authority_linking aal ON ( ash.record = aal.source )
- JOIN authority.bib_linking abl ON ( aal.target = abl.authority )
- JOIN authority.control_set_auth_field_metabib_field_map_refs map ON (
- ash.atag = map.authority_field
- AND map.metabib_field = ANY(fields)
- )
- WHERE mbeshm.entry = rec.id;
-
-
- -- Gather aggregate data based on the MBE row we're looking at now, bib axis
- SELECT INTO all_brecords, result_row.authorities, bfields
- ARRAY_AGG(DISTINCT source),
- STRING_AGG(DISTINCT authority::TEXT, $$,$$),
- ARRAY_AGG(DISTINCT def)
- FROM metabib.browse_entry_def_map
- WHERE entry = rec.id
- AND def = ANY(fields);
-
- SELECT INTO result_row.fields STRING_AGG(DISTINCT x::TEXT, $$,$$) FROM UNNEST(afields || bfields) x;
-
- result_row.sources := 0;
- result_row.asources := 0;
-
- -- Bib-linked vis checking
- IF ARRAY_UPPER(all_brecords,1) IS NOT NULL THEN
-
- full_end := ARRAY_LENGTH(all_brecords, 1);
- superpage_size := COALESCE(browse_superpage_size, full_end);
- slice_start := 1;
- slice_end := superpage_size;
-
- WHILE result_row.sources = 0 AND slice_start <= full_end LOOP
- superpage_of_records := all_brecords[slice_start:slice_end];
- qpfts_query :=
- 'SELECT NULL::BIGINT AS id, ARRAY[r] AS records, ' ||
- 'NULL AS badges, NULL::NUMERIC AS popularity, ' ||
- '1::NUMERIC AS rel FROM (SELECT UNNEST(' ||
- quote_literal(superpage_of_records) || '::BIGINT[]) AS r) rr';
-
- -- We use search.query_parser_fts() for visibility testing.
- -- We're calling it once per browse-superpage worth of records
- -- out of the set of records related to a given mbe, until we've
- -- either exhausted that set of records or found at least 1
- -- visible record.
-
- SELECT INTO result_row.sources visible
- FROM search.query_parser_fts(
- context_org, NULL, qpfts_query, NULL,
- context_locations, 0, NULL, NULL, FALSE, staff, FALSE
- ) qpfts
- WHERE qpfts.rel IS NULL;
-
- slice_start := slice_start + superpage_size;
- slice_end := slice_end + superpage_size;
- END LOOP;
-
- -- Accurate? Well, probably.
- result_row.accurate := browse_superpage_size IS NULL OR
- browse_superpage_size >= full_end;
-
- END IF;
-
- -- Authority-linked vis checking
- IF ARRAY_UPPER(all_arecords,1) IS NOT NULL THEN
-
- full_end := ARRAY_LENGTH(all_arecords, 1);
- superpage_size := COALESCE(browse_superpage_size, full_end);
- slice_start := 1;
- slice_end := superpage_size;
-
- WHILE result_row.asources = 0 AND slice_start <= full_end LOOP
- superpage_of_records := all_arecords[slice_start:slice_end];
- qpfts_query :=
- 'SELECT NULL::BIGINT AS id, ARRAY[r] AS records, ' ||
- 'NULL AS badges, NULL::NUMERIC AS popularity, ' ||
- '1::NUMERIC AS rel FROM (SELECT UNNEST(' ||
- quote_literal(superpage_of_records) || '::BIGINT[]) AS r) rr';
-
- -- We use search.query_parser_fts() for visibility testing.
- -- We're calling it once per browse-superpage worth of records
-            -- out of the set of records related to a given mbe via authority
-            -- linking, until we've either exhausted that set of records or
-            -- found at least one visible record.
-
- SELECT INTO result_row.asources visible
- FROM search.query_parser_fts(
- context_org, NULL, qpfts_query, NULL,
- context_locations, 0, NULL, NULL, FALSE, staff, FALSE
- ) qpfts
- WHERE qpfts.rel IS NULL;
-
- slice_start := slice_start + superpage_size;
- slice_end := slice_end + superpage_size;
- END LOOP;
-
-
- -- Accurate? Well, probably.
- result_row.aaccurate := browse_superpage_size IS NULL OR
- browse_superpage_size >= full_end;
-
- END IF;
-
- IF result_row.sources > 0 OR result_row.asources > 0 THEN
-
- -- The function that calls this function needs row_number in order
- -- to correctly order results from two different runs of this
-            -- function.
- result_row.row_number := row_number;
-
- -- Now, if row_counter is still less than limit, return a row. If
- -- not, but it is less than next_pivot_pos, continue on without
- -- returning actual result rows until we find
- -- that next pivot, and return it.
-
- IF row_counter < result_limit THEN
- result_row.browse_entry := rec.id;
- result_row.value := rec.value;
-
- RETURN NEXT result_row;
- ELSE
- result_row.browse_entry := NULL;
- result_row.authorities := NULL;
- result_row.fields := NULL;
- result_row.value := NULL;
- result_row.sources := NULL;
- result_row.sees := NULL;
- result_row.accurate := NULL;
- result_row.aaccurate := NULL;
- result_row.pivot_point := rec.id;
-
- IF row_counter >= next_pivot_pos THEN
- RETURN NEXT result_row;
- RETURN;
- END IF;
- END IF;
-
- IF count_up_from_zero THEN
- row_number := row_number + 1;
- ELSE
- row_number := row_number - 1;
- END IF;
-
- -- row_counter is different from row_number.
- -- It simply counts up from zero so that we know when
- -- we've reached our limit.
- row_counter := row_counter + 1;
- END IF;
- END LOOP;
-END;
-$p$ LANGUAGE PLPGSQL;
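-
--- metabib.staged_browse() is an internal helper; metabib.browse() below builds
--- the cursor query and limits for it. A direct call would look roughly like
--- this commented-out sketch (the field id, org unit, and limits are
--- hypothetical):
---
---   SELECT * FROM metabib.staged_browse(
---       'SELECT id, value, sort_value FROM metabib.browse_entry ORDER BY sort_value',
---       ARRAY[8], 1, NULL::INT[], FALSE, NULL::INT, TRUE, 10, 20);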
-
-
-CREATE OR REPLACE FUNCTION metabib.browse(
- search_field INT[],
- browse_term TEXT,
- context_org INT DEFAULT NULL,
- context_loc_group INT DEFAULT NULL,
- staff BOOL DEFAULT FALSE,
- pivot_id BIGINT DEFAULT NULL,
- result_limit INT DEFAULT 10
-) RETURNS SETOF metabib.flat_browse_entry_appearance AS $p$
-DECLARE
- core_query TEXT;
- back_query TEXT;
- forward_query TEXT;
- pivot_sort_value TEXT;
- pivot_sort_fallback TEXT;
- context_locations INT[];
- browse_superpage_size INT;
- results_skipped INT := 0;
- back_limit INT;
- back_to_pivot INT;
- forward_limit INT;
- forward_to_pivot INT;
-BEGIN
- -- First, find the pivot if we were given a browse term but not a pivot.
- IF pivot_id IS NULL THEN
- pivot_id := metabib.browse_pivot(search_field, browse_term);
- END IF;
-
- SELECT INTO pivot_sort_value, pivot_sort_fallback
- sort_value, value FROM metabib.browse_entry WHERE id = pivot_id;
-
- -- Bail if we couldn't find a pivot.
- IF pivot_sort_value IS NULL THEN
- RETURN;
- END IF;
-
-    -- Transform the context_loc_group argument (if any) (locg at the
-    -- TPAC layer) into a form we'll be able to use.
- IF context_loc_group IS NOT NULL THEN
- SELECT INTO context_locations ARRAY_AGG(location)
- FROM asset.copy_location_group_map
- WHERE lgroup = context_loc_group;
- END IF;
-
- -- Get the configured size of browse superpages.
- SELECT INTO browse_superpage_size value -- NULL ok
- FROM config.global_flag
- WHERE enabled AND name = 'opac.browse.holdings_visibility_test_limit';
-
- -- First we're going to search backward from the pivot, then we're going
- -- to search forward. In each direction, we need two limits. At the
- -- lesser of the two limits, we delineate the edge of the result set
- -- we're going to return. At the greater of the two limits, we find the
- -- pivot value that would represent an offset from the current pivot
- -- at a distance of one "page" in either direction, where a "page" is a
- -- result set of the size specified in the "result_limit" argument.
- --
- -- The two limits in each direction make four derived values in total,
- -- and we calculate them now.
- back_limit := CEIL(result_limit::FLOAT / 2);
- back_to_pivot := result_limit;
- forward_limit := result_limit / 2;
- forward_to_pivot := result_limit - 1;
-
- -- This is the meat of the SQL query that finds browse entries. We'll
- -- pass this to a function which uses it with a cursor, so that individual
- -- rows may be fetched in a loop until some condition is satisfied, without
- -- waiting for a result set of fixed size to be collected all at once.
- core_query := '
-SELECT mbe.id,
- mbe.value,
- mbe.sort_value
- FROM metabib.browse_entry mbe
- WHERE (
- EXISTS ( -- are there any bibs using this mbe via the requested fields?
- SELECT 1
- FROM metabib.browse_entry_def_map mbedm
- WHERE mbedm.entry = mbe.id AND mbedm.def = ANY(' || quote_literal(search_field) || ')
- LIMIT 1
- ) OR EXISTS ( -- are there any authorities using this mbe via the requested fields?
- SELECT 1
- FROM metabib.browse_entry_simple_heading_map mbeshm
- JOIN authority.simple_heading ash ON ( mbeshm.simple_heading = ash.id )
- JOIN authority.control_set_auth_field_metabib_field_map_refs map ON (
- ash.atag = map.authority_field
- AND map.metabib_field = ANY(' || quote_literal(search_field) || ')
- )
- WHERE mbeshm.entry = mbe.id
- )
- ) AND ';
-
- -- This is the variant of the query for browsing backward.
- back_query := core_query ||
- ' mbe.sort_value <= ' || quote_literal(pivot_sort_value) ||
- ' ORDER BY mbe.sort_value DESC, mbe.value DESC ';
-
- -- This variant browses forward.
- forward_query := core_query ||
- ' mbe.sort_value > ' || quote_literal(pivot_sort_value) ||
- ' ORDER BY mbe.sort_value, mbe.value ';
-
- -- We now call the function which applies a cursor to the provided
- -- queries, stopping at the appropriate limits and also giving us
- -- the next page's pivot.
- RETURN QUERY
- SELECT * FROM metabib.staged_browse(
- back_query, search_field, context_org, context_locations,
- staff, browse_superpage_size, TRUE, back_limit, back_to_pivot
- ) UNION
- SELECT * FROM metabib.staged_browse(
- forward_query, search_field, context_org, context_locations,
- staff, browse_superpage_size, FALSE, forward_limit, forward_to_pivot
- ) ORDER BY row_number DESC;
-
-END;
-$p$ LANGUAGE PLPGSQL;
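-
--- A commented-out usage sketch for the field-array form (the metabib_field
--- ids, browse term, and org unit are hypothetical):
---
---   SELECT * FROM metabib.browse(ARRAY[8,9], 'twain mark', 1);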
-
-CREATE OR REPLACE FUNCTION metabib.browse(
- search_class TEXT,
- browse_term TEXT,
- context_org INT DEFAULT NULL,
- context_loc_group INT DEFAULT NULL,
- staff BOOL DEFAULT FALSE,
- pivot_id BIGINT DEFAULT NULL,
- result_limit INT DEFAULT 10
-) RETURNS SETOF metabib.flat_browse_entry_appearance AS $p$
-BEGIN
- RETURN QUERY SELECT * FROM metabib.browse(
- (SELECT COALESCE(ARRAY_AGG(id), ARRAY[]::INT[])
- FROM config.metabib_field WHERE field_class = search_class),
- browse_term,
- context_org,
- context_loc_group,
- staff,
- pivot_id,
- result_limit
- );
-END;
-$p$ LANGUAGE PLPGSQL;
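-
--- The class-based wrapper above resolves a search class name to its
--- metabib_field ids and delegates to the field-array form. A commented-out
--- sketch (class, term, and org unit are illustrative):
---
---   SELECT * FROM metabib.browse('author', 'twain mark', 1);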
-
-
-CREATE OR REPLACE FUNCTION metabib.remap_metarecord_for_bib( bib_id BIGINT, fp TEXT, bib_is_deleted BOOL DEFAULT FALSE, retain_deleted BOOL DEFAULT FALSE ) RETURNS BIGINT AS $func$
-DECLARE
- new_mapping BOOL := TRUE;
- source_count INT;
- old_mr BIGINT;
- tmp_mr metabib.metarecord%ROWTYPE;
- deleted_mrs BIGINT[];
-BEGIN
-
-    -- We need to handle the case where this bib is the (now deleted) master record of an MR
- IF bib_is_deleted THEN
- FOR old_mr IN SELECT id FROM metabib.metarecord WHERE master_record = bib_id LOOP
-
- IF NOT retain_deleted THEN -- Go away for any MR that we're master of, unless retained
- DELETE FROM metabib.metarecord_source_map WHERE source = bib_id;
- END IF;
-
- -- Now, are there any more sources on this MR?
- SELECT COUNT(*) INTO source_count FROM metabib.metarecord_source_map WHERE metarecord = old_mr;
-
- IF source_count = 0 AND NOT retain_deleted THEN -- No other records
- deleted_mrs := ARRAY_APPEND(deleted_mrs, old_mr); -- Just in case...
- DELETE FROM metabib.metarecord WHERE id = old_mr;
-
-        ELSE -- indeed there are. Update it with a null cache and a recalculated master record
- UPDATE metabib.metarecord
- SET mods = NULL,
- master_record = ( SELECT id FROM biblio.record_entry WHERE fingerprint = fp AND NOT deleted ORDER BY quality DESC LIMIT 1)
- WHERE id = old_mr;
- END IF;
- END LOOP;
-
- ELSE -- insert or update
-
- FOR tmp_mr IN SELECT m.* FROM metabib.metarecord m JOIN metabib.metarecord_source_map s ON (s.metarecord = m.id) WHERE s.source = bib_id LOOP
-
-            -- Find the first metarecord whose fingerprint matches the new fingerprint
- IF old_mr IS NULL AND fp = tmp_mr.fingerprint THEN
- old_mr := tmp_mr.id;
- new_mapping := FALSE;
-
- ELSE -- Our fingerprint changed ... maybe remove the old MR
- DELETE FROM metabib.metarecord_source_map WHERE metarecord = tmp_mr.id AND source = bib_id; -- remove the old source mapping
- SELECT COUNT(*) INTO source_count FROM metabib.metarecord_source_map WHERE metarecord = tmp_mr.id;
- IF source_count = 0 THEN -- No other records
- deleted_mrs := ARRAY_APPEND(deleted_mrs, tmp_mr.id);
- DELETE FROM metabib.metarecord WHERE id = tmp_mr.id;
- END IF;
- END IF;
-
- END LOOP;
-
- -- we found no suitable, preexisting MR based on old source maps
- IF old_mr IS NULL THEN
- SELECT id INTO old_mr FROM metabib.metarecord WHERE fingerprint = fp; -- is there one for our current fingerprint?
-
- IF old_mr IS NULL THEN -- nope, create one and grab its id
- INSERT INTO metabib.metarecord ( fingerprint, master_record ) VALUES ( fp, bib_id );
- SELECT id INTO old_mr FROM metabib.metarecord WHERE fingerprint = fp;
-
-        ELSE -- indeed there is. Update it with a null cache and a recalculated master record
- UPDATE metabib.metarecord
- SET mods = NULL,
- master_record = ( SELECT id FROM biblio.record_entry WHERE fingerprint = fp AND NOT deleted ORDER BY quality DESC LIMIT 1)
- WHERE id = old_mr;
- END IF;
-
- ELSE -- there was one we already attached to, update its mods cache and master_record
- UPDATE metabib.metarecord
- SET mods = NULL,
- master_record = ( SELECT id FROM biblio.record_entry WHERE fingerprint = fp AND NOT deleted ORDER BY quality DESC LIMIT 1)
- WHERE id = old_mr;
- END IF;
-
- IF new_mapping THEN
- INSERT INTO metabib.metarecord_source_map (metarecord, source) VALUES (old_mr, bib_id); -- new source mapping
- END IF;
-
- END IF;
-
- IF ARRAY_UPPER(deleted_mrs,1) > 0 THEN
- UPDATE action.hold_request SET target = old_mr WHERE target IN ( SELECT unnest(deleted_mrs) ) AND hold_type = 'M'; -- if we had to delete any MRs above, make sure their holds are moved
- END IF;
-
- RETURN old_mr;
-
-END;
-$func$ LANGUAGE PLPGSQL;
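-
--- A usage sketch (commented out; the bib id is hypothetical): remap a bib to
--- a metarecord using its current fingerprint.
---
---   SELECT metabib.remap_metarecord_for_bib(
---       12345,
---       (SELECT fingerprint FROM biblio.record_entry WHERE id = 12345));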
-
-
-
--- AFTER UPDATE OR INSERT trigger for authority.record_entry
-CREATE OR REPLACE FUNCTION authority.indexing_ingest_or_delete () RETURNS TRIGGER AS $func$
-DECLARE
- ashs authority.simple_heading%ROWTYPE;
- mbe_row metabib.browse_entry%ROWTYPE;
- mbe_id BIGINT;
- ash_id BIGINT;
-BEGIN
-
- IF NEW.deleted IS TRUE THEN -- If this authority is deleted
- DELETE FROM authority.bib_linking WHERE authority = NEW.id; -- Avoid updating fields in bibs that are no longer visible
- DELETE FROM authority.full_rec WHERE record = NEW.id; -- Avoid validating fields against deleted authority records
- DELETE FROM authority.simple_heading WHERE record = NEW.id;
-        -- Should we also remove matching $0 subfields from controlled bib fields at the same time?
-
-        -- XXX What do we do about the actual linking subfields present in
- -- authority records that target this one when this happens?
- DELETE FROM authority.authority_linking
- WHERE source = NEW.id OR target = NEW.id;
-
- RETURN NEW; -- and we're done
- END IF;
-
- IF TG_OP = 'UPDATE' THEN -- re-ingest?
- PERFORM * FROM config.internal_flag WHERE name = 'ingest.reingest.force_on_same_marc' AND enabled;
-
- IF NOT FOUND AND OLD.marc = NEW.marc THEN -- don't do anything if the MARC didn't change
- RETURN NEW;
- END IF;
-
- -- Unless there's a setting stopping us, propagate these updates to any linked bib records when the heading changes
- PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_authority_auto_update' AND enabled;
-
- IF NOT FOUND AND NEW.heading <> OLD.heading THEN
- PERFORM authority.propagate_changes(NEW.id);
- END IF;
-
- DELETE FROM authority.simple_heading WHERE record = NEW.id;
- DELETE FROM authority.authority_linking WHERE source = NEW.id;
- END IF;
-
- INSERT INTO authority.authority_linking (source, target, field)
- SELECT source, target, field FROM authority.calculate_authority_linking(
- NEW.id, NEW.control_set, NEW.marc::XML
- );
-
- FOR ashs IN SELECT * FROM authority.simple_heading_set(NEW.marc) LOOP
-
- INSERT INTO authority.simple_heading (record,atag,value,sort_value,thesaurus)
- VALUES (ashs.record, ashs.atag, ashs.value, ashs.sort_value, ashs.thesaurus);
- ash_id := CURRVAL('authority.simple_heading_id_seq'::REGCLASS);
-
- SELECT INTO mbe_row * FROM metabib.browse_entry
- WHERE value = ashs.value AND sort_value = ashs.sort_value;
-
- IF FOUND THEN
- mbe_id := mbe_row.id;
- ELSE
- INSERT INTO metabib.browse_entry
- ( value, sort_value ) VALUES
- ( ashs.value, ashs.sort_value );
-
- mbe_id := CURRVAL('metabib.browse_entry_id_seq'::REGCLASS);
- END IF;
-
- INSERT INTO metabib.browse_entry_simple_heading_map (entry,simple_heading) VALUES (mbe_id,ash_id);
-
- END LOOP;
-
-    -- Flatten and insert the authority.full_rec (afr) data
- PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_authority_full_rec' AND enabled;
- IF NOT FOUND THEN
- PERFORM authority.reingest_authority_full_rec(NEW.id);
- PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_authority_rec_descriptor' AND enabled;
- IF NOT FOUND THEN
- PERFORM authority.reingest_authority_rec_descriptor(NEW.id);
- END IF;
- END IF;
-
- RETURN NEW;
-END;
-$func$ LANGUAGE PLPGSQL;
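-
--- Note: CREATE OR REPLACE swaps the function body in place, so the existing
--- AFTER UPDATE OR INSERT trigger on authority.record_entry (see the comment
--- above) keeps firing this function without needing to be recreated.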
-
-
--- DROP UNNEEDED LOCAL FUNCTIONS
-DROP FUNCTION IF EXISTS metabib.browse_title_authority_refs_pivot(INTEGER[], TEXT);
-DROP FUNCTION IF EXISTS metabib.browse_title_bib_pivot(INTEGER[], TEXT);
-DROP FUNCTION IF EXISTS metabib.browse_title_pivot(INTEGER[], TEXT);
-
-DROP FUNCTION IF EXISTS metabib.browse_author_authority_refs_pivot(INTEGER[], TEXT);
-DROP FUNCTION IF EXISTS metabib.browse_author_bib_pivot(INTEGER[], TEXT);
-DROP FUNCTION IF EXISTS metabib.browse_author_pivot(INTEGER[], TEXT);
-
-DROP FUNCTION IF EXISTS metabib.browse_subject_authority_refs_pivot(INTEGER[], TEXT);
-DROP FUNCTION IF EXISTS metabib.browse_subject_bib_pivot(INTEGER[], TEXT);
-DROP FUNCTION IF EXISTS metabib.browse_subject_pivot(INTEGER[], TEXT);
-
-DROP FUNCTION IF EXISTS metabib.browse_series_authority_refs_pivot(INTEGER[], TEXT);
-DROP FUNCTION IF EXISTS metabib.browse_series_bib_pivot(INTEGER[], TEXT);
-DROP FUNCTION IF EXISTS metabib.browse_series_pivot(INTEGER[], TEXT);
-
-COMMIT;
-
--- Authority record partial re-ingest
-
-BEGIN;
-
-UPDATE config.internal_flag SET enabled = TRUE
- WHERE name = 'ingest.reingest.force_on_same_marc';
-
-UPDATE config.internal_flag SET enabled = FALSE
- WHERE name = 'ingest.disable_authority_auto_update';
-
-INSERT INTO config.internal_flag (name, enabled)
- VALUES ('ingest.disable_authority_full_rec', TRUE);
-
-COMMIT;
-
--- Run the reingest outside of an explicit transaction block; this single UPDATE is already atomic on its own.
-UPDATE authority.record_entry SET marc = marc;
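-
--- On a large authority dataset the single UPDATE above can run for a long
--- time. A rough, unofficial alternative is to batch by id range so each
--- statement commits on its own, e.g.:
---
---   UPDATE authority.record_entry SET marc = marc WHERE id BETWEEN 1 AND 100000;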
-
-BEGIN;
-
-UPDATE config.internal_flag SET enabled = FALSE
- WHERE name = 'ingest.reingest.force_on_same_marc';
-
-UPDATE config.internal_flag SET enabled = TRUE
- WHERE name = 'ingest.disable_authority_auto_update';
-
-DELETE FROM config.internal_flag
- WHERE name = 'ingest.disable_authority_full_rec';
-
-COMMIT;
-
-