From c8bec697257c1bcd3e2d672df499b0558934a4ab Mon Sep 17 00:00:00 2001
From: Bill Erickson
Date: Mon, 29 Jan 2018 16:25:03 -0500
Subject: [PATCH] JBAS-1802 2.10->2.12 SQL Upgrade

Signed-off-by: Bill Erickson
---
 KCLS/sql/schema/deploy/2.10-to-2.12-upgrade.sql | 5512 +++++++++++++++++++++++
 KCLS/sql/schema/revert/2.10-to-2.12-upgrade.sql |    7 +
 KCLS/sql/schema/sqitch.plan                     |    1 +
 KCLS/sql/schema/verify/2.10-to-2.12-upgrade.sql |    7 +
 4 files changed, 5527 insertions(+)
 create mode 100644 KCLS/sql/schema/deploy/2.10-to-2.12-upgrade.sql
 create mode 100644 KCLS/sql/schema/revert/2.10-to-2.12-upgrade.sql
 create mode 100644 KCLS/sql/schema/verify/2.10-to-2.12-upgrade.sql

diff --git a/KCLS/sql/schema/deploy/2.10-to-2.12-upgrade.sql b/KCLS/sql/schema/deploy/2.10-to-2.12-upgrade.sql
new file mode 100644
index 0000000000..d2a3c7bdf3
--- /dev/null
+++ b/KCLS/sql/schema/deploy/2.10-to-2.12-upgrade.sql
@@ -0,0 +1,5512 @@
+
+BEGIN;
+
+SELECT 'Starting Upgrade: ', CLOCK_TIMESTAMP();
+
+-- DEPS 0979
+
+-- Replace connectby from the tablefunc extension with CTEs; read-only tree helpers are STABLE so the planner may inline them
+
+CREATE OR REPLACE FUNCTION permission.grp_ancestors( INT ) RETURNS SETOF permission.grp_tree AS $$
+    WITH RECURSIVE grp_ancestors_distance(id, distance) AS (
+            SELECT $1, 0
+        UNION
+            SELECT ou.parent, ouad.distance+1
+            FROM permission.grp_tree ou JOIN grp_ancestors_distance ouad ON (ou.id = ouad.id)
+            WHERE ou.parent IS NOT NULL
+    )
+    SELECT ou.* FROM permission.grp_tree ou JOIN grp_ancestors_distance ouad USING (id) ORDER BY ouad.distance DESC;
+$$ LANGUAGE SQL STABLE ROWS 1;
+
+-- Add a utility function to find descendant groups.
+ +CREATE OR REPLACE FUNCTION permission.grp_descendants( INT ) RETURNS SETOF permission.grp_tree AS $$ + WITH RECURSIVE descendant_depth AS ( + SELECT gr.id, + gr.parent + FROM permission.grp_tree gr + WHERE gr.id = $1 + UNION ALL + SELECT gr.id, + gr.parent + FROM permission.grp_tree gr + JOIN descendant_depth dd ON (dd.id = gr.parent) + ) SELECT gr.* FROM permission.grp_tree gr JOIN descendant_depth USING (id); +$$ LANGUAGE SQL ROWS 1; + +-- Add utility functions to work with permission groups as general tree-ish sets. + +CREATE OR REPLACE FUNCTION permission.grp_tree_full_path ( INT ) RETURNS SETOF permission.grp_tree AS $$ + SELECT * + FROM permission.grp_ancestors($1) + UNION + SELECT * + FROM permission.grp_descendants($1); +$$ LANGUAGE SQL STABLE ROWS 1; + +CREATE OR REPLACE FUNCTION permission.grp_tree_combined_ancestors ( INT, INT ) RETURNS SETOF permission.grp_tree AS $$ + SELECT * + FROM permission.grp_ancestors($1) + UNION + SELECT * + FROM permission.grp_ancestors($2); +$$ LANGUAGE SQL STABLE ROWS 1; + +CREATE OR REPLACE FUNCTION permission.grp_tree_common_ancestors ( INT, INT ) RETURNS SETOF permission.grp_tree AS $$ + SELECT * + FROM permission.grp_ancestors($1) + INTERSECT + SELECT * + FROM permission.grp_ancestors($2); +$$ LANGUAGE SQL STABLE ROWS 1; + +-- DEPS 0980 + +ALTER TABLE vandelay.merge_profile ADD COLUMN update_bib_source BOOLEAN NOT NULL DEFAULT false; +UPDATE vandelay.merge_profile SET update_bib_source = true WHERE id=2; + +CREATE OR REPLACE FUNCTION vandelay.overlay_bib_record ( import_id BIGINT, eg_id BIGINT, merge_profile_id INT ) RETURNS BOOL AS $$ +DECLARE + editor_string TEXT; + editor_id INT; + v_marc TEXT; + v_bib_source INT; + update_fields TEXT[]; + update_query TEXT; + update_bib BOOL; +BEGIN + + SELECT q.marc, q.bib_source INTO v_marc, v_bib_source + FROM vandelay.queued_bib_record q + JOIN vandelay.bib_match m ON (m.queued_record = q.id AND q.id = import_id) + LIMIT 1; + + IF v_marc IS NULL THEN + -- RAISE NOTICE 'no marc 
for vandelay or bib record'; + RETURN FALSE; + END IF; + + IF vandelay.template_overlay_bib_record( v_marc, eg_id, merge_profile_id) THEN + UPDATE vandelay.queued_bib_record + SET imported_as = eg_id, + import_time = NOW() + WHERE id = import_id; + + SELECT q.update_bib_source INTO update_bib FROM vandelay.merge_profile q where q.id = merge_profile_id; + + IF update_bib THEN + editor_string := (oils_xpath('//*[@tag="905"]/*[@code="u"]/text()',v_marc))[1]; + + IF editor_string IS NOT NULL AND editor_string <> '' THEN + SELECT usr INTO editor_id FROM actor.card WHERE barcode = editor_string; + + IF editor_id IS NULL THEN + SELECT id INTO editor_id FROM actor.usr WHERE usrname = editor_string; + END IF; + + IF editor_id IS NOT NULL THEN + --only update the edit date if we have a valid editor + update_fields := ARRAY_APPEND(update_fields, 'editor = ' || editor_id || ', edit_date = NOW()'); + END IF; + END IF; + + IF v_bib_source IS NOT NULL THEN + update_fields := ARRAY_APPEND(update_fields, 'source = ' || v_bib_source); + END IF; + + IF ARRAY_LENGTH(update_fields, 1) > 0 THEN + update_query := 'UPDATE biblio.record_entry SET ' || ARRAY_TO_STRING(update_fields, ',') || ' WHERE id = ' || eg_id || ';'; + --RAISE NOTICE 'query: %', update_query; + EXECUTE update_query; + END IF; + END IF; + + RETURN TRUE; + END IF; + + -- RAISE NOTICE 'update of biblio.record_entry failed'; + + RETURN FALSE; + +END; +$$ LANGUAGE PLPGSQL; + + +-- DEPS 0982 + +CREATE OR REPLACE FUNCTION permission.usr_has_perm_at_nd( + user_id IN INTEGER, + perm_code IN TEXT +) +RETURNS SETOF INTEGER AS $$ +-- +-- Return a set of all the org units for which a given user has a given +-- permission, granted directly (not through inheritance from a parent +-- org unit). +-- +-- The permissions apply to a minimum depth of the org unit hierarchy, +-- for the org unit(s) to which the user is assigned. (They also apply +-- to the subordinates of those org units, but we don't report the +-- subordinates here.) 
+-- +-- For purposes of this function, the permission.usr_work_ou_map table +-- defines which users belong to which org units. I.e. we ignore the +-- home_ou column of actor.usr. +-- +-- The result set may contain duplicates, which should be eliminated +-- by a DISTINCT clause. +-- +DECLARE + b_super BOOLEAN; + n_perm INTEGER; + n_min_depth INTEGER; + n_work_ou INTEGER; + n_curr_ou INTEGER; + n_depth INTEGER; + n_curr_depth INTEGER; +BEGIN + -- + -- Check for superuser + -- + SELECT INTO b_super + super_user + FROM + actor.usr + WHERE + id = user_id; + -- + IF NOT FOUND THEN + return; -- No user? No permissions. + ELSIF b_super THEN + -- + -- Super user has all permissions everywhere + -- + FOR n_work_ou IN + SELECT + id + FROM + actor.org_unit + WHERE + parent_ou IS NULL + LOOP + RETURN NEXT n_work_ou; + END LOOP; + RETURN; + END IF; + -- + -- Translate the permission name + -- to a numeric permission id + -- + SELECT INTO n_perm + id + FROM + permission.perm_list + WHERE + code = perm_code; + -- + IF NOT FOUND THEN + RETURN; -- No such permission + END IF; + -- + -- Find the highest-level org unit (i.e. the minimum depth) + -- to which the permission is applied for this user + -- + -- This query is modified from the one in permission.usr_perms(). 
+ -- + SELECT INTO n_min_depth + min( depth ) + FROM ( + SELECT depth + FROM permission.usr_perm_map upm + WHERE upm.usr = user_id + AND (upm.perm = n_perm OR upm.perm = -1) + UNION + SELECT gpm.depth + FROM permission.grp_perm_map gpm + WHERE (gpm.perm = n_perm OR gpm.perm = -1) + AND gpm.grp IN ( + SELECT (permission.grp_ancestors( + (SELECT profile FROM actor.usr WHERE id = user_id) + )).id + ) + UNION + SELECT p.depth + FROM permission.grp_perm_map p + WHERE (p.perm = n_perm OR p.perm = -1) + AND p.grp IN ( + SELECT (permission.grp_ancestors(m.grp)).id + FROM permission.usr_grp_map m + WHERE m.usr = user_id + ) + ) AS x; + -- + IF NOT FOUND THEN + RETURN; -- No such permission for this user + END IF; + -- + -- Identify the org units to which the user is assigned. Note that + -- we pay no attention to the home_ou column in actor.usr. + -- + FOR n_work_ou IN + SELECT + work_ou + FROM + permission.usr_work_ou_map + WHERE + usr = user_id + LOOP -- For each org unit to which the user is assigned + -- + -- Determine the level of the org unit by a lookup in actor.org_unit_type. + -- We take it on faith that this depth agrees with the actual hierarchy + -- defined in actor.org_unit. + -- + SELECT INTO n_depth + type.depth + FROM + actor.org_unit_type type + INNER JOIN actor.org_unit ou + ON ( ou.ou_type = type.id ) + WHERE + ou.id = n_work_ou; + -- + IF NOT FOUND THEN + CONTINUE; -- Maybe raise exception? + END IF; + -- + -- Compare the depth of the work org unit to the + -- minimum depth, and branch accordingly + -- + IF n_depth = n_min_depth THEN + -- + -- The org unit is at the right depth, so return it. 
+ -- + RETURN NEXT n_work_ou; + ELSIF n_depth > n_min_depth THEN + -- + -- Traverse the org unit tree toward the root, + -- until you reach the minimum depth determined above + -- + n_curr_depth := n_depth; + n_curr_ou := n_work_ou; + WHILE n_curr_depth > n_min_depth LOOP + SELECT INTO n_curr_ou + parent_ou + FROM + actor.org_unit + WHERE + id = n_curr_ou; + -- + IF FOUND THEN + n_curr_depth := n_curr_depth - 1; + ELSE + -- + -- This can happen only if the hierarchy defined in + -- actor.org_unit is corrupted, or out of sync with + -- the depths defined in actor.org_unit_type. + -- Maybe we should raise an exception here, instead + -- of silently ignoring the problem. + -- + n_curr_ou = NULL; + EXIT; + END IF; + END LOOP; + -- + IF n_curr_ou IS NOT NULL THEN + RETURN NEXT n_curr_ou; + END IF; + ELSE + -- + -- The permission applies only at a depth greater than the work org unit. + -- Use connectby() to find all dependent org units at the specified depth. + -- + FOR n_curr_ou IN + SELECT id + FROM actor.org_unit_descendants_distance(n_work_ou) + WHERE + distance = n_min_depth - n_depth + LOOP + RETURN NEXT n_curr_ou; + END LOOP; + END IF; + -- + END LOOP; + -- + RETURN; + -- +END; +$$ LANGUAGE 'plpgsql' ROWS 1; + + +CREATE OR REPLACE FUNCTION permission.usr_has_perm_at_all_nd( + user_id IN INTEGER, + perm_code IN TEXT +) +RETURNS SETOF INTEGER AS $$ +-- +-- Return a set of all the org units for which a given user has a given +-- permission, granted either directly or through inheritance from a parent +-- org unit. +-- +-- The permissions apply to a minimum depth of the org unit hierarchy, and +-- to the subordinates of those org units, for the org unit(s) to which the +-- user is assigned. +-- +-- For purposes of this function, the permission.usr_work_ou_map table +-- assigns users to org units. I.e. we ignore the home_ou column of actor.usr. +-- +-- The result set may contain duplicates, which should be eliminated +-- by a DISTINCT clause. 
+-- +DECLARE + n_head_ou INTEGER; + n_child_ou INTEGER; +BEGIN + FOR n_head_ou IN + SELECT DISTINCT * FROM permission.usr_has_perm_at_nd( user_id, perm_code ) + LOOP + -- + -- The permission applies only at a depth greater than the work org unit. + -- + FOR n_child_ou IN + SELECT id + FROM actor.org_unit_descendants(n_head_ou) + LOOP + RETURN NEXT n_child_ou; + END LOOP; + END LOOP; + -- + RETURN; + -- +END; +$$ LANGUAGE 'plpgsql' ROWS 1; + + +\qecho The tablefunc database extension is no longer necessary for Evergreen. +\qecho Unless you use some of its functions in your own scripts, you may +\qecho want to run the following command in the database to drop it: +\qecho DROP EXTENSION tablefunc; + + +-- DEPS 0983 + +-- Create these so that the queries in the UDFs will validate +CREATE TEMP TABLE precalc_filter_bib_list ( + id BIGINT +) ON COMMIT DROP; + +CREATE TEMP TABLE precalc_bib_filter_bib_list ( + id BIGINT +) ON COMMIT DROP; + +CREATE TEMP TABLE precalc_src_filter_bib_list ( + id BIGINT +) ON COMMIT DROP; + +CREATE TEMP TABLE precalc_copy_filter_bib_list ( + id BIGINT, + copy BIGINT +) ON COMMIT DROP; + +CREATE TEMP TABLE precalc_circ_mod_filter_bib_list ( + id BIGINT, + copy BIGINT +) ON COMMIT DROP; + +CREATE TEMP TABLE precalc_location_filter_bib_list ( + id BIGINT, + copy BIGINT +) ON COMMIT DROP; + +CREATE TEMP TABLE precalc_attr_filter_bib_list ( + id BIGINT +) ON COMMIT DROP; + +CREATE TEMP TABLE precalc_bibs_by_copy_list ( + id BIGINT +) ON COMMIT DROP; + +CREATE TEMP TABLE precalc_bibs_by_uri_list ( + id BIGINT +) ON COMMIT DROP; + +CREATE TEMP TABLE precalc_bibs_by_copy_or_uri_list ( + id BIGINT +) ON COMMIT DROP; + +CREATE TEMP TABLE precalc_bib_list ( + id BIGINT +) ON COMMIT DROP; + +-- rhrr needs to be a real table, so it can be fast. To that end, we use +-- a materialized view updated via a trigger. 
+ +DROP VIEW reporter.hold_request_record; + +CREATE TABLE reporter.hold_request_record AS +SELECT id, + target, + hold_type, + CASE + WHEN hold_type = 'T' + THEN target + WHEN hold_type = 'I' + THEN (SELECT ssub.record_entry FROM serial.subscription ssub JOIN serial.issuance si ON (si.subscription = ssub.id) WHERE si.id = ahr.target) + WHEN hold_type = 'V' + THEN (SELECT cn.record FROM asset.call_number cn WHERE cn.id = ahr.target) + WHEN hold_type IN ('C','R','F') + THEN (SELECT cn.record FROM asset.call_number cn JOIN asset.copy cp ON (cn.id = cp.call_number) WHERE cp.id = ahr.target) + WHEN hold_type = 'M' + THEN (SELECT mr.master_record FROM metabib.metarecord mr WHERE mr.id = ahr.target) + WHEN hold_type = 'P' + THEN (SELECT bmp.record FROM biblio.monograph_part bmp WHERE bmp.id = ahr.target) + END AS bib_record + FROM action.hold_request ahr; + +CREATE UNIQUE INDEX reporter_hold_request_record_pkey_idx ON reporter.hold_request_record (id); +CREATE INDEX reporter_hold_request_record_bib_record_idx ON reporter.hold_request_record (bib_record); + +ALTER TABLE reporter.hold_request_record ADD PRIMARY KEY USING INDEX reporter_hold_request_record_pkey_idx; + +CREATE FUNCTION reporter.hold_request_record_mapper () RETURNS TRIGGER AS $$ +BEGIN + IF TG_OP = 'INSERT' THEN + INSERT INTO reporter.hold_request_record (id, target, hold_type, bib_record) + SELECT NEW.id, + NEW.target, + NEW.hold_type, + CASE + WHEN NEW.hold_type = 'T' + THEN NEW.target + WHEN NEW.hold_type = 'I' + THEN (SELECT ssub.record_entry FROM serial.subscription ssub JOIN serial.issuance si ON (si.subscription = ssub.id) WHERE si.id = NEW.target) + WHEN NEW.hold_type = 'V' + THEN (SELECT cn.record FROM asset.call_number cn WHERE cn.id = NEW.target) + WHEN NEW.hold_type IN ('C','R','F') + THEN (SELECT cn.record FROM asset.call_number cn JOIN asset.copy cp ON (cn.id = cp.call_number) WHERE cp.id = NEW.target) + WHEN NEW.hold_type = 'M' + THEN (SELECT mr.master_record FROM metabib.metarecord mr WHERE 
mr.id = NEW.target) + WHEN NEW.hold_type = 'P' + THEN (SELECT bmp.record FROM biblio.monograph_part bmp WHERE bmp.id = NEW.target) + END AS bib_record; + ELSIF TG_OP = 'UPDATE' AND (OLD.target <> NEW.target OR OLD.hold_type <> NEW.hold_type) THEN + UPDATE reporter.hold_request_record + SET target = NEW.target, + hold_type = NEW.hold_type, + bib_record = CASE + WHEN NEW.hold_type = 'T' + THEN NEW.target + WHEN NEW.hold_type = 'I' + THEN (SELECT ssub.record_entry FROM serial.subscription ssub JOIN serial.issuance si ON (si.subscription = ssub.id) WHERE si.id = NEW.target) + WHEN NEW.hold_type = 'V' + THEN (SELECT cn.record FROM asset.call_number cn WHERE cn.id = NEW.target) + WHEN NEW.hold_type IN ('C','R','F') + THEN (SELECT cn.record FROM asset.call_number cn JOIN asset.copy cp ON (cn.id = cp.call_number) WHERE cp.id = NEW.target) + WHEN NEW.hold_type = 'M' + THEN (SELECT mr.master_record FROM metabib.metarecord mr WHERE mr.id = NEW.target) + WHEN NEW.hold_type = 'P' + THEN (SELECT bmp.record FROM biblio.monograph_part bmp WHERE bmp.id = NEW.target) + END; + END IF; + RETURN NEW; +END; +$$ LANGUAGE PLPGSQL; + +CREATE TRIGGER reporter_hold_request_record_trigger AFTER INSERT OR UPDATE ON action.hold_request + FOR EACH ROW EXECUTE PROCEDURE reporter.hold_request_record_mapper(); + +CREATE SCHEMA rating; + +CREATE TABLE rating.popularity_parameter ( + id INT PRIMARY KEY, + name TEXT NOT NULL UNIQUE, -- i18n + description TEXT, + func TEXT, + require_horizon BOOL NOT NULL DEFAULT FALSE, + require_importance BOOL NOT NULL DEFAULT FALSE, + require_percentile BOOL NOT NULL DEFAULT FALSE +); + + +DO $INSERT$ +BEGIN + IF evergreen.insert_on_deploy() THEN ----------------------------------- +INSERT INTO config.global_flag (name, label, value, enabled) VALUES ( + 'opac.default_sort', + 'OPAC Default Sort (titlesort, authorsort, pubdate, popularity, poprel, or empty for relevance)', + '', + TRUE +); + +INSERT INTO config.global_flag (name, label, value, enabled) VALUES ( + 
'search.max_popularity_importance_multiplier', + oils_i18n_gettext( + 'search.max_popularity_importance_multiplier', + 'Maximum popularity importance multiplier for popularity-adjusted relevance searches (decimal value between 1.0 and 2.0)', + 'cgf', + 'label' + ), + '1.1', + TRUE +); + +INSERT INTO rating.popularity_parameter (id,name,func,require_horizon,require_importance,require_percentile) VALUES + (1,'Holds Filled Over Time','rating.holds_filled_over_time',TRUE,FALSE,TRUE), + (2,'Holds Requested Over Time','rating.holds_placed_over_time',TRUE,FALSE,TRUE), + (3,'Current Hold Count','rating.current_hold_count',FALSE,FALSE,TRUE), + (4,'Circulations Over Time','rating.circs_over_time',TRUE,FALSE,TRUE), + (5,'Current Circulation Count','rating.current_circ_count',FALSE,FALSE,TRUE), + (6,'Out/Total Ratio','rating.checked_out_total_ratio',FALSE,FALSE,TRUE), + (7,'Holds/Total Ratio','rating.holds_total_ratio',FALSE,FALSE,TRUE), + (8,'Holds/Holdable Ratio','rating.holds_holdable_ratio',FALSE,FALSE,TRUE), + (9,'Percent of Time Circulating','rating.percent_time_circulating',FALSE,FALSE,TRUE), + (10,'Bibliographic Record Age (days, newer is better)','rating.bib_record_age',FALSE,FALSE,TRUE), + (11,'Publication Age (days, newer is better)','rating.bib_pub_age',FALSE,FALSE,TRUE), + (12,'On-line Bib has attributes','rating.generic_fixed_rating_by_uri',FALSE,FALSE,FALSE), + (13,'Bib has attributes and copies','rating.generic_fixed_rating_by_copy',FALSE,FALSE,FALSE), + (14,'Bib has attributes and copies or URIs','rating.generic_fixed_rating_by_copy_or_uri',FALSE,FALSE,FALSE), + (15,'Bib has attributes','rating.generic_fixed_rating_global',FALSE,FALSE,FALSE); + + END IF; +END $INSERT$; ------------------------------------------------------------- + + +CREATE TABLE rating.badge ( + id SERIAL PRIMARY KEY, + name TEXT NOT NULL, + description TEXT, + scope INT NOT NULL REFERENCES actor.org_unit (id) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, + weight INT 
NOT NULL DEFAULT 1, + horizon_age INTERVAL, + importance_age INTERVAL, + importance_interval INTERVAL NOT NULL DEFAULT '1 day', + importance_scale NUMERIC CHECK (importance_scale IS NULL OR importance_scale > 0.0), + recalc_interval INTERVAL NOT NULL DEFAULT '1 month', + attr_filter TEXT, + src_filter INT REFERENCES config.bib_source (id) ON UPDATE CASCADE ON DELETE SET NULL DEFERRABLE INITIALLY DEFERRED, + circ_mod_filter TEXT REFERENCES config.circ_modifier (code) ON UPDATE CASCADE ON DELETE SET NULL DEFERRABLE INITIALLY DEFERRED, + loc_grp_filter INT REFERENCES asset.copy_location_group (id) ON UPDATE CASCADE ON DELETE SET NULL DEFERRABLE INITIALLY DEFERRED, + popularity_parameter INT NOT NULL REFERENCES rating.popularity_parameter (id) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, + fixed_rating INT CHECK (fixed_rating IS NULL OR fixed_rating BETWEEN -5 AND 5), + percentile NUMERIC CHECK (percentile IS NULL OR (percentile >= 50.0 AND percentile < 100.0)), + discard INT NOT NULL DEFAULT 0, + last_calc TIMESTAMPTZ, + CONSTRAINT unique_name_scope UNIQUE (name,scope) +); + +CREATE TABLE rating.record_badge_score ( + id BIGSERIAL PRIMARY KEY, + record BIGINT NOT NULL REFERENCES biblio.record_entry (id) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, + badge INT NOT NULL REFERENCES rating.badge (id) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, + score INT NOT NULL CHECK (score BETWEEN -5 AND 5), + CONSTRAINT unique_record_badge UNIQUE (record,badge) +); +CREATE INDEX record_badge_score_badge_idx ON rating.record_badge_score (badge); +CREATE INDEX record_badge_score_record_idx ON rating.record_badge_score (record); + +CREATE OR REPLACE VIEW rating.badge_with_orgs AS + WITH org_scope AS ( + SELECT id, + array_agg(tree) AS orgs + FROM (SELECT id, + (actor.org_unit_descendants(id)).id AS tree + FROM actor.org_unit + ) x + GROUP BY 1 + ) + SELECT b.*, + s.orgs + FROM rating.badge b + JOIN org_scope s ON 
(b.scope = s.id); + +CREATE OR REPLACE FUNCTION rating.precalc_src_filter(src INT) + RETURNS INT AS $f$ +DECLARE + cnt INT := 0; +BEGIN + + SET LOCAL client_min_messages = error; + DROP TABLE IF EXISTS precalc_src_filter_bib_list; + IF src IS NOT NULL THEN + CREATE TEMP TABLE precalc_src_filter_bib_list ON COMMIT DROP AS + SELECT id FROM biblio.record_entry + WHERE source = src AND NOT deleted; + ELSE + CREATE TEMP TABLE precalc_src_filter_bib_list ON COMMIT DROP AS + SELECT id FROM biblio.record_entry + WHERE id > 0 AND NOT deleted; + END IF; + + SELECT count(*) INTO cnt FROM precalc_src_filter_bib_list; + RETURN cnt; +END; +$f$ LANGUAGE PLPGSQL; + +CREATE OR REPLACE FUNCTION rating.precalc_circ_mod_filter(cm TEXT) + RETURNS INT AS $f$ +DECLARE + cnt INT := 0; +BEGIN + + SET LOCAL client_min_messages = error; + DROP TABLE IF EXISTS precalc_circ_mod_filter_bib_list; + IF cm IS NOT NULL THEN + CREATE TEMP TABLE precalc_circ_mod_filter_bib_list ON COMMIT DROP AS + SELECT cn.record AS id, + cp.id AS copy + FROM asset.call_number cn + JOIN asset.copy cp ON (cn.id = cp.call_number) + WHERE cp.circ_modifier = cm + AND NOT cp.deleted; + ELSE + CREATE TEMP TABLE precalc_circ_mod_filter_bib_list ON COMMIT DROP AS + SELECT cn.record AS id, + cp.id AS copy + FROM asset.call_number cn + JOIN asset.copy cp ON (cn.id = cp.call_number) + WHERE NOT cp.deleted; + END IF; + + SELECT count(*) INTO cnt FROM precalc_circ_mod_filter_bib_list; + RETURN cnt; +END; +$f$ LANGUAGE PLPGSQL; + +CREATE OR REPLACE FUNCTION rating.precalc_location_filter(loc INT) + RETURNS INT AS $f$ +DECLARE + cnt INT := 0; +BEGIN + + SET LOCAL client_min_messages = error; + DROP TABLE IF EXISTS precalc_location_filter_bib_list; + IF loc IS NOT NULL THEN + CREATE TEMP TABLE precalc_location_filter_bib_list ON COMMIT DROP AS + SELECT cn.record AS id, + cp.id AS copy + FROM asset.call_number cn + JOIN asset.copy cp ON (cn.id = cp.call_number) + JOIN asset.copy_location_group_map lg ON (cp.location = lg.location) + 
WHERE lg.lgroup = loc + AND NOT cp.deleted; + ELSE + CREATE TEMP TABLE precalc_location_filter_bib_list ON COMMIT DROP AS + SELECT cn.record AS id, + cp.id AS copy + FROM asset.call_number cn + JOIN asset.copy cp ON (cn.id = cp.call_number) + WHERE NOT cp.deleted; + END IF; + + SELECT count(*) INTO cnt FROM precalc_location_filter_bib_list; + RETURN cnt; +END; +$f$ LANGUAGE PLPGSQL; + +-- all or limited... +CREATE OR REPLACE FUNCTION rating.precalc_attr_filter(attr_filter TEXT) + RETURNS INT AS $f$ +DECLARE + cnt INT := 0; + afilter TEXT; +BEGIN + + SET LOCAL client_min_messages = error; + DROP TABLE IF EXISTS precalc_attr_filter_bib_list; + IF attr_filter IS NOT NULL THEN + afilter := metabib.compile_composite_attr(attr_filter); + CREATE TEMP TABLE precalc_attr_filter_bib_list ON COMMIT DROP AS + SELECT source AS id FROM metabib.record_attr_vector_list + WHERE vlist @@ metabib.compile_composite_attr(attr_filter); + ELSE + CREATE TEMP TABLE precalc_attr_filter_bib_list ON COMMIT DROP AS + SELECT source AS id FROM metabib.record_attr_vector_list; + END IF; + + SELECT count(*) INTO cnt FROM precalc_attr_filter_bib_list; + RETURN cnt; +END; +$f$ LANGUAGE PLPGSQL; + +CREATE OR REPLACE FUNCTION rating.precalc_bibs_by_copy(badge_id INT) + RETURNS INT AS $f$ +DECLARE + cnt INT := 0; + badge_row rating.badge_with_orgs%ROWTYPE; + base TEXT; + whr TEXT; +BEGIN + + SELECT * INTO badge_row FROM rating.badge_with_orgs WHERE id = badge_id; + + SET LOCAL client_min_messages = error; + DROP TABLE IF EXISTS precalc_bibs_by_copy_list; + CREATE TEMP TABLE precalc_bibs_by_copy_list ON COMMIT DROP AS + SELECT DISTINCT cn.record AS id + FROM asset.call_number cn + JOIN asset.copy cp ON (cp.call_number = cn.id AND NOT cp.deleted) + JOIN precalc_copy_filter_bib_list f ON (cp.id = f.copy) + WHERE cn.owning_lib = ANY (badge_row.orgs) + AND NOT cn.deleted; + + SELECT count(*) INTO cnt FROM precalc_bibs_by_copy_list; + RETURN cnt; +END; +$f$ LANGUAGE PLPGSQL; + +CREATE OR REPLACE FUNCTION 
rating.precalc_bibs_by_uri(badge_id INT) + RETURNS INT AS $f$ +DECLARE + cnt INT := 0; + badge_row rating.badge_with_orgs%ROWTYPE; +BEGIN + + SELECT * INTO badge_row FROM rating.badge_with_orgs WHERE id = badge_id; + + SET LOCAL client_min_messages = error; + DROP TABLE IF EXISTS precalc_bibs_by_uri_list; + CREATE TEMP TABLE precalc_bibs_by_uri_list ON COMMIT DROP AS + SELECT DISTINCT record AS id + FROM asset.call_number cn + JOIN asset.uri_call_number_map urim ON (urim.call_number = cn.id) + JOIN asset.uri uri ON (urim.uri = uri.id AND uri.active) + WHERE cn.owning_lib = ANY (badge_row.orgs) + AND cn.label = '##URI##' + AND NOT cn.deleted; + + SELECT count(*) INTO cnt FROM precalc_bibs_by_uri_list; + RETURN cnt; +END; +$f$ LANGUAGE PLPGSQL; + +CREATE OR REPLACE FUNCTION rating.precalc_bibs_by_copy_or_uri(badge_id INT) + RETURNS INT AS $f$ +DECLARE + cnt INT := 0; +BEGIN + + PERFORM rating.precalc_bibs_by_copy(badge_id); + PERFORM rating.precalc_bibs_by_uri(badge_id); + + SET LOCAL client_min_messages = error; + DROP TABLE IF EXISTS precalc_bibs_by_copy_or_uri_list; + CREATE TEMP TABLE precalc_bibs_by_copy_or_uri_list ON COMMIT DROP AS + SELECT id FROM precalc_bibs_by_copy_list + UNION + SELECT id FROM precalc_bibs_by_uri_list; + + SELECT count(*) INTO cnt FROM precalc_bibs_by_copy_or_uri_list; + RETURN cnt; +END; +$f$ LANGUAGE PLPGSQL; + + +CREATE OR REPLACE FUNCTION rating.recalculate_badge_score ( badge_id INT, setup_only BOOL DEFAULT FALSE ) RETURNS VOID AS $f$ +DECLARE + badge_row rating.badge%ROWTYPE; + param rating.popularity_parameter%ROWTYPE; +BEGIN + SET LOCAL client_min_messages = error; + + -- Find what we're doing + SELECT * INTO badge_row FROM rating.badge WHERE id = badge_id; + SELECT * INTO param FROM rating.popularity_parameter WHERE id = badge_row.popularity_parameter; + + -- Calculate the filtered bib set, or all bibs if none + PERFORM rating.precalc_attr_filter(badge_row.attr_filter); + PERFORM rating.precalc_src_filter(badge_row.src_filter); + 
PERFORM rating.precalc_circ_mod_filter(badge_row.circ_mod_filter); + PERFORM rating.precalc_location_filter(badge_row.loc_grp_filter); + + -- Bring the bib-level filter lists together + DROP TABLE IF EXISTS precalc_bib_filter_bib_list; + CREATE TEMP TABLE precalc_bib_filter_bib_list ON COMMIT DROP AS + SELECT id FROM precalc_attr_filter_bib_list + INTERSECT + SELECT id FROM precalc_src_filter_bib_list; + + -- Bring the copy-level filter lists together. We're keeping this for bib_by_copy filtering later. + DROP TABLE IF EXISTS precalc_copy_filter_bib_list; + CREATE TEMP TABLE precalc_copy_filter_bib_list ON COMMIT DROP AS + SELECT id, copy FROM precalc_circ_mod_filter_bib_list + INTERSECT + SELECT id, copy FROM precalc_location_filter_bib_list; + + -- Bring the collapsed filter lists together + DROP TABLE IF EXISTS precalc_filter_bib_list; + CREATE TEMP TABLE precalc_filter_bib_list ON COMMIT DROP AS + SELECT id FROM precalc_bib_filter_bib_list + INTERSECT + SELECT id FROM precalc_copy_filter_bib_list; + + CREATE INDEX precalc_filter_bib_list_idx + ON precalc_filter_bib_list (id); + + IF setup_only THEN + RETURN; + END IF; + + -- If it's a fixed-rating badge, just do it ... + IF badge_row.fixed_rating IS NOT NULL THEN + DELETE FROM rating.record_badge_score WHERE badge = badge_id; + EXECUTE $e$ + INSERT INTO rating.record_badge_score (record, badge, score) + SELECT record, $1, $2 FROM $e$ || param.func || $e$($1)$e$ + USING badge_id, badge_row.fixed_rating; + + UPDATE rating.badge SET last_calc = NOW() WHERE id = badge_id; + + RETURN; + END IF; + -- else, calculate! 
+ + -- Make a session-local scratchpad for calculating scores + CREATE TEMP TABLE record_score_scratchpad ( + bib BIGINT, + value NUMERIC + ) ON COMMIT DROP; + + -- Gather raw values + EXECUTE $e$ + INSERT INTO record_score_scratchpad (bib, value) + SELECT * FROM $e$ || param.func || $e$($1)$e$ + USING badge_id; + + IF badge_row.discard > 0 OR badge_row.percentile IS NOT NULL THEN + -- To speed up discard-common + CREATE INDEX record_score_scratchpad_score_idx ON record_score_scratchpad (value); + ANALYZE record_score_scratchpad; + END IF; + + IF badge_row.discard > 0 THEN -- Remove common low values (trim the long tail) + DELETE FROM record_score_scratchpad WHERE value IN ( + SELECT DISTINCT value FROM record_score_scratchpad ORDER BY value LIMIT badge_row.discard + ); + END IF; + + IF badge_row.percentile IS NOT NULL THEN -- Cut population down to exceptional records + DELETE FROM record_score_scratchpad WHERE value <= ( + SELECT value FROM ( + SELECT value, + CUME_DIST() OVER (ORDER BY value) AS p + FROM record_score_scratchpad + ) x WHERE p < badge_row.percentile / 100.0 ORDER BY p DESC LIMIT 1 + ); + END IF; + + + -- And, finally, push new data in + DELETE FROM rating.record_badge_score WHERE badge = badge_id; + INSERT INTO rating.record_badge_score (badge, record, score) + SELECT badge_id, + bib, + GREATEST(ROUND((CUME_DIST() OVER (ORDER BY value)) * 5), 1) AS value + FROM record_score_scratchpad; + + DROP TABLE record_score_scratchpad; + + -- Now, finally-finally, mark the badge as recalculated + UPDATE rating.badge SET last_calc = NOW() WHERE id = badge_id; + + RETURN; +END; +$f$ LANGUAGE PLPGSQL; + +CREATE OR REPLACE FUNCTION rating.holds_filled_over_time(badge_id INT) + RETURNS TABLE (record BIGINT, value NUMERIC) AS $f$ +DECLARE + badge rating.badge_with_orgs%ROWTYPE; + iage INT := 1; + iint INT := NULL; + iscale NUMERIC := NULL; +BEGIN + + SELECT * INTO badge FROM rating.badge_with_orgs WHERE id = badge_id; + + IF badge.horizon_age IS NULL THEN + RAISE 
EXCEPTION 'Badge "%" with id % requires a horizon age but has none.', + badge.name, + badge.id; + END IF; + + PERFORM rating.precalc_bibs_by_copy(badge_id); + + SET LOCAL client_min_messages = error; + DROP TABLE IF EXISTS precalc_bib_list; + CREATE TEMP TABLE precalc_bib_list ON COMMIT DROP AS + SELECT id FROM precalc_filter_bib_list + INTERSECT + SELECT id FROM precalc_bibs_by_copy_list; + + iint := EXTRACT(EPOCH FROM badge.importance_interval); + IF badge.importance_age IS NOT NULL THEN + iage := (EXTRACT(EPOCH FROM badge.importance_age) / iint)::INT; + END IF; + + -- if iscale is smaller than 1, scaling slope will be shallow ... BEWARE! + iscale := COALESCE(badge.importance_scale, 1.0); + + RETURN QUERY + SELECT bib, + SUM( holds * GREATEST( iscale * (iage - hage), 1.0 )) + FROM ( + SELECT f.id AS bib, + (1 + EXTRACT(EPOCH FROM AGE(h.fulfillment_time)) / iint)::INT AS hage, + COUNT(h.id)::INT AS holds + FROM action.hold_request h + JOIN reporter.hold_request_record rhrr ON (rhrr.id = h.id) + JOIN precalc_bib_list f ON (f.id = rhrr.bib_record) + WHERE h.fulfillment_time >= NOW() - badge.horizon_age + AND h.request_lib = ANY (badge.orgs) + GROUP BY 1, 2 + ) x + GROUP BY 1; +END; +$f$ LANGUAGE PLPGSQL STRICT; + +CREATE OR REPLACE FUNCTION rating.holds_placed_over_time(badge_id INT) + RETURNS TABLE (record BIGINT, value NUMERIC) AS $f$ +DECLARE + badge rating.badge_with_orgs%ROWTYPE; + iage INT := 1; + iint INT := NULL; + iscale NUMERIC := NULL; +BEGIN + + SELECT * INTO badge FROM rating.badge_with_orgs WHERE id = badge_id; + + IF badge.horizon_age IS NULL THEN + RAISE EXCEPTION 'Badge "%" with id % requires a horizon age but has none.', + badge.name, + badge.id; + END IF; + + PERFORM rating.precalc_bibs_by_copy(badge_id); + + SET LOCAL client_min_messages = error; + DROP TABLE IF EXISTS precalc_bib_list; + CREATE TEMP TABLE precalc_bib_list ON COMMIT DROP AS + SELECT id FROM precalc_filter_bib_list + INTERSECT + SELECT id FROM precalc_bibs_by_copy_list; + + iint 
:= EXTRACT(EPOCH FROM badge.importance_interval); + IF badge.importance_age IS NOT NULL THEN + iage := (EXTRACT(EPOCH FROM badge.importance_age) / iint)::INT; + END IF; + + -- if iscale is smaller than 1, scaling slope will be shallow ... BEWARE! + iscale := COALESCE(badge.importance_scale, 1.0); + + RETURN QUERY + SELECT bib, + SUM( holds * GREATEST( iscale * (iage - hage), 1.0 )) + FROM ( + SELECT f.id AS bib, + (1 + EXTRACT(EPOCH FROM AGE(h.request_time)) / iint)::INT AS hage, + COUNT(h.id)::INT AS holds + FROM action.hold_request h + JOIN reporter.hold_request_record rhrr ON (rhrr.id = h.id) + JOIN precalc_bib_list f ON (f.id = rhrr.bib_record) + WHERE h.request_time >= NOW() - badge.horizon_age + AND h.request_lib = ANY (badge.orgs) + GROUP BY 1, 2 + ) x + GROUP BY 1; +END; +$f$ LANGUAGE PLPGSQL STRICT; + +CREATE OR REPLACE FUNCTION rating.current_hold_count(badge_id INT) + RETURNS TABLE (record BIGINT, value NUMERIC) AS $f$ +DECLARE + badge rating.badge_with_orgs%ROWTYPE; +BEGIN + + SELECT * INTO badge FROM rating.badge_with_orgs WHERE id = badge_id; + + PERFORM rating.precalc_bibs_by_copy(badge_id); + + DELETE FROM precalc_copy_filter_bib_list WHERE id NOT IN ( + SELECT id FROM precalc_filter_bib_list + INTERSECT + SELECT id FROM precalc_bibs_by_copy_list + ); + + ANALYZE precalc_copy_filter_bib_list; + + RETURN QUERY + SELECT rhrr.bib_record AS bib, + COUNT(DISTINCT h.id)::NUMERIC AS holds + FROM action.hold_request h + JOIN reporter.hold_request_record rhrr ON (rhrr.id = h.id) + JOIN action.hold_copy_map m ON (m.hold = h.id) + JOIN precalc_copy_filter_bib_list cf ON (rhrr.bib_record = cf.id AND m.target_copy = cf.copy) + WHERE h.fulfillment_time IS NULL + AND h.request_lib = ANY (badge.orgs) + GROUP BY 1; +END; +$f$ LANGUAGE PLPGSQL STRICT; + +CREATE OR REPLACE FUNCTION rating.circs_over_time(badge_id INT) + RETURNS TABLE (record BIGINT, value NUMERIC) AS $f$ +DECLARE + badge rating.badge_with_orgs%ROWTYPE; + iage INT := 1; + iint INT := NULL; + iscale 
NUMERIC := NULL; +BEGIN + + SELECT * INTO badge FROM rating.badge_with_orgs WHERE id = badge_id; + + IF badge.horizon_age IS NULL THEN + RAISE EXCEPTION 'Badge "%" with id % requires a horizon age but has none.', + badge.name, + badge.id; + END IF; + + PERFORM rating.precalc_bibs_by_copy(badge_id); + + DELETE FROM precalc_copy_filter_bib_list WHERE id NOT IN ( + SELECT id FROM precalc_filter_bib_list + INTERSECT + SELECT id FROM precalc_bibs_by_copy_list + ); + + ANALYZE precalc_copy_filter_bib_list; + + iint := EXTRACT(EPOCH FROM badge.importance_interval); + IF badge.importance_age IS NOT NULL THEN + iage := (EXTRACT(EPOCH FROM badge.importance_age) / iint)::INT; + END IF; + + -- if iscale is smaller than 1, scaling slope will be shallow ... BEWARE! + iscale := COALESCE(badge.importance_scale, 1.0); + + RETURN QUERY + SELECT bib, + SUM( circs * GREATEST( iscale * (iage - cage), 1.0 )) + FROM ( + SELECT cn.record AS bib, + (1 + EXTRACT(EPOCH FROM AGE(c.xact_start)) / iint)::INT AS cage, + COUNT(c.id)::INT AS circs + FROM action.circulation c + JOIN precalc_copy_filter_bib_list cf ON (c.target_copy = cf.copy) + JOIN asset.copy cp ON (cp.id = c.target_copy) + JOIN asset.call_number cn ON (cn.id = cp.call_number) + WHERE c.xact_start >= NOW() - badge.horizon_age + AND cn.owning_lib = ANY (badge.orgs) + AND c.phone_renewal IS FALSE -- we don't count renewals + AND c.desk_renewal IS FALSE + AND c.opac_renewal IS FALSE + GROUP BY 1, 2 + ) x + GROUP BY 1; +END; +$f$ LANGUAGE PLPGSQL STRICT; + +CREATE OR REPLACE FUNCTION rating.current_circ_count(badge_id INT) + RETURNS TABLE (record BIGINT, value NUMERIC) AS $f$ +DECLARE + badge rating.badge_with_orgs%ROWTYPE; +BEGIN + + SELECT * INTO badge FROM rating.badge_with_orgs WHERE id = badge_id; + + PERFORM rating.precalc_bibs_by_copy(badge_id); + + DELETE FROM precalc_copy_filter_bib_list WHERE id NOT IN ( + SELECT id FROM precalc_filter_bib_list + INTERSECT + SELECT id FROM precalc_bibs_by_copy_list + ); + + ANALYZE 
precalc_copy_filter_bib_list; + + RETURN QUERY + SELECT cn.record AS bib, + COUNT(c.id)::NUMERIC AS circs + FROM action.circulation c + JOIN precalc_copy_filter_bib_list cf ON (c.target_copy = cf.copy) + JOIN asset.copy cp ON (cp.id = c.target_copy) + JOIN asset.call_number cn ON (cn.id = cp.call_number) + WHERE c.checkin_time IS NULL + AND cn.owning_lib = ANY (badge.orgs) + GROUP BY 1; + +END; +$f$ LANGUAGE PLPGSQL STRICT; + +CREATE OR REPLACE FUNCTION rating.checked_out_total_ratio(badge_id INT) + RETURNS TABLE (record BIGINT, value NUMERIC) AS $f$ +DECLARE + badge rating.badge_with_orgs%ROWTYPE; +BEGIN + + SELECT * INTO badge FROM rating.badge_with_orgs WHERE id = badge_id; + + PERFORM rating.precalc_bibs_by_copy(badge_id); + + DELETE FROM precalc_copy_filter_bib_list WHERE id NOT IN ( + SELECT id FROM precalc_filter_bib_list + INTERSECT + SELECT id FROM precalc_bibs_by_copy_list + ); + + ANALYZE precalc_copy_filter_bib_list; + + RETURN QUERY + SELECT bib, + SUM(checked_out)::NUMERIC / SUM(total)::NUMERIC + FROM (SELECT cn.record AS bib, + (cp.status = 1)::INT AS checked_out, + 1 AS total + FROM asset.copy cp + JOIN precalc_copy_filter_bib_list c ON (cp.id = c.copy) + JOIN asset.call_number cn ON (cn.id = cp.call_number) + WHERE cn.owning_lib = ANY (badge.orgs) + ) x + GROUP BY 1; +END; +$f$ LANGUAGE PLPGSQL STRICT; + +CREATE OR REPLACE FUNCTION rating.holds_total_ratio(badge_id INT) + RETURNS TABLE (record BIGINT, value NUMERIC) AS $f$ +DECLARE + badge rating.badge_with_orgs%ROWTYPE; +BEGIN + + SELECT * INTO badge FROM rating.badge_with_orgs WHERE id = badge_id; + + PERFORM rating.precalc_bibs_by_copy(badge_id); + + DELETE FROM precalc_copy_filter_bib_list WHERE id NOT IN ( + SELECT id FROM precalc_filter_bib_list + INTERSECT + SELECT id FROM precalc_bibs_by_copy_list + ); + + ANALYZE precalc_copy_filter_bib_list; + + RETURN QUERY + SELECT cn.record AS bib, + COUNT(DISTINCT m.hold)::NUMERIC / COUNT(DISTINCT cp.id)::NUMERIC + FROM asset.copy cp + JOIN 
precalc_copy_filter_bib_list c ON (cp.id = c.copy) + JOIN asset.call_number cn ON (cn.id = cp.call_number) + JOIN action.hold_copy_map m ON (m.target_copy = cp.id) + WHERE cn.owning_lib = ANY (badge.orgs) + GROUP BY 1; +END; +$f$ LANGUAGE PLPGSQL STRICT; + +CREATE OR REPLACE FUNCTION rating.holds_holdable_ratio(badge_id INT) + RETURNS TABLE (record BIGINT, value NUMERIC) AS $f$ +DECLARE + badge rating.badge_with_orgs%ROWTYPE; +BEGIN + + SELECT * INTO badge FROM rating.badge_with_orgs WHERE id = badge_id; + + PERFORM rating.precalc_bibs_by_copy(badge_id); + + DELETE FROM precalc_copy_filter_bib_list WHERE id NOT IN ( + SELECT id FROM precalc_filter_bib_list + INTERSECT + SELECT id FROM precalc_bibs_by_copy_list + ); + + ANALYZE precalc_copy_filter_bib_list; + + RETURN QUERY + SELECT cn.record AS bib, + COUNT(DISTINCT m.hold)::NUMERIC / COUNT(DISTINCT cp.id)::NUMERIC + FROM asset.copy cp + JOIN precalc_copy_filter_bib_list c ON (cp.id = c.copy) + JOIN asset.copy_location cl ON (cl.id = cp.location) + JOIN config.copy_status cs ON (cs.id = cp.status) + JOIN asset.call_number cn ON (cn.id = cp.call_number) + JOIN action.hold_copy_map m ON (m.target_copy = cp.id) + WHERE cn.owning_lib = ANY (badge.orgs) + AND cp.holdable IS TRUE + AND cl.holdable IS TRUE + AND cs.holdable IS TRUE + GROUP BY 1; +END; +$f$ LANGUAGE PLPGSQL STRICT; + +CREATE OR REPLACE FUNCTION rating.bib_record_age(badge_id INT) + RETURNS TABLE (record BIGINT, value NUMERIC) AS $f$ +DECLARE + badge rating.badge_with_orgs%ROWTYPE; +BEGIN + + SELECT * INTO badge FROM rating.badge_with_orgs WHERE id = badge_id; + + PERFORM rating.precalc_bibs_by_copy_or_uri(badge_id); + + SET LOCAL client_min_messages = error; + DROP TABLE IF EXISTS precalc_bib_list; + CREATE TEMP TABLE precalc_bib_list ON COMMIT DROP AS + SELECT id FROM precalc_filter_bib_list + INTERSECT + SELECT id FROM precalc_bibs_by_copy_or_uri_list; + + RETURN QUERY + SELECT b.id, + 1.0 / EXTRACT(EPOCH FROM AGE(b.create_date))::NUMERIC + 1.0 + FROM 
precalc_bib_list pop + JOIN biblio.record_entry b ON (b.id = pop.id); +END; +$f$ LANGUAGE PLPGSQL STRICT; + +CREATE OR REPLACE FUNCTION rating.bib_pub_age(badge_id INT) + RETURNS TABLE (record BIGINT, value NUMERIC) AS $f$ +DECLARE + badge rating.badge_with_orgs%ROWTYPE; +BEGIN + + SELECT * INTO badge FROM rating.badge_with_orgs WHERE id = badge_id; + + PERFORM rating.precalc_bibs_by_copy_or_uri(badge_id); + + SET LOCAL client_min_messages = error; + DROP TABLE IF EXISTS precalc_bib_list; + CREATE TEMP TABLE precalc_bib_list ON COMMIT DROP AS + SELECT id FROM precalc_filter_bib_list + INTERSECT + SELECT id FROM precalc_bibs_by_copy_or_uri_list; + + RETURN QUERY + SELECT pop.id AS bib, + s.value::NUMERIC + FROM precalc_bib_list pop + JOIN metabib.record_sorter s ON ( + s.source = pop.id + AND s.attr = 'pubdate' + AND s.value ~ '^\d+$' + ) + WHERE s.value::INT <= EXTRACT(YEAR FROM NOW())::INT; +END; +$f$ LANGUAGE PLPGSQL STRICT; + +CREATE OR REPLACE FUNCTION rating.percent_time_circulating(badge_id INT) + RETURNS TABLE (record BIGINT, value NUMERIC) AS $f$ +DECLARE + badge rating.badge_with_orgs%ROWTYPE; +BEGIN + + SELECT * INTO badge FROM rating.badge_with_orgs WHERE id = badge_id; + + PERFORM rating.precalc_bibs_by_copy(badge_id); + + DELETE FROM precalc_copy_filter_bib_list WHERE id NOT IN ( + SELECT id FROM precalc_filter_bib_list + INTERSECT + SELECT id FROM precalc_bibs_by_copy_list + ); + + ANALYZE precalc_copy_filter_bib_list; + + RETURN QUERY + SELECT bib, + SUM(COALESCE(circ_time,0))::NUMERIC / SUM(age)::NUMERIC + FROM (SELECT cn.record AS bib, + cp.id, + EXTRACT( EPOCH FROM AGE(cp.active_date) ) + 1 AS age, + SUM( -- time copy spent circulating + EXTRACT( + EPOCH FROM + AGE( + COALESCE(circ.checkin_time, circ.stop_fines_time, NOW()), + circ.xact_start + ) + ) + )::NUMERIC AS circ_time + FROM asset.copy cp + JOIN precalc_copy_filter_bib_list c ON (cp.id = c.copy) + JOIN asset.call_number cn ON (cn.id = cp.call_number) + LEFT JOIN action.all_circulation circ 
ON ( + circ.target_copy = cp.id + AND stop_fines NOT IN ( + 'LOST', + 'LONGOVERDUE', + 'CLAIMSRETURNED', + 'LONGOVERDUE' + ) + AND NOT ( + checkin_time IS NULL AND + stop_fines = 'MAXFINES' + ) + ) + WHERE cn.owning_lib = ANY (badge.orgs) + AND cp.active_date IS NOT NULL + -- Next line requires that copies with no circs (circ.id IS NULL) also not be deleted + AND ((circ.id IS NULL AND NOT cp.deleted) OR circ.id IS NOT NULL) + GROUP BY 1,2,3 + ) x + GROUP BY 1; +END; +$f$ LANGUAGE PLPGSQL STRICT; + +CREATE OR REPLACE FUNCTION rating.generic_fixed_rating_by_copy(badge_id INT) + RETURNS TABLE (record BIGINT, value NUMERIC) AS $f$ +BEGIN + PERFORM rating.precalc_bibs_by_copy(badge_id); + RETURN QUERY + SELECT id, 1.0 FROM precalc_filter_bib_list + INTERSECT + SELECT id, 1.0 FROM precalc_bibs_by_copy_list; +END; +$f$ LANGUAGE PLPGSQL STRICT; + +CREATE OR REPLACE FUNCTION rating.generic_fixed_rating_by_uri(badge_id INT) + RETURNS TABLE (record BIGINT, value NUMERIC) AS $f$ +BEGIN + PERFORM rating.precalc_bibs_by_uri(badge_id); + RETURN QUERY + SELECT id, 1.0 FROM precalc_bib_filter_bib_list + INTERSECT + SELECT id, 1.0 FROM precalc_bibs_by_uri_list; +END; +$f$ LANGUAGE PLPGSQL STRICT; + +CREATE OR REPLACE FUNCTION rating.generic_fixed_rating_by_copy_or_uri(badge_id INT) + RETURNS TABLE (record BIGINT, value NUMERIC) AS $f$ +BEGIN + PERFORM rating.precalc_bibs_by_copy_or_uri(badge_id); + RETURN QUERY + (SELECT id, 1.0 FROM precalc_filter_bib_list + INTERSECT + SELECT id, 1.0 FROM precalc_bibs_by_copy_list) + UNION + (SELECT id, 1.0 FROM precalc_bib_filter_bib_list + INTERSECT + SELECT id, 1.0 FROM precalc_bibs_by_uri_list); +END; +$f$ LANGUAGE PLPGSQL STRICT; + +CREATE OR REPLACE FUNCTION rating.generic_fixed_rating_global(badge_id INT) + RETURNS TABLE (record BIGINT, value NUMERIC) AS $f$ +BEGIN + RETURN QUERY + SELECT id, 1.0 FROM precalc_bib_filter_bib_list; +END; +$f$ LANGUAGE PLPGSQL STRICT; + +CREATE INDEX hold_fulfillment_time_idx ON action.hold_request 
(fulfillment_time) WHERE fulfillment_time IS NOT NULL; +CREATE INDEX hold_request_time_idx ON action.hold_request (request_time); + + +/* + * Copyright (C) 2016 Equinox Software, Inc. + * Mike Rylander + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + + + +-- DEPS 0984 + +ALTER TYPE search.search_result ADD ATTRIBUTE badges TEXT, ADD ATTRIBUTE popularity NUMERIC; + +CREATE OR REPLACE FUNCTION search.query_parser_fts ( + + param_search_ou INT, + param_depth INT, + param_query TEXT, + param_statuses INT[], + param_locations INT[], + param_offset INT, + param_check INT, + param_limit INT, + metarecord BOOL, + staff BOOL, + deleted_search BOOL, + param_pref_ou INT DEFAULT NULL +) RETURNS SETOF search.search_result AS $func$ +DECLARE + + current_res search.search_result%ROWTYPE; + search_org_list INT[]; + luri_org_list INT[]; + tmp_int_list INT[]; + + check_limit INT; + core_limit INT; + core_offset INT; + tmp_int INT; + + core_result RECORD; + core_cursor REFCURSOR; + core_rel_query TEXT; + + total_count INT := 0; + check_count INT := 0; + deleted_count INT := 0; + visible_count INT := 0; + excluded_count INT := 0; + + luri_as_copy BOOL; +BEGIN + + check_limit := COALESCE( param_check, 1000 ); + core_limit := COALESCE( param_limit, 25000 ); + core_offset := COALESCE( param_offset, 0 ); + + SELECT COALESCE( enabled, FALSE ) INTO luri_as_copy FROM config.global_flag WHERE name = 'opac.located_uri.act_as_copy'; + + -- core_skip_chk := COALESCE( param_skip_chk, 1 ); + + IF param_search_ou > 0 THEN + IF 
param_depth IS NOT NULL THEN + SELECT ARRAY_AGG(distinct id) INTO search_org_list FROM actor.org_unit_descendants( param_search_ou, param_depth ); + ELSE + SELECT ARRAY_AGG(distinct id) INTO search_org_list FROM actor.org_unit_descendants( param_search_ou ); + END IF; + + IF luri_as_copy THEN + SELECT ARRAY_AGG(distinct id) INTO luri_org_list FROM actor.org_unit_full_path( param_search_ou ); + ELSE + SELECT ARRAY_AGG(distinct id) INTO luri_org_list FROM actor.org_unit_ancestors( param_search_ou ); + END IF; + + ELSIF param_search_ou < 0 THEN + SELECT ARRAY_AGG(distinct org_unit) INTO search_org_list FROM actor.org_lasso_map WHERE lasso = -param_search_ou; + + FOR tmp_int IN SELECT * FROM UNNEST(search_org_list) LOOP + + IF luri_as_copy THEN + SELECT ARRAY_AGG(distinct id) INTO tmp_int_list FROM actor.org_unit_full_path( tmp_int ); + ELSE + SELECT ARRAY_AGG(distinct id) INTO tmp_int_list FROM actor.org_unit_ancestors( tmp_int ); + END IF; + + luri_org_list := luri_org_list || tmp_int_list; + END LOOP; + + SELECT ARRAY_AGG(DISTINCT x.id) INTO luri_org_list FROM UNNEST(luri_org_list) x(id); + + ELSIF param_search_ou = 0 THEN + -- reserved for user lassos (ou_buckets/type='lasso') with ID passed in depth ... hack? sure. 
+ END IF; + + IF param_pref_ou IS NOT NULL THEN + IF luri_as_copy THEN + SELECT ARRAY_AGG(distinct id) INTO tmp_int_list FROM actor.org_unit_full_path( param_pref_ou ); + ELSE + SELECT ARRAY_AGG(distinct id) INTO tmp_int_list FROM actor.org_unit_ancestors( param_pref_ou ); + END IF; + + luri_org_list := luri_org_list || tmp_int_list; + END IF; + + OPEN core_cursor FOR EXECUTE param_query; + + LOOP + + FETCH core_cursor INTO core_result; + EXIT WHEN NOT FOUND; + EXIT WHEN total_count >= core_limit; + + total_count := total_count + 1; + + CONTINUE WHEN total_count NOT BETWEEN core_offset + 1 AND check_limit + core_offset; + + check_count := check_count + 1; + + IF NOT deleted_search THEN + + PERFORM 1 FROM biblio.record_entry b WHERE NOT b.deleted AND b.id IN ( SELECT * FROM unnest( core_result.records ) ); + IF NOT FOUND THEN + -- RAISE NOTICE ' % were all deleted ... ', core_result.records; + deleted_count := deleted_count + 1; + CONTINUE; + END IF; + + PERFORM 1 + FROM biblio.record_entry b + JOIN config.bib_source s ON (b.source = s.id) + WHERE s.transcendant + AND b.id IN ( SELECT * FROM unnest( core_result.records ) ); + + IF FOUND THEN + -- RAISE NOTICE ' % were all transcendant ... 
', core_result.records; + visible_count := visible_count + 1; + + current_res.id = core_result.id; + current_res.rel = core_result.rel; + current_res.badges = core_result.badges; + current_res.popularity = core_result.popularity; + + tmp_int := 1; + IF metarecord THEN + SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id; + END IF; + + IF tmp_int = 1 THEN + current_res.record = core_result.records[1]; + ELSE + current_res.record = NULL; + END IF; + + RETURN NEXT current_res; + + CONTINUE; + END IF; + + PERFORM 1 + FROM asset.call_number cn + JOIN asset.uri_call_number_map map ON (map.call_number = cn.id) + JOIN asset.uri uri ON (map.uri = uri.id) + WHERE NOT cn.deleted + AND cn.label = '##URI##' + AND uri.active + AND ( param_locations IS NULL OR array_upper(param_locations, 1) IS NULL ) + AND cn.record IN ( SELECT * FROM unnest( core_result.records ) ) + AND cn.owning_lib IN ( SELECT * FROM unnest( luri_org_list ) ) + LIMIT 1; + + IF FOUND THEN + -- RAISE NOTICE ' % have at least one URI ... 
', core_result.records; + visible_count := visible_count + 1; + + current_res.id = core_result.id; + current_res.rel = core_result.rel; + current_res.badges = core_result.badges; + current_res.popularity = core_result.popularity; + + tmp_int := 1; + IF metarecord THEN + SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id; + END IF; + + IF tmp_int = 1 THEN + current_res.record = core_result.records[1]; + ELSE + current_res.record = NULL; + END IF; + + RETURN NEXT current_res; + + CONTINUE; + END IF; + + IF param_statuses IS NOT NULL AND array_upper(param_statuses, 1) > 0 THEN + + PERFORM 1 + FROM asset.call_number cn + JOIN asset.copy cp ON (cp.call_number = cn.id) + WHERE NOT cn.deleted + AND NOT cp.deleted + AND cp.status IN ( SELECT * FROM unnest( param_statuses ) ) + AND cn.record IN ( SELECT * FROM unnest( core_result.records ) ) + AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) ) + LIMIT 1; + + IF NOT FOUND THEN + PERFORM 1 + FROM biblio.peer_bib_copy_map pr + JOIN asset.copy cp ON (cp.id = pr.target_copy) + WHERE NOT cp.deleted + AND cp.status IN ( SELECT * FROM unnest( param_statuses ) ) + AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) ) + AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) ) + LIMIT 1; + + IF NOT FOUND THEN + -- RAISE NOTICE ' % and multi-home linked records were all status-excluded ... 
', core_result.records; + excluded_count := excluded_count + 1; + CONTINUE; + END IF; + END IF; + + END IF; + + IF param_locations IS NOT NULL AND array_upper(param_locations, 1) > 0 THEN + + PERFORM 1 + FROM asset.call_number cn + JOIN asset.copy cp ON (cp.call_number = cn.id) + WHERE NOT cn.deleted + AND NOT cp.deleted + AND cp.location IN ( SELECT * FROM unnest( param_locations ) ) + AND cn.record IN ( SELECT * FROM unnest( core_result.records ) ) + AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) ) + LIMIT 1; + + IF NOT FOUND THEN + PERFORM 1 + FROM biblio.peer_bib_copy_map pr + JOIN asset.copy cp ON (cp.id = pr.target_copy) + WHERE NOT cp.deleted + AND cp.location IN ( SELECT * FROM unnest( param_locations ) ) + AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) ) + AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) ) + LIMIT 1; + + IF NOT FOUND THEN + -- RAISE NOTICE ' % and multi-home linked records were all copy_location-excluded ... ', core_result.records; + excluded_count := excluded_count + 1; + CONTINUE; + END IF; + END IF; + + END IF; + + IF staff IS NULL OR NOT staff THEN + + PERFORM 1 + FROM asset.opac_visible_copies + WHERE circ_lib IN ( SELECT * FROM unnest( search_org_list ) ) + AND record IN ( SELECT * FROM unnest( core_result.records ) ) + LIMIT 1; + + IF NOT FOUND THEN + PERFORM 1 + FROM biblio.peer_bib_copy_map pr + JOIN asset.opac_visible_copies cp ON (cp.copy_id = pr.target_copy) + WHERE cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) ) + AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) ) + LIMIT 1; + + IF NOT FOUND THEN + + -- RAISE NOTICE ' % and multi-home linked records were all visibility-excluded ... 
', core_result.records; + excluded_count := excluded_count + 1; + CONTINUE; + END IF; + END IF; + + ELSE + + PERFORM 1 + FROM asset.call_number cn + JOIN asset.copy cp ON (cp.call_number = cn.id) + WHERE NOT cn.deleted + AND NOT cp.deleted + AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) ) + AND cn.record IN ( SELECT * FROM unnest( core_result.records ) ) + LIMIT 1; + + IF NOT FOUND THEN + + PERFORM 1 + FROM biblio.peer_bib_copy_map pr + JOIN asset.copy cp ON (cp.id = pr.target_copy) + WHERE NOT cp.deleted + AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) ) + AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) ) + LIMIT 1; + + IF NOT FOUND THEN + + PERFORM 1 + FROM asset.call_number cn + JOIN asset.copy cp ON (cp.call_number = cn.id) + WHERE cn.record IN ( SELECT * FROM unnest( core_result.records ) ) + AND NOT cp.deleted + LIMIT 1; + + IF NOT FOUND THEN + -- Recheck Located URI visibility in the case of no "foreign" copies + PERFORM 1 + FROM asset.call_number cn + JOIN asset.uri_call_number_map map ON (map.call_number = cn.id) + JOIN asset.uri uri ON (map.uri = uri.id) + WHERE NOT cn.deleted + AND cn.label = '##URI##' + AND uri.active + AND cn.record IN ( SELECT * FROM unnest( core_result.records ) ) + AND cn.owning_lib NOT IN ( SELECT * FROM unnest( luri_org_list ) ) + LIMIT 1; + + IF FOUND THEN + -- RAISE NOTICE ' % were excluded for foreign located URIs... ', core_result.records; + excluded_count := excluded_count + 1; + CONTINUE; + END IF; + ELSE + -- RAISE NOTICE ' % and multi-home linked records were all visibility-excluded ... 
', core_result.records; + excluded_count := excluded_count + 1; + CONTINUE; + END IF; + END IF; + + END IF; + + END IF; + + END IF; + + visible_count := visible_count + 1; + + current_res.id = core_result.id; + current_res.rel = core_result.rel; + current_res.badges = core_result.badges; + current_res.popularity = core_result.popularity; + + tmp_int := 1; + IF metarecord THEN + SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id; + END IF; + + IF tmp_int = 1 THEN + current_res.record = core_result.records[1]; + ELSE + current_res.record = NULL; + END IF; + + RETURN NEXT current_res; + + IF visible_count % 1000 = 0 THEN + -- RAISE NOTICE ' % visible so far ... ', visible_count; + END IF; + + END LOOP; + + current_res.id = NULL; + current_res.rel = NULL; + current_res.record = NULL; + current_res.badges = NULL; + current_res.popularity = NULL; + current_res.total = total_count; + current_res.checked = check_count; + current_res.deleted = deleted_count; + current_res.visible = visible_count; + current_res.excluded = excluded_count; + + CLOSE core_cursor; + + RETURN NEXT current_res; + +END; +$func$ LANGUAGE PLPGSQL; + +-- KCLS VERSION PLUS CHANGES +CREATE OR REPLACE FUNCTION metabib.staged_browse( + query text, + fields integer[], + context_org integer, + context_locations integer[], + staff boolean, + browse_superpage_size integer, + count_up_from_zero boolean, + result_limit integer, + next_pivot_pos integer, + search_class text, + item_type text DEFAULT NULL::text +) RETURNS SETOF metabib.flat_browse_entry_appearance +AS $FUNK$ +DECLARE + curs REFCURSOR; + rec RECORD; + qpfts_query TEXT; + aqpfts_query TEXT; + afields INT[]; + bfields INT[]; + result_row metabib.flat_browse_entry_appearance%ROWTYPE; + results_skipped INT := 0; + row_counter INT := 0; + row_number INT; + slice_start INT; + slice_end INT; + full_end INT; + all_records BIGINT[]; + all_brecords BIGINT[]; + all_arecords BIGINT[]; + 
superpage_of_records BIGINT[]; + superpage_size INT; + unauthorized_entry RECORD; +BEGIN + --ver1.1 updated with kmain-806 - added support for the new metabib.browse_____entry_simple_heading_map tables. + IF count_up_from_zero THEN + row_number := 0; + ELSE + row_number := -1; + END IF; + + OPEN curs FOR EXECUTE query; + + LOOP + FETCH curs INTO rec; + IF NOT FOUND THEN + IF result_row.pivot_point IS NOT NULL THEN + RETURN NEXT result_row; + END IF; + RETURN; + END IF; + + CASE search_class + WHEN 'author' THEN + --Is unauthorized, i.e., 4xx on an auth record? + SELECT INTO unauthorized_entry * + FROM metabib.browse_author_entry_simple_heading_map mbeshm + INNER JOIN authority.simple_heading ash ON ( mbeshm.simple_heading = ash.id ) + INNER JOIN authority.control_set_authority_field acsaf ON ( acsaf.id = ash.atag AND acsaf.tag like '4__') + WHERE mbeshm.entry = rec.id; + + -- Gather aggregate data based on the MBE row we're looking at now, authority axis + IF (unauthorized_entry.record IS NOT NULL) THEN + --Do unauthorized procedure, use the authorized term's auth record and it's bibs + SELECT INTO all_arecords, result_row.sees, afields + ARRAY_AGG(DISTINCT abl.bib), + STRING_AGG(DISTINCT abl.authority::TEXT, $$,$$), + ARRAY_AGG(DISTINCT map.metabib_field) + FROM authority.bib_linking abl + INNER JOIN authority.control_set_auth_field_metabib_field_map_refs map ON ( + map.authority_field = unauthorized_entry.atag + AND map.metabib_field = ANY(fields) + ) + WHERE abl.authority = unauthorized_entry.record; + ELSE + SELECT INTO all_arecords, result_row.sees, afields + ARRAY_AGG(DISTINCT abl.bib), -- bibs to check for visibility + ARRAY_TO_STRING(ARRAY_AGG(DISTINCT aal.source), $$,$$), -- authority record ids + ARRAY_AGG(DISTINCT map.metabib_field) -- authority-tag-linked CMF rows + FROM metabib.browse_author_entry_simple_heading_map mbeshm + JOIN authority.simple_heading ash ON ( mbeshm.simple_heading = ash.id ) + JOIN authority.authority_linking aal ON ( ash.record = 
aal.source ) + JOIN authority.bib_linking abl ON ( aal.target = abl.authority ) + JOIN authority.control_set_auth_field_metabib_field_map_refs map ON ( + ash.atag = map.authority_field + AND map.metabib_field = ANY(fields) + ) + WHERE mbeshm.entry = rec.id; + END IF; + + -- Gather aggregate data based on the MBE row we're looking at now, bib axis + SELECT INTO all_brecords, result_row.authorities, bfields + ARRAY_AGG(DISTINCT source), + ARRAY_TO_STRING(ARRAY_AGG(DISTINCT authority), $$,$$), + ARRAY_AGG(DISTINCT def) + FROM metabib.browse_author_entry_def_map map + WHERE entry = rec.id; + + WHEN 'title' THEN + -- Gather aggregate data based on the MBE row we're looking at now, authority axis + SELECT INTO all_arecords, result_row.sees, afields + ARRAY_AGG(DISTINCT abl.bib), -- bibs to check for visibility + ARRAY_TO_STRING(ARRAY_AGG(DISTINCT aal.source), $$,$$), -- authority record ids + ARRAY_AGG(DISTINCT map.metabib_field) -- authority-tag-linked CMF rows + FROM metabib.browse_title_entry_simple_heading_map mbeshm + JOIN authority.simple_heading ash ON ( mbeshm.simple_heading = ash.id ) + JOIN authority.authority_linking aal ON ( ash.record = aal.source ) + JOIN authority.bib_linking abl ON ( aal.target = abl.authority ) + JOIN authority.control_set_auth_field_metabib_field_map_refs map ON ( + ash.atag = map.authority_field + AND map.metabib_field = ANY(fields) + ) + WHERE mbeshm.entry = rec.id; + + -- Gather aggregate data based on the MBE row we're looking at now, bib axis + SELECT INTO all_brecords, result_row.authorities, bfields + ARRAY_AGG(DISTINCT source), + ARRAY_TO_STRING(ARRAY_AGG(DISTINCT authority), $$,$$), + ARRAY_AGG(DISTINCT def) + FROM metabib.browse_title_entry_def_map map + WHERE entry = rec.id; + + WHEN 'subject' THEN + --Is unauthorized, i.e., 4xx on an auth record? 
+ SELECT INTO unauthorized_entry * + FROM metabib.browse_subject_entry_simple_heading_map mbeshm + INNER JOIN authority.simple_heading ash ON ( mbeshm.simple_heading = ash.id ) + INNER JOIN authority.control_set_authority_field acsaf ON ( acsaf.id = ash.atag AND acsaf.tag like '4__') + WHERE mbeshm.entry = rec.id; + + -- Gather aggregate data based on the MBE row we're looking at now, authority axis + IF (unauthorized_entry.record IS NOT NULL) THEN + --Do unauthorized procedure, use the authorized term's auth record and it's bibs + SELECT INTO all_arecords, result_row.sees, afields + ARRAY_AGG(DISTINCT abl.bib), + STRING_AGG(DISTINCT abl.authority::TEXT, $$,$$), + ARRAY_AGG(DISTINCT map.metabib_field) + FROM authority.bib_linking abl + INNER JOIN authority.control_set_auth_field_metabib_field_map_refs map ON ( + map.authority_field = unauthorized_entry.atag + AND map.metabib_field = ANY(fields) + ) + WHERE abl.authority = unauthorized_entry.record; + ELSE + SELECT INTO all_arecords, result_row.sees, afields + ARRAY_AGG(DISTINCT abl.bib), -- bibs to check for visibility + ARRAY_TO_STRING(ARRAY_AGG(DISTINCT aal.source), $$,$$), -- authority record ids + ARRAY_AGG(DISTINCT map.metabib_field) -- authority-tag-linked CMF rows + FROM metabib.browse_subject_entry_simple_heading_map mbeshm + JOIN authority.simple_heading ash ON ( mbeshm.simple_heading = ash.id ) + JOIN authority.authority_linking aal ON ( ash.record = aal.source ) + JOIN authority.bib_linking abl ON ( aal.target = abl.authority ) + JOIN authority.control_set_auth_field_metabib_field_map_refs map ON ( + ash.atag = map.authority_field + AND map.metabib_field = ANY(fields) + ) + WHERE mbeshm.entry = rec.id; + END IF; + + -- Gather aggregate data based on the MBE row we're looking at now, bib axis + SELECT INTO all_brecords, result_row.authorities, bfields + ARRAY_AGG(DISTINCT source), + ARRAY_TO_STRING(ARRAY_AGG(DISTINCT authority), $$,$$), + ARRAY_AGG(DISTINCT def) + FROM metabib.browse_subject_entry_def_map 
map + WHERE entry = rec.id; + + WHEN 'series' THEN + --Is unauthorized, i.e., 4xx on an auth record? + SELECT INTO unauthorized_entry * + FROM metabib.browse_series_entry_simple_heading_map mbeshm + INNER JOIN authority.simple_heading ash ON ( mbeshm.simple_heading = ash.id ) + INNER JOIN authority.control_set_authority_field acsaf ON ( acsaf.id = ash.atag AND acsaf.tag like '4__') + WHERE mbeshm.entry = rec.id; + + -- Gather aggregate data based on the MBE row we're looking at now, authority axis + -- Gather aggregate data based on the MBE row we're looking at now, authority axis + IF (unauthorized_entry.record IS NOT NULL) THEN + --Do unauthorized procedure, use the authorized term's auth record and it's bibs + SELECT INTO all_arecords, result_row.sees, afields + ARRAY_AGG(DISTINCT abl.bib), + STRING_AGG(DISTINCT abl.authority::TEXT, $$,$$), + ARRAY_AGG(DISTINCT map.metabib_field) + FROM authority.bib_linking abl + INNER JOIN authority.control_set_auth_field_metabib_field_map_refs map ON ( + map.authority_field = unauthorized_entry.atag + AND map.metabib_field = ANY(fields) + ) + WHERE abl.authority = unauthorized_entry.record; + ELSE + SELECT INTO all_arecords, result_row.sees, afields + ARRAY_AGG(DISTINCT abl.bib), -- bibs to check for visibility + ARRAY_TO_STRING(ARRAY_AGG(DISTINCT aal.source), $$,$$), -- authority record ids + ARRAY_AGG(DISTINCT map.metabib_field) -- authority-tag-linked CMF rows + FROM metabib.browse_series_entry_simple_heading_map mbeshm + JOIN authority.simple_heading ash ON ( mbeshm.simple_heading = ash.id ) + JOIN authority.authority_linking aal ON ( ash.record = aal.source ) + JOIN authority.bib_linking abl ON ( aal.target = abl.authority ) + JOIN authority.control_set_auth_field_metabib_field_map_refs map ON ( + ash.atag = map.authority_field + AND map.metabib_field = ANY(fields) + ) + WHERE mbeshm.entry = rec.id; + END IF; + + -- Gather aggregate data based on the MBE row we're looking at now, bib axis + SELECT INTO all_brecords, 
result_row.authorities, bfields + ARRAY_AGG(DISTINCT source), + ARRAY_TO_STRING(ARRAY_AGG(DISTINCT authority), $$,$$), + ARRAY_AGG(DISTINCT def) + FROM metabib.browse_series_entry_def_map map + WHERE entry = rec.id; + + WHEN 'call_number' THEN + -- Gather aggregate data based on the MBE row we're looking at now, bib axis + SELECT INTO all_brecords, result_row.authorities, bfields + ARRAY_AGG(DISTINCT source), + ARRAY_TO_STRING(ARRAY_AGG(DISTINCT authority), $$,$$), + ARRAY_AGG(DISTINCT def) + FROM metabib.browse_call_number_entry_def_map + WHERE entry = rec.id; + ELSE + + END CASE; + + + + SELECT INTO result_row.fields ARRAY_TO_STRING(ARRAY_AGG(DISTINCT x), $$,$$) FROM UNNEST(afields || bfields) x; + + result_row.sources := 0; + result_row.asources := 0; + + /** JBAS-1929 Remove references to any bib records that do not + match the requested item_type (mattype). */ + IF item_type IS NOT NULL THEN + + + -- bib-linked bibs + IF ARRAY_UPPER(all_brecords, 1) IS NOT NULL THEN + -- RAISE NOTICE 'Checking bib links on %', all_brecords; + + SELECT INTO all_brecords ARRAY_AGG(DISTINCT vec.source) + FROM metabib.record_attr_vector_list vec + JOIN config.coded_value_map ccvm + ON ( + ccvm.ctype = 'mattype' + AND ccvm.code = item_type + AND ccvm.id = ANY (vec.vlist) + ) + WHERE vec.source = ANY(all_brecords); + + -- RAISE NOTICE 'Bib links trimmed to %', all_brecords; + END IF; + + -- auth-linked bibs + IF ARRAY_UPPER(all_arecords, 1) IS NOT NULL THEN + -- RAISE NOTICE 'Checking auth links on %', all_arecords; + + SELECT INTO all_arecords ARRAY_AGG(DISTINCT vec.source) + FROM metabib.record_attr_vector_list vec + JOIN config.coded_value_map ccvm + ON ( + ccvm.ctype = 'mattype' + AND ccvm.code = item_type + AND ccvm.id = ANY (vec.vlist) + ) + WHERE vec.source = ANY(all_arecords); + + -- RAISE NOTICE 'Auth links trimmed to %', all_arecords; + END IF; + + END IF; + + -- Bib-linked vis checking + IF ARRAY_UPPER(all_brecords,1) IS NOT NULL THEN + + full_end := 
ARRAY_LENGTH(all_brecords, 1); + superpage_size := COALESCE(browse_superpage_size, full_end); + slice_start := 1; + slice_end := superpage_size; + + WHILE result_row.sources = 0 AND slice_start <= full_end LOOP + superpage_of_records := all_brecords[slice_start:slice_end]; + qpfts_query := + 'SELECT NULL::BIGINT AS id, ARRAY[r] AS records, ' || + 'NULL AS badges, NULL::NUMERIC AS popularity, ' || + '1::NUMERIC AS rel FROM (SELECT UNNEST(' || + quote_literal(superpage_of_records) || '::BIGINT[]) AS r) rr'; + + -- We use search.query_parser_fts() for visibility testing. + -- We're calling it once per browse-superpage worth of records + -- out of the set of records related to a given mbe, until we've + -- either exhausted that set of records or found at least 1 + -- visible record. + + SELECT INTO result_row.sources visible + FROM search.query_parser_fts( + context_org, NULL, qpfts_query, NULL, + context_locations, 0, NULL, NULL, FALSE, staff, FALSE + ) qpfts + WHERE qpfts.rel IS NULL; + + slice_start := slice_start + superpage_size; + slice_end := slice_end + superpage_size; + END LOOP; + + -- Accurate? Well, probably. + result_row.accurate := browse_superpage_size IS NULL OR + browse_superpage_size >= full_end; + + END IF; + + -- Authority-linked vis checking + IF ARRAY_UPPER(all_arecords,1) IS NOT NULL THEN + + full_end := ARRAY_LENGTH(all_arecords, 1); + superpage_size := COALESCE(browse_superpage_size, full_end); + slice_start := 1; + slice_end := superpage_size; + + WHILE result_row.asources = 0 AND slice_start <= full_end LOOP + superpage_of_records := all_arecords[slice_start:slice_end]; + qpfts_query := + 'SELECT NULL::BIGINT AS id, ARRAY[r] AS records, ' || + 'NULL AS badges, NULL::NUMERIC AS popularity, ' || + '1::NUMERIC AS rel FROM (SELECT UNNEST(' || + quote_literal(superpage_of_records) || '::BIGINT[]) AS r) rr'; + + -- We use search.query_parser_fts() for visibility testing. 
+ -- We're calling it once per browse-superpage worth of records + -- out of the set of records related to a given mbe, via + -- authority until we've either exhausted that set of records + -- or found at least 1 visible record. + + SELECT INTO result_row.asources visible + FROM search.query_parser_fts( + context_org, NULL, qpfts_query, NULL, + context_locations, 0, NULL, NULL, FALSE, staff, FALSE + ) qpfts + WHERE qpfts.rel IS NULL; + + slice_start := slice_start + superpage_size; + slice_end := slice_end + superpage_size; + END LOOP; + + + -- Accurate? Well, probably. + result_row.aaccurate := browse_superpage_size IS NULL OR + browse_superpage_size >= full_end; + + END IF; + + IF result_row.sources > 0 OR result_row.asources > 0 THEN + + -- The function that calls this function needs row_number in order + -- to correctly order results from two different runs of this + -- functions. + result_row.row_number := row_number; + + -- Now, if row_counter is still less than limit, return a row. If + -- not, but it is less than next_pivot_pos, continue on without + -- returning actual result rows until we find + -- that next pivot, and return it. + + IF row_counter < result_limit THEN + result_row.browse_entry := rec.id; + result_row.value := rec.value; + + RETURN NEXT result_row; + ELSE + result_row.browse_entry := NULL; + result_row.authorities := NULL; + result_row.fields := NULL; + result_row.value := NULL; + result_row.sources := NULL; + result_row.sees := NULL; + result_row.accurate := NULL; + result_row.aaccurate := NULL; + result_row.pivot_point := rec.id; + + IF row_counter >= next_pivot_pos THEN + RETURN NEXT result_row; + RETURN; + END IF; + END IF; + + IF count_up_from_zero THEN + row_number := row_number + 1; + ELSE + row_number := row_number - 1; + END IF; + + -- row_counter is different from row_number. + -- It simply counts up from zero so that we know when + -- we've reached our limit. 
+ row_counter := row_counter + 1; + END IF; + END LOOP; +END; +$FUNK$ LANGUAGE plpgsql ROWS 11; + +-- DEPS 0985 + +CREATE OR REPLACE FUNCTION metabib.reingest_record_attributes (rid BIGINT, pattr_list TEXT[] DEFAULT NULL, prmarc TEXT DEFAULT NULL, rdeleted BOOL DEFAULT TRUE) RETURNS VOID AS $func$ +DECLARE + transformed_xml TEXT; + rmarc TEXT := prmarc; + tmp_val TEXT; + prev_xfrm TEXT; + normalizer RECORD; + xfrm config.xml_transform%ROWTYPE; + attr_vector INT[] := '{}'::INT[]; + attr_vector_tmp INT[]; + attr_list TEXT[] := pattr_list; + attr_value TEXT[]; + norm_attr_value TEXT[]; + tmp_xml TEXT; + attr_def config.record_attr_definition%ROWTYPE; + ccvm_row config.coded_value_map%ROWTYPE; +BEGIN + + IF attr_list IS NULL OR rdeleted THEN -- need to do the full dance on INSERT or undelete + SELECT ARRAY_AGG(name) INTO attr_list FROM config.record_attr_definition + WHERE ( + tag IS NOT NULL OR + fixed_field IS NOT NULL OR + xpath IS NOT NULL OR + phys_char_sf IS NOT NULL OR + composite + ) AND ( + filter OR sorter + ); + END IF; + + IF rmarc IS NULL THEN + SELECT marc INTO rmarc FROM biblio.record_entry WHERE id = rid; + END IF; + + FOR attr_def IN SELECT * FROM config.record_attr_definition WHERE NOT composite AND name = ANY( attr_list ) ORDER BY format LOOP + + attr_value := '{}'::TEXT[]; + norm_attr_value := '{}'::TEXT[]; + attr_vector_tmp := '{}'::INT[]; + + SELECT * INTO ccvm_row FROM config.coded_value_map c WHERE c.ctype = attr_def.name LIMIT 1; + + -- tag+sf attrs only support SVF + IF attr_def.tag IS NOT NULL THEN -- tag (and optional subfield list) selection + SELECT ARRAY[ARRAY_TO_STRING(ARRAY_AGG(value), COALESCE(attr_def.joiner,' '))] INTO attr_value + FROM (SELECT * FROM metabib.full_rec ORDER BY tag, subfield) AS x + WHERE record = rid + AND tag LIKE attr_def.tag + AND CASE + WHEN attr_def.sf_list IS NOT NULL + THEN POSITION(subfield IN attr_def.sf_list) > 0 + ELSE TRUE + END + GROUP BY tag + ORDER BY tag + LIMIT 1; + + ELSIF attr_def.fixed_field IS 
NOT NULL THEN -- a named fixed field, see config.marc21_ff_pos_map.fixed_field + attr_value := vandelay.marc21_extract_fixed_field_list(rmarc, attr_def.fixed_field); + + IF NOT attr_def.multi THEN + attr_value := ARRAY[attr_value[1]]; + END IF; + + ELSIF attr_def.xpath IS NOT NULL THEN -- and xpath expression + + SELECT INTO xfrm * FROM config.xml_transform WHERE name = attr_def.format; + + -- See if we can skip the XSLT ... it's expensive + IF prev_xfrm IS NULL OR prev_xfrm <> xfrm.name THEN + -- Can't skip the transform + IF xfrm.xslt <> '---' THEN + transformed_xml := oils_xslt_process(rmarc,xfrm.xslt); + ELSE + transformed_xml := rmarc; + END IF; + + prev_xfrm := xfrm.name; + END IF; + + IF xfrm.name IS NULL THEN + -- just grab the marcxml (empty) transform + SELECT INTO xfrm * FROM config.xml_transform WHERE xslt = '---' LIMIT 1; + prev_xfrm := xfrm.name; + END IF; + + FOR tmp_xml IN SELECT UNNEST(oils_xpath(attr_def.xpath, transformed_xml, ARRAY[ARRAY[xfrm.prefix, xfrm.namespace_uri]])) LOOP + tmp_val := oils_xpath_string( + '//*', + tmp_xml, + COALESCE(attr_def.joiner,' '), + ARRAY[ARRAY[xfrm.prefix, xfrm.namespace_uri]] + ); + IF tmp_val IS NOT NULL AND BTRIM(tmp_val) <> '' THEN + attr_value := attr_value || tmp_val; + EXIT WHEN NOT attr_def.multi; + END IF; + END LOOP; + + ELSIF attr_def.phys_char_sf IS NOT NULL THEN -- a named Physical Characteristic, see config.marc21_physical_characteristic_*_map + SELECT ARRAY_AGG(m.value) INTO attr_value + FROM vandelay.marc21_physical_characteristics(rmarc) v + LEFT JOIN config.marc21_physical_characteristic_value_map m ON (m.id = v.value) + WHERE v.subfield = attr_def.phys_char_sf AND (m.value IS NOT NULL AND BTRIM(m.value) <> '') + AND ( ccvm_row.id IS NULL OR ( ccvm_row.id IS NOT NULL AND v.id IS NOT NULL) ); + + IF NOT attr_def.multi THEN + attr_value := ARRAY[attr_value[1]]; + END IF; + + END IF; + + -- apply index normalizers to attr_value + FOR tmp_val IN SELECT value FROM UNNEST(attr_value) x(value) LOOP + 
FOR normalizer IN + SELECT n.func AS func, + n.param_count AS param_count, + m.params AS params + FROM config.index_normalizer n + JOIN config.record_attr_index_norm_map m ON (m.norm = n.id) + WHERE attr = attr_def.name + ORDER BY m.pos LOOP + EXECUTE 'SELECT ' || normalizer.func || '(' || + COALESCE( quote_literal( tmp_val ), 'NULL' ) || + CASE + WHEN normalizer.param_count > 0 + THEN ',' || REPLACE(REPLACE(BTRIM(normalizer.params,'[]'),E'\'',E'\\\''),E'"',E'\'') + ELSE '' + END || + ')' INTO tmp_val; + + END LOOP; + IF tmp_val IS NOT NULL AND tmp_val <> '' THEN + -- note that a string that contains only blanks + -- is a valid value for some attributes + norm_attr_value := norm_attr_value || tmp_val; + END IF; + END LOOP; + + IF attr_def.filter THEN + -- Create unknown uncontrolled values and find the IDs of the values + IF ccvm_row.id IS NULL THEN + FOR tmp_val IN SELECT value FROM UNNEST(norm_attr_value) x(value) LOOP + IF tmp_val IS NOT NULL AND BTRIM(tmp_val) <> '' THEN + BEGIN -- use subtransaction to isolate unique constraint violations + INSERT INTO metabib.uncontrolled_record_attr_value ( attr, value ) VALUES ( attr_def.name, tmp_val ); + EXCEPTION WHEN unique_violation THEN END; + END IF; + END LOOP; + + SELECT ARRAY_AGG(id) INTO attr_vector_tmp FROM metabib.uncontrolled_record_attr_value WHERE attr = attr_def.name AND value = ANY( norm_attr_value ); + ELSE + SELECT ARRAY_AGG(id) INTO attr_vector_tmp FROM config.coded_value_map WHERE ctype = attr_def.name AND code = ANY( norm_attr_value ); + END IF; + + -- Add the new value to the vector + attr_vector := attr_vector || attr_vector_tmp; + END IF; + + IF attr_def.sorter THEN + DELETE FROM metabib.record_sorter WHERE source = rid AND attr = attr_def.name; + IF norm_attr_value[1] IS NOT NULL THEN + INSERT INTO metabib.record_sorter (source, attr, value) VALUES (rid, attr_def.name, norm_attr_value[1]); + END IF; + END IF; + + END LOOP; + +/* We may need to rewrite the vlist to contain + the intersection of new 
values for requested + attrs and old values for ignored attrs. To + do this, we take the old attr vlist and + subtract any values that are valid for the + requested attrs, and then add back the new + set of attr values. */ + + IF ARRAY_LENGTH(pattr_list, 1) > 0 THEN + SELECT vlist INTO attr_vector_tmp FROM metabib.record_attr_vector_list WHERE source = rid; + SELECT attr_vector_tmp - ARRAY_AGG(id::INT) INTO attr_vector_tmp FROM metabib.full_attr_id_map WHERE attr = ANY (pattr_list); + attr_vector := attr_vector || attr_vector_tmp; + END IF; + + -- On to composite attributes, now that the record attrs have been pulled. Processed in name order, so later composite + -- attributes can depend on earlier ones. + PERFORM metabib.compile_composite_attr_cache_init(); + FOR attr_def IN SELECT * FROM config.record_attr_definition WHERE composite AND name = ANY( attr_list ) ORDER BY name LOOP + + FOR ccvm_row IN SELECT * FROM config.coded_value_map c WHERE c.ctype = attr_def.name ORDER BY value LOOP + + tmp_val := metabib.compile_composite_attr( ccvm_row.id ); + CONTINUE WHEN tmp_val IS NULL OR tmp_val = ''; -- nothing to do + + IF attr_def.filter THEN + IF attr_vector @@ tmp_val::query_int THEN + attr_vector = attr_vector + intset(ccvm_row.id); + EXIT WHEN NOT attr_def.multi; + END IF; + END IF; + + IF attr_def.sorter THEN + IF attr_vector @@ tmp_val THEN + DELETE FROM metabib.record_sorter WHERE source = rid AND attr = attr_def.name; + INSERT INTO metabib.record_sorter (source, attr, value) VALUES (rid, attr_def.name, ccvm_row.code); + END IF; + END IF; + + END LOOP; + + END LOOP; + + IF ARRAY_LENGTH(attr_vector, 1) > 0 THEN + IF rdeleted THEN -- initial insert OR revivication + DELETE FROM metabib.record_attr_vector_list WHERE source = rid; + INSERT INTO metabib.record_attr_vector_list (source, vlist) VALUES (rid, attr_vector); + ELSE + UPDATE metabib.record_attr_vector_list SET vlist = attr_vector WHERE source = rid; + END IF; + END IF; + +END; + +$func$ LANGUAGE PLPGSQL; 
+ +CREATE INDEX config_coded_value_map_ctype_idx ON config.coded_value_map (ctype); + + +-- DEPS 0986 + +CREATE EXTENSION IF NOT EXISTS unaccent SCHEMA public; + +CREATE OR REPLACE FUNCTION evergreen.unaccent_and_squash ( IN arg text) RETURNS text + IMMUTABLE STRICT AS $$ + BEGIN + RETURN evergreen.lowercase(unaccent(regexp_replace(arg, '\s','','g'))); + END; +$$ LANGUAGE PLPGSQL; + +-- The unaccented indices for patron name fields +CREATE INDEX actor_usr_first_given_name_unaccent_idx ON actor.usr (evergreen.unaccent_and_squash(first_given_name)); +CREATE INDEX actor_usr_second_given_name_unaccent_idx ON actor.usr (evergreen.unaccent_and_squash(second_given_name)); +CREATE INDEX actor_usr_family_name_unaccent_idx ON actor.usr (evergreen.unaccent_and_squash(family_name)); + +-- DB setting to control behavior; true by default +DO $INSERT$ +BEGIN + IF evergreen.insert_on_deploy() THEN ----------------------------------- +INSERT INTO config.org_unit_setting_type +( name, grp, label, description, datatype ) +VALUES +('circ.patron_search.diacritic_insensitive', + 'circ', + oils_i18n_gettext('circ.patron_search.diacritic_insensitive', + 'Patron search diacritic insensitive', + 'coust', 'label'), + oils_i18n_gettext('circ.patron_search.diacritic_insensitive', + 'Match patron last, first, and middle names irrespective of usage of diacritical marks or spaces. 
(e.g., Ines will match Inés; de la Cruz will match Delacruz)', + 'coust', 'description'), + 'bool'); + +INSERT INTO actor.org_unit_setting ( + org_unit, name, value +) VALUES ( + (SELECT id FROM actor.org_unit WHERE parent_ou IS NULL), + 'circ.patron_search.diacritic_insensitive', + 'true' +); + + + + +-- DEPS 0987 + +INSERT INTO config.org_unit_setting_type ( name, grp, label, description, datatype ) + VALUES ( + 'ui.circ.billing.amount_limit', 'gui', + oils_i18n_gettext( + 'ui.circ.billing.amount_limit', + 'Maximum payment amount allowed.', + 'coust', 'label'), + oils_i18n_gettext( + 'ui.circ.billing.amount_limit', + 'The payment amount in the Patron Bills interface may not exceed the value of this setting.', + 'coust', 'description'), + 'currency' + ); + +INSERT INTO config.org_unit_setting_type ( name, grp, label, description, datatype ) + VALUES ( + 'ui.circ.billing.amount_warn', 'gui', + oils_i18n_gettext( + 'ui.circ.billing.amount_warn', + 'Payment amount threshold for Are You Sure? dialog.', + 'coust', 'label'), + oils_i18n_gettext( + 'ui.circ.billing.amount_warn', + 'In the Patron Bills interface, a payment attempt will warn if the amount exceeds the value of this setting.', + 'coust', 'description'), + 'currency' + ); + + END IF; +END $INSERT$; -------------------------------------------------------------- + + +-- DEPS 0988 + +CREATE OR REPLACE FUNCTION evergreen.maintain_901 () RETURNS TRIGGER AS $func$ +use strict; +use MARC::Record; +use MARC::File::XML (BinaryEncoding => 'UTF-8'); +use MARC::Charset; +use Encode; +use Unicode::Normalize; + +MARC::Charset->assume_unicode(1); + +my $schema = $_TD->{table_schema}; +my $marc = MARC::Record->new_from_xml($_TD->{new}{marc}); + +my @old901s = $marc->field('901'); +$marc->delete_fields(@old901s); + +if ($schema eq 'biblio') { + my $tcn_value = $_TD->{new}{tcn_value}; + + # Set TCN value to record ID? 
+ my $id_as_tcn = spi_exec_query(" + SELECT enabled + FROM config.global_flag + WHERE name = 'cat.bib.use_id_for_tcn' + "); + if (($id_as_tcn->{processed}) && $id_as_tcn->{rows}[0]->{enabled} eq 't') { + $tcn_value = $_TD->{new}{id}; + $_TD->{new}{tcn_value} = $tcn_value; + } + + my $new_901 = MARC::Field->new("901", " ", " ", + "a" => $tcn_value, + "b" => $_TD->{new}{tcn_source}, + "c" => $_TD->{new}{id}, + "t" => $schema + ); + + if ($_TD->{new}{owner}) { + $new_901->add_subfields("o" => $_TD->{new}{owner}); + } + + if ($_TD->{new}{share_depth}) { + $new_901->add_subfields("d" => $_TD->{new}{share_depth}); + } + + if ($_TD->{new}{source}) { + my $plan = spi_prepare(' + SELECT source + FROM config.bib_source + WHERE id = $1 + ', 'INTEGER'); + my $source_name = + spi_exec_prepared($plan, {limit => 1}, $_TD->{new}{source})->{rows}[0]{source}; + spi_freeplan($plan); + $new_901->add_subfields("s" => $source_name) if $source_name; + } + + $marc->append_fields($new_901); +} elsif ($schema eq 'authority') { + my $new_901 = MARC::Field->new("901", " ", " ", + "c" => $_TD->{new}{id}, + "t" => $schema, + ); + $marc->append_fields($new_901); +} elsif ($schema eq 'serial') { + my $new_901 = MARC::Field->new("901", " ", " ", + "c" => $_TD->{new}{id}, + "t" => $schema, + "o" => $_TD->{new}{owning_lib}, + ); + + if ($_TD->{new}{record}) { + $new_901->add_subfields("r" => $_TD->{new}{record}); + } + + $marc->append_fields($new_901); +} else { + my $new_901 = MARC::Field->new("901", " ", " ", + "c" => $_TD->{new}{id}, + "t" => $schema, + ); + $marc->append_fields($new_901); +} + +my $xml = $marc->as_xml_record(); +$xml =~ s/\n//sgo; +$xml =~ s/^<\?xml.+\?\s*>//go; +$xml =~ s/>\s+entityize() +# to avoid having to set PERL5LIB for PostgreSQL as well + +$xml = NFC($xml); + +# Convert raw ampersands to entities +$xml =~ s/&(?!\S+;)/&/gso; + +# Convert Unicode characters to entities +$xml =~ s/([\x{0080}-\x{fffd}])/sprintf('&#x%X;',ord($1))/sgoe; + +$xml =~ s/[\x00-\x1f]//go; 
+$_TD->{new}{marc} = $xml; + +return "MODIFY"; +$func$ LANGUAGE PLPERLU; + + +-- DEPS 0989 + +CREATE OR REPLACE FUNCTION vandelay.overlay_authority_record ( import_id BIGINT, eg_id BIGINT, merge_profile_id INT ) RETURNS BOOL AS $$ +DECLARE + merge_profile vandelay.merge_profile%ROWTYPE; + dyn_profile vandelay.compile_profile%ROWTYPE; + editor_string TEXT; + new_editor INT; + new_edit_date TIMESTAMPTZ; + source_marc TEXT; + target_marc TEXT; + eg_marc_row authority.record_entry%ROWTYPE; + eg_marc TEXT; + v_marc TEXT; + replace_rule TEXT; + match_count INT; + update_query TEXT; +BEGIN + + SELECT * INTO eg_marc_row + FROM authority.record_entry b + JOIN vandelay.authority_match m ON (m.eg_record = b.id AND m.queued_record = import_id) + LIMIT 1; + + SELECT q.marc INTO v_marc + FROM vandelay.queued_record q + JOIN vandelay.authority_match m ON (m.queued_record = q.id AND q.id = import_id) + LIMIT 1; + + eg_marc := eg_marc_row.marc; + + IF eg_marc IS NULL OR v_marc IS NULL THEN + -- RAISE NOTICE 'no marc for vandelay or authority record'; + RETURN FALSE; + END IF; + + -- Extract the editor string before any modification to the vandelay + -- MARC occur. + editor_string := + (oils_xpath('//*[@tag="905"]/*[@code="u"]/text()',v_marc))[1]; + + -- If an editor value can be found, update the authority record + -- editor and edit_date values. + IF editor_string IS NOT NULL AND editor_string <> '' THEN + + -- Vandelay.pm sets the value to 'usrname' when needed. 
+ SELECT id INTO new_editor + FROM actor.usr WHERE usrname = editor_string; + + IF new_editor IS NULL THEN + SELECT usr INTO new_editor + FROM actor.card WHERE barcode = editor_string; + END IF; + + IF new_editor IS NOT NULL THEN + new_edit_date := NOW(); + ELSE -- No valid editor, use current values + new_editor = eg_marc_row.editor; + new_edit_date = eg_marc_row.edit_date; + END IF; + ELSE + new_editor = eg_marc_row.editor; + new_edit_date = eg_marc_row.edit_date; + END IF; + + dyn_profile := vandelay.compile_profile( v_marc ); + + IF merge_profile_id IS NOT NULL THEN + SELECT * INTO merge_profile FROM vandelay.merge_profile WHERE id = merge_profile_id; + IF FOUND THEN + dyn_profile.add_rule := BTRIM( dyn_profile.add_rule || ',' || COALESCE(merge_profile.add_spec,''), ','); + dyn_profile.strip_rule := BTRIM( dyn_profile.strip_rule || ',' || COALESCE(merge_profile.strip_spec,''), ','); + dyn_profile.replace_rule := BTRIM( dyn_profile.replace_rule || ',' || COALESCE(merge_profile.replace_spec,''), ','); + dyn_profile.preserve_rule := BTRIM( dyn_profile.preserve_rule || ',' || COALESCE(merge_profile.preserve_spec,''), ','); + END IF; + END IF; + + IF dyn_profile.replace_rule <> '' AND dyn_profile.preserve_rule <> '' THEN + -- RAISE NOTICE 'both replace [%] and preserve [%] specified', dyn_profile.replace_rule, dyn_profile.preserve_rule; + RETURN FALSE; + END IF; + + IF dyn_profile.replace_rule = '' AND dyn_profile.preserve_rule = '' AND dyn_profile.add_rule = '' AND dyn_profile.strip_rule = '' THEN + --Since we have nothing to do, just return a NOOP "we did it" + RETURN TRUE; + ELSIF dyn_profile.replace_rule <> '' THEN + source_marc = v_marc; + target_marc = eg_marc; + replace_rule = dyn_profile.replace_rule; + ELSE + source_marc = eg_marc; + target_marc = v_marc; + replace_rule = dyn_profile.preserve_rule; + END IF; + + UPDATE authority.record_entry + SET marc = vandelay.merge_record_xml( target_marc, source_marc, dyn_profile.add_rule, replace_rule, 
dyn_profile.strip_rule ), + editor = new_editor, + edit_date = new_edit_date + WHERE id = eg_id; + + IF NOT FOUND THEN + -- Import/merge failed. Nothing left to do. + RETURN FALSE; + END IF; + + -- Authority record successfully merged / imported. + + -- Update the vandelay record to show the successful import. + UPDATE vandelay.queued_authority_record + SET imported_as = eg_id, + import_time = NOW() + WHERE id = import_id; + + RETURN TRUE; + +END; +$$ LANGUAGE PLPGSQL; + + + + +-- DEPS 0990 + +CREATE OR REPLACE FUNCTION rating.copy_count(badge_id INT) + RETURNS TABLE (record INT, value NUMERIC) AS $f$ +DECLARE + badge rating.badge_with_orgs%ROWTYPE; +BEGIN + + SELECT * INTO badge FROM rating.badge_with_orgs WHERE id = badge_id; + + PERFORM rating.precalc_bibs_by_copy(badge_id); + + DELETE FROM precalc_copy_filter_bib_list WHERE id NOT IN ( + SELECT id FROM precalc_filter_bib_list + INTERSECT + SELECT id FROM precalc_bibs_by_copy_list + ); + ANALYZE precalc_copy_filter_bib_list; + + RETURN QUERY + SELECT f.id::INT AS bib, + COUNT(f.copy)::NUMERIC + FROM precalc_copy_filter_bib_list f + JOIN asset.copy cp ON (f.copy = cp.id) + JOIN asset.call_number cn ON (cn.id = cp.call_number) + WHERE cn.owning_lib = ANY (badge.orgs) GROUP BY 1; + +END; +$f$ LANGUAGE PLPGSQL STRICT; + + +DO $INSERT$ +BEGIN + IF evergreen.insert_on_deploy() THEN ------------------------------------ +INSERT INTO rating.popularity_parameter (id, name, func, require_percentile) + VALUES (16, 'Copy Count', 'rating.copy_count', TRUE); + END IF; +END $INSERT$; --------------------------------------------------------------- + + +-- DEPS 0991 + +CREATE OR REPLACE FUNCTION evergreen.ranked_volumes( + bibid BIGINT[], + ouid INT, + depth INT DEFAULT NULL, + slimit HSTORE DEFAULT NULL, + soffset HSTORE DEFAULT NULL, + pref_lib INT DEFAULT NULL, + includes TEXT[] DEFAULT NULL::TEXT[] +) RETURNS TABLE(id BIGINT, name TEXT, label_sortkey TEXT, rank BIGINT) AS $$ + WITH RECURSIVE ou_depth AS ( + SELECT COALESCE( + 
$3, + ( + SELECT depth + FROM actor.org_unit_type aout + INNER JOIN actor.org_unit ou ON ou_type = aout.id + WHERE ou.id = $2 + ) + ) AS depth + ), descendant_depth AS ( + SELECT ou.id, + ou.parent_ou, + out.depth + FROM actor.org_unit ou + JOIN actor.org_unit_type out ON (out.id = ou.ou_type) + JOIN anscestor_depth ad ON (ad.id = ou.id), + ou_depth + WHERE ad.depth = ou_depth.depth + UNION ALL + SELECT ou.id, + ou.parent_ou, + out.depth + FROM actor.org_unit ou + JOIN actor.org_unit_type out ON (out.id = ou.ou_type) + JOIN descendant_depth ot ON (ot.id = ou.parent_ou) + ), anscestor_depth AS ( + SELECT ou.id, + ou.parent_ou, + out.depth + FROM actor.org_unit ou + JOIN actor.org_unit_type out ON (out.id = ou.ou_type) + WHERE ou.id = $2 + UNION ALL + SELECT ou.id, + ou.parent_ou, + out.depth + FROM actor.org_unit ou + JOIN actor.org_unit_type out ON (out.id = ou.ou_type) + JOIN anscestor_depth ot ON (ot.parent_ou = ou.id) + ), descendants as ( + SELECT ou.* FROM actor.org_unit ou JOIN descendant_depth USING (id) + ) + + SELECT ua.id, ua.name, ua.label_sortkey, MIN(ua.rank) AS rank FROM ( + SELECT acn.id, owning_lib.name, acn.label_sortkey, + evergreen.rank_cp(acp), + RANK() OVER w + FROM asset.call_number acn + JOIN asset.copy acp ON (acn.id = acp.call_number) + JOIN descendants AS aou ON (acp.circ_lib = aou.id) + JOIN actor.org_unit AS owning_lib ON (acn.owning_lib = owning_lib.id) + WHERE acn.record = ANY ($1) + AND acn.deleted IS FALSE + AND acp.deleted IS FALSE + AND CASE WHEN ('exclude_invisible_acn' = ANY($7)) THEN + EXISTS ( + SELECT 1 + FROM asset.opac_visible_copies + WHERE copy_id = acp.id AND record = acn.record + ) ELSE TRUE END + GROUP BY acn.id, evergreen.rank_cp(acp), owning_lib.name, acn.label_sortkey, aou.id + WINDOW w AS ( + ORDER BY + COALESCE( + CASE WHEN aou.id = $2 THEN -20000 END, + CASE WHEN aou.id = $6 THEN -10000 END, + (SELECT distance - 5000 + FROM actor.org_unit_descendants_distance($6) as x + WHERE x.id = aou.id AND $6 IN ( + SELECT 
q.id FROM actor.org_unit_descendants($2) as q)), + (SELECT e.distance FROM actor.org_unit_descendants_distance($2) as e WHERE e.id = aou.id), + 1000 + ), + evergreen.rank_cp(acp) + ) + ) AS ua + GROUP BY ua.id, ua.name, ua.label_sortkey + ORDER BY rank, ua.name, ua.label_sortkey + LIMIT ($4 -> 'acn')::INT + OFFSET ($5 -> 'acn')::INT; +$$ LANGUAGE SQL STABLE ROWS 10; + +-- DEPS 0992 + +ALTER TABLE config.copy_status + ADD COLUMN is_available BOOL NOT NULL DEFAULT FALSE; + +-- Migrate our existing checkout-ok values to is-available +/* +UPDATE config.copy_status SET is_available = TRUE + WHERE id IN (0, 7); -- available, reshelving. +*/ + +UPDATE config.copy_status SET is_available = TRUE WHERE id IN ( + SELECT id FROM config.copy_status + WHERE checkout_ok AND id <> 8 -- on-holds-shelf is handled specially +); + +-- TESTING +SELECT id, name FROM config.copy_status WHERE checkout_ok; + +-- TODO: drop column checkout_ok + +CREATE OR REPLACE FUNCTION action.item_user_circ_test( circ_ou INT, match_item BIGINT, match_user INT, renewal BOOL ) RETURNS SETOF action.circ_matrix_test_result AS $func$ +DECLARE + user_object actor.usr%ROWTYPE; + standing_penalty config.standing_penalty%ROWTYPE; + item_object asset.copy%ROWTYPE; + item_status_object config.copy_status%ROWTYPE; + item_location_object asset.copy_location%ROWTYPE; + result action.circ_matrix_test_result; + circ_test action.found_circ_matrix_matchpoint; + circ_matchpoint config.circ_matrix_matchpoint%ROWTYPE; + circ_limit_set config.circ_limit_set%ROWTYPE; + hold_ratio action.hold_stats%ROWTYPE; + penalty_type TEXT; + items_out INT; + context_org_list INT[]; + done BOOL := FALSE; +BEGIN + -- Assume success unless we hit a failure condition + result.success := TRUE; + + -- Need user info to look up matchpoints + SELECT INTO user_object * FROM actor.usr WHERE id = match_user AND NOT deleted; + + -- (Insta)Fail if we couldn't find the user + IF user_object.id IS NULL THEN + result.fail_part := 'no_user'; + 
result.success := FALSE; + done := TRUE; + RETURN NEXT result; + RETURN; + END IF; + + -- Need item info to look up matchpoints + SELECT INTO item_object * FROM asset.copy WHERE id = match_item AND NOT deleted; + + -- (Insta)Fail if we couldn't find the item + IF item_object.id IS NULL THEN + result.fail_part := 'no_item'; + result.success := FALSE; + done := TRUE; + RETURN NEXT result; + RETURN; + END IF; + + SELECT INTO circ_test * FROM action.find_circ_matrix_matchpoint(circ_ou, item_object, user_object, renewal); + + circ_matchpoint := circ_test.matchpoint; + result.matchpoint := circ_matchpoint.id; + result.circulate := circ_matchpoint.circulate; + result.duration_rule := circ_matchpoint.duration_rule; + result.recurring_fine_rule := circ_matchpoint.recurring_fine_rule; + result.max_fine_rule := circ_matchpoint.max_fine_rule; + result.hard_due_date := circ_matchpoint.hard_due_date; + result.renewals := circ_matchpoint.renewals; + result.grace_period := circ_matchpoint.grace_period; + result.buildrows := circ_test.buildrows; + + -- (Insta)Fail if we couldn't find a matchpoint + IF circ_test.success = false THEN + result.fail_part := 'no_matchpoint'; + result.success := FALSE; + done := TRUE; + RETURN NEXT result; + RETURN; + END IF; + + -- All failures before this point are non-recoverable + -- Below this point are possibly overridable failures + + -- Fail if the user is barred + IF user_object.barred IS TRUE THEN + result.fail_part := 'actor.usr.barred'; + result.success := FALSE; + done := TRUE; + RETURN NEXT result; + END IF; + + -- Fail if the item can't circulate + IF item_object.circulate IS FALSE THEN + result.fail_part := 'asset.copy.circulate'; + result.success := FALSE; + done := TRUE; + RETURN NEXT result; + END IF; + + -- Fail if the item isn't in a circulateable status on a non-renewal + IF NOT renewal AND item_object.status <> 8 AND item_object.status NOT IN ( + (SELECT id FROM config.copy_status WHERE is_available) ) THEN + result.fail_part := 
'asset.copy.status'; + result.success := FALSE; + done := TRUE; + RETURN NEXT result; + -- Alternately, fail if the item isn't checked out on a renewal + ELSIF renewal AND item_object.status <> 1 THEN + result.fail_part := 'asset.copy.status'; + result.success := FALSE; + done := TRUE; + RETURN NEXT result; + END IF; + + -- Fail if the item can't circulate because of the shelving location + SELECT INTO item_location_object * FROM asset.copy_location WHERE id = item_object.location; + IF item_location_object.circulate IS FALSE THEN + result.fail_part := 'asset.copy_location.circulate'; + result.success := FALSE; + done := TRUE; + RETURN NEXT result; + END IF; + + -- Use Circ OU for penalties and such + SELECT INTO context_org_list ARRAY_AGG(id) FROM actor.org_unit_full_path( circ_ou ); + + IF renewal THEN + penalty_type = '%RENEW%'; + ELSE + penalty_type = '%CIRC%'; + END IF; + + FOR standing_penalty IN + SELECT DISTINCT csp.* + FROM actor.usr_standing_penalty usp + JOIN config.standing_penalty csp ON (csp.id = usp.standing_penalty) + WHERE usr = match_user + AND usp.org_unit IN ( SELECT * FROM unnest(context_org_list) ) + AND (usp.stop_date IS NULL or usp.stop_date > NOW()) + AND csp.block_list LIKE penalty_type LOOP + + result.fail_part := standing_penalty.name; + result.success := FALSE; + done := TRUE; + RETURN NEXT result; + END LOOP; + + -- Fail if the test is set to hard non-circulating + IF circ_matchpoint.circulate IS FALSE THEN + result.fail_part := 'config.circ_matrix_test.circulate'; + result.success := FALSE; + done := TRUE; + RETURN NEXT result; + END IF; + + -- Fail if the total copy-hold ratio is too low + IF circ_matchpoint.total_copy_hold_ratio IS NOT NULL THEN + SELECT INTO hold_ratio * FROM action.copy_related_hold_stats(match_item); + IF hold_ratio.total_copy_ratio IS NOT NULL AND hold_ratio.total_copy_ratio < circ_matchpoint.total_copy_hold_ratio THEN + result.fail_part := 'config.circ_matrix_test.total_copy_hold_ratio'; + result.success := 
FALSE; + done := TRUE; + RETURN NEXT result; + END IF; + END IF; + + -- Fail if the available copy-hold ratio is too low + IF circ_matchpoint.available_copy_hold_ratio IS NOT NULL THEN + IF hold_ratio.hold_count IS NULL THEN + SELECT INTO hold_ratio * FROM action.copy_related_hold_stats(match_item); + END IF; + IF hold_ratio.available_copy_ratio IS NOT NULL AND hold_ratio.available_copy_ratio < circ_matchpoint.available_copy_hold_ratio THEN + result.fail_part := 'config.circ_matrix_test.available_copy_hold_ratio'; + result.success := FALSE; + done := TRUE; + RETURN NEXT result; + END IF; + END IF; + + -- Fail if the user has too many items out by defined limit sets + FOR circ_limit_set IN SELECT ccls.* FROM config.circ_limit_set ccls + JOIN config.circ_matrix_limit_set_map ccmlsm ON ccmlsm.limit_set = ccls.id + WHERE ccmlsm.active AND ( ccmlsm.matchpoint = circ_matchpoint.id OR + ( ccmlsm.matchpoint IN (SELECT * FROM unnest(result.buildrows)) AND ccmlsm.fallthrough ) + ) LOOP + IF circ_limit_set.items_out > 0 AND NOT renewal THEN + SELECT INTO context_org_list ARRAY_AGG(aou.id) + FROM actor.org_unit_full_path( circ_ou ) aou + JOIN actor.org_unit_type aout ON aou.ou_type = aout.id + WHERE aout.depth >= circ_limit_set.depth; + IF circ_limit_set.global THEN + WITH RECURSIVE descendant_depth AS ( + SELECT ou.id, + ou.parent_ou + FROM actor.org_unit ou + WHERE ou.id IN (SELECT * FROM unnest(context_org_list)) + UNION + SELECT ou.id, + ou.parent_ou + FROM actor.org_unit ou + JOIN descendant_depth ot ON (ot.id = ou.parent_ou) + ) SELECT INTO context_org_list ARRAY_AGG(ou.id) FROM actor.org_unit ou JOIN descendant_depth USING (id); + END IF; + SELECT INTO items_out COUNT(DISTINCT circ.id) + FROM action.circulation circ + JOIN asset.copy copy ON (copy.id = circ.target_copy) + LEFT JOIN action.circulation_limit_group_map aclgm ON (circ.id = aclgm.circ) + WHERE circ.usr = match_user + AND circ.circ_lib IN (SELECT * FROM unnest(context_org_list)) + AND circ.checkin_time IS 
NULL + AND (circ.stop_fines IN ('MAXFINES','LONGOVERDUE') OR circ.stop_fines IS NULL) + AND (copy.circ_modifier IN (SELECT circ_mod FROM config.circ_limit_set_circ_mod_map WHERE limit_set = circ_limit_set.id) + OR copy.location IN (SELECT copy_loc FROM config.circ_limit_set_copy_loc_map WHERE limit_set = circ_limit_set.id) + OR aclgm.limit_group IN (SELECT limit_group FROM config.circ_limit_set_group_map WHERE limit_set = circ_limit_set.id) + ); + IF items_out >= circ_limit_set.items_out THEN + result.fail_part := 'config.circ_matrix_circ_mod_test'; + result.success := FALSE; + done := TRUE; + RETURN NEXT result; + END IF; + END IF; + SELECT INTO result.limit_groups result.limit_groups || ARRAY_AGG(limit_group) FROM config.circ_limit_set_group_map WHERE limit_set = circ_limit_set.id AND NOT check_only; + END LOOP; + + -- If we passed everything, return the successful matchpoint + IF NOT done THEN + RETURN NEXT result; + END IF; + + RETURN; +END; +$func$ LANGUAGE plpgsql; + + +-- DEPS 0993 + +/* +-- KCLS already has this column, but not the purge function below. +ALTER TABLE config.usr_activity_type + ALTER COLUMN transient SET DEFAULT TRUE; +*/ + +-- Utility function for removing all activity entries by activity type, +-- except for the most recent entry per user. This is primarily useful +-- when cleaning up rows prior to setting the transient flag on an +-- activity type to true. It allows for immediate cleanup of data (e.g. +-- for patron privacy) and lets admins control when the data is deleted, +-- which could be useful for huge activity tables. 
-- Purge all activity rows of a given activity type, retaining only the
-- single most recent entry per user. Intended to be run before flipping
-- an activity type's "transient" flag to TRUE (see DEPS 0993 note above),
-- so existing history can be removed on the admin's schedule.
-- act_type: config.usr_activity_type id whose entries should be culled.
CREATE OR REPLACE FUNCTION
    actor.purge_usr_activity_by_type(act_type INTEGER)
    RETURNS VOID AS $$
DECLARE
    cur_usr INTEGER;
BEGIN
    -- Per-user loop so the ORDER BY ... OFFSET 1 below can keep each
    -- user's newest row while deleting all older ones.
    FOR cur_usr IN SELECT DISTINCT(usr)
        FROM actor.usr_activity WHERE etype = act_type LOOP

        DELETE FROM actor.usr_activity WHERE id IN (
            SELECT id
                FROM actor.usr_activity
                WHERE usr = cur_usr AND etype = act_type
                ORDER BY event_time DESC OFFSET 1 -- skip the newest entry
        );

    END LOOP;
END $$ LANGUAGE PLPGSQL;

-- DEPS 0994 REMOVED Authority propagation

-- DEPS 0995

-- Seed data below only runs when evergreen.insert_on_deploy() is true
-- (presumably a local gate so re-deploys skip the inserts -- confirm).
DO $INSERT$
BEGIN
    IF evergreen.insert_on_deploy() THEN ------------------------------------

INSERT INTO rating.badge (name, description, scope, weight, horizon_age, importance_age, importance_interval, importance_scale, recalc_interval, popularity_parameter, percentile)
    VALUES('Top Holds Over Last 5 Years', 'The top 97th percentile for holds requested over the past five years on all materials. More weight is given to holds requested over the last year, with importance decreasing for every year after that.', 1, 3, '5 years', '5 years', '1 year', 2, '1 day', 2, 97);

-- DEPS 0996

-- Patron-visible opt-in setting used as the opt_in_setting of the
-- "Email Checkout Receipt" event definition inserted below.
INSERT INTO config.usr_setting_type (
    name,
    opac_visible,
    label,
    description,
    datatype
) VALUES (
    'circ.send_email_checkout_receipts',
    TRUE,
    oils_i18n_gettext('circ.send_email_checkout_receipts', 'Email checkout receipts by default?', 'cust', 'label'),
    oils_i18n_gettext('circ.send_email_checkout_receipts', 'Email checkout receipts by default?', 'cust', 'description'),
    'bool'
);

INSERT INTO action_trigger.hook (key, core_type, description, passive)
VALUES (
    'circ.checkout.batch_notify',
    'circ',
    oils_i18n_gettext(
        'circ.checkout.batch_notify',
        'Notification of a group of circs',
        'ath',
        'description'
    ),
    FALSE
);

INSERT INTO action_trigger.hook (key, core_type, description, passive)
VALUES (
    'circ.checkout.batch_notify.session',
    'circ',
    oils_i18n_gettext(
        'circ.checkout.batch_notify.session',
        'Notification of a group of circs at the end of a checkout session',
        'ath',
        'description'
    ),
    FALSE
);

INSERT INTO action_trigger.event_definition (
    active,
    owner,
    name,
    hook,
    validator,
    reactor,
    usr_field,
    opt_in_setting,
    group_field,
    template
) VALUES (
    TRUE,
    1,
    'Email Checkout Receipt',
    'circ.checkout.batch_notify.session',
    'NOOP_True',
    'SendEmail',
    'usr',
    'circ.send_email_checkout_receipts',
    'usr',
    $$[%- USE date -%]
[%- user = target.0.usr -%]
To: [%- params.recipient_email || user.email %]
From: [%- helpers.get_org_setting(target.0.circ_lib.id, 'org.bounced_emails') || params.sender_email || default_sender %]
Subject: Checkout Receipt
Auto-Submitted: auto-generated

You checked out the following items:

[% FOR circ IN target %]
    [%- copy_details = helpers.get_copy_bib_basics(circ.target_copy.id) -%]
    Title: [% copy_details.title %]
    Author: [% copy_details.author %]
    Call Number: [% circ.target_copy.call_number.label %]
    Barcode: [% circ.target_copy.barcode %]
    Due: [% date.format(helpers.format_date(circ.due_date), '%Y-%m-%d') %]
    Library: [% circ.circ_lib.name %]

[% END %]
$$);

-- Environment rows attach to the event definition just inserted above
-- via currval() on its id sequence; must stay adjacent to that INSERT.
INSERT INTO action_trigger.environment (
    event_def,
    path
) VALUES (
    currval('action_trigger.event_definition_id_seq'),
    'target_copy.call_number'
), (
    currval('action_trigger.event_definition_id_seq'),
    'target_copy.location'
), (
    currval('action_trigger.event_definition_id_seq'),
    'usr'
), (
    currval('action_trigger.event_definition_id_seq'),
    'circ_lib'
);

-- DEPS 0997

-- New stock copy status at hardcoded id 18.
INSERT INTO config.copy_status (id, name, holdable, opac_visible) VALUES
    (18,oils_i18n_gettext(18, 'Canceled Transit', 'ccs', 'name'), 't', 't');


-- Add Spanish to config.i18n_locale table

-- DEPS 1000 (moved up for insert)

INSERT INTO config.i18n_locale (code,marc_code,name,description)
    SELECT 'es-ES', 'spa', oils_i18n_gettext('es-ES', 'Spanish', 'i18n_l', 'name'),
        oils_i18n_gettext('es-ES', 'Spanish', 'i18n_l', 'description')
    WHERE NOT EXISTS (SELECT 1 FROM config.i18n_locale WHERE code = 'es-ES');

    END IF;
END $INSERT$; ---------------------------------------------------------------


-- DEPS 0998 REMOVED Aged circ views

-- DEPS 0999

-- Staging table for bulk-loading user settings by username; rows are
-- flagged complete once applied.
CREATE TABLE staging.setting_stage (
    row_id      BIGSERIAL PRIMARY KEY,
    row_date    TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    usrname     TEXT NOT NULL,
    setting     TEXT NOT NULL,
    value       TEXT NOT NULL,
    complete    BOOL DEFAULT FALSE
);


-- END 2.10.7-2.11.0-upgrade-db.sql


SELECT '2.10 -> 2.11 complete: ', CLOCK_TIMESTAMP();

-- DEPS 2.11.1

-- DEPS 1001 REMOVED circ history user index

-- DEPS 1002 REMOVED
-- This is a placeholder for the backport of schema update 1002
-- (adding es-ES to the list of locales. This script does nothing for
-- rel_2_11 and later.

-- DEPS 1003
-- We already have this applied, but can't hurt to reapply function.

-- Recompute the metarecord mapping for bib record bib_id given its
-- (possibly changed) fingerprint fp. Handles deleted bibs, fingerprint
-- changes, and creation of a new metarecord when no match exists.
-- Returns the id of the metarecord the bib ends up attached to, and
-- retargets metarecord ('M') holds off any metarecords deleted here.
CREATE OR REPLACE FUNCTION metabib.remap_metarecord_for_bib( bib_id BIGINT, fp TEXT, bib_is_deleted BOOL DEFAULT FALSE, retain_deleted BOOL DEFAULT FALSE ) RETURNS BIGINT AS $func$
DECLARE
    new_mapping     BOOL := TRUE;
    source_count    INT;
    old_mr          BIGINT;
    tmp_mr          metabib.metarecord%ROWTYPE;
    deleted_mrs     BIGINT[];
BEGIN

    -- We need to make sure we're not a deleted master record of an MR
    IF bib_is_deleted THEN
        FOR old_mr IN SELECT id FROM metabib.metarecord WHERE master_record = bib_id LOOP

            IF NOT retain_deleted THEN -- Go away for any MR that we're master of, unless retained
                DELETE FROM metabib.metarecord_source_map WHERE source = bib_id;
            END IF;

            -- Now, are there any more sources on this MR?
            SELECT COUNT(*) INTO source_count FROM metabib.metarecord_source_map WHERE metarecord = old_mr;

            IF source_count = 0 AND NOT retain_deleted THEN -- No other records
                deleted_mrs := ARRAY_APPEND(deleted_mrs, old_mr); -- Just in case...
                DELETE FROM metabib.metarecord WHERE id = old_mr;

            ELSE -- indeed there are. Update it with a null cache and recalcualated master record
                UPDATE metabib.metarecord
                    SET mods = NULL,
                        master_record = ( SELECT id FROM biblio.record_entry WHERE fingerprint = fp AND NOT deleted ORDER BY quality DESC LIMIT 1)
                    WHERE id = old_mr;
            END IF;
        END LOOP;

    ELSE -- insert or update

        FOR tmp_mr IN SELECT m.* FROM metabib.metarecord m JOIN metabib.metarecord_source_map s ON (s.metarecord = m.id) WHERE s.source = bib_id LOOP

            -- Find the first fingerprint-matching
            IF old_mr IS NULL AND fp = tmp_mr.fingerprint THEN
                old_mr := tmp_mr.id;
                new_mapping := FALSE;

            ELSE -- Our fingerprint changed ... maybe remove the old MR
                DELETE FROM metabib.metarecord_source_map WHERE metarecord = tmp_mr.id AND source = bib_id; -- remove the old source mapping
                SELECT COUNT(*) INTO source_count FROM metabib.metarecord_source_map WHERE metarecord = tmp_mr.id;
                IF source_count = 0 THEN -- No other records
                    deleted_mrs := ARRAY_APPEND(deleted_mrs, tmp_mr.id);
                    DELETE FROM metabib.metarecord WHERE id = tmp_mr.id;
                END IF;
            END IF;

        END LOOP;

        -- we found no suitable, preexisting MR based on old source maps
        IF old_mr IS NULL THEN
            SELECT id INTO old_mr FROM metabib.metarecord WHERE fingerprint = fp; -- is there one for our current fingerprint?

            IF old_mr IS NULL THEN -- nope, create one and grab its id
                INSERT INTO metabib.metarecord ( fingerprint, master_record ) VALUES ( fp, bib_id );
                SELECT id INTO old_mr FROM metabib.metarecord WHERE fingerprint = fp;

            ELSE -- indeed there is. update it with a null cache and recalcualated master record
                UPDATE metabib.metarecord
                    SET mods = NULL,
                        master_record = ( SELECT id FROM biblio.record_entry WHERE fingerprint = fp AND NOT deleted ORDER BY quality DESC LIMIT 1)
                    WHERE id = old_mr;
            END IF;

        ELSE -- there was one we already attached to, update its mods cache and master_record
            UPDATE metabib.metarecord
                SET mods = NULL,
                    master_record = ( SELECT id FROM biblio.record_entry WHERE fingerprint = fp AND NOT deleted ORDER BY quality DESC LIMIT 1)
                WHERE id = old_mr;
        END IF;

        IF new_mapping THEN
            INSERT INTO metabib.metarecord_source_map (metarecord, source) VALUES (old_mr, bib_id); -- new source mapping
        END IF;

    END IF;

    IF ARRAY_UPPER(deleted_mrs,1) > 0 THEN
        UPDATE action.hold_request SET target = old_mr WHERE target IN ( SELECT unnest(deleted_mrs) ) AND hold_type = 'M'; -- if we had to delete any MRs above, make sure their holds are moved
    END IF;

    RETURN old_mr;

END;
$func$ LANGUAGE PLPGSQL;

-- DEPS 2.11.2

-- DEPS 1004

-- Trigger function keeping reporter.hold_request_record in sync with
-- action.hold_request. Derives the bib record for each hold target by
-- hold type: T=title, I=issuance, V=volume, C/R/F=copy, M=metarecord,
-- P=monograph part. The UPDATE branch only fires when the target or
-- hold type actually changed.
CREATE OR REPLACE FUNCTION reporter.hold_request_record_mapper () RETURNS TRIGGER AS $$
BEGIN
    IF TG_OP = 'INSERT' THEN
        INSERT INTO reporter.hold_request_record (id, target, hold_type, bib_record)
        SELECT NEW.id,
               NEW.target,
               NEW.hold_type,
               CASE
                    WHEN NEW.hold_type = 'T'
                        THEN NEW.target
                    WHEN NEW.hold_type = 'I'
                        THEN (SELECT ssub.record_entry FROM serial.subscription ssub JOIN serial.issuance si ON (si.subscription = ssub.id) WHERE si.id = NEW.target)
                    WHEN NEW.hold_type = 'V'
                        THEN (SELECT cn.record FROM asset.call_number cn WHERE cn.id = NEW.target)
                    WHEN NEW.hold_type IN ('C','R','F')
                        THEN (SELECT cn.record FROM asset.call_number cn JOIN asset.copy cp ON (cn.id = cp.call_number) WHERE cp.id = NEW.target)
                    WHEN NEW.hold_type = 'M'
                        THEN (SELECT mr.master_record FROM metabib.metarecord mr WHERE mr.id = NEW.target)
                    WHEN NEW.hold_type = 'P'
                        THEN (SELECT bmp.record FROM biblio.monograph_part bmp WHERE bmp.id = NEW.target)
               END AS bib_record;
    ELSIF TG_OP = 'UPDATE' AND (OLD.target <> NEW.target OR OLD.hold_type <> NEW.hold_type) THEN
        UPDATE reporter.hold_request_record
            SET target = NEW.target,
                hold_type = NEW.hold_type,
                bib_record = CASE
                    WHEN NEW.hold_type = 'T'
                        THEN NEW.target
                    WHEN NEW.hold_type = 'I'
                        THEN (SELECT ssub.record_entry FROM serial.subscription ssub JOIN serial.issuance si ON (si.subscription = ssub.id) WHERE si.id = NEW.target)
                    WHEN NEW.hold_type = 'V'
                        THEN (SELECT cn.record FROM asset.call_number cn WHERE cn.id = NEW.target)
                    WHEN NEW.hold_type IN ('C','R','F')
                        THEN (SELECT cn.record FROM asset.call_number cn JOIN asset.copy cp ON (cn.id = cp.call_number) WHERE cp.id = NEW.target)
                    WHEN NEW.hold_type = 'M'
                        THEN (SELECT mr.master_record FROM metabib.metarecord mr WHERE mr.id = NEW.target)
                    WHEN NEW.hold_type = 'P'
                        THEN (SELECT bmp.record FROM biblio.monograph_part bmp WHERE bmp.id = NEW.target)
                END
            WHERE id = NEW.id;
    END IF;
    RETURN NEW;
END;
$$ LANGUAGE PLPGSQL;

-- TODO: probably want to do this inline, but there's a chance running
-- this after the main transaction will make sense. Depends on run time.
SELECT 'Starting reporter.hold_request_record rebuild: ', CLOCK_TIMESTAMP();

-- Full rebuild of the reporting mirror table using the same hold-type
-- CASE mapping as reporter.hold_request_record_mapper() above.
TRUNCATE TABLE reporter.hold_request_record;

INSERT INTO reporter.hold_request_record
SELECT  id,
        target,
        hold_type,
        CASE
            WHEN hold_type = 'T'
                THEN target
            WHEN hold_type = 'I'
                THEN (SELECT ssub.record_entry FROM serial.subscription ssub JOIN serial.issuance si ON (si.subscription = ssub.id) WHERE si.id = ahr.target)
            WHEN hold_type = 'V'
                THEN (SELECT cn.record FROM asset.call_number cn WHERE cn.id = ahr.target)
            WHEN hold_type IN ('C','R','F')
                THEN (SELECT cn.record FROM asset.call_number cn JOIN asset.copy cp ON (cn.id = cp.call_number) WHERE cp.id = ahr.target)
            WHEN hold_type = 'M'
                THEN (SELECT mr.master_record FROM metabib.metarecord mr WHERE mr.id = ahr.target)
            WHEN hold_type = 'P'
                THEN (SELECT bmp.record FROM biblio.monograph_part bmp WHERE bmp.id = ahr.target)
        END AS bib_record
    FROM action.hold_request ahr;

REINDEX TABLE reporter.hold_request_record;

ANALYZE reporter.hold_request_record;

SELECT 'Done with reporter.hold_request_record rebuild: ', CLOCK_TIMESTAMP();


-- DEPS 1005 REMOVED aged circ parent idx

-- DEPS 2.11.3

-- DEPS 1012

-- Switch the stock "Full Overlay" profile (id 2) from replace_spec to
-- preserve_spec for 901c; guarded so it only fires on the untouched
-- stock row.
UPDATE vandelay.merge_profile
    SET preserve_spec = '901c',
        replace_spec = NULL
    WHERE id = 2
        AND name = oils_i18n_gettext(2, 'Full Overlay', 'vmp', 'name')
        AND preserve_spec IS NULL
        AND add_spec IS NULL
        AND strip_spec IS NULL
        AND replace_spec = '901c';

-- DEPS 1013

-- NOTE(review): evergreen.unaccent_and_squash() is (re)defined later in
-- this script (DEPS 1008, below), so it must already exist in the
-- database when this index is created -- confirm before deploy.
CREATE INDEX actor_usr_usrname_unaccent_idx ON actor.usr (evergreen.unaccent_and_squash(usrname));

-- DEPS 1018 REMOVED stripe aous permissions

-- END 2.11.3 (2.11 SERIES)

-- START 2.11 -> 2.12 UPGRADE

SELECT 'Starting 2.11 -> 2.12: ', CLOCK_TIMESTAMP();

--Upgrade Script for 2.11.3 to 2.12.0

-- DEPS 2.12.0

-- DEPS 1006

-- This function is used to help clean up facet labels. Due to quirks in
-- MARC parsing, some facet labels may be generated with periods or commas
-- at the end. This will strip a trailing commas off all the time, and
-- periods when they don't look like they are part of initials.
-- Smith, John => no change
-- Smith, John, => Smith, John
-- Smith, John. => Smith, John
-- Public, John Q. => no change
CREATE OR REPLACE FUNCTION metabib.trim_trailing_punctuation ( TEXT ) RETURNS TEXT AS $$
DECLARE
    result    TEXT;
    last_char TEXT;
BEGIN
    result := $1;
    -- '.' matches any single character; this grabs the final character.
    -- (plain '=' is accepted by plpgsql as an alias for ':=')
    last_char = substring(result from '.$');

    IF last_char = ',' THEN
        result := substring(result from '^(.*),$');

    ELSIF last_char = '.' THEN
        -- Only strip the period when it does not follow a single word
        -- character (i.e. does not look like an initial, "John Q.").
        IF substring(result from ' \w\.$') IS NULL THEN
            result := substring(result from '^(.*)\.$');
        END IF;
    END IF;

    RETURN result;

END;
$$ language 'plpgsql';

DO $INSERT$
BEGIN
    IF evergreen.insert_on_deploy() THEN -----------------------------------

INSERT INTO config.index_normalizer (name, description, func, param_count) VALUES (
    'Trim Trailing Punctuation',
    'Eliminate extraneous trailing commas and periods in text',
    'metabib.trim_trailing_punctuation',
    0
);

-- Attach the new normalizer (pos -1 = early) to metabib fields 7-10.
INSERT INTO config.metabib_field_index_norm_map (field,norm,pos)
    SELECT  m.id,
            i.id,
            -1
    FROM config.metabib_field m,
        config.index_normalizer i
    WHERE i.func = 'metabib.trim_trailing_punctuation'
        AND m.id IN (7,8,9,10);

    END IF;
END $INSERT$; --------------------------------------------------------------

-- DEPS 1007

-- Backfill missing descriptions on stock record attribute definitions;
-- each UPDATE is a no-op when a description already exists.
UPDATE config.record_attr_definition
    SET description = oils_i18n_gettext('audience', 'Audience', 'crad', 'label')
    WHERE description IS NULL AND name = 'audience';
UPDATE config.record_attr_definition
    SET description = oils_i18n_gettext('bib_level', 'Bib Level', 'crad', 'label')
    WHERE description IS NULL AND name = 'bib_level';
UPDATE config.record_attr_definition
    SET description = oils_i18n_gettext('item_form', 'Item Form', 'crad', 'label')
    WHERE description IS NULL AND name = 'item_form';
UPDATE config.record_attr_definition
    SET description = oils_i18n_gettext('item_lang', 'Language', 'crad', 'label')
    WHERE description IS NULL AND name = 'item_lang';
UPDATE config.record_attr_definition
    SET description = oils_i18n_gettext('lit_form', 'Literary Form', 'crad', 'label')
    WHERE description IS NULL AND name = 'lit_form';
UPDATE config.record_attr_definition
    SET description = oils_i18n_gettext('item_type', 'Item Type', 'crad', 'label')
    WHERE description IS NULL AND name = 'item_type';
UPDATE config.record_attr_definition
    SET description = oils_i18n_gettext('vr_format', 'Video Format', 'crad', 'label')
    WHERE description IS NULL AND name = 'vr_format';

-- DEPS 1008

-- Normalize a string for loose matching: strip whitespace/punctuation,
-- remove accents, lowercase. Backs the actor_usr_usrname_unaccent_idx
-- index created above (DEPS 1013).
CREATE OR REPLACE FUNCTION evergreen.unaccent_and_squash ( IN arg text) RETURNS text
    IMMUTABLE STRICT AS $$
    BEGIN
    RETURN evergreen.lowercase(unaccent(regexp_replace(arg, '[\s[:punct:]]','','g')));
    END;
$$ LANGUAGE PLPGSQL;

-- DEPS 1009

DO $INSERT$
BEGIN
    IF evergreen.insert_on_deploy() THEN -----------------------------------

INSERT into config.org_unit_setting_type
( name, grp, label, description, datatype, fm_class ) VALUES
( 'acq.copy_status_on_receiving', 'acq',
    oils_i18n_gettext('acq.copy_status_on_receiving',
        'Initial status for received items',
        'coust', 'label'),
    oils_i18n_gettext('acq.copy_status_on_receiving',
        'Allows staff to designate a custom copy status on received lineitems.  Default status is "In Process".',
        'coust', 'description'),
    'link', 'ccs');

    END IF;
END $INSERT$; --------------------------------------------------------------

-- remove unused org unit setting for self checkout interface
-- DEPS 1010

-- Remove the setting itself, its type, its audit-log rows, and any
-- permissions/mappings that referenced its update permission.
DELETE FROM actor.org_unit_setting WHERE name = 'circ.selfcheck.require_patron_password';

DELETE FROM config.org_unit_setting_type WHERE name = 'circ.selfcheck.require_patron_password';

DELETE FROM config.org_unit_setting_type_log WHERE field_name = 'circ.selfcheck.require_patron_password';

DELETE FROM permission.usr_perm_map WHERE perm IN (SELECT id FROM permission.perm_list WHERE code = 'UPDATE_ORG_UNIT_SETTING.circ.selfcheck.require_patron_password');

DELETE FROM permission.grp_perm_map WHERE perm IN (SELECT id FROM permission.perm_list WHERE code = 'UPDATE_ORG_UNIT_SETTING.circ.selfcheck.require_patron_password');

DELETE FROM permission.perm_list WHERE code = 'UPDATE_ORG_UNIT_SETTING.circ.selfcheck.require_patron_password';

-- DEPS 1011

DO $INSERT$
BEGIN
    IF evergreen.insert_on_deploy() THEN -----------------------------------

INSERT INTO config.org_unit_setting_type
    (name, grp, label, description, datatype)
    VALUES
    ('circ.in_house_use.copy_alert',
     'circ',
     oils_i18n_gettext('circ.in_house_use.copy_alert',
        'Display copy alert for in-house-use',
        'coust', 'label'),
     oils_i18n_gettext('circ.in_house_use.copy_alert',
        'Display copy alert for in-house-use',
        'coust', 'description'),
     'bool'),
    ('circ.in_house_use.checkin_alert',
     'circ',
     oils_i18n_gettext('circ.in_house_use.checkin_alert',
        'Display copy location checkin alert for in-house-use',
        'coust', 'label'),
     oils_i18n_gettext('circ.in_house_use.checkin_alert',
        'Display copy location checkin alert for in-house-use',
        'coust', 'description'),
     'bool');
    END IF;
END $INSERT$; --------------------------------------------------------------

-- DEPS 1014
-- this update of unapi.mmr_mra() removed since 1015 has
-- a newer version

-- DEPS 1015

-- Emit an XML <attributes> element describing the record attributes of
-- metarecord obj_id, restricted to source bibs visible at the org unit
-- named by $5 (org shortname, optional depth $6, pref_lib $10). Sources
-- qualify when they have OPAC-visible copies in scope, located URIs, or
-- a transcendant bib source. Coded values include a "cvmid" and the
-- comma-separated contributing source list; uncontrolled values do not.
CREATE OR REPLACE FUNCTION unapi.mmr_mra (
    obj_id BIGINT,
    format TEXT,
    ename TEXT,
    includes TEXT[],
    org TEXT,
    depth INT DEFAULT NULL,
    slimit HSTORE DEFAULT NULL,
    soffset HSTORE DEFAULT NULL,
    include_xmlns BOOL DEFAULT TRUE,
    pref_lib INT DEFAULT NULL
) RETURNS XML AS $F$
    SELECT  XMLELEMENT(
        name attributes,
        XMLATTRIBUTES(
            CASE WHEN $9 THEN 'http://open-ils.org/spec/indexing/v1' ELSE NULL END AS xmlns,
            'tag:open-ils.org:U2@mmr/' || $1 AS metarecord
        ),
        (SELECT XMLAGG(foo.y)
          FROM (
            -- sourcelist: bib records in this metarecord that are
            -- "visible" from the requested org unit's perspective.
            WITH sourcelist AS (
                WITH aou AS (SELECT COALESCE(id, (evergreen.org_top()).id) AS id
                    FROM actor.org_unit WHERE shortname = $5 LIMIT 1)
                SELECT source
                FROM metabib.metarecord_source_map mmsm, aou
                WHERE metarecord = $1 AND (
                    EXISTS (
                        SELECT 1 FROM asset.opac_visible_copies
                        WHERE record = source AND circ_lib IN (
                            SELECT id FROM actor.org_unit_descendants(aou.id, $6))
                        LIMIT 1
                    )
                    OR EXISTS (SELECT 1 FROM located_uris(source, aou.id, $10) LIMIT 1)
                    OR EXISTS (SELECT 1 FROM biblio.record_entry b JOIN config.bib_source src ON (b.source = src.id) WHERE src.transcendant AND b.id = mmsm.source LIMIT 1)
                )
            )
            -- Coded (controlled) record attributes
            SELECT  cmra.aid,
                    XMLELEMENT(
                        name field,
                        XMLATTRIBUTES(
                            cmra.attr AS name,
                            cmra.value AS "coded-value",
                            cmra.aid AS "cvmid",
                            rad.composite,
                            rad.multi,
                            rad.filter,
                            rad.sorter,
                            cmra.source_list
                        ),
                        cmra.value
                    )
              FROM  (
                SELECT DISTINCT aid, attr, value, STRING_AGG(x.id::TEXT, ',') AS source_list
                  FROM (
                    SELECT  v.source AS id,
                            c.id AS aid,
                            c.ctype AS attr,
                            c.code AS value
                      FROM  metabib.record_attr_vector_list v
                            JOIN config.coded_value_map c ON ( c.id = ANY( v.vlist ) )
                    ) AS x
                    JOIN sourcelist ON (x.id = sourcelist.source)
                    GROUP BY 1, 2, 3
                ) AS cmra
                JOIN config.record_attr_definition rad ON (cmra.attr = rad.name)
            UNION ALL
            -- Uncontrolled record attributes
            SELECT  umra.aid,
                    XMLELEMENT(
                        name field,
                        XMLATTRIBUTES(
                            umra.attr AS name,
                            rad.composite,
                            rad.multi,
                            rad.filter,
                            rad.sorter
                        ),
                        umra.value
                    )
              FROM  (
                SELECT DISTINCT aid, attr, value
                  FROM (
                    SELECT  v.source AS id,
                            m.id AS aid,
                            m.attr AS attr,
                            m.value AS value
                      FROM  metabib.record_attr_vector_list v
                            JOIN metabib.uncontrolled_record_attr_value m ON ( m.id = ANY( v.vlist ) )
                    ) AS x
                    JOIN sourcelist ON (x.id = sourcelist.source)
                ) AS umra
                JOIN config.record_attr_definition rad ON (umra.attr = rad.name)
            ORDER BY 1

        )foo(id,y)
        )
    )
$F$ LANGUAGE SQL STABLE;

-- DEPS 1016

DO $INSERT$
BEGIN
    IF evergreen.insert_on_deploy() THEN -----------------------------------

-- Include title partName/partNumber in bib fingerprints so parts
-- distinguish otherwise-identical titles.
INSERT INTO config.biblio_fingerprint (name, xpath, format)
    VALUES (
        'PartName',
        '//mods32:mods/mods32:titleInfo/mods32:partName',
        'mods32'
    );

INSERT INTO config.biblio_fingerprint (name, xpath, format)
    VALUES (
        'PartNumber',
        '//mods32:mods/mods32:titleInfo/mods32:partNumber',
        'mods32'
    );

    END IF;
END $INSERT$; --------------------------------------------------------------

-- DEPS 1017

-- Build a record's fingerprint string by evaluating each configured
-- config.biblio_fingerprint xpath against the (optionally XSLT
-- transformed) MARCXML, normalizing, and concatenating name:value
-- pairs. Returns NULL for NULL/empty input.
CREATE OR REPLACE FUNCTION biblio.extract_fingerprint ( marc text ) RETURNS TEXT AS $func$
DECLARE
    idx     config.biblio_fingerprint%ROWTYPE;
    xfrm        config.xml_transform%ROWTYPE;
    prev_xfrm   TEXT;
    transformed_xml TEXT;
    xml_node    TEXT;
    xml_node_list   TEXT[];
    raw_text    TEXT;
    output_text TEXT := '';
BEGIN

    IF marc IS NULL OR marc = '' THEN
        RETURN NULL;
    END IF;

    -- Loop over the indexing entries
    FOR idx IN SELECT * FROM config.biblio_fingerprint ORDER BY format, id LOOP

        SELECT INTO xfrm * from config.xml_transform WHERE name = idx.format;

        -- See if we can skip the XSLT ... it's expensive
        IF prev_xfrm IS NULL OR prev_xfrm <> xfrm.name THEN
            -- Can't skip the transform
            IF xfrm.xslt <> '---' THEN
                transformed_xml := oils_xslt_process(marc,xfrm.xslt);
            ELSE
                transformed_xml := marc;
            END IF;

            prev_xfrm := xfrm.name;
        END IF;

        raw_text := COALESCE(
            naco_normalize(
                ARRAY_TO_STRING(
                    oils_xpath(
                        '//text()',
                        (oils_xpath(
                            idx.xpath,
                            transformed_xml,
                            ARRAY[ARRAY[xfrm.prefix, xfrm.namespace_uri]]
                        ))[1]
                    ),
                    ''
                )
            ),
            ''
        );

        -- Strip bracketed text and leading articles before squashing.
        raw_text := REGEXP_REPLACE(raw_text, E'\\[.+?\\]', E'');
        raw_text := REGEXP_REPLACE(raw_text, E'\\mthe\\M|\\man?d?d\\M', E'', 'g'); -- arg! the pain!

        IF idx.first_word IS TRUE THEN
            raw_text := REGEXP_REPLACE(raw_text, E'^(\\w+).*?$', E'\\1');
        END IF;

        output_text := output_text || idx.name || ':' ||
            REGEXP_REPLACE(raw_text, E'\\s+', '', 'g') || ' ';

    END LOOP;

    RETURN BTRIM(output_text);

END;
$func$ LANGUAGE PLPGSQL;

-- DEPS 1019 REMOVED hold targeter sql updates / setting
-- KCLS already has this, but doesn't hurt to reapply.

-- DEPS 1020
-- KCLS alrady has, doesn't hurt

CREATE OR REPLACE FUNCTION actor.org_unit_ancestor_setting_batch_by_org(
    setting_name TEXT, org_ids INTEGER[])
    RETURNS SETOF actor.org_unit_setting AS
$FUNK$
DECLARE
    setting RECORD;
    org_id INTEGER;
BEGIN
    /* Returns one actor.org_unit_setting row per org unit ID provided.
        When no setting exists for a given org unit, the setting row
        will contain all empty values. */
    FOREACH org_id IN ARRAY org_ids LOOP
        SELECT INTO setting * FROM
            actor.org_unit_ancestor_setting(setting_name, org_id);
        RETURN NEXT setting;
    END LOOP;
    RETURN;
END;
$FUNK$ LANGUAGE plpgsql STABLE;

-- DEPS 1021

-- Add missing permissions noted in LP 1517137 adjusting those added manually and ignoring those already in place.
SELECT 'Starting permissions updates: ', CLOCK_TIMESTAMP();

-- For each {id, code, description} triple below: if the stock id is
-- free, claim it. If a locally-created permission already uses the same
-- code (at id > 1000), rename it aside, insert the stock row, repoint
-- every reference to the new id, then drop the local row.
-- Note: id 569 is absent from this list -- matches upstream? confirm.
DO $$
DECLARE fixperm TEXT[3];
DECLARE modify BOOLEAN;
DECLARE permid BIGINT;
DECLARE oldid BIGINT;
BEGIN

FOREACH fixperm SLICE 1 IN ARRAY ARRAY[
    ['564', 'MARK_ITEM_CATALOGING', 'Allow a user to mark an item status as ''cataloging'''],
    ['565', 'MARK_ITEM_DAMAGED', 'Allow a user to mark an item status as ''damaged'''],
    ['566', 'MARK_ITEM_DISCARD', 'Allow a user to mark an item status as ''discard'''],
    ['567', 'MARK_ITEM_RESERVES', 'Allow a user to mark an item status as ''reserves'''],
    ['568', 'ADMIN_ORG_UNIT_SETTING_TYPE_LOG', 'Allow a user to modify the org unit settings log'],
    ['570', 'CREATE_POP_BADGE', 'Allow a user to create a new popularity badge'],
    ['571', 'DELETE_POP_BADGE', 'Allow a user to delete a popularity badge'],
    ['572', 'UPDATE_POP_BADGE', 'Allow a user to modify a popularity badge'],
    ['573', 'CREATE_POP_PARAMETER', 'Allow a user to create a popularity badge parameter'],
    ['574', 'DELETE_POP_PARAMETER', 'Allow a user to delete a popularity badge parameter'],
    ['575', 'UPDATE_POP_PARAMETER', 'Allow a user to modify a popularity badge parameter'],
    ['576', 'CREATE_AUTHORITY_RECORD', 'Allow a user to create an authority record'],
    ['577', 'DELETE_AUTHORITY_RECORD', 'Allow a user to delete an authority record'],
    ['578', 'UPDATE_AUTHORITY_RECORD', 'Allow a user to modify an authority record'],
    ['579', 'CREATE_AUTHORITY_CONTROL_SET', 'Allow a user to create an authority control set'],
    ['580', 'DELETE_AUTHORITY_CONTROL_SET', 'Allow a user to delete an authority control set'],
    ['581', 'UPDATE_AUTHORITY_CONTROL_SET', 'Allow a user to modify an authority control set'],
    ['582', 'ACTOR_USER_DELETE_OPEN_XACTS.override', 'Override the ACTOR_USER_DELETE_OPEN_XACTS event'],
    ['583', 'PATRON_EXCEEDS_LOST_COUNT.override', 'Override the PATRON_EXCEEDS_LOST_COUNT event'],
    ['584', 'MAX_HOLDS.override', 'Override the MAX_HOLDS event'],
    ['585', 'ITEM_DEPOSIT_REQUIRED.override', 'Override the ITEM_DEPOSIT_REQUIRED event'],
    ['586', 'ITEM_DEPOSIT_PAID.override', 'Override the ITEM_DEPOSIT_PAID event'],
    ['587', 'COPY_STATUS_LOST_AND_PAID.override', 'Override the COPY_STATUS_LOST_AND_PAID event'],
    ['588', 'ITEM_NOT_HOLDABLE.override', 'Override the ITEM_NOT_HOLDABLE event'],
    ['589', 'ITEM_RENTAL_FEE_REQUIRED.override', 'Override the ITEM_RENTAL_FEE_REQUIRED event']
]
LOOP
    permid := CAST (fixperm[1] AS BIGINT);
    -- Has this permission already been manually applied at the expected id?
    PERFORM * FROM permission.perm_list WHERE id = permid;
    IF NOT FOUND THEN
        -- Move any same-code local permission out of the way first.
        UPDATE permission.perm_list SET code = code || '_local' WHERE code = fixperm[2] AND id > 1000 RETURNING id INTO oldid;
        modify := FOUND;

        INSERT INTO permission.perm_list (id, code, description) VALUES (permid, fixperm[2], fixperm[3]);

        -- Several of these are rather unlikely for these particular permissions but safer > sorry.
        IF modify THEN
            UPDATE permission.grp_perm_map SET perm = permid WHERE perm = oldid;
            UPDATE config.org_unit_setting_type SET update_perm = permid WHERE update_perm = oldid;
            UPDATE permission.usr_object_perm_map SET perm = permid WHERE perm = oldid;
            UPDATE permission.usr_perm_map SET perm = permid WHERE perm = oldid;
            UPDATE config.org_unit_setting_type SET view_perm = permid WHERE view_perm = oldid;
            UPDATE config.z3950_source SET use_perm = permid WHERE use_perm = oldid;
            DELETE FROM permission.perm_list WHERE id = oldid;
        END IF;
    END IF;
END LOOP;

END$$;

SELECT 'Done permissions updates: ', CLOCK_TIMESTAMP();

-- DEPS 1022

-- Merge two MARCXML strings per the given vandelay merge profile.
-- Returns the merged XML, or NULL when either record or the profile is
-- missing/invalid (including a profile that sets both replace_spec and
-- preserve_spec). With a preserve rule, the roles of source and target
-- are swapped so preserved fields survive from the existing record.
CREATE OR REPLACE FUNCTION vandelay.merge_record_xml_using_profile ( incoming_marc TEXT, existing_marc TEXT, merge_profile_id BIGINT ) RETURNS TEXT AS $$
DECLARE
    merge_profile   vandelay.merge_profile%ROWTYPE;
    dyn_profile     vandelay.compile_profile%ROWTYPE;
    target_marc     TEXT;
    source_marc     TEXT;
    replace_rule    TEXT;
    match_count     INT;
BEGIN

    IF existing_marc IS NULL OR incoming_marc IS NULL THEN
        -- RAISE NOTICE 'no marc for source or target records';
        RETURN NULL;
    END IF;

    IF merge_profile_id IS NOT NULL THEN
        SELECT * INTO merge_profile FROM vandelay.merge_profile WHERE id = merge_profile_id;
        IF FOUND THEN
            dyn_profile.add_rule := COALESCE(merge_profile.add_spec,'');
            dyn_profile.strip_rule := COALESCE(merge_profile.strip_spec,'');
            dyn_profile.replace_rule := COALESCE(merge_profile.replace_spec,'');
            dyn_profile.preserve_rule := COALESCE(merge_profile.preserve_spec,'');
        ELSE
            -- RAISE NOTICE 'merge profile not found';
            RETURN NULL;
        END IF;
    ELSE
        -- RAISE NOTICE 'no merge profile specified';
        RETURN NULL;
    END IF;

    IF dyn_profile.replace_rule <> '' AND dyn_profile.preserve_rule <> '' THEN
        -- RAISE NOTICE 'both replace [%] and preserve [%] specified', dyn_profile.replace_rule, dyn_profile.preserve_rule;
        RETURN NULL;
    END IF;

    IF dyn_profile.replace_rule = '' AND dyn_profile.preserve_rule = '' AND dyn_profile.add_rule = '' AND dyn_profile.strip_rule = '' THEN
        -- Since we have nothing to do, just return a target record as is
        RETURN existing_marc;
    ELSIF dyn_profile.preserve_rule <> '' THEN
        source_marc = existing_marc;
        target_marc = incoming_marc;
        replace_rule = dyn_profile.preserve_rule;
    ELSE
        source_marc = incoming_marc;
        target_marc = existing_marc;
        replace_rule = dyn_profile.replace_rule;
    END IF;

    RETURN vandelay.merge_record_xml( target_marc, source_marc, dyn_profile.add_rule, replace_rule, dyn_profile.strip_rule );

END;
$$ LANGUAGE PLPGSQL;

-- DEPS 1023

DO $INSERT$
BEGIN
    IF evergreen.insert_on_deploy() THEN -----------------------------------
INSERT into config.org_unit_setting_type
( name, grp, label, description, datatype, fm_class ) VALUES
(
    'cat.default_merge_profile', 'cat',
    oils_i18n_gettext(
        'cat.default_merge_profile',
        'Default Merge Profile (Z39.50 and Record Buckets)',
        'coust',
        'label'
    ),
    oils_i18n_gettext(
        'cat.default_merge_profile',
        'Default merge profile to use during Z39.50 imports and record bucket merges',
        'coust',
        'description'
    ),
    'link',
    'vmp'
);
    END IF;
END $INSERT$; --------------------------------------------------------------

-- DEPS 1024

-- Add new column "rtl" with default of false
ALTER TABLE config.i18n_locale ADD COLUMN rtl BOOL NOT NULL DEFAULT FALSE;

-- DEPS 1025

DO $INSERT$
BEGIN
    IF evergreen.insert_on_deploy() THEN -----------------------------------

-- Add Arabic (Jordan) to i18n_locale table as a stock language option
INSERT INTO config.i18n_locale (code,marc_code,name,description,rtl)
    VALUES ('ar-JO', 'ara', oils_i18n_gettext('ar-JO', 'Arabic (Jordan)', 'i18n_l', 'name'),
        oils_i18n_gettext('ar-JO', 'Arabic (Jordan)', 'i18n_l', 'description'), 'true');

-- DEPS 1026

-- New browse-only subject fields at hardcoded ids 34-36; the matching
-- UPDATEs that disable browse on fields 11/13/14 follow below.
INSERT INTO config.metabib_field ( id, field_class, name, label,
    format, xpath, search_field, browse_field, authority_xpath, joiner ) VALUES
    (34, 'subject', 'topic_browse', oils_i18n_gettext(34, 'Topic Browse', 'cmf', 'label'),
    'mods32', $$//mods32:mods/mods32:subject[local-name(./*[1]) = "topic"]$$, FALSE, TRUE, '//@xlink:href', ' -- ' ); -- /* to fool vim */;

INSERT INTO config.metabib_field ( id, field_class, name, label,
    format, xpath, search_field, browse_field, authority_xpath, joiner ) VALUES
    (35, 'subject', 'geographic_browse', oils_i18n_gettext(35, 'Geographic Name Browse', 'cmf', 'label'),
    'mods32', $$//mods32:mods/mods32:subject[local-name(./*[1]) = "geographic"]$$, FALSE, TRUE, '//@xlink:href', ' -- ' ); -- /* to fool vim */;

INSERT INTO config.metabib_field ( id, field_class, name, label,
    format, xpath, search_field, browse_field, authority_xpath, joiner ) VALUES
    (36, 'subject', 'temporal_browse', oils_i18n_gettext(36, 'Temporal Term Browse', 'cmf', 'label'),
    'mods32', $$//mods32:mods/mods32:subject[local-name(./*[1]) = "temporal"]$$, FALSE, TRUE, '//@xlink:href', ' -- ' ); -- /* to fool vim */;
+INSERT INTO config.metabib_field_index_norm_map (field,norm) + SELECT m.id, + i.id + FROM config.metabib_field m, + config.index_normalizer i + WHERE i.func IN ('naco_normalize') + AND m.id IN (34, 35, 36); + +UPDATE config.metabib_field +SET browse_field = FALSE +WHERE field_class = 'subject' AND name = 'topic' +AND id = 14; +UPDATE config.metabib_field +SET browse_field = FALSE +WHERE field_class = 'subject' AND name = 'geographic' +AND id = 13; +UPDATE config.metabib_field +SET browse_field = FALSE +WHERE field_class = 'subject' AND name = 'temporal' +AND id = 11; + +UPDATE authority.control_set_bib_field_metabib_field_map +SET metabib_field = 34 +WHERE metabib_field = 14; +UPDATE authority.control_set_bib_field_metabib_field_map +SET metabib_field = 35 +WHERE metabib_field = 13; +UPDATE authority.control_set_bib_field_metabib_field_map +SET metabib_field = 36 +WHERE metabib_field = 11; + +-- DEPS 1027 + +INSERT INTO config.settings_group (name, label) + VALUES ('ebook_api', 'Ebook API Integration'); + +INSERT INTO config.org_unit_setting_type + (name, label, description, grp, datatype) +VALUES ( + 'ebook_api.oneclickdigital.library_id', + oils_i18n_gettext( + 'ebook_api.oneclickdigital.library_id', + 'OneClickdigital Library ID', + 'coust', + 'label' + ), + oils_i18n_gettext( + 'ebook_api.oneclickdigital.library_id', + 'Identifier assigned to this library by OneClickdigital', + 'coust', + 'description' + ), + 'ebook_api', + 'string' +),( + 'ebook_api.oneclickdigital.basic_token', + oils_i18n_gettext( + 'ebook_api.oneclickdigital.basic_token', + 'OneClickdigital Basic Token', + 'coust', + 'label' + ), + oils_i18n_gettext( + 'ebook_api.oneclickdigital.basic_token', + 'Basic token for client authentication with OneClickdigital API (supplied by OneClickdigital)', + 'coust', + 'description' + ), + 'ebook_api', + 'string' +); + +INSERT INTO config.org_unit_setting_type + (name, label, description, grp, datatype) +VALUES ( + 'ebook_api.overdrive.discovery_base_uri', 
+ oils_i18n_gettext( + 'ebook_api.overdrive.discovery_base_uri', + 'OverDrive Discovery API Base URI', + 'coust', + 'label' + ), + oils_i18n_gettext( + 'ebook_api.overdrive.discovery_base_uri', + 'Base URI for OverDrive Discovery API (defaults to https://api.overdrive.com/v1). Using HTTPS here is strongly encouraged.', + 'coust', + 'description' + ), + 'ebook_api', + 'string' +),( + 'ebook_api.overdrive.circulation_base_uri', + oils_i18n_gettext( + 'ebook_api.overdrive.circulation_base_uri', + 'OverDrive Circulation API Base URI', + 'coust', + 'label' + ), + oils_i18n_gettext( + 'ebook_api.overdrive.circulation_base_uri', + 'Base URI for OverDrive Circulation API (defaults to https://patron.api.overdrive.com/v1). Using HTTPS here is strongly encouraged.', + 'coust', + 'description' + ), + 'ebook_api', + 'string' +),( + 'ebook_api.overdrive.account_id', + oils_i18n_gettext( + 'ebook_api.overdrive.account_id', + 'OverDrive Account ID', + 'coust', + 'label' + ), + oils_i18n_gettext( + 'ebook_api.overdrive.account_id', + 'Account ID (a.k.a. 
Library ID) for this library, as assigned by OverDrive', + 'coust', + 'description' + ), + 'ebook_api', + 'string' +),( + 'ebook_api.overdrive.websiteid', + oils_i18n_gettext( + 'ebook_api.overdrive.websiteid', + 'OverDrive Website ID', + 'coust', + 'label' + ), + oils_i18n_gettext( + 'ebook_api.overdrive.websiteid', + 'Website ID for this library, as assigned by OverDrive', + 'coust', + 'description' + ), + 'ebook_api', + 'string' +),( + 'ebook_api.overdrive.authorizationname', + oils_i18n_gettext( + 'ebook_api.overdrive.authorizationname', + 'OverDrive Authorization Name', + 'coust', + 'label' + ), + oils_i18n_gettext( + 'ebook_api.overdrive.authorizationname', + 'Authorization name for this library, as assigned by OverDrive', + 'coust', + 'description' + ), + 'ebook_api', + 'string' +),( + 'ebook_api.overdrive.basic_token', + oils_i18n_gettext( + 'ebook_api.overdrive.basic_token', + 'OverDrive Basic Token', + 'coust', + 'label' + ), + oils_i18n_gettext( + 'ebook_api.overdrive.basic_token', + 'Basic token for client authentication with OverDrive API (supplied by OverDrive)', + 'coust', + 'description' + ), + 'ebook_api', + 'string' +),( + 'ebook_api.overdrive.granted_auth_redirect_uri', + oils_i18n_gettext( + 'ebook_api.overdrive.granted_auth_redirect_uri', + 'OverDrive Granted Authorization Redirect URI', + 'coust', + 'label' + ), + oils_i18n_gettext( + 'ebook_api.overdrive.granted_auth_redirect_uri', + 'URI provided to OverDrive for use with granted authorization', + 'coust', + 'description' + ), + 'ebook_api', + 'string' +),( + 'ebook_api.overdrive.password_required', + oils_i18n_gettext( + 'ebook_api.overdrive.password_required', + 'OverDrive Password Required', + 'coust', + 'label' + ), + oils_i18n_gettext( + 'ebook_api.overdrive.password_required', + 'Does this library require a password when authenticating patrons with the OverDrive API?', + 'coust', + 'description' + ), + 'ebook_api', + 'bool' +); + + END IF; +END $INSERT$; 
-------------------------------------------------------------- + +-- DEPS 1029 + +UPDATE config.index_normalizer SET description = 'Apply NACO normalization rules to the extracted text. See https://www.loc.gov/aba/pcc/naco/normrule-2.html for details.' WHERE func = 'naco_normalize'; +UPDATE config.index_normalizer SET description = 'Apply NACO normalization rules to the extracted text, retaining the first comma. See https://www.loc.gov/aba/pcc/naco/normrule-2.html for details.' WHERE func = 'naco_normalize_keep_comma'; + +CREATE OR REPLACE FUNCTION public.naco_normalize( TEXT, TEXT ) RETURNS TEXT AS $func$ + + use strict; + use Unicode::Normalize; + use Encode; + + my $str = shift; + my $sf = shift; + + # Apply NACO normalization to input string; based on + # https://www.loc.gov/aba/pcc/naco/documents/SCA_PccNormalization_Final_revised.pdf + # + # Note that unlike a strict reading of the NACO normalization rules, + # output is returned as lowercase instead of uppercase for compatibility + # with previous versions of the Evergreen naco_normalize routine. + + # Convert to upper-case first; even though final output will be lowercase, doing this will + # ensure that the German eszett (ß) and certain ligatures (ff, fi, ffl, etc.) will be handled correctly. + # If there are any bugs in Perl's implementation of upcasing, they will be passed through here. + $str = uc $str; + + # remove non-filing strings + $str =~ s/\x{0098}.*?\x{009C}//g; + + $str = NFKD($str); + + # additional substitutions - 3.6. 
+ $str =~ s/\x{00C6}/AE/g; + $str =~ s/\x{00DE}/TH/g; + $str =~ s/\x{0152}/OE/g; + $str =~ tr/\x{0110}\x{00D0}\x{00D8}\x{0141}\x{2113}\x{02BB}\x{02BC}]['/DDOLl/d; + + # transformations based on Unicode category codes + $str =~ s/[\p{Cc}\p{Cf}\p{Co}\p{Cs}\p{Lm}\p{Mc}\p{Me}\p{Mn}]//g; + + if ($sf && $sf =~ /^a/o) { + my $commapos = index($str, ','); + if ($commapos > -1) { + if ($commapos != length($str) - 1) { + $str =~ s/,/\x07/; # preserve first comma + } + } + } + + # since we've stripped out the control characters, we can now + # use a few as placeholders temporarily + $str =~ tr/+&@\x{266D}\x{266F}#/\x01\x02\x03\x04\x05\x06/; + $str =~ s/[\p{Pc}\p{Pd}\p{Pe}\p{Pf}\p{Pi}\p{Po}\p{Ps}\p{Sk}\p{Sm}\p{So}\p{Zl}\p{Zp}\p{Zs}]/ /g; + $str =~ tr/\x01\x02\x03\x04\x05\x06\x07/+&@\x{266D}\x{266F}#,/; + + # decimal digits + $str =~ tr/\x{0660}-\x{0669}\x{06F0}-\x{06F9}\x{07C0}-\x{07C9}\x{0966}-\x{096F}\x{09E6}-\x{09EF}\x{0A66}-\x{0A6F}\x{0AE6}-\x{0AEF}\x{0B66}-\x{0B6F}\x{0BE6}-\x{0BEF}\x{0C66}-\x{0C6F}\x{0CE6}-\x{0CEF}\x{0D66}-\x{0D6F}\x{0E50}-\x{0E59}\x{0ED0}-\x{0ED9}\x{0F20}-\x{0F29}\x{1040}-\x{1049}\x{1090}-\x{1099}\x{17E0}-\x{17E9}\x{1810}-\x{1819}\x{1946}-\x{194F}\x{19D0}-\x{19D9}\x{1A80}-\x{1A89}\x{1A90}-\x{1A99}\x{1B50}-\x{1B59}\x{1BB0}-\x{1BB9}\x{1C40}-\x{1C49}\x{1C50}-\x{1C59}\x{A620}-\x{A629}\x{A8D0}-\x{A8D9}\x{A900}-\x{A909}\x{A9D0}-\x{A9D9}\x{AA50}-\x{AA59}\x{ABF0}-\x{ABF9}\x{FF10}-\x{FF19}/0-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-9/; + + # intentionally skipping step 8 of the NACO algorithm; if the string + # gets normalized away, that's fine. + + # leading and trailing spaces + $str =~ s/\s+/ /g; + $str =~ s/^\s+//; + $str =~ s/\s+$//g; + + return lc $str; +$func$ LANGUAGE 'plperlu' STRICT IMMUTABLE; + +-- Currently, the only difference from naco_normalize is that search_normalize +-- turns apostrophes into spaces, while naco_normalize collapses them. 
+CREATE OR REPLACE FUNCTION public.search_normalize( TEXT, TEXT ) RETURNS TEXT AS $func$ + + use strict; + use Unicode::Normalize; + use Encode; + + my $str = shift; + my $sf = shift; + + # Apply NACO normalization to input string; based on + # https://www.loc.gov/aba/pcc/naco/documents/SCA_PccNormalization_Final_revised.pdf + # + # Note that unlike a strict reading of the NACO normalization rules, + # output is returned as lowercase instead of uppercase for compatibility + # with previous versions of the Evergreen naco_normalize routine. + + # Convert to upper-case first; even though final output will be lowercase, doing this will + # ensure that the German eszett (ß) and certain ligatures (ff, fi, ffl, etc.) will be handled correctly. + # If there are any bugs in Perl's implementation of upcasing, they will be passed through here. + $str = uc $str; + + # remove non-filing strings + $str =~ s/\x{0098}.*?\x{009C}//g; + + $str = NFKD($str); + + # additional substitutions - 3.6. + $str =~ s/\x{00C6}/AE/g; + $str =~ s/\x{00DE}/TH/g; + $str =~ s/\x{0152}/OE/g; + $str =~ tr/\x{0110}\x{00D0}\x{00D8}\x{0141}\x{2113}\x{02BB}\x{02BC}][/DDOLl/d; + + # transformations based on Unicode category codes + $str =~ s/[\p{Cc}\p{Cf}\p{Co}\p{Cs}\p{Lm}\p{Mc}\p{Me}\p{Mn}]//g; + + if ($sf && $sf =~ /^a/o) { + my $commapos = index($str, ','); + if ($commapos > -1) { + if ($commapos != length($str) - 1) { + $str =~ s/,/\x07/; # preserve first comma + } + } + } + + # since we've stripped out the control characters, we can now + # use a few as placeholders temporarily + $str =~ tr/+&@\x{266D}\x{266F}#/\x01\x02\x03\x04\x05\x06/; + $str =~ s/[\p{Pc}\p{Pd}\p{Pe}\p{Pf}\p{Pi}\p{Po}\p{Ps}\p{Sk}\p{Sm}\p{So}\p{Zl}\p{Zp}\p{Zs}]/ /g; + $str =~ tr/\x01\x02\x03\x04\x05\x06\x07/+&@\x{266D}\x{266F}#,/; + + # decimal digits + $str =~ 
tr/\x{0660}-\x{0669}\x{06F0}-\x{06F9}\x{07C0}-\x{07C9}\x{0966}-\x{096F}\x{09E6}-\x{09EF}\x{0A66}-\x{0A6F}\x{0AE6}-\x{0AEF}\x{0B66}-\x{0B6F}\x{0BE6}-\x{0BEF}\x{0C66}-\x{0C6F}\x{0CE6}-\x{0CEF}\x{0D66}-\x{0D6F}\x{0E50}-\x{0E59}\x{0ED0}-\x{0ED9}\x{0F20}-\x{0F29}\x{1040}-\x{1049}\x{1090}-\x{1099}\x{17E0}-\x{17E9}\x{1810}-\x{1819}\x{1946}-\x{194F}\x{19D0}-\x{19D9}\x{1A80}-\x{1A89}\x{1A90}-\x{1A99}\x{1B50}-\x{1B59}\x{1BB0}-\x{1BB9}\x{1C40}-\x{1C49}\x{1C50}-\x{1C59}\x{A620}-\x{A629}\x{A8D0}-\x{A8D9}\x{A900}-\x{A909}\x{A9D0}-\x{A9D9}\x{AA50}-\x{AA59}\x{ABF0}-\x{ABF9}\x{FF10}-\x{FF19}/0-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-90-9/; + + # intentionally skipping step 8 of the NACO algorithm; if the string + # gets normalized away, that's fine. + + # leading and trailing spaces + $str =~ s/\s+/ /g; + $str =~ s/^\s+//; + $str =~ s/\s+$//g; + + return lc $str; +$func$ LANGUAGE 'plperlu' STRICT IMMUTABLE; + +-- DEPS 1030 +-- KCLS already has, reapply OK. 
+ +CREATE OR REPLACE FUNCTION evergreen.oils_xslt_process(TEXT, TEXT) RETURNS TEXT AS $func$ + use strict; + + use XML::LibXSLT; + use XML::LibXML; + + my $doc = shift; + my $xslt = shift; + + # The following approach uses the older XML::LibXML 1.69 / XML::LibXSLT 1.68 + # methods of parsing XML documents and stylesheets, in the hopes of broader + # compatibility with distributions + my $parser = $_SHARED{'_xslt_process'}{parsers}{xml} || XML::LibXML->new(); + + # Cache the XML parser, if we do not already have one + $_SHARED{'_xslt_process'}{parsers}{xml} = $parser + unless ($_SHARED{'_xslt_process'}{parsers}{xml}); + + my $xslt_parser = $_SHARED{'_xslt_process'}{parsers}{xslt} || XML::LibXSLT->new(); + + # Cache the XSLT processor, if we do not already have one + $_SHARED{'_xslt_process'}{parsers}{xslt} = $xslt_parser + unless ($_SHARED{'_xslt_process'}{parsers}{xslt}); + + my $stylesheet = $_SHARED{'_xslt_process'}{stylesheets}{$xslt} || + $xslt_parser->parse_stylesheet( $parser->parse_string($xslt) ); + + $_SHARED{'_xslt_process'}{stylesheets}{$xslt} = $stylesheet + unless ($_SHARED{'_xslt_process'}{stylesheets}{$xslt}); + + return $stylesheet->output_as_chars( + $stylesheet->transform( + $parser->parse_string($doc) + ) + ); + +$func$ LANGUAGE 'plperlu' STRICT IMMUTABLE; + +-- DEPS 1031 + +DO $INSERT$ +BEGIN + IF evergreen.insert_on_deploy() THEN ----------------------------------- + +INSERT INTO config.org_unit_setting_type + (name, label, description, grp, datatype) +VALUES ( + 'ebook_api.oneclickdigital.base_uri', + oils_i18n_gettext( + 'ebook_api.oneclickdigital.base_uri', + 'OneClickdigital Base URI', + 'coust', + 'label' + ), + oils_i18n_gettext( + 'ebook_api.oneclickdigital.base_uri', + 'Base URI for OneClickdigital API (defaults to https://api.oneclickdigital.com/v1). 
Using HTTPS here is strongly encouraged.',
+        'coust',
+        'description'
+    ),
+    'ebook_api',
+    'string'
+);
+
+    END IF;
+END $INSERT$; --------------------------------------------------------------
+
+-- END 2.11 -> 2.12 UPGRADE
+
+-- DEPS 2.12.1
+
+-- DEPS 1033
+
+-- correctly turn off browsing for subject|geographic and
+-- subject|temporal now that the *_browse versions exist. This is
+-- a no-op in a database that was started at version 2.12.0.
+UPDATE config.metabib_field
+SET browse_field = FALSE
+WHERE field_class = 'subject' AND name = 'geographic'
+AND browse_field
+AND id = 11;
+UPDATE config.metabib_field
+SET browse_field = FALSE
+WHERE field_class = 'subject' AND name = 'temporal'
+AND browse_field
+AND id = 13;
+
+select b.tag, idx.name
+from authority.control_set_bib_field b
+join authority.control_set_bib_field_metabib_field_map map on (b.id = map.bib_field)
+join config.metabib_field idx on (map.metabib_field = idx.id)
+order by b.tag;
+
+-- and fix bib field mapping if necessary
+UPDATE authority.control_set_bib_field_metabib_field_map map
+SET metabib_field = cmf.id
+FROM config.metabib_field cmf
+WHERE cmf.field_class = 'subject' AND cmf.name= 'temporal_browse'
+AND map.bib_field IN (
+    SELECT b.id
+    FROM authority.control_set_bib_field b
+    JOIN authority.control_set_authority_field a
+        ON (b.authority_field = a.id)
+    AND a.tag = '148'
+)
+AND map.metabib_field IN (
+    SELECT id
+    FROM config.metabib_field
+    WHERE field_class = 'subject' AND name = 'geographic_browse'
+);
+UPDATE authority.control_set_bib_field_metabib_field_map map
+SET metabib_field = cmf.id
+FROM config.metabib_field cmf
+WHERE cmf.field_class = 'subject' AND cmf.name= 'geographic_browse'
+AND map.bib_field IN (
+    SELECT b.id
+    FROM authority.control_set_bib_field b
+    JOIN authority.control_set_authority_field a
+        ON (b.authority_field = a.id)
+    AND a.tag = '151'
+)
+AND map.metabib_field IN (
+    SELECT id
+    FROM config.metabib_field
+    WHERE field_class = 'subject' AND name =
'temporal_browse' +); + +-- DEPS 2.12.2 + +-- Evergreen DB patch XXXX.data.fix_long_overdue_perm.sql +-- +-- Update permission 549 to have a "code" value that matches what +-- the Perl code references +-- + +-- check whether patch can be applied +-- DEPS 1037 + +-- For some time now, the database seed data / upgrade scripts have created +-- a permission with id 549 and code COPY_STATUS_LONGOVERDUE.override, while +-- the Perl code references a permission with code +-- COPY_STATUS_LONG_OVERDUE.override +-- +-- Below, we attempt to handle at least three possible database states: +-- +-- 1) no corrective action has been taken, permission exists with id 549 and +-- code COPY_STATUS_LONGOVERDUE.override +-- +-- 2) permission with id 549 has already been updated to have code +-- COPY_STATUS_LONG_OVERDUE.override +-- +-- 3) new permission with unknown id and code COPY_STATUS_LONG_OVERDUE.override +-- has been added, and potentially assigned to users/groups +-- +-- In the case of 3, users and groups may have been assigned both perm id 549 +-- and the local permission of unknown id. +-- +-- The desired end result is that we should have a permission.perm_list +-- entry with id 549 and code COPY_STATUS_LONG_OVERDUE.override, +-- any locally-created permission with that same code but a different id +-- is deleted, and any users or groups that had been granted that locally-created +-- permission (by id) have been granted permission id 549 if not already granted. +-- +-- If for some reason the permission at id 549 has an unexpected value for "code", +-- the end result of this upgrade script should be a no-op. 
+ +-- grant permission 549 to any group that +-- has a potentially locally-added perm +-- with code COPY_STATUS_LONG_OVERDUE.override + +SELECT 'More perm cleanup: ', CLOCK_TIMESTAMP(); + +WITH new_grp_perms AS ( +SELECT grp, 549 AS perm, depth, grantable +FROM permission.grp_perm_map pgpm +JOIN permission.perm_list ppl ON ppl.id = pgpm.perm +WHERE ppl.code = 'COPY_STATUS_LONG_OVERDUE.override' +-- short circuit if perm id 549 exists and doesn't have the expected code +AND EXISTS (SELECT 1 FROM permission.perm_list ppl WHERE ppl.id = 549 and ppl.code = 'COPY_STATUS_LONGOVERDUE.override') +-- don't try to assign perm 549 if already assigned +AND NOT EXISTS (SELECT 1 FROM permission.grp_perm_map pgpm2 WHERE pgpm2.grp = pgpm.grp AND pgpm2.perm = 549) +) +INSERT INTO permission.grp_perm_map +(grp, perm, depth, grantable) +SELECT grp, perm, depth, grantable +FROM new_grp_perms; + +-- grant permission 549 to any user that +-- has a potentially locally-added perm +-- with code COPY_STATUS_LONG_OVERDUE.override +WITH new_usr_perms AS ( +SELECT usr, 549 AS perm, depth, grantable +FROM permission.usr_perm_map pupm +JOIN permission.perm_list ppl ON ppl.id = pupm.perm +WHERE ppl.code = 'COPY_STATUS_LONG_OVERDUE.override' +-- short circuit if perm id 549 exists and doesn't have the expected code +AND EXISTS (SELECT 1 FROM permission.perm_list ppl WHERE ppl.id = 549 and ppl.code = 'COPY_STATUS_LONGOVERDUE.override') +-- don't try to assign perm 549 if already assigned +AND NOT EXISTS (SELECT 1 FROM permission.usr_perm_map pupm2 WHERE pupm2.usr = pupm.usr AND pupm2.perm = 549) +) +INSERT INTO permission.usr_perm_map +(usr, perm, depth, grantable) +SELECT usr, perm, depth, grantable +FROM new_usr_perms; + +-- delete any group assignments of the locally-added perm +DELETE FROM permission.grp_perm_map +WHERE perm = (SELECT id FROM permission.perm_list WHERE code = 'COPY_STATUS_LONG_OVERDUE.override' AND id <> 549) +-- short circuit if perm id 549 exists and doesn't have the expected 
code +AND EXISTS (SELECT 1 FROM permission.perm_list ppl WHERE ppl.id = 549 and ppl.code = 'COPY_STATUS_LONGOVERDUE.override'); + +-- delete any user assignments of the locally-added perm +DELETE FROM permission.usr_perm_map +WHERE perm = (SELECT id FROM permission.perm_list WHERE code = 'COPY_STATUS_LONG_OVERDUE.override' AND id <> 549) +-- short circuit if perm id 549 exists and doesn't have the expected code +AND EXISTS (SELECT 1 FROM permission.perm_list ppl WHERE ppl.id = 549 and ppl.code = 'COPY_STATUS_LONGOVERDUE.override'); + +-- delete the locally-added perm, if any +DELETE FROM permission.perm_list +WHERE code = 'COPY_STATUS_LONG_OVERDUE.override' +AND id <> 549 +-- short circuit if perm id 549 exists and doesn't have the expected code +AND EXISTS (SELECT 1 FROM permission.perm_list ppl WHERE ppl.id = 549 and ppl.code = 'COPY_STATUS_LONGOVERDUE.override'); + +-- update perm id 549 to the correct code, if not already +UPDATE permission.perm_list +SET code = 'COPY_STATUS_LONG_OVERDUE.override' +WHERE id = 549 +AND code = 'COPY_STATUS_LONGOVERDUE.override'; + +-- DEPS 1038 + +-- This function was replaced back in 2011, but never made it +-- into an upgrade script. Here it is, nearly 6 years later. 
+ +CREATE OR REPLACE FUNCTION vandelay.auto_overlay_bib_record ( import_id BIGINT, merge_profile_id INT ) RETURNS BOOL AS $$ +DECLARE + eg_id BIGINT; + match_count INT; +BEGIN + + PERFORM * FROM vandelay.queued_bib_record WHERE import_time IS NOT NULL AND id = import_id; + + IF FOUND THEN + -- RAISE NOTICE 'already imported, cannot auto-overlay' + RETURN FALSE; + END IF; + + SELECT COUNT(*) INTO match_count FROM vandelay.bib_match WHERE queued_record = import_id; + + IF match_count <> 1 THEN + -- RAISE NOTICE 'not an exact match'; + RETURN FALSE; + END IF; + + -- Check that the one match is on the first 901c + SELECT m.eg_record INTO eg_id + FROM vandelay.queued_bib_record q + JOIN vandelay.bib_match m ON (m.queued_record = q.id) + WHERE q.id = import_id + AND m.eg_record = oils_xpath_string('//*[@tag="901"]/*[@code="c"][1]',marc)::BIGINT; + + IF NOT FOUND THEN + -- RAISE NOTICE 'not a 901c match'; + RETURN FALSE; + END IF; + + RETURN vandelay.overlay_bib_record( import_id, eg_id, merge_profile_id ); +END; +$$ LANGUAGE PLPGSQL; + +-- DEPS 1039 + +UPDATE config.org_unit_setting_type +SET datatype = 'link', fm_class = 'vms' +WHERE name = 'vandelay.default_match_set' +AND datatype = 'string' +AND fm_class IS NULL; + +\echo Existing vandelay.default_match_set that do not +\echo correspond to match sets +SELECT aou.shortname, aous.value +FROM actor.org_unit_setting aous +JOIN actor.org_unit aou ON (aou.id = aous.org_unit) +WHERE aous.name = 'vandelay.default_match_set' +AND ( + value !~ '^"[0-9]+"$' + OR + oils_json_to_text(aous.value)::INT NOT IN ( + SELECT id FROM vandelay.match_set + ) +); + +\echo And now deleting the bad values, as otherwise they +\echo will break the Library Settings Editor. 
+DELETE +FROM actor.org_unit_setting aous +WHERE aous.name = 'vandelay.default_match_set' +AND ( + value !~ '^"[0-9]+"$' + OR + oils_json_to_text(aous.value)::INT NOT IN ( + SELECT id FROM vandelay.match_set + ) +); + +-- DEPS 1040 + +SELECT 'EDI remote file index: ', CLOCK_TIMESTAMP(); + +CREATE INDEX edi_message_remote_file_idx ON acq.edi_message (evergreen.lowercase(remote_file)); + +-- DEPS 2.12.3 +-- DEPS 2.12.4 +-- DEPS 2.12.5 +-- DEPS 2.12.6 +-- DEPS 1055 + +CREATE OR REPLACE FUNCTION actor.usr_merge( src_usr INT, dest_usr INT, del_addrs BOOLEAN, del_cards BOOLEAN, deactivate_cards BOOLEAN ) RETURNS VOID AS $$ +DECLARE + suffix TEXT; + bucket_row RECORD; + picklist_row RECORD; + queue_row RECORD; + folder_row RECORD; +BEGIN + + -- do some initial cleanup + UPDATE actor.usr SET card = NULL WHERE id = src_usr; + UPDATE actor.usr SET mailing_address = NULL WHERE id = src_usr; + UPDATE actor.usr SET billing_address = NULL WHERE id = src_usr; + + -- actor.* + IF del_cards THEN + DELETE FROM actor.card where usr = src_usr; + ELSE + IF deactivate_cards THEN + UPDATE actor.card SET active = 'f' WHERE usr = src_usr; + END IF; + UPDATE actor.card SET usr = dest_usr WHERE usr = src_usr; + END IF; + + + IF del_addrs THEN + DELETE FROM actor.usr_address WHERE usr = src_usr; + ELSE + UPDATE actor.usr_address SET usr = dest_usr WHERE usr = src_usr; + END IF; + + UPDATE actor.usr_note SET usr = dest_usr WHERE usr = src_usr; + -- dupes are technically OK in actor.usr_standing_penalty, should manually delete them... 
+    UPDATE actor.usr_standing_penalty SET usr = dest_usr WHERE usr = src_usr;
+    PERFORM actor.usr_merge_rows('actor.usr_org_unit_opt_in', 'usr', src_usr, dest_usr);
+    PERFORM actor.usr_merge_rows('actor.usr_setting', 'usr', src_usr, dest_usr);
+
+    -- permission.*
+    PERFORM actor.usr_merge_rows('permission.usr_perm_map', 'usr', src_usr, dest_usr);
+    PERFORM actor.usr_merge_rows('permission.usr_object_perm_map', 'usr', src_usr, dest_usr);
+    PERFORM actor.usr_merge_rows('permission.usr_grp_map', 'usr', src_usr, dest_usr);
+    PERFORM actor.usr_merge_rows('permission.usr_work_ou_map', 'usr', src_usr, dest_usr);
+
+
+    -- container.*
+
+    -- For each *_bucket table: transfer every bucket belonging to src_usr
+    -- into the custody of dest_usr.
+    --
+    -- In order to avoid colliding with an existing bucket owned by
+    -- the destination user, append the source user's id (in parentheses)
+    -- to the name. If you still get a collision, add successive
+    -- spaces to the name and keep trying until you succeed.
+ -- + FOR bucket_row in + SELECT id, name + FROM container.biblio_record_entry_bucket + WHERE owner = src_usr + LOOP + suffix := ' (' || src_usr || ')'; + LOOP + BEGIN + UPDATE container.biblio_record_entry_bucket + SET owner = dest_usr, name = name || suffix + WHERE id = bucket_row.id; + EXCEPTION WHEN unique_violation THEN + suffix := suffix || ' '; + CONTINUE; + END; + EXIT; + END LOOP; + END LOOP; + + FOR bucket_row in + SELECT id, name + FROM container.call_number_bucket + WHERE owner = src_usr + LOOP + suffix := ' (' || src_usr || ')'; + LOOP + BEGIN + UPDATE container.call_number_bucket + SET owner = dest_usr, name = name || suffix + WHERE id = bucket_row.id; + EXCEPTION WHEN unique_violation THEN + suffix := suffix || ' '; + CONTINUE; + END; + EXIT; + END LOOP; + END LOOP; + + FOR bucket_row in + SELECT id, name + FROM container.copy_bucket + WHERE owner = src_usr + LOOP + suffix := ' (' || src_usr || ')'; + LOOP + BEGIN + UPDATE container.copy_bucket + SET owner = dest_usr, name = name || suffix + WHERE id = bucket_row.id; + EXCEPTION WHEN unique_violation THEN + suffix := suffix || ' '; + CONTINUE; + END; + EXIT; + END LOOP; + END LOOP; + + FOR bucket_row in + SELECT id, name + FROM container.user_bucket + WHERE owner = src_usr + LOOP + suffix := ' (' || src_usr || ')'; + LOOP + BEGIN + UPDATE container.user_bucket + SET owner = dest_usr, name = name || suffix + WHERE id = bucket_row.id; + EXCEPTION WHEN unique_violation THEN + suffix := suffix || ' '; + CONTINUE; + END; + EXIT; + END LOOP; + END LOOP; + + UPDATE container.user_bucket_item SET target_user = dest_usr WHERE target_user = src_usr; + + -- vandelay.* + -- transfer queues the same way we transfer buckets (see above) + FOR queue_row in + SELECT id, name + FROM vandelay.queue + WHERE owner = src_usr + LOOP + suffix := ' (' || src_usr || ')'; + LOOP + BEGIN + UPDATE vandelay.queue + SET owner = dest_usr, name = name || suffix + WHERE id = queue_row.id; + EXCEPTION WHEN unique_violation THEN + 
suffix := suffix || ' '; + CONTINUE; + END; + EXIT; + END LOOP; + END LOOP; + + -- money.* + PERFORM actor.usr_merge_rows('money.collections_tracker', 'usr', src_usr, dest_usr); + PERFORM actor.usr_merge_rows('money.collections_tracker', 'collector', src_usr, dest_usr); + UPDATE money.billable_xact SET usr = dest_usr WHERE usr = src_usr; + UPDATE money.billing SET voider = dest_usr WHERE voider = src_usr; + UPDATE money.bnm_payment SET accepting_usr = dest_usr WHERE accepting_usr = src_usr; + + -- action.* + UPDATE action.circulation SET usr = dest_usr WHERE usr = src_usr; + UPDATE action.circulation SET circ_staff = dest_usr WHERE circ_staff = src_usr; + UPDATE action.circulation SET checkin_staff = dest_usr WHERE checkin_staff = src_usr; + UPDATE action.usr_circ_history SET usr = dest_usr WHERE usr = src_usr; + + UPDATE action.hold_request SET usr = dest_usr WHERE usr = src_usr; + UPDATE action.hold_request SET fulfillment_staff = dest_usr WHERE fulfillment_staff = src_usr; + UPDATE action.hold_request SET requestor = dest_usr WHERE requestor = src_usr; + UPDATE action.hold_notification SET notify_staff = dest_usr WHERE notify_staff = src_usr; + + UPDATE action.in_house_use SET staff = dest_usr WHERE staff = src_usr; + UPDATE action.non_cataloged_circulation SET staff = dest_usr WHERE staff = src_usr; + UPDATE action.non_cataloged_circulation SET patron = dest_usr WHERE patron = src_usr; + UPDATE action.non_cat_in_house_use SET staff = dest_usr WHERE staff = src_usr; + UPDATE action.survey_response SET usr = dest_usr WHERE usr = src_usr; + + -- acq.* + UPDATE acq.fund_allocation SET allocator = dest_usr WHERE allocator = src_usr; + UPDATE acq.fund_transfer SET transfer_user = dest_usr WHERE transfer_user = src_usr; + + -- transfer picklists the same way we transfer buckets (see above) + FOR picklist_row in + SELECT id, name + FROM acq.picklist + WHERE owner = src_usr + LOOP + suffix := ' (' || src_usr || ')'; + LOOP + BEGIN + UPDATE acq.picklist + SET owner = 
dest_usr, name = name || suffix + WHERE id = picklist_row.id; + EXCEPTION WHEN unique_violation THEN + suffix := suffix || ' '; + CONTINUE; + END; + EXIT; + END LOOP; + END LOOP; + + UPDATE acq.purchase_order SET owner = dest_usr WHERE owner = src_usr; + UPDATE acq.po_note SET creator = dest_usr WHERE creator = src_usr; + UPDATE acq.po_note SET editor = dest_usr WHERE editor = src_usr; + UPDATE acq.provider_note SET creator = dest_usr WHERE creator = src_usr; + UPDATE acq.provider_note SET editor = dest_usr WHERE editor = src_usr; + UPDATE acq.lineitem_note SET creator = dest_usr WHERE creator = src_usr; + UPDATE acq.lineitem_note SET editor = dest_usr WHERE editor = src_usr; + UPDATE acq.lineitem_usr_attr_definition SET usr = dest_usr WHERE usr = src_usr; + + -- asset.* + UPDATE asset.copy SET creator = dest_usr WHERE creator = src_usr; + UPDATE asset.copy SET editor = dest_usr WHERE editor = src_usr; + UPDATE asset.copy_note SET creator = dest_usr WHERE creator = src_usr; + UPDATE asset.call_number SET creator = dest_usr WHERE creator = src_usr; + UPDATE asset.call_number SET editor = dest_usr WHERE editor = src_usr; + UPDATE asset.call_number_note SET creator = dest_usr WHERE creator = src_usr; + + -- serial.* + UPDATE serial.record_entry SET creator = dest_usr WHERE creator = src_usr; + UPDATE serial.record_entry SET editor = dest_usr WHERE editor = src_usr; + + -- reporter.* + -- It's not uncommon to define the reporter schema in a replica + -- DB only, so don't assume these tables exist in the write DB. 
+    BEGIN
+        UPDATE reporter.template SET owner = dest_usr WHERE owner = src_usr;
+    EXCEPTION WHEN undefined_table THEN
+        -- do nothing
+    END;
+    BEGIN
+        UPDATE reporter.report SET owner = dest_usr WHERE owner = src_usr;
+    EXCEPTION WHEN undefined_table THEN
+        -- do nothing
+    END;
+    BEGIN
+        UPDATE reporter.schedule SET runner = dest_usr WHERE runner = src_usr;
+    EXCEPTION WHEN undefined_table THEN
+        -- do nothing
+    END;
+    BEGIN
+        -- transfer folders the same way we transfer buckets (see above):
+        -- retry with a widening " (src_usr)" suffix until the rename no longer
+        -- collides with an existing folder name owned by dest_usr
+        FOR folder_row in
+            SELECT id, name
+            FROM reporter.template_folder
+            WHERE owner = src_usr
+        LOOP
+            suffix := ' (' || src_usr || ')';
+            LOOP
+                BEGIN
+                    UPDATE reporter.template_folder
+                    SET owner = dest_usr, name = name || suffix
+                    WHERE id = folder_row.id;
+                EXCEPTION WHEN unique_violation THEN
+                    suffix := suffix || ' ';
+                    CONTINUE;
+                END;
+                EXIT;
+            END LOOP;
+        END LOOP;
+    EXCEPTION WHEN undefined_table THEN
+        -- do nothing
+    END;
+    BEGIN
+        -- transfer folders the same way we transfer buckets (see above)
+        FOR folder_row in
+            SELECT id, name
+            FROM reporter.report_folder
+            WHERE owner = src_usr
+        LOOP
+            suffix := ' (' || src_usr || ')';
+            LOOP
+                BEGIN
+                    UPDATE reporter.report_folder
+                    SET owner = dest_usr, name = name || suffix
+                    WHERE id = folder_row.id;
+                EXCEPTION WHEN unique_violation THEN
+                    suffix := suffix || ' ';
+                    CONTINUE;
+                END;
+                EXIT;
+            END LOOP;
+        END LOOP;
+    EXCEPTION WHEN undefined_table THEN
+        -- do nothing
+    END;
+    BEGIN
+        -- transfer folders the same way we transfer buckets (see above)
+        FOR folder_row in
+            SELECT id, name
+            FROM reporter.output_folder
+            WHERE owner = src_usr
+        LOOP
+            suffix := ' (' || src_usr || ')';
+            LOOP
+                BEGIN
+                    UPDATE reporter.output_folder
+                    SET owner = dest_usr, name = name || suffix
+                    WHERE id = folder_row.id;
+                EXCEPTION WHEN unique_violation THEN
+                    suffix := suffix || ' ';
+                    CONTINUE;
+                END;
+                EXIT;
+            END LOOP;
+        END LOOP;
+    EXCEPTION WHEN undefined_table THEN
+        -- do nothing
+    END;
+
+    -- Finally, delete the source user
+    DELETE FROM actor.usr WHERE id = src_usr;
+
+END;
+$$ LANGUAGE plpgsql;
+
+-- DEPS 2.12.7
+
+-- The following billing types would not have been automatically added
+-- in upgrade scripts between versions 1.2 and 1.4 (early 2009). We
+-- add them here. It's okay if they fail, so this should probably be
+-- run outside a transaction if added to the version-upgrade scripts.
+
+DO $INSERT$
+BEGIN
+    IF evergreen.insert_on_deploy() THEN -----------------------------------
+
+INSERT INTO config.billing_type (id, name, owner)
+    SELECT 7, 'Damaged Item', 1
+    WHERE NOT EXISTS (SELECT 1 FROM config.billing_type WHERE name = 'Damaged Item');
+
+INSERT INTO config.billing_type (id, name, owner)
+    SELECT 8, 'Damaged Item Processing Fee', 1
+    WHERE NOT EXISTS (SELECT 1 FROM config.billing_type WHERE name = 'Damaged Item Processing Fee');
+
+INSERT INTO config.billing_type (id, name, owner)
+    SELECT 9, 'Notification Fee', 1
+    WHERE NOT EXISTS (SELECT 1 FROM config.billing_type WHERE name = 'Notification Fee');
+
+    END IF;
+END $INSERT$; --------------------------------------------------------------
+
+-- DEPS 2.12.8
+-- DEPS 1079 (asset.merge_record_assets: move call numbers, holds, serials,
+-- acq data, booking resources, parts and peer-bib maps from source_record
+-- onto target_record, then delete the source bib; returns count of moves)
+
+CREATE OR REPLACE FUNCTION asset.merge_record_assets( target_record BIGINT, source_record BIGINT ) RETURNS INT AS $func$
+DECLARE
+    moved_objects INT := 0;
+    source_cn asset.call_number%ROWTYPE;
+    target_cn asset.call_number%ROWTYPE;
+    metarec metabib.metarecord%ROWTYPE;
+    hold action.hold_request%ROWTYPE;
+    ser_rec serial.record_entry%ROWTYPE;
+    ser_sub serial.subscription%ROWTYPE;
+    acq_lineitem acq.lineitem%ROWTYPE;
+    acq_request acq.user_request%ROWTYPE;
+    booking booking.resource_type%ROWTYPE;
+    source_part biblio.monograph_part%ROWTYPE;
+    target_part biblio.monograph_part%ROWTYPE;
+    multi_home biblio.peer_bib_copy_map%ROWTYPE;
+    uri_count INT := 0;
+    counter INT := 0;
+    uri_datafield TEXT;
+    uri_text TEXT := '';
+BEGIN
+
+    -- move any 856 entries on records that have at least one MARC-mapped URI entry
+    SELECT INTO uri_count COUNT(*)
+    FROM asset.uri_call_number_map m
+        JOIN asset.call_number cn ON (m.call_number = cn.id)
+    WHERE cn.record = source_record;
+
+    IF uri_count > 0 THEN
+
+        -- This returns more nodes than you might expect:
+        -- 7 instead of 1 for an 856 with $u $y $9
+        SELECT COUNT(*) INTO counter
+        FROM oils_xpath_table(
+            'id',
+            'marc',
+            'biblio.record_entry',
+            '//*[@tag="856"]',
+            'id=' || source_record
+        ) as t(i int,c text);
+
+        FOR i IN 1 .. counter LOOP
+            SELECT '' ||
+                STRING_AGG(
+                    '' ||
+                    regexp_replace(
+                        regexp_replace(
+                            regexp_replace(data,'&','&','g'),
+                            '>', '>', 'g'
+                        ),
+                        '<', '<', 'g'
+                    ) || '', ''
+                ) || '' INTO uri_datafield
+            FROM oils_xpath_table(
+                'id',
+                'marc',
+                'biblio.record_entry',
+                '//*[@tag="856"][position()=' || i || ']/@ind1|' ||
+                '//*[@tag="856"][position()=' || i || ']/@ind2|' ||
+                '//*[@tag="856"][position()=' || i || ']/*/@code|' ||
+                '//*[@tag="856"][position()=' || i || ']/*[@code]',
+                'id=' || source_record
+            ) as t(id int,ind1 text, ind2 text,subfield text,data text);
+
+            -- As most of the results will be NULL, protect against NULLifying
+            -- the valid content that we do generate.  NOTE(review): the XML
+            -- datafield/subfield tag literals in this 856-copy logic (and the
+            -- '(]*record>)' pattern below) look entity-stripped compared to
+            -- the stock upgrade-1079 script -- diff against upstream before deploy.
+            uri_text := uri_text || COALESCE(uri_datafield, '');
+        END LOOP;
+
+        IF uri_text <> '' THEN
+            UPDATE biblio.record_entry
+            SET marc = regexp_replace(marc,'(]*record>)', uri_text || E'\\1')
+            WHERE id = target_record;
+        END IF;
+
+    END IF;
+
+    -- Find and move metarecords to the target record
+    SELECT INTO metarec *
+    FROM metabib.metarecord
+    WHERE master_record = source_record;
+
+    IF FOUND THEN
+        UPDATE metabib.metarecord
+        SET master_record = target_record,
+            mods = NULL
+        WHERE id = metarec.id;
+
+        moved_objects := moved_objects + 1;
+    END IF;
+
+    -- Find call numbers attached to the source ...
+    FOR source_cn IN SELECT * FROM asset.call_number WHERE record = source_record LOOP
+
+        SELECT INTO target_cn *
+        FROM asset.call_number
+        WHERE label = source_cn.label
+            AND prefix = source_cn.prefix
+            AND suffix = source_cn.suffix
+            AND owning_lib = source_cn.owning_lib
+            AND record = target_record
+            AND NOT deleted;
+
+        -- ... and if there's a conflicting one on the target ...
+        IF FOUND THEN
+
+            -- ... move the copies to that, and ...
+            UPDATE asset.copy
+            SET call_number = target_cn.id
+            WHERE call_number = source_cn.id;
+
+            -- ... move V holds to the move-target call number
+            FOR hold IN SELECT * FROM action.hold_request WHERE target = source_cn.id AND hold_type = 'V' LOOP
+
+                UPDATE action.hold_request
+                SET target = target_cn.id
+                WHERE id = hold.id;
+
+                moved_objects := moved_objects + 1;
+            END LOOP;
+
+            UPDATE asset.call_number SET deleted = TRUE WHERE id = source_cn.id;
+
+        -- ... if not ...
+        ELSE
+            -- ... just move the call number to the target record
+            UPDATE asset.call_number
+            SET record = target_record
+            WHERE id = source_cn.id;
+        END IF;
+
+        moved_objects := moved_objects + 1;
+    END LOOP;
+
+    -- Find T holds targeting the source record ...
+    FOR hold IN SELECT * FROM action.hold_request WHERE target = source_record AND hold_type = 'T' LOOP
+
+        -- ... and move them to the target record
+        UPDATE action.hold_request
+        SET target = target_record
+        WHERE id = hold.id;
+
+        moved_objects := moved_objects + 1;
+    END LOOP;
+
+    -- Find serial records targeting the source record ...
+    FOR ser_rec IN SELECT * FROM serial.record_entry WHERE record = source_record LOOP
+        -- ... and move them to the target record
+        UPDATE serial.record_entry
+        SET record = target_record
+        WHERE id = ser_rec.id;
+
+        moved_objects := moved_objects + 1;
+    END LOOP;
+
+    -- Find serial subscriptions targeting the source record ...
+    FOR ser_sub IN SELECT * FROM serial.subscription WHERE record_entry = source_record LOOP
+        -- ... and move them to the target record
+        UPDATE serial.subscription
+        SET record_entry = target_record
+        WHERE id = ser_sub.id;
+
+        moved_objects := moved_objects + 1;
+    END LOOP;
+
+    -- Find booking resource types targeting the source record ...
+    FOR booking IN SELECT * FROM booking.resource_type WHERE record = source_record LOOP
+        -- ... and move them to the target record
+        UPDATE booking.resource_type
+        SET record = target_record
+        WHERE id = booking.id;
+
+        moved_objects := moved_objects + 1;
+    END LOOP;
+
+    -- Find acq lineitems targeting the source record ...
+    FOR acq_lineitem IN SELECT * FROM acq.lineitem WHERE eg_bib_id = source_record LOOP
+        -- ... and move them to the target record
+        UPDATE acq.lineitem
+        SET eg_bib_id = target_record
+        WHERE id = acq_lineitem.id;
+
+        moved_objects := moved_objects + 1;
+    END LOOP;
+
+    -- Find acq user purchase requests targeting the source record ...
+    FOR acq_request IN SELECT * FROM acq.user_request WHERE eg_bib = source_record LOOP
+        -- ... and move them to the target record
+        UPDATE acq.user_request
+        SET eg_bib = target_record
+        WHERE id = acq_request.id;
+
+        moved_objects := moved_objects + 1;
+    END LOOP;
+
+    -- Find parts attached to the source ...
+    FOR source_part IN SELECT * FROM biblio.monograph_part WHERE record = source_record LOOP
+
+        SELECT INTO target_part *
+        FROM biblio.monograph_part
+        WHERE label = source_part.label
+            AND record = target_record;
+
+        -- ... and if there's a conflicting one on the target ...
+        IF FOUND THEN
+
+            -- ... move the copy-part maps to that, and ...
+            UPDATE asset.copy_part_map
+            SET part = target_part.id
+            WHERE part = source_part.id;
+
+            -- ... move P holds to the move-target part
+            FOR hold IN SELECT * FROM action.hold_request WHERE target = source_part.id AND hold_type = 'P' LOOP
+
+                UPDATE action.hold_request
+                SET target = target_part.id
+                WHERE id = hold.id;
+
+                moved_objects := moved_objects + 1;
+            END LOOP;
+
+        -- ... if not ...
+        ELSE
+            -- ... just move the part to the target record
+            UPDATE biblio.monograph_part
+            SET record = target_record
+            WHERE id = source_part.id;
+        END IF;
+
+        moved_objects := moved_objects + 1;
+    END LOOP;
+
+    -- Find multi_home items attached to the source ...
+    FOR multi_home IN SELECT * FROM biblio.peer_bib_copy_map WHERE peer_record = source_record LOOP
+        -- ... and move them to the target record
+        UPDATE biblio.peer_bib_copy_map
+        SET peer_record = target_record
+        WHERE id = multi_home.id;
+
+        moved_objects := moved_objects + 1;
+    END LOOP;
+
+    -- And delete mappings where the item's home bib was merged with the peer bib
+    -- (target_copy here is the peer_bib_copy_map column, correlated per row)
+    DELETE FROM biblio.peer_bib_copy_map WHERE peer_record = (
+        SELECT (SELECT record FROM asset.call_number WHERE id = call_number)
+        FROM asset.copy WHERE id = target_copy
+    );
+
+    -- Finally, "delete" the source record
+    DELETE FROM biblio.record_entry WHERE id = source_record;
+
+    -- That's all, folks!
+    RETURN moved_objects;
+END;
+$func$ LANGUAGE plpgsql;
+
+-- Evergreen DB patch XXXX.schema.qualify_unaccent_refs.sql
+--
+-- LP#1671150 Fix unaccent() function call in evergreen.unaccent_and_squash()
+--
+
+
+-- DEPS 1083
+
+CREATE OR REPLACE FUNCTION evergreen.unaccent_and_squash ( IN arg text) RETURNS text
+    IMMUTABLE STRICT AS $$
+BEGIN
+    RETURN evergreen.lowercase(public.unaccent('public.unaccent', regexp_replace(arg, '[\s[:punct:]]','','g')));
+END;
+$$ LANGUAGE PLPGSQL;
+
+
+-- DEPS 2.12.9
+
+
+-- -------------
+-- TODO: Get all the way up to 2.12.8-2.12.9-upgrade-db.sql !!!!!
+-- -------------
+
+SELECT 'Adding upgrade log entries: ', CLOCK_TIMESTAMP();
+
+DO $INSERT$
+BEGIN
+    IF evergreen.insert_on_deploy() THEN
+
+-- Updates commented out are ones we have already back-ported.
+-- Left here for reference.
+
+INSERT INTO config.upgrade_log (version, applied_to) VALUES ('2.11.0', '2.10.9');
+PERFORM evergreen.upgrade_deps_block_check('0979', '2.11.0');
+PERFORM evergreen.upgrade_deps_block_check('0980', '2.11.0');
+PERFORM evergreen.upgrade_deps_block_check('0982', '2.11.0');
+PERFORM evergreen.upgrade_deps_block_check('0983', '2.11.0');
+PERFORM evergreen.upgrade_deps_block_check('0984', '2.11.0');
+PERFORM evergreen.upgrade_deps_block_check('0985', '2.11.0');
+PERFORM evergreen.upgrade_deps_block_check('0986', '2.11.0');
+PERFORM evergreen.upgrade_deps_block_check('0987', '2.11.0');
+PERFORM evergreen.upgrade_deps_block_check('0988', '2.11.0');
+PERFORM evergreen.upgrade_deps_block_check('0989', '2.11.0');
+PERFORM evergreen.upgrade_deps_block_check('0990', '2.11.0');
+PERFORM evergreen.upgrade_deps_block_check('0991', '2.11.0');
+PERFORM evergreen.upgrade_deps_block_check('0992', '2.11.0');
+PERFORM evergreen.upgrade_deps_block_check('0993', '2.11.0');
+-- PERFORM evergreen.upgrade_deps_block_check('0994', '2.11.0');
+PERFORM evergreen.upgrade_deps_block_check('0995', '2.11.0');
+PERFORM evergreen.upgrade_deps_block_check('0996', '2.11.0');
+PERFORM evergreen.upgrade_deps_block_check('0997', '2.11.0');
+-- PERFORM evergreen.upgrade_deps_block_check('0998', '2.11.0');
+PERFORM evergreen.upgrade_deps_block_check('0999', '2.11.0');
+PERFORM evergreen.upgrade_deps_block_check('1000', '2.11.0');
+INSERT INTO config.upgrade_log (version, applied_to) VALUES ('2.11.1', '2.11.0');
+-- PERFORM evergreen.upgrade_deps_block_check('1001', :eg_version); -- stompro/gmcharlt
+-- PERFORM evergreen.upgrade_deps_block_check('1002', :eg_version);
+-- PERFORM evergreen.upgrade_deps_block_check('1003', :eg_version);
+INSERT INTO config.upgrade_log (version, applied_to) VALUES ('2.11.2', '2.11.1');
+PERFORM evergreen.upgrade_deps_block_check('1004', '2.11.2');
+-- PERFORM evergreen.upgrade_deps_block_check('1005', '2.11.2');
+INSERT INTO config.upgrade_log (version, applied_to) VALUES ('2.11.3', '2.11.2');
+PERFORM evergreen.upgrade_deps_block_check('1012', '2.11.3');
+PERFORM evergreen.upgrade_deps_block_check('1013', '2.11.3');
+-- PERFORM evergreen.upgrade_deps_block_check('1018', :eg_version); -- NOTE(review): no config.upgrade_log row is inserted for '2.12.0' before the 2.12.0 dep checks below -- confirm that is intentional
+PERFORM evergreen.upgrade_deps_block_check('1006', '2.12.0');
+PERFORM evergreen.upgrade_deps_block_check('1007', '2.12.0');
+PERFORM evergreen.upgrade_deps_block_check('1008', '2.12.0');
+PERFORM evergreen.upgrade_deps_block_check('1009', '2.12.0');
+PERFORM evergreen.upgrade_deps_block_check('1010', '2.12.0');
+PERFORM evergreen.upgrade_deps_block_check('1011', '2.12.0');
+PERFORM evergreen.upgrade_deps_block_check('1014', '2.12.0');
+PERFORM evergreen.upgrade_deps_block_check('1015', '2.12.0');
+PERFORM evergreen.upgrade_deps_block_check('1016', '2.12.0');
+PERFORM evergreen.upgrade_deps_block_check('1017', '2.12.0');
+-- PERFORM evergreen.upgrade_deps_block_check('1019', '2.12.0');
+-- PERFORM evergreen.upgrade_deps_block_check('1020', '2.12.0');
+PERFORM evergreen.upgrade_deps_block_check('1021', '2.12.0');
+PERFORM evergreen.upgrade_deps_block_check('1022', '2.12.0');
+PERFORM evergreen.upgrade_deps_block_check('1023', '2.12.0');
+PERFORM evergreen.upgrade_deps_block_check('1024', '2.12.0');
+PERFORM evergreen.upgrade_deps_block_check('1025', '2.12.0');
+PERFORM evergreen.upgrade_deps_block_check('1026', '2.12.0');
+PERFORM evergreen.upgrade_deps_block_check('1027', '2.12.0');
+PERFORM evergreen.upgrade_deps_block_check('1029', '2.12.0'); -- csharp/gmcharlt
+-- PERFORM evergreen.upgrade_deps_block_check('1030', '2.12.0');
+PERFORM evergreen.upgrade_deps_block_check('1031', '2.12.0');
+INSERT INTO config.upgrade_log (version, applied_to) VALUES ('2.12.1', '2.12.0');
+PERFORM evergreen.upgrade_deps_block_check('1033', '2.12.1');
+INSERT INTO config.upgrade_log (version, applied_to) VALUES ('2.12.2', '2.12.1');
+PERFORM evergreen.upgrade_deps_block_check('1037', '2.12.2'); -- jeff
+PERFORM evergreen.upgrade_deps_block_check('1038', '2.12.2');
+PERFORM evergreen.upgrade_deps_block_check('1039', '2.12.2'); -- jeffdavis/gmcharlt
+PERFORM evergreen.upgrade_deps_block_check('1040', '2.12.2');
+INSERT INTO config.upgrade_log (version, applied_to) VALUES ('2.12.3', '2.12.2');
+INSERT INTO config.upgrade_log (version, applied_to) VALUES ('2.12.4', '2.12.3');
+INSERT INTO config.upgrade_log (version, applied_to) VALUES ('2.12.5', '2.12.4');
+INSERT INTO config.upgrade_log (version, applied_to) VALUES ('2.12.6', '2.12.5');
+PERFORM evergreen.upgrade_deps_block_check('1055', '2.12.6');
+INSERT INTO config.upgrade_log (version, applied_to) VALUES ('2.12.7', '2.12.6');
+PERFORM evergreen.upgrade_deps_block_check('1078', '2.12.6'); -- csharp/bshum/gmcharlt (NOTE(review): logged after the 2.12.7 entry but tagged '2.12.6' -- confirm intended)
+INSERT INTO config.upgrade_log (version, applied_to) VALUES ('2.12.8', '2.12.7');
+PERFORM evergreen.upgrade_deps_block_check('1079', '2.12.8'); -- rhamby/cesardv/gmcharlt
+PERFORM evergreen.upgrade_deps_block_check('1083', '2.12.8');
+INSERT INTO config.upgrade_log (version, applied_to) VALUES ('2.12.9', '2.12.8');
+
+    END IF;
+END $INSERT$;
+
+SELECT 'Rebuilding user name indexes: ', CLOCK_TIMESTAMP();
+
+-- Drop indexes if present, so that we can re-create them
+DROP INDEX IF EXISTS actor.actor_usr_first_given_name_unaccent_idx;
+DROP INDEX IF EXISTS actor.actor_usr_second_given_name_unaccent_idx;
+DROP INDEX IF EXISTS actor.actor_usr_family_name_unaccent_idx;
+DROP INDEX IF EXISTS actor.actor_usr_usrname_unaccent_idx;
+
+-- Create (or re-create) indexes -- they may be missing if pg_restore failed to create
+-- them due to the previously unqualified call to unaccent()
+CREATE INDEX actor_usr_first_given_name_unaccent_idx ON actor.usr (evergreen.unaccent_and_squash(first_given_name));
+CREATE INDEX actor_usr_second_given_name_unaccent_idx ON actor.usr (evergreen.unaccent_and_squash(second_given_name));
+CREATE INDEX actor_usr_family_name_unaccent_idx ON actor.usr (evergreen.unaccent_and_squash(family_name));
+CREATE INDEX actor_usr_usrname_unaccent_idx ON actor.usr (evergreen.unaccent_and_squash(usrname));
+
+-- SELECT 'Committing... ', CLOCK_TIMESTAMP();
+-- COMMIT;
+
+SELECT 'Rolling Back... ', CLOCK_TIMESTAMP();
+ROLLBACK; -- TESTING: NOTE(review) while this ROLLBACK is live the deploy makes no persistent changes (sqitch would still record success); swap for the COMMIT above before a real run
+
+SELECT 'Upgrade complete: ', CLOCK_TIMESTAMP();
+
+-- POST-TRANSACTION UPDATES
+
+-- Various bib record reingest updates left inline below for reference.
+-- It will be faster and simpler to simply reingest en masse than to
+-- manually apply each of these.
+
+/*
+UPDATE biblio.record_entry SET id = id WHERE source IS NOT NULL;
+\qecho Recalculating bib fingerprints; this may take a while
+ALTER TABLE biblio.record_entry DISABLE TRIGGER USER;
+UPDATE biblio.record_entry SET fingerprint = biblio.extract_fingerprint(marc) WHERE NOT deleted;
+ALTER TABLE biblio.record_entry ENABLE TRIGGER USER;
+
+\qecho Remapping metarecords
+SELECT metabib.remap_metarecord_for_bib(id, fingerprint)
+FROM biblio.record_entry
+WHERE NOT deleted;
+
+\qecho Running a browse and reingest of your bib records. It may take a while.
+\qecho You may cancel now without losing the effect of the rest of the
+\qecho upgrade script, and arrange the reingest later.
+\qecho .
+SELECT metabib.reingest_metabib_field_entries(id, FALSE, FALSE, TRUE)
+    FROM biblio.record_entry;
+
+\qecho Verify that bib subject fields appear to be mapped to
+\qecho the correct browse indexes
+SELECT b.id, b.tag, idx.field_class, idx.name
+FROM authority.control_set_bib_field b
+JOIN authority.control_set_bib_field_metabib_field_map map ON (b.id = map.bib_field)
+JOIN config.metabib_field idx ON (map.metabib_field = idx.id)
+WHERE tag ~ '^6'
+ORDER BY b.tag;
+
+\qecho This is a browse-only reingest of your bib records. It may take a while.
+\qecho You may cancel now without losing the effect of the rest of the
+\qecho upgrade script, and arrange the reingest later.
+\qecho .
+SELECT metabib.reingest_metabib_field_entries(id, TRUE, FALSE, TRUE)
+    FROM biblio.record_entry;
+*/
+
diff --git a/KCLS/sql/schema/revert/2.10-to-2.12-upgrade.sql b/KCLS/sql/schema/revert/2.10-to-2.12-upgrade.sql
new file mode 100644
index 0000000000..f23d2103d7
--- /dev/null
+++ b/KCLS/sql/schema/revert/2.10-to-2.12-upgrade.sql
@@ -0,0 +1,7 @@
+-- Revert kcls-evergreen:2.10-to-2.12-upgrade from pg
+
+BEGIN;
+
+-- XXX Add DDLs here.
+
+COMMIT;
diff --git a/KCLS/sql/schema/sqitch.plan b/KCLS/sql/schema/sqitch.plan
index 813d9a44f8..4077e91bd4 100644
--- a/KCLS/sql/schema/sqitch.plan
+++ b/KCLS/sql/schema/sqitch.plan
@@ -59,3 +59,4 @@ browse-mattype-filter [2.9-to-2.10-upgrade] 2017-11-28T20:39:59Z Bill Erickson,,
 auth-prop-partial-matches [2.9-to-2.10-upgrade-reingest] 2017-09-29T15:47:04Z Bill Erickson,,, # Authority propagation partial matches
 at-purge-interval [2.9-to-2.10-upgrade-reingest] 2017-10-23T15:31:00Z Bill Erickson,,, # A/T Purge schema changes
 at-purge-interval-data [at-purge-interval] 2017-10-23T15:31:23Z Bill Erickson,,, # A/T Purge data cleanup
+2.10-to-2.12-upgrade [at-purge-interval-data] 2018-01-29T21:03:38Z Bill Erickson,,, # KCLS 2.10 to 2.12 SQL Upgrade
diff --git a/KCLS/sql/schema/verify/2.10-to-2.12-upgrade.sql b/KCLS/sql/schema/verify/2.10-to-2.12-upgrade.sql
new file mode 100644
index 0000000000..b77183bc9e
--- /dev/null
+++ b/KCLS/sql/schema/verify/2.10-to-2.12-upgrade.sql
@@ -0,0 +1,7 @@
+-- Verify kcls-evergreen:2.10-to-2.12-upgrade on pg
+
+BEGIN;
+
+-- XXX Add verifications here.
+
+ROLLBACK;
-- 
2.11.0