hack to permit Evergreen bookbag URLs as search terms in Add Physical Item.
author	gfawcett <gfawcett@6d9bc8c9-1ec2-4278-b937-99fde70a366f>
Sat, 4 Apr 2009 17:54:14 +0000 (17:54 +0000)
committer	gfawcett <gfawcett@6d9bc8c9-1ec2-4278-b937-99fde70a366f>
Sat, 4 Apr 2009 17:54:14 +0000 (17:54 +0000)
You still have to pick the items one at a time: this is not a batch import yet.

git-svn-id: svn://svn.open-ils.org/ILS-Contrib/servres/trunk@267 6d9bc8c9-1ec2-4278-b937-99fde70a366f

conifer/custom/lib_integration.py
conifer/libsystems/evergreen/item_status.py
conifer/libsystems/z3950/marcxml.py

conifer/custom/lib_integration.py
index c976183..4c155c7 100644 (file)
@@ -75,7 +75,7 @@ def cat_search(query, start=1, limit=10):
     # this is a total hack for conifer. If the query is a Conifer
     # title-detail URL, then return just that one item.
     if query.startswith('http://dwarf'):
-        results = [marcxml_to_dictionary(I.url_to_marcxml(query))]
+        results = marcxml_to_dictionary(I.url_to_marcxml(query), multiples=True)
     else:
         cat_host, cat_db = ('dwarf.cs.uoguelph.ca:2210', 'conifer')
         results = yaz_search.search(cat_host, cat_db, query, start, limit)
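Note on the hunk above: with multiples=True, marcxml_to_dictionary returns a list of dictionaries (one per MARC record), so cat_search can hand back every record in a bookbag instead of wrapping a single record in a list. A rough usage sketch, assuming the dwarf.cs.uoguelph.ca test OPAC and the bookbag id 60 from the example URLs below (the field key shown is only illustrative):

    from conifer.custom.lib_integration import cat_search

    # A bookbag feed URL pasted as the "search term" now yields one
    # dictionary per record in the bag.
    results = cat_search('http://dwarf.cs.uoguelph.ca/opac/extras/feed/bookbag/html-full/60')
    for rec in results:
        print rec.get('245a')   # title proper, if the record carries one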
conifer/libsystems/evergreen/item_status.py
index 9f471a1..61d45c3 100644 (file)
@@ -15,12 +15,18 @@ def bib_id_to_marcxml(bib_id):
 def url_to_marcxml(url):
     # this is a hack. Given a opac Title Details url, return marcxml.
     if url.startswith('http://dwarf.cs.uoguelph.ca'):
-        m = re.match(r'.*r=(\d+).*', url)
-        item_id = m and m.group(1) or None
-        if item_id:
-            marc_url = ("http://dwarf.cs.uoguelph.ca/opac/extras"
-                        "/supercat/retrieve/marcxml/record/" + item_id)
-        xml = urllib2.urlopen(marc_url).read()
+        if 'feed/bookbag' in url:
+            #eg http://dwarf.cs.uoguelph.ca/opac/extras/feed/bookbag/marcxml-full/60
+            #http://dwarf.cs.uoguelph.ca/opac/extras/feed/bookbag/html-full/60
+            marc_url = re.sub(r'(.*/bookbag/)(.*?)(/.*)', r'\1marcxml-full\3', url)
+            xml = urllib2.urlopen(marc_url).read()
+        else:
+            m = re.match(r'.*r=(\d+).*', url)
+            item_id = m and m.group(1) or None
+            if item_id:
+                marc_url = ("http://dwarf.cs.uoguelph.ca/opac/extras"
+                            "/supercat/retrieve/marcxml/record/" + item_id)
+            xml = urllib2.urlopen(marc_url).read()
         return xml
 
 if __name__ == '__main__':
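The bookbag branch above only rewrites the feed flavour in the URL to marcxml-full before fetching, so any flavour of bookbag link (the html-full form, for instance) should resolve to the same MARCXML feed. A quick illustration of the substitution, using the example URL from the comments:

    import re

    url = 'http://dwarf.cs.uoguelph.ca/opac/extras/feed/bookbag/html-full/60'
    marc_url = re.sub(r'(.*/bookbag/)(.*?)(/.*)', r'\1marcxml-full\3', url)
    # marc_url is now '.../opac/extras/feed/bookbag/marcxml-full/60'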
conifer/libsystems/z3950/marcxml.py
index d1f2dff..1a208fb 100644 (file)
@@ -3,19 +3,29 @@ import marctools
 
 loc_to_unicode = marctools.locToUTF8().replace
 
-def marcxml_to_dictionary(rec):
+def marcxml_to_dictionary(rec, multiples=False):
     tree = ElementTree.fromstring(rec)
     if tree.tag == '{http://www.loc.gov/MARC21/slim}collection':
-        # thenwe only look at the first record.
-        tree = tree.find('{http://www.loc.gov/MARC21/slim}record')
-    dct = {}
-    for df in tree.findall('{http://www.loc.gov/MARC21/slim}datafield'):
-        t = df.attrib['tag']
-        for sf in df.findall('{http://www.loc.gov/MARC21/slim}subfield'):
-            c = sf.attrib['code']
-            v = sf.text
-            dct[t+c] = loc_to_unicode(v)
-    return dct
+        # then we may have multiple records
+        records = tree.findall('{http://www.loc.gov/MARC21/slim}record')
+    elif tree.tag == '{http://www.loc.gov/MARC21/slim}record':
+        records = [tree]
+    else:
+        return []
+    out = []
+    for r in records:
+        dct = {}
+        for df in r.findall('{http://www.loc.gov/MARC21/slim}datafield'):
+            t = df.attrib['tag']
+            for sf in df.findall('{http://www.loc.gov/MARC21/slim}subfield'):
+                c = sf.attrib['code']
+                v = sf.text
+                dct[t+c] = loc_to_unicode(v)
+        out.append(dct)
+    if multiples is False:
+        return out and out[0] or None
+    else:
+        return out
 
 def marcxml_dictionary_to_dc(dct):
     """Take a dictionary generated by marcxml_to_dictionary, and