$fieldset = $e->create_action_fieldset($fieldset);
my @keys = keys %$edits;
- $max = scalar(@keys);
+ $max = int(scalar(@keys));
$count = 0;
$client->respond({ ord => $stage, count=> $count, max => $max, stage => 'FIELDSET_EDITS_CREATE' });
for my $key (@keys) {
}
push(@holds, $hold);
- $client->respond({maximum => scalar(@holds), progress => $counter}) if ( (++$counter % $chunk_size) == 0);
+ $client->respond({maximum => int(scalar(@holds)), progress => $counter}) if ( (++$counter % $chunk_size) == 0);
}
if ($e->commit) {
return {count => 0} unless (
$search_hash and
$search_hash->{searches} and
- scalar( keys %{$search_hash->{searches}} ));
+ int(scalar( keys %{$search_hash->{searches}} )));
}
my $search_duration;
}
}
}
- return { ids => \@recs, count => scalar(@recs) };
+ return { ids => \@recs, count => int(scalar(@recs)) };
}
foreach my $issn_method (qw/
# parallel, so we need to calculate summary values up front.
my %bre_uniq;
$bre_uniq{$_->{bre_id}} = 1 for @$z_searches;
- $response->{bre_count} = scalar(keys %bre_uniq);
+ $response->{bre_count} = int(scalar(keys %bre_uniq));
$response->{search_count} += scalar(@$z_searches);
# let the caller know searches are on their way out
$dbh->pg_putline($line."\n");
}
- return scalar(@fm_nodes);
+ return int(scalar(@fm_nodes));
}
sub copy_create_finish {
copy_create_push( $self => $client => @fm_nodes );
copy_create_finish( $self => $client );
- return scalar(@fm_nodes);
+ return int(scalar(@fm_nodes));
}
sub autoprimary {
$sth->execute();
my @list = $sth->fetchall_hash;
- $client->respond(scalar(@list)); # send the row count first, for progress tracking
+ $client->respond(int(scalar(@list))); # send the row count first, for progress tracking
$client->respond( $_ ) for (@list);
$client->respond_complete;
foreach my $bib (@bibs) {
container::biblio_record_entry_bucket_item->create({ bucket => $bucket, target_biblio_record_entry => $bib, pos => $i++ });
}
- return scalar(@bibs);
+ return int(scalar(@bibs));
}
__PACKAGE__->register_method(