# ==================================================================
|
|
# Gossamer Threads Module Library - http://gossamer-threads.com/
|
|
#
|
|
# GT::SQL::Table
|
|
# CVS Info : 087,071,086,086,085
|
|
# $Id: Table.pm,v 1.274 2008/09/17 19:35:24 brewt Exp $
|
|
#
|
|
# Copyright (c) 2004 Gossamer Threads Inc. All Rights Reserved.
|
|
# ==================================================================
|
|
#
|
|
# Description:
|
|
# Class used to store and retrieve data from a table.
|
|
#
|
|
|
|
package GT::SQL::Table;
|
|
# ===============================================================
|
|
use GT::SQL::Condition;
|
|
use GT::SQL::Base;
|
|
use GT::Config;
|
|
use GT::AutoLoader(NAME => '_AUTOLOAD');
|
|
use strict;
|
|
use vars qw/$DEBUG $VERSION @ISA $AUTOLOAD $ERROR_MESSAGE @COL_ATTRIBS/;
|
|
|
|
@ISA = qw/GT::SQL::Base/;
|
|
$DEBUG = 0;
|
|
$VERSION = sprintf "%d.%03d", q$Revision: 1.274 $ =~ /(\d+)\.(\d+)/;
|
|
@COL_ATTRIBS = qw/size type values default not_null pos regex weight form_display form_size form_type form_names form_values time_check/;
|
|
$ERROR_MESSAGE = 'GT::SQL';
|
|
|
|
use constants DEF_HEADER => <<'HEADER';
|
|
# Database definition file for '%TABLE_NAME%' table
|
|
# Last updated: [localtime]
|
|
# Created by GT::SQL::Table $Revision: 1.274 $
|
|
HEADER
|
|
|
|
sub new {
|
|
# -----------------------------------------------------------------------------
|
|
# GT::SQL::Table->new(
|
|
# name => table_name,
|
|
# debug => debug level,
|
|
# _err_pkg => package name,
|
|
# driver => driver name,
|
|
# );
|
|
# -----------------------------------------------------------------------------
|
|
# Constructs (or returns if it already exists) a new GT::SQL::Object with the
|
|
# parameters specified above.
|
|
#
|
|
my $this = shift;
|
|
my $class = ref $this || $this;
|
|
my $self = bless {}, $class;
|
|
|
|
my $opts = $self->common_param(@_) or return $self->fatal(BADARGS => 'Usage: $obj->new(HASH or HASH_REF or CGI) only');
|
|
|
|
$self->{connect} = $opts->{connect} || {};
|
|
$self->{_debug} = $opts->{debug} || $DEBUG;
|
|
$self->{_err_pkg} = $opts->{_err_pkg} || __PACKAGE__;
|
|
$self->{_index} = 0;
|
|
$self->{_file} = 0;
|
|
|
|
# Must have {connect} info first.
|
|
$self->name($opts->{name});
|
|
$self->{name} ||= '';
|
|
if (-f "$self->{connect}->{def_path}/$self->{name}.def" and not $opts->{_schema}) {
|
|
$self->load_state;
|
|
}
|
|
elsif ($opts->{_schema} and UNIVERSAL::isa($opts->{_schema}, 'GT::Config')) {
|
|
# If _schema is passed as a GT::Config object, use it directly. This
|
|
# is primarily used for subclassed tables - see GT::SQL::Base::new_table()
|
|
$self->{schema} = $opts->{_schema};
|
|
}
|
|
else {
|
|
$self->{schema} = { %{$opts->{_schema}} } if $opts->{_schema};
|
|
$self->_new_schema if length $self->{name};
|
|
}
|
|
|
|
# Some defaults for writing to
|
|
$self->{schema}->{index} ||= {};
|
|
$self->{schema}->{unique} ||= {};
|
|
$self->{schema}->{cols} ||= {};
|
|
$self->{schema}->{pk} ||= [];
|
|
$self->{schema}->{fk} ||= {};
|
|
$self->{schema}->{subclass} ||= {};
|
|
$self->{schema}->{ai} ||= '';
|
|
$self->{schema}->{fk_tables} ||= [];
|
|
|
|
{ # Check for weights or file columns and set _file and _index accordingly
|
|
my ($found_file, $found_weight);
|
|
my $c = $self->{schema}->{cols};
|
|
for (keys %$c) {
|
|
if (!$found_file and $c->{$_}->{form_type} and uc $c->{$_}->{form_type} eq 'FILE') {
|
|
$self->_file_cols();
|
|
$self->{_file} = ++$found_file;
|
|
}
|
|
if (!$found_weight and $c->{$_}->{weight}) {
|
|
$self->{_index} = ++$found_weight;
|
|
}
|
|
last if $found_file and $found_weight;
|
|
}
|
|
}
|
|
|
|
$self->debug("Table '$self->{name}' object created.") if ($self->{_debug} > 2);
|
|
return $self;
|
|
}
|
|
|
|
sub DESTROY {}
|
|
|
|
sub AUTOLOAD {
|
|
# -------------------------------------------------------------
|
|
# This method provides get methods for all the cols attributes.
|
|
# It returns a hash reference of the column names to the value
|
|
# of the attribute for that attribute.
|
|
#
|
|
my $self = $_[0];
|
|
my ($pkg, $what) = $AUTOLOAD =~ /^(.*)::([^:]+)$/;
|
|
|
|
# Otherwise we have auto generated functions for each of the
|
|
# column names.
|
|
if (grep { $what eq $_ } @COL_ATTRIBS) {
|
|
no strict 'refs';
|
|
*$AUTOLOAD = sub {
|
|
my $self = shift;
|
|
my $h = {};
|
|
for my $col (keys %{$self->{schema}->{cols}}) {
|
|
if (exists $self->{schema}->{cols}->{$col}->{$what}) {
|
|
$h->{$col} = $self->{schema}->{cols}->{$col}->{$what};
|
|
}
|
|
}
|
|
wantarray ? %$h : $h;
|
|
};
|
|
goto &$AUTOLOAD;
|
|
}
|
|
|
|
# Pass to the imported &_AUTOLOAD, which handles loading from %COMPILE
|
|
goto &_AUTOLOAD;
|
|
}
|
|
|
|
# Loads a new ->{schema} GT::Config object that, when saved, will create the
|
|
# def file. The config object created is always empty, but any existing values
|
|
# in ->{schema} will be copied into the object. Thus, saving will always
|
|
# overwrite anything stored in this table's def file.
|
|
$COMPILE{_new_schema} = __LINE__ . <<'END_OF_SUB';
|
|
sub _new_schema {
|
|
my $self = shift;
|
|
my $name = $self->name;
|
|
(my $header = DEF_HEADER) =~ s/%TABLE_NAME%/$name/g;
|
|
my %old = $self->{schema} ? %{$self->{schema}} : ();
|
|
$self->{schema} = GT::Config->load(
|
|
"$self->{connect}->{def_path}/$name.def" => {
|
|
local => 0,
|
|
empty => 1,
|
|
chmod => 0666,
|
|
debug => $self->{_debug},
|
|
sort_order => sub {
|
|
my ($keya, $keyb, $vala, $valb) = @_;
|
|
if (ref $vala eq 'HASH' and ref $valb eq 'HASH' and exists $vala->{pos} and exists $valb->{pos}) {
|
|
return $vala->{pos} <=> $valb->{pos};
|
|
}
|
|
else {
|
|
return $keya cmp $keyb;
|
|
}
|
|
},
|
|
header => $header
|
|
}
|
|
);
|
|
%{$self->{schema}} = %old;
|
|
$self->{schema};
|
|
}
|
|
END_OF_SUB
|
|
|
|
sub load_state {
|
|
# -----------------------------------------------------------------------------
|
|
# $obj->load_state;
|
|
# -----------------
|
|
# Loads relation structure from def file. If you want to reload the
|
|
# structure currently stored on disk, you should call ->reload or ->reset -
|
|
# this method caches files (via GT::Config).
|
|
#
|
|
my ($self, $reload) = @_;
|
|
my $name = $self->name;
|
|
-e "$self->{connect}->{def_path}/$name.def" or return $self->fatal(FILENOEXISTS => "$self->{connect}->{def_path}/$name.def");
|
|
$self->debug("Loading state for " . $self->name) if $self->{_debug} and $self->{_debug} > 1;
|
|
(my $header = DEF_HEADER) =~ s/%TABLE_NAME%/$name/g;
|
|
$self->{schema} = GT::Config->load(
|
|
"$self->{connect}->{def_path}/$name.def" => {
|
|
cache => !($reload and $reload eq 'reload'),
|
|
chmod => 0666,
|
|
debug => $self->{_debug},
|
|
sort_order => sub {
|
|
my ($keya, $keyb, $vala, $valb) = @_;
|
|
if (ref $vala eq 'HASH' and ref $valb eq 'HASH' and exists $vala->{pos} and exists $valb->{pos}) {
|
|
return $vala->{pos} <=> $valb->{pos};
|
|
}
|
|
else {
|
|
return $keya cmp $keyb;
|
|
}
|
|
},
|
|
header => $header
|
|
}
|
|
);
|
|
$self->{driver}->{schema} = $self->{schema} if $self->{driver} and exists $self->{driver}->{schema};
|
|
$self->debug("State loaded for " . $self->name) if $self->{_debug} and $self->{_debug} > 1;
|
|
return 1;
|
|
}
|
|
|
|
$COMPILE{reload} = __LINE__ . <<'END_OF_SUB';
|
|
sub reload {
|
|
# -----------------------------------------------------------------------------
|
|
# $obj->reload;
|
|
# -------------
|
|
shift->load_state('reload');
|
|
}
|
|
END_OF_SUB
|
|
|
|
sub reset {
|
|
# -----------------------------------------------------------------------------
|
|
# Works just like reload, except it always returns false, allowing for a
|
|
# shortcut such as:
|
|
#
|
|
# $code->that_changes($table) or return $table->reset;
|
|
#
|
|
shift->load_state('reload');
|
|
return;
|
|
}
|
|
|
|
# -------------------------------------------------------------------------------------- #
|
|
# SQL OPERATIONS #
|
|
# -------------------------------------------------------------------------------------- #
|
|
|
|
sub add {
|
|
# -----------------------------------------------------------
|
|
# add()
|
|
# IN : hash/hash_ref/cgi/GT::CGI of col => val pairs to add.
|
|
# OUT: ID number if the table is auto-incremented, or undef on failure
|
|
#
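# Illustrative usage (a sketch, not part of the original module; the column
# names and values are hypothetical, and $table is assumed to be a
# GT::SQL::Table object for the table in question):
#
#   my $id = $table->add(Title => 'Example', URL => 'http://example.com/')
#       or die "add failed: $GT::SQL::error";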
|
|
my $self = shift;
|
|
my $input = $self->common_param(@_) or return $self->fatal(BADARGS => 'Usage: $obj->add(HASH or HASH_REF or CGI) only.');
|
|
$input = {%$input};
|
|
my $table = $self->name or return $self->fatal('NOTABLE');
|
|
|
|
my $c = $self->{schema}->{cols};
|
|
my $ai = $self->{schema}->{ai};
|
|
my $err = 0;
|
|
|
|
my %skip_check = (
|
|
$ai => 1
|
|
);
|
|
|
|
if ($self->{schema}->{tree}) {
|
|
my $tree = $self->tree;
|
|
$skip_check{$tree->father_id_col}++;
|
|
$skip_check{$tree->root_id_col}++;
|
|
$skip_check{$tree->depth_col}++;
|
|
}
|
|
|
|
# Clear errors.
|
|
$self->{_error} = [];
|
|
|
|
for my $col (keys %$c) {
|
|
my $default = $c->{$col}->{default};
|
|
next if $skip_check{$col};
|
|
my $set = defined $input->{$col} && $input->{$col} =~ /\S/;
|
|
|
|
# The following code is a little inconsistent (not_null sometimes means a value
|
|
# is required, sometimes it doesn't) because it needs to be backwards
|
|
# compatible. Changing this behaviour will break a lot of code.
|
|
unless ($set) {
|
|
if ($c->{$col}->{type} =~ /^(?:.*INT|INTEGER|FLOAT|REAL|DOUBLE|DECIMAL)$/) {
|
|
# If we have a default, use it, otherwise set it to undef so that it will get
|
|
# inserted as NULL or return a NOTNULL error (see _check_value()).
|
|
if (defined $default and length $default) {
|
|
delete $input->{$col};
|
|
}
|
|
else {
|
|
$input->{$col} = undef;
|
|
}
|
|
}
|
|
elsif ($c->{$col}->{type} =~ /^(?:CHAR|VARCHAR|.*TEXT)$/) {
|
|
# The only cases where the default is used is when we have a default and it
|
|
# hasn't been passed into add(). Otherwise, set the column to undef (to catch
|
|
# NOTNULL) or an empty string, depending on the not_null setting of the column.
|
|
if (!exists $input->{$col} and defined $default and length $default) {
|
|
delete $input->{$col};
|
|
}
|
|
else {
|
|
$input->{$col} = $c->{$col}->{not_null} ? undef : '';
|
|
}
|
|
}
|
|
# For all other column types just do what <=r1.256 did, except set the value to
|
|
# undef so _check_value() catches the NOTNULL instead of triggering it here so
|
|
# we don't get duplicate errors.
|
|
elsif ($c->{$col}->{not_null} and not (!exists $input->{$col} and defined $default and length $default)) {
|
|
$input->{$col} = undef;
|
|
}
|
|
}
|
|
}
|
|
if ($err and ref $self->{_error} and @{$self->{_error}}) {
|
|
$GT::SQL::error = join "\n", @{$self->{_error}};
|
|
return;
|
|
}
|
|
my $sth = $self->insert($input);
|
|
return $sth ? $ai ? $sth->insert_id : 1 : undef;
|
|
}
|
|
|
|
sub insert {
|
|
# -----------------------------------------------------------
|
|
# $obj->insert(key1 => $value1, key2 => $value2);
|
|
# ------------------------------------------------
|
|
# Key values pairs that correspond to the row you are
|
|
# inserting.
|
|
#
|
|
# $obj->insert(\%row);
|
|
# ---------------------
|
|
# A hash that contains key/value pairs that correspond to
|
|
# the row you are inserting.
|
|
#
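# Illustrative usage (a sketch; column names are hypothetical). insert()
# returns a statement handle, so the new auto-increment value comes from
# its insert_id() method:
#
#   my $sth = $table->insert({ Title => 'Example', URL => 'http://example.com/' })
#       or die $GT::SQL::error;
#   my $new_id = $sth->insert_id;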
|
|
my $self = shift;
|
|
my $opts = $self->common_param(@_) or return $self->fatal(BADARGS => 'Usage: $obj->insert(HASH or HASH_REF) only.');
|
|
my $table = $self->name or return $self->fatal('NOTABLE');
|
|
|
|
# Connect to the database if we are not already connected
|
|
$self->connect or return;
|
|
|
|
# Clear errors.
|
|
$self->{_error} = [];
|
|
|
|
# Make sure we have some data.
|
|
keys %$opts or return $self->warn(NOVALUES => "insert()");
|
|
|
|
# Copy the data and remove anything that doesn't make sense here.
|
|
my $c = $self->{schema}->{cols};
|
|
my %set = map { exists $opts->{$_} ? ($_ => $opts->{$_}) : () } keys %$c;
|
|
|
|
# Check for file uploads.
|
|
my ($fset, %fcols);
|
|
if ($self->{_file} and !$opts->{GT_SQL_SKIP_FILE} and %fcols = $self->_file_cols()) {
|
|
require GT::SQL::File;
|
|
$fset = GT::SQL::File->pre_file_actions(\%fcols, \%set, $opts) or return;
|
|
}
|
|
|
|
my $tree;
|
|
if ($self->{schema}->{tree}) {
|
|
$tree = $self->tree;
|
|
my $f = $tree->father_id_col;
|
|
my $r = $tree->root_id_col;
|
|
my $d = $tree->depth_col;
|
|
if ($set{$f}) {
|
|
my $pk = $self->{schema}->{pk}->[0];
|
|
my ($root, $depth) = $self->select($r, $d, { $pk => $set{$f} })->fetchrow;
|
|
$set{$r} = $root || $set{$f};
|
|
$set{$d} = $depth + 1;
|
|
}
|
|
else {
|
|
$set{$f} = $set{$r} = $set{$d} = 0; # A root record
|
|
}
|
|
}
|
|
|
|
unless ($opts->{GT_SQL_SKIP_CHECK}) {
|
|
$self->_check_insert(\%set) or return;
|
|
}
|
|
$self->{last_insert} = \%set;
|
|
|
|
# Weighted indexing needs special handling
|
|
my $tmp_weight;
|
|
if ($self->{_index} and !$opts->{GT_SQL_SKIP_INDEX} and $self->_weight_cols) {
|
|
$tmp_weight = $self->_get_indexer->pre_add_record( $self->{last_insert} ) or return;
|
|
}
|
|
|
|
my $sth = $self->{driver}->insert(\%set) or return;
|
|
|
|
# If we have files, let's save them.
|
|
if ($self->{_file} and !$opts->{GT_SQL_SKIP_FILE}) {
|
|
if ((my @pk = $self->pk()) == 1 and keys %fcols) {
|
|
my $key = $self->ai() ? $sth->insert_id : $set{$pk[0]};
|
|
require GT::SQL::File;
|
|
my $tbl = GT::SQL::File->new({ parent_table => $self, connect => $self->{connect} }) or return;
|
|
$tbl->add_file({ %set, %$fset }, $key) or return;
|
|
}
|
|
}
|
|
|
|
# Finish off special handling for weighted indexing
|
|
if ($self->{_index} and !$opts->{GT_SQL_SKIP_INDEX} and $self->_weight_cols) {
|
|
$self->_get_indexer->post_add_record( $self->{last_insert}, $sth, $tmp_weight ) or return;
|
|
}
|
|
|
|
# If a tree exists, insert any new entries required
|
|
if ($self->{schema}->{tree}) {
|
|
$tree->insert(insert_id => $sth->insert_id, data => \%set);
|
|
}
|
|
|
|
return $sth;
|
|
}
|
|
|
|
$COMPILE{insert_multiple} = __LINE__ . <<'END_OF_SUB';
|
|
sub insert_multiple {
|
|
# -----------------------------------------------------------
|
|
# $obj->insert_multiple(['key1', 'key2', 'key3'], [$value1_1, $value1_2, $value1_3], [$value2_1, $value2_2, $value2_3], ...);
|
|
# ------------------------------------------------
|
|
# The first array ref is the columns, and all following array refs are the
|
|
# values to be inserted.
|
|
#
|
|
# This method doesn't mess around - it doesn't check to make sure all the
|
|
# columns you entered exist, nor does it do foreign key checks, nor does it
|
|
# handle raw SQL values via scalar references (it does, however, support
|
|
# undef as NULL). Currently, it does not support file columns or columns
|
|
# indexed by GT::SQL's 'INTERNAL' indexer.
|
|
#
|
|
# Returned is the number of _queries_ successfully executed, or undef if no
|
|
# queries were executed successfully. Note that the number of queries is not
|
|
# necessarily the same as the number of rows inserted - in particular, several
|
|
# rows may be inserted in a single query in some databases (currently,
|
|
# MySQL).
|
|
#
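# Illustrative usage (a sketch; column names and values are hypothetical):
#
#   $table->insert_multiple(
#       ['Title',  'URL'],
#       ['First',  'http://example.com/1'],
#       ['Second', 'http://example.com/2'],
#   ) or die $GT::SQL::error;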
|
|
my ($self, $cols, @values) = @_;
|
|
$cols or return $self->fatal(BADARGS => 'Usage: $obj->insert_multiple(ARRAY_REF, ARRAY_REF, ...) only');
|
|
|
|
my $table = $self->name or return $self->fatal('NOTABLE');
|
|
|
|
$self->{schema}->{tree} and return $self->fatal(TREENOCANDO => 'insert_multiple', $table);
|
|
|
|
# Connect to the database if we are not already connected
|
|
$self->connect or return;
|
|
|
|
# Clear errors.
|
|
$self->{_error} = [];
|
|
|
|
# Make sure we have some data, and the right number for each insert.
|
|
@values or return $self->warn(NOVALUES => "insert()");
|
|
for my $val (@values) {
|
|
if (@$val != @$cols) {
|
|
return $self->fatal(BADMULTVALUES => 'insert_multiple()');
|
|
}
|
|
}
|
|
|
|
my $c = $self->{schema}->{cols};
|
|
for (my $i = 0; $i < @$cols; $i++) {
|
|
unless (exists $c->{$cols->[$i]}) {
|
|
splice @$cols, $i, 1;
|
|
for my $val (@values) {
|
|
splice @$val, $i, 1;
|
|
}
|
|
--$i;
|
|
}
|
|
}
|
|
|
|
# Query is executed inside to handle ai fields.
|
|
$self->{driver}->insert_multiple($cols, \@values) or return;
|
|
}
|
|
END_OF_SUB
|
|
|
|
$COMPILE{modify} = __LINE__ . <<'END_OF_SUB';
|
|
sub modify {
|
|
# -----------------------------------------------------------
|
|
# modify()
|
|
# IN : hash/hash_ref/cgi/GT::CGI of col => val pairs to change.
|
|
# OUT: 1 on success, undef on failure.
|
|
#
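# Illustrative usage (a sketch; assumes 'ID' is the primary key column and
# 'Title' is an ordinary column of this table). The primary key identifies
# the row; the remaining pairs are the new values:
#
#   $table->modify({ ID => 42, Title => 'New title' })
#       or die $GT::SQL::error;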
|
|
my $self = shift;
|
|
my $input = $self->common_param(@_) or return $self->fatal(BADARGS => 'Usage: $obj->modify(HASH or HASH_REF or CGI) only.');
|
|
$input = {%$input};
|
|
my $table = $self->name or return $self->fatal('NOTABLE');
|
|
|
|
# Connect to the database if we are not already connected
|
|
$self->connect or return;
|
|
|
|
# Clear errors.
|
|
$self->{_error} = [];
|
|
|
|
my $err;
|
|
|
|
# Remove primary keys from update clause and make sure we have a primary key.
|
|
my $where;
|
|
for my $key (@{$self->{schema}->{pk}}) {
|
|
$where->{$key} = delete $input->{$key} if exists $input->{$key};
|
|
}
|
|
unless (keys %{$where} == @{$self->{schema}->{pk}}) {
|
|
$self->warn('NOPKTOMOD');
|
|
$err++;
|
|
}
|
|
|
|
# Check to see if the record has been updated since the original record was retrieved.
|
|
$err++ unless $self->_check_timestamp($where, $input);
|
|
|
|
# If we caught any errors, return.
|
|
if ($err and ref $self->{_error} and @{$self->{_error}}) {
|
|
$GT::SQL::error = join "\n", @{$self->{_error}};
|
|
return;
|
|
}
|
|
|
|
my $cols = $self->{schema}->{cols};
|
|
for my $col (keys %$cols) {
|
|
|
|
# update() will handle not null checks
|
|
next unless exists $input->{$col};
|
|
|
|
# Don't allow modification of timestamps
|
|
if ($cols->{$col}->{type} eq 'TIMESTAMP') {
|
|
delete $input->{$col};
|
|
}
|
|
# Treat numeric and date columns set to empty strings as NULL (the update()
|
|
# will catch NOT NULL errors). Do this with date columns because '' is not a
|
|
# valid date.
|
|
elsif ($cols->{$col}->{type} =~ /^(?:.*INT|INTEGER|FLOAT|REAL|DOUBLE|DECIMAL|DATE|TIME|DATETIME)$/ and defined $input->{$col} and $input->{$col} eq '') {
|
|
$input->{$col} = undef;
|
|
}
|
|
# For add and modify, empty strings are considered as NULL, so set these values
|
|
# to undef so it triggers a NOT NULL error during the update().
|
|
elsif ($cols->{$col}->{not_null} and not (defined $input->{$col} and length $input->{$col})) {
|
|
$input->{$col} = undef;
|
|
}
|
|
}
|
|
|
|
# Execute the update
|
|
$self->update($input, $where) or return;
|
|
return 1;
|
|
}
|
|
END_OF_SUB
|
|
|
|
sub update {
|
|
# -----------------------------------------------------------
|
|
# $obj->update($hash_ref, $condition, $opts);
|
|
# -------------------------------------
|
|
# $condition is a Condition or a
|
|
# hash reference.
|
|
#
|
|
# $obj->update($hash_ref_1, $hash_ref_2, $opts);
|
|
# ----------------------------------------
|
|
# Hash1 is what needs to be changed.
|
|
# Hash2 is the condition.
|
|
#
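# Illustrative usage (a sketch; column names are hypothetical). The first
# hash ref is the SET clause, the second is the WHERE clause:
#
#   $table->update({ Status => 'closed' }, { Status => 'open' })
#       or die $GT::SQL::error;
#
#   # A condition object can be used for non-equality tests:
#   my $cond = GT::SQL::Condition->new('Hits', '>', 1000);
#   $table->update({ Status => 'popular' }, $cond);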
|
|
my $self = shift;
|
|
my ($set, $where, $opts) = @_;
|
|
ref $set eq 'HASH' or return $self->fatal(BADARGS => 'Usage: $obj->update(HASH_REF, CONDITION_OBJ or HASH_REF, HASH_REF)');
|
|
keys %$set or return $self->fatal(BADARGS => 'update called with nothing to set!');
|
|
$self->name or return $self->fatal('NOTABLE');
|
|
|
|
# Connect to the database if we are not already connected
|
|
$self->connect or return;
|
|
|
|
# Clear errors.
|
|
$self->{_error} = [];
|
|
|
|
# Check to make sure the update is possible
|
|
$opts ||= {};
|
|
$where ||= {}; # Update all.
|
|
|
|
my $where_cond = $self->_build_cond($where);
|
|
|
|
# Check to see if we have files to update.
|
|
my ($fset, %fcols);
|
|
if ($self->{_file} and !$opts->{GT_SQL_SKIP_FILE} and %fcols = $self->_file_cols()) {
|
|
my @pk = $self->pk();
|
|
if (@pk == 1) {
|
|
my @ids = $self->select($pk[0], $where_cond)->fetchall_list();
|
|
|
|
require GT::SQL::File;
|
|
my $file = GT::SQL::File->new({ parent_table => $self, connect => $self->{connect} });
|
|
$fset = $file->pre_file_actions(\%fcols, $set, $opts, \@ids) or return;
|
|
|
|
if (not keys %$set and not keys %$fset) {
|
|
return $self->warn(BADARGS => "update called with nothing to set!");
|
|
}
|
|
}
|
|
else {
|
|
for my $col (keys %fcols) {
|
|
delete $set->{$col};
|
|
delete $set->{"${col}_del"};
|
|
delete $set->{"${col}_filename"};
|
|
}
|
|
}
|
|
}
|
|
|
|
# If there is a tree, and the father_id is being updated, call the appropriate tree method.
|
|
my $tree_data;
|
|
if ($self->{schema}->{tree}) {
|
|
my $tree = $self->tree;
|
|
if (exists $set->{$tree->father_id_col}) {
|
|
$tree_data = $tree->pre_update(where => $where_cond, data => $set) or return;
|
|
}
|
|
}
|
|
|
|
# Remove any invalid columns from the set (_build_set also does this, but
|
|
# _check_update uses $set)
|
|
for my $key (keys %$set) {
|
|
delete $set->{$key} unless exists $self->{schema}->{cols}->{$key};
|
|
}
|
|
|
|
# Validate data.
|
|
unless ($opts->{GT_SQL_SKIP_CHECK}) {
|
|
$self->_check_update($set, $where) or return;
|
|
}
|
|
my $set_cond = $self->_build_set($set);
|
|
|
|
# If we are updating this table's primary key, then get the original
|
|
# value and save it for after the update.
|
|
my $pk = $self->{schema}->{pk};
|
|
my $where_r = $where_cond->as_hash;
|
|
my @update_pk;
|
|
for (@$pk) {
|
|
if (defined $set->{$_} and defined $where_r->{$_} and $set->{$_} ne $where_r->{$_}) {
|
|
push @update_pk, $_;
|
|
}
|
|
}
|
|
|
|
# Update the search index if changing a weighted column.
|
|
my $tmp_weights = {};
|
|
my %wcols;
|
|
if ($self->{_index} and ! $opts->{GT_SQL_SKIP_INDEX}) {
|
|
%wcols = $self->_weight_cols;
|
|
for my $col (keys %wcols) {
|
|
if ($wcols{$col} and exists $set->{$col}) {
|
|
$tmp_weights = $self->_get_indexer->pre_update_record( $set_cond, $where_cond ) or return;
|
|
last;
|
|
}
|
|
}
|
|
}
|
|
|
|
$self->{sel_opts} ||= [];
|
|
|
|
# Save the where clause.
|
|
$self->{last_where} = $where_cond;
|
|
|
|
# Perform the update.
|
|
my $sth = $self->{driver}->update($set_cond, $where_cond) or return;
|
|
|
|
# The query was successful, so now if there is a tree, call the tree's update method
|
|
if ($tree_data) {
|
|
$self->tree->update($tree_data);
|
|
}
|
|
|
|
# Update the foreign keys of other tables if this table's primary key changed.
|
|
for my $key (@update_pk) {
|
|
for my $table (@{$self->{schema}->{fk_tables}}) {
|
|
my $new_me = $self->new_table($table) or return $self->fatal(FKNOTABLE => $table, $GT::SQL::error);
|
|
my $fk_hash = $new_me->{schema}->{fk}->{$self->name} or next;
|
|
for my $my_col (keys %$fk_hash) {
|
|
if ($fk_hash->{$my_col} eq $key) {
|
|
$new_me->update({ $my_col => $set->{$key} }, { $my_col => $where_r->{$key} });
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
# Update any file changes.
|
|
if (keys %fcols and $self->{_file} and !$opts->{GT_SQL_SKIP_FILE}) {
|
|
require GT::SQL::File;
|
|
my $File = GT::SQL::File->new({ parent_table => $self, connect => $self->{connect} });
|
|
$File->update_records({ %$set, %$fset }, $where_cond) or return;
|
|
}
|
|
|
|
# Update the search index if required.
|
|
if ($self->{_index} and !$opts->{GT_SQL_SKIP_INDEX}) {
|
|
%wcols = $self->_weight_cols;
|
|
for my $col (keys %wcols) {
|
|
if ($wcols{$col} and exists $set->{$col}) {
|
|
$self->_get_indexer->post_update_record( $set_cond, $where_cond, $tmp_weights ) or return;
|
|
last;
|
|
}
|
|
}
|
|
}
|
|
return $sth;
|
|
}
|
|
|
|
sub delete {
|
|
# -----------------------------------------------------------
|
|
# $obj->delete($condition);
|
|
# --------------------------
|
|
# $condition is a Condition or a
|
|
# hash reference.
|
|
#
|
|
# $obj->delete($val);
|
|
# ----------------------
|
|
# Deletes a single record based on the scalar value being the
|
|
# primary key.
|
|
#
|
|
# $obj->delete([$val1, $val2]);
|
|
# --------------------------------
|
|
# If you have a composite primary key, deletes a single record
|
|
# based on the values being the primary keys.
|
|
#
|
|
# NOTE: use delete_all to delete everything
|
|
#
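# Illustrative usage (a sketch; the key value and column names are
# hypothetical):
#
#   $table->delete(42);                                    # by primary key
#   $table->delete({ Status => 'expired' });               # by where clause
#   $table->delete(GT::SQL::Condition->new('Hits', '<', 5));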
|
|
my $self = shift;
|
|
@_ > 0 or return $self->fatal(BADARGS => "You must call delete_all to delete all entries");
|
|
$self->name or return $self->fatal('NOTABLE');
|
|
|
|
# Connect to the database if we are not already connected
|
|
$self->connect or return;
|
|
# Clear errors.
|
|
$self->{_error} = [];
|
|
|
|
my ($opt, $cond, $where, $do_select, %del, @rows);
|
|
|
|
# Determine what sort of delete to do.
|
|
unless (@_ == 1) {
|
|
for my $i (0 .. $#_) {
|
|
$_ = $_[$i];
|
|
/^abort$/ and do { $opt = splice(@_, $i, 1); last };
|
|
/^cascade$/ and do { $opt = splice(@_, $i, 1); last };
|
|
/^ignore$/ and do { $opt = splice(@_, $i, 1); last };
|
|
/^cleanup$/ and do { $opt = splice(@_, $i, 1); last };
|
|
}
|
|
}
|
|
|
|
# Get the where clause we are going to use to do the delete. This can be
|
|
# either from a scalar/array reference representing the primary key, or a
|
|
# condition/hash reference representing a where clause.
|
|
if ( ((ref $_[0] eq 'ARRAY') or (not ref $_[0])) and (@_ == 1) ) {
|
|
my @keys = @{$self->{schema}->{pk}};
|
|
my @vals = ref $_[0] ? @{shift()} : shift();
|
|
my $href = {};
|
|
if (@keys != @vals) {
|
|
return $self->fatal(BADARGS => "Your primary key is made of " . @keys . " elements, but you passed in " . @vals . " elements.");
|
|
}
|
|
while (@vals) {
|
|
$href->{shift(@keys)} = shift(@vals);
|
|
}
|
|
(keys %{$href}) or return $self->fatal(BADARGS => 'Usage: $obj->delete(CONDITION_OBJ or PRIMARY_KEY or [PRIMARY_KEY1, PRIMARY_KEY2])');
|
|
$where = $self->_build_cond($href);
|
|
}
|
|
else {
|
|
($where, $do_select) = _extract_where(@_);
|
|
}
|
|
|
|
# Make sure $where is not empty.
|
|
if (! $where->sql) {
|
|
return $self->fatal(BADARGS => "Could not create a condition object out of arguments.");
|
|
}
|
|
|
|
# Save the where clause.
|
|
$self->{last_where} = $where;
|
|
$opt ||= 'cascade';
|
|
|
|
# Do a 'cascade' or 'abort' delete.
|
|
if ($opt ne 'ignore' and $opt ne 'cleanup' and @{$self->fk_tables}) {
|
|
my $sth;
|
|
# If they passed in a complex condition we select
|
|
if ($do_select) {
|
|
$sth = $self->select($where);
|
|
}
|
|
# If the hash that was passed in does not contain the foreign keys we select
|
|
elsif (not $self->_check_keys($where)) {
|
|
$sth = $self->select($where);
|
|
}
|
|
|
|
if ($sth) {
|
|
$self->_delete_select($sth, $opt) or return
|
|
}
|
|
else {
|
|
$self->_delete_cond($where, $opt) or return
|
|
}
|
|
}
|
|
|
|
# now handle the indexes if that's required
|
|
my $tmp_weights = {};
|
|
if ($self->{_index} and $self->_weight_cols) {
|
|
$tmp_weights = $self->_get_indexer()->pre_delete_record( $where ) or return;
|
|
}
|
|
|
|
# Delete any files associated with the records being deleted
|
|
if ($self->{_file} and $self->_file_cols() ) {
|
|
require GT::SQL::File;
|
|
my $file = GT::SQL::File->new({ parent_table => $self, connect => $self->{connect} });
|
|
$file->delete_records( $where );
|
|
}
|
|
|
|
# For many to one fk relations
|
|
my $rows;
|
|
if ($opt eq 'cleanup') {
|
|
defined($rows = $self->_delete_cleanup($where)) or return;
|
|
}
|
|
else {
|
|
# Get the SQL.
|
|
my $sth = $self->{driver}->delete($where) or return;
|
|
$rows = $sth->rows;
|
|
}
|
|
|
|
if ($self->{_index} and $self->_weight_cols) {
|
|
$self->_get_indexer()->post_delete_record( $where, $tmp_weights ) or return;
|
|
}
|
|
|
|
defined $rows or return;
|
|
return ($rows == 0) ? "0E0" : $rows;
|
|
}
|
|
|
|
$COMPILE{delete_all} = __LINE__ . <<'END_OF_SUB';
|
|
sub delete_all {
|
|
# -----------------------------------------------------------
|
|
# $obj->delete_all;
|
|
# -----------------
|
|
# Deletes all the records in the current table.
|
|
#
|
|
my ($self, $opt, $done) = @_; # $done is used internally
|
|
$opt ||= 'cascade';
|
|
my $name = $self->name or return $self->fatal('NOTABLE');
|
|
$done ||= { $name => 1 };
|
|
|
|
# Connect to the database if we are not already connected
|
|
$self->connect or return;
|
|
|
|
# Clear errors.
|
|
$self->{_error} = [];
|
|
|
|
# Do the cascading delete.
|
|
for my $fktable (@{$self->fk_tables}) {
|
|
next if $done->{$fktable}++;
|
|
my $new_me = $self->new_table($fktable) or return $self->fatal(FKNOTABLE => $fktable, $GT::SQL::error);
|
|
if ($opt eq 'cascade') {
|
|
$done->{$fktable}++;
|
|
$new_me->delete_all($opt, $done) or return;
|
|
}
|
|
else {
|
|
$new_me->count and return $self->warn(DEPENDENCY => $fktable);
|
|
}
|
|
}
|
|
my $tmp_weights = {};
|
|
if ($self->_weight_cols()) { $tmp_weights = $self->_get_indexer()->pre_delete_all_records() or return }
|
|
my $sth = $self->{driver}->delete() or return;
|
|
if ($self->_weight_cols()) { $self->_get_indexer()->post_delete_all_records($tmp_weights) or return }
|
|
|
|
$sth;
|
|
}
|
|
END_OF_SUB
|
|
|
|
$COMPILE{query} = __LINE__ . <<'END_OF_SUB';
|
|
sub query {
|
|
# -------------------------------------------------------------------
|
|
# Just performs the query and returns a fetchall.
|
|
#
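# Illustrative usage (a sketch; the column name is hypothetical, and the
# sb/so/mh/nh keys are the sort/limit options handled by _query() and
# _get_search_opts() below). Returns the rows as an array ref of array refs:
#
#   my $rows = $table->query({ Title => 'perl', sb => 'Title', so => 'ASC',
#                              mh => 10, nh => 1 });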
|
|
return shift->_query(@_)->fetchall_arrayref;
|
|
}
|
|
END_OF_SUB
|
|
|
|
$COMPILE{query_sth} = __LINE__ . <<'END_OF_SUB';
|
|
sub query_sth {
|
|
# -------------------------------------------------------------------
|
|
# Just performs the query and returns an active sth.
|
|
#
|
|
return shift->_query(@_);
|
|
}
|
|
END_OF_SUB
|
|
|
|
$COMPILE{_query} = __LINE__ . <<'END_OF_SUB';
|
|
sub _query {
|
|
# -------------------------------------------------------------------
|
|
# Parses the input, and runs a select based on input.
|
|
#
|
|
my $self = shift;
|
|
my $opts = $self->common_param(@_) or return $self->fatal(BADARGS => 'Usage: $obj->query(HASH or HASH_REF or CGI) only.');
|
|
$self->name or return $self->fatal('NOTABLE');
|
|
# Clear errors.
|
|
$self->{_error} = [];
|
|
|
|
# Strip out values that are empty or blank (as query is generally derived from
|
|
# cgi input).
|
|
my %input = map { $_ => $opts->{$_} } grep { defined $opts->{$_} and $opts->{$_} !~ /^\s*$/ } keys %$opts;
|
|
$opts = \%input;
|
|
|
|
# If build_query_cond returns a GT::SQL::Search object, then we are done.
|
|
my $cond = $self->build_query_cond($opts, $self->{schema}->{cols});
|
|
|
|
if ( ( ref $cond ) =~ /(?:DBI::st|::STH)$/i ) {
|
|
return $cond;
|
|
}
|
|
|
|
# If we have a callback, then we get all the results as a hash, send them
|
|
# to the callback, and then do the regular query on the remaining set.
|
|
if (defined $opts->{callback} and (ref $opts->{callback} eq 'CODE')) {
|
|
my $pk = $self->{schema}->{pk}->[0];
|
|
my $sth = $self->select($pk, $cond) or return;
|
|
my %res = map { $_ => 1 } $sth->fetchall_list;
|
|
my $new_results = $opts->{callback}->($self, \%res);
|
|
$cond = GT::SQL::Condition->new($pk, 'IN', [keys %$new_results]);
|
|
}
|
|
|
|
# Set the limit clause, defaults to 25, set to -1 for none.
|
|
my $in = $self->_get_search_opts($opts);
|
|
my $offset = ($in->{nh} - 1) * $in->{mh};
|
|
$self->select_options("ORDER BY $in->{sb} $in->{so}") if ($in->{sb});
|
|
$self->select_options("LIMIT $in->{mh} OFFSET $offset") unless $in->{mh} == -1;
|
|
|
|
# Now do the select.
|
|
my @sel = ();
|
|
if ($cond) { push @sel, $cond }
|
|
if ($opts->{rs} and $cond) { push @sel, $opts->{rs} }
|
|
my $sth = $self->select(@sel) or return;
|
|
|
|
return $sth;
|
|
}
|
|
END_OF_SUB
|
|
|
|
sub select_options {
|
|
# -----------------------------------------------------------
|
|
# $obj->select_options(@options);
|
|
# --------------------------------
|
|
# @options should be a list of options you want to append to your search.
# Select options will be used for delete and select.
|
|
#
|
|
my $self = shift;
|
|
push @{$self->{sel_opts}}, @_ if @_;
|
|
wantarray ? @{$self->{sel_opts}} : $self->{sel_opts};
|
|
}
|
|
|
|
sub select {
|
|
# -----------------------------------------------------------
|
|
# $obj->select;
|
|
# -------------
|
|
# returns all rows from that relation (no where condition).
|
|
#
|
|
# $obj->select($condition, \@select_returns);
|
|
# --------------------------------------------
|
|
# $condition is a Condition or a hash reference.
|
|
#
|
|
# $obj->select(\%columns, \@select_returns);
|
|
# -------------------------------------------
|
|
# $col1 = $val1, $col2 = $val2
|
|
#
|
|
# @select_returns is a list of the fields that you wish returned. If none are
|
|
# specified all fields will be returned.
|
|
#
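# Illustrative usage (a sketch; column names are hypothetical). select()
# returns a statement handle; select_options() can be called first to add
# ORDER BY/LIMIT clauses:
#
#   $table->select_options('ORDER BY Title', 'LIMIT 10');
#   my $sth = $table->select(['Title', 'URL'], { Status => 'open' })
#       or die $GT::SQL::error;
#   while (my $row = $sth->fetchrow_hashref) {
#       print "$row->{Title}\n";
#   }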
|
|
my $self = shift;
|
|
my $sel_opts = $self->{sel_opts} || [];
|
|
$self->{sel_opts} = [];
|
|
$self->name or return $self->fatal('NOTABLE');
|
|
|
|
# Connect to the database if we are not already connected
|
|
$self->connect or return;
|
|
|
|
# Get the list of select fields.
|
|
my (@fields);
|
|
for (@_) {
|
|
if (ref $_ eq 'ARRAY') { push @fields, @{$_} }
|
|
elsif (not ref $_) { push @fields, $_ }
|
|
}
|
|
@fields = grep defined && length, @fields;
|
|
# Extract the where clause and save it for future.
|
|
my ($where, $do_select) = _extract_where(@_);
|
|
$self->{last_where} = $where;
|
|
|
|
# Perform the select
|
|
my $sth = $self->{driver}->select(\@fields, $where, $sel_opts) or return;
|
|
|
|
$self->{last_hits} = undef;
|
|
my $rows = $sth->rows;
|
|
|
|
# Attempt to optimize a possible later call to hits(). If there was no limit,
|
|
# it's the number of rows. If there was a limit, and the rows returned was
|
|
# less than the limit (but still greater than 0), we can calculate it.
|
|
$sel_opts = join " ", @$sel_opts;
|
|
if ($sel_opts =~ /\bLIMIT\s+(\d+)(?:\s+OFFSET\s+(\d+)|\s*,\s*(\d+))?|\bOFFSET\s+(\d+)\s+LIMIT\s+(\d+)/i) {
|
|
my ($limit, $offset);
|
|
if (defined $3) { # MySQL-style, with an offset
|
|
($offset, $limit) = ($1, $3);
|
|
}
|
|
elsif (defined $4) { # Pg-style with OFFSET before LIMIT
|
|
($offset, $limit) = ($4, $5);
|
|
}
|
|
else {
|
|
($limit, $offset) = ($1, $2 || 0);
|
|
}
|
|
if ($rows > 0 and $rows < $limit) {
|
|
$self->{last_hits} = $offset + $rows;
|
|
}
|
|
}
|
|
else {
|
|
$self->{last_hits} = $rows;
|
|
}
|
|
return $sth;
|
|
}
|
|
|
|
$COMPILE{get} = __LINE__ . <<'END_OF_SUB';
|
|
sub get {
|
|
# -----------------------------------------------------------
|
|
# get()
|
|
# IN : primary key and format options, and fields wanted.
|
|
# OUT: array_ref/hash_ref on success, undef on failure.
|
|
#
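# Illustrative usage (a sketch; the key value and column names are
# hypothetical):
#
#   my $record = $table->get(42);                            # hash ref
#   my $row    = $table->get(42, 'ARRAY');                   # array ref
#   my $some   = $table->get({ ID => 42 }, 'HASH', ['Title', 'URL']);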
|
|
my $self = shift;
|
|
|
|
# Connect to the database if we are not already connected
|
|
$self->connect or return;
|
|
|
|
my (@keys, @pk, @sel, $cond, $method, $format, $cols);
|
|
$self->name or return $self->fatal('NOTABLE');
|
|
$cond = GT::SQL::Condition->new;
|
|
|
|
if (@_ == 0) { return $self->fatal(BADARGS => 'Usage: $obj->get(HASH or HASH_REF or CGI_OBJ)') }
|
|
elsif (ref $_[0] eq 'HASH') {
|
|
my $href = shift;
|
|
for (keys %{$href}) {
|
|
$cond->add($_, '=', $href->{$_});
|
|
}
|
|
}
|
|
else {
|
|
@keys = ref $_[0] eq 'ARRAY' ? @{shift()} : (shift);
|
|
@pk = @{$self->{schema}->{pk}};
|
|
while (@keys) {
|
|
$cond->add(shift(@pk), '=', shift(@keys));
|
|
}
|
|
}
|
|
|
|
$format = uc shift || 'HASH';
|
|
$cols = shift || [];
|
|
$method = $format eq 'ARRAY' ? 'fetchrow_arrayref' : 'fetchrow_hashref';
|
|
my $sth = $self->select($cond, $cols);
|
|
if ($sth) {
|
|
return $sth->$method();
|
|
}
|
|
else {
|
|
return;
|
|
}
|
|
}
|
|
END_OF_SUB
|
|
|
|
sub do_query {
|
|
# -----------------------------------------------------------
|
|
# $obj->do_query($query)
|
|
# $obj->do_query($query, \@args);
|
|
# ------------------------
|
|
# Performs SQL $query and returns a
|
|
# Query object as the result of this query.
|
|
#
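# Illustrative usage (a sketch; the query and placeholder value are
# examples only):
#
#   my $sth = $table->do_query(
#       'SELECT COUNT(*) FROM SomeTable WHERE Status = ?', ['open']
#   ) or die $GT::SQL::error;
#   my ($count) = $sth->fetchrow;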
|
|
my ($self, $query, $args) = @_;
|
|
|
|
$self->connect or return;
|
|
$query = $self unless (ref $self || $query);
|
|
|
|
# Show the query if debug is on.
|
|
$self->debug("Query: $query\n") if $self->{_debug} > 1;
|
|
|
|
# Do the query.
|
|
my $sth = $self->{driver}->prepare($query) or return;
|
|
if ($args and ref $args eq 'ARRAY') {
|
|
$sth->execute(@$args) or return;
|
|
}
|
|
else {
|
|
$sth->execute or return;
|
|
}
|
|
$self->{sel_opts} = [];
|
|
return $sth;
|
|
}
|
|
|
|
$COMPILE{do} = __LINE__ . <<'END_OF_SUB';
|
|
sub do {
|
|
my $self = shift;
|
|
return $self->do_query(@_);
|
|
}
|
|
END_OF_SUB
|
|
|
|
$COMPILE{reindex} = __LINE__ . <<'END_OF_SUB';
|
|
sub reindex {
|
|
# -----------------------------------------------------------
|
|
# $obj->reindex()
|
|
# -----------------------------------
|
|
# Reindexes the database if required
|
|
#
|
|
my $self = shift;
|
|
my $opts = shift;
|
|
|
|
$self->connect or return;
|
|
my $Indexer = $self->_get_indexer();
|
|
$Indexer->reindex_all( $self, $opts );
|
|
}
|
|
END_OF_SUB
|
|
|
|
$COMPILE{indexing} = __LINE__ . <<'END_OF_SUB';
|
|
sub indexing {
|
|
# -----------------------------------------------------------
|
|
# $obj->indexing(0/1);
|
|
# --------------------
|
|
# Enables/Disables indexing, spans life of object.
|
|
#
|
|
@_ == 2 and ($_[0]->{_index} = $_[1]);
|
|
return $_[0]->{_index};
|
|
}
|
|
END_OF_SUB
|
|
|
|
$COMPILE{prepare} = __LINE__ . <<'END_OF_SUB';
|
|
sub prepare {
|
|
# -----------------------------------------------------------
|
|
# Passes query straight through to dbh.
|
|
#
|
|
my ($self, $query) = @_;
|
|
$self->connect or return;
|
|
return $self->{driver}->prepare($query);
|
|
}
|
|
END_OF_SUB
|
|
|
|
sub name {
|
|
# -----------------------------------------------------------
|
|
# $obj->name;
|
|
# -----------
|
|
# Returns the name of the current table instance.
|
|
#
|
|
# $obj->name($table_name);
|
|
# -------------------------
|
|
# Sets the name for the table to create.
|
|
#
|
|
my $self = shift;
|
|
if (defined $_[0]) {
|
|
my $name = shift;
|
|
my $prefix = $self->{connect}->{PREFIX};
|
|
if (length $prefix) {
|
|
unless ($name =~ /^$prefix/) {
|
|
$name = $prefix . $name;
|
|
}
|
|
}
|
|
unless ($name =~ /^(\w+)$/) {
|
|
return $self->fatal(BADNAME => $name);
|
|
}
|
|
$self->{name} = $1;
|
|
|
|
# If a schema exists, a new GT::Config object is needed as the name just changed
|
|
$self->_new_schema if $self->{schema};
|
|
}
|
|
return $self->{name};
|
|
}
|
|
|
|
# -------------------------------------------------------------------------------------- #
|
|
# ACCESSOR METHODS #
|
|
# -------------------------------------------------------------------------------------- #
|
|
|
|
$COMPILE{cols} = __LINE__ . <<'END_OF_SUB';
|
|
sub cols {
|
|
# -----------------------------------------------------------
|
|
# $obj->cols;
|
|
# -----------
|
|
# Returns the hash structure for this tables
|
|
# cols.
|
|
#
|
|
# $obj->cols($hash_ref);
|
|
# ----------------------
|
|
# Sets the relations columns as specified by $hash_ref.
|
|
# the hash should look like { $col_name => { type => 'int' } }.
|
|
#
|
|
# $obj->cols($array_ref);
|
|
# -----------------------
|
|
# Just like $hash_ref, except an array ref. The array should look like:
|
|
# [ $col_name => { type => 'int' } ]. The difference between this and
|
|
# using a hash reference is that with the array ref pos will be automatically
|
|
# calculated and set in each column definition. The following two lines passed
|
|
# to cols() are equivalent and internally become the same thing:
|
|
#
|
|
# { $col1 => { type => 'int', pos => 1 }, $col2 => { type => 'text', pos => 2 } }
|
|
# [ $col1 => { type => 'int' }, $col2 => { type => 'text' } ]
|
|
#
|
|
# $obj->cols(
|
|
# $col1 => {
|
|
# type => 'int',
|
|
# not_null => 1
|
|
# },
|
|
# $col2 => { ... }
|
|
# );
|
|
# ----------------------
|
|
# Sets the relations columns as specified via method
|
|
# params.
|
|
#
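# Illustrative usage (a sketch; column names, types and sizes are
# hypothetical):
#
#   $table->cols(
#       ID    => { type => 'INT',     not_null => 1,   pos => 1 },
#       Title => { type => 'VARCHAR', size     => 255, pos => 2 },
#   );
#   my %cols = $table->cols;    # copy of the column definitions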
|
|
my $self = shift;
|
|
|
|
if (@_) {
|
|
if (@_== 1) {
|
|
my $arg = shift;
|
|
if (ref $arg eq 'HASH') {
|
|
$self->{schema}->{cols} = $arg;
|
|
}
|
|
elsif (ref $arg eq 'ARRAY' and not @$arg % 2) {
|
|
for (0 .. 0.5 * @$arg - 1) {
|
|
$arg->[2 * $_ + 1]->{pos} = $_ + 1;
|
|
}
|
|
$self->{schema}->{cols} = {@$arg};
|
|
}
|
|
else {
|
|
return $self->fatal(BADARGS => 'Usage: $obj->cols(HASH_REF or ARRAY_REF or HASH)');
|
|
}
|
|
}
|
|
elsif (not @_ % 2) { $self->{schema}->{cols} = {@_} }
|
|
else { return $self->fatal(BADARGS => 'Usage: $obj->cols(HASH_REF or ARRAY_REF or HASH)') }
|
|
|
|
my $name = $self->{name};
|
|
for (keys %{$self->{schema}->{cols}}) {
|
|
ref $self->{schema}->{cols}->{$_} eq 'HASH' or return $self->fatal(BADARGS => 'You must have a hash of hashes to specify your columns');
|
|
exists $self->{schema}->{cols}->{$_}->{type} or return $self->fatal(BADARGS => "Error in table '$name' with column '$_': no type defined.");
|
|
exists $self->{schema}->{cols}->{$_}->{pos} or return $self->fatal(BADARGS => "Error in table '$name' with column '$_': no position defined.");
|
|
}
|
|
}
|
|
|
|
return $self->{schema}->{cols} unless wantarray;
|
|
|
|
# Wantarray has been set so create a copy of the cols whose
|
|
# first and second level references can be clobbered.
|
|
# This assumes that the values side of the schema will
|
|
# always be hashrefs
|
|
my %cols_copy = %{$self->{schema}{cols}};
|
|
for my $col_name (keys %cols_copy) {
|
|
|
|
my %col_data = %{$cols_copy{$col_name}};
|
|
$cols_copy{$col_name} = \%col_data;
|
|
|
|
for (keys %col_data) {
|
|
if (ref $col_data{$_} eq 'HASH') {
|
|
$col_data{$_} = {%{$col_data{$_}}};
|
|
}
|
|
elsif (ref $col_data{$_} eq 'ARRAY') {
|
|
$col_data{$_} = [@{$col_data{$_}}];
|
|
}
|
|
}
|
|
}
|
|
|
|
return %cols_copy;
|
|
}
|
|
END_OF_SUB
|
|
|
|
$COMPILE{pk} = __LINE__ . <<'END_OF_SUB';
|
|
sub pk {
|
|
# -----------------------------------------------------------
|
|
# $obj->pk;
|
|
# ---------
|
|
# Returns the primary key columns for the current table. In scalar context,
|
|
# returns undef to indicate no primary key, or an array reference of column
|
|
# names. In list context you get a list of column names, or an empty list if
|
|
# no primary key exists.
|
|
#
|
|
# $obj->pk($array_ref);
|
|
# ----------------------
|
|
# Sets relation primary key, $array_ref is the reference to an array which
|
|
# looks like:
|
|
# ["FIELD1", ..., "FIELDN"]
|
|
#
|
|
# $obj->pk($field1, $field2, ...);
|
|
# ---------------------------------
|
|
# Sets relation primary key given the fields which are in parameter.
|
|
#
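# Illustrative usage (a sketch; column names are hypothetical):
#
#   $table->pk('ID');                    # single-column primary key
#   $table->pk(['UserID', 'GroupID']);   # composite primary key
#   my @pk = $table->pk;                 # list of primary key columns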
|
|
my $self = shift;
|
|
my @pk;
|
|
if (@_ == 0) {
|
|
my @pk = @{$self->{schema}->{pk}};
|
|
return wantarray ? @pk : @pk ? \@pk : undef;
|
|
}
|
|
elsif (@_ == 1) {
|
|
my $arg = shift;
|
|
if (ref $arg eq 'ARRAY') {
|
|
push @pk, @{$arg};
|
|
}
|
|
elsif (not ref $arg) {
|
|
push @pk, $arg;
|
|
}
|
|
else {
|
|
return $self->fatal(BADARGS => "Argument to ->pk must be an array ref or a list of scalars. You passed in $arg");
|
|
}
|
|
}
|
|
else {
|
|
for (@_) {
|
|
if (not ref $_) {
|
|
push @pk, @_;
|
|
}
|
|
else {
|
|
return $self->fatal(BADARGS => "Argument to ->pk must be an array ref or a list of scalars. You passed in @_");
|
|
}
|
|
}
|
|
}
|
|
@{$self->{schema}->{pk}} = @pk;
|
|
return wantarray ? @{$self->{schema}->{pk}} : $self->{schema}->{pk};
|
|
}
|
|
END_OF_SUB
|
|
|
|
$COMPILE{ai} = __LINE__ . <<'END_OF_SUB';
|
|
sub ai {
|
|
# -----------------------------------------------------------
|
|
# $obj->ai;
|
|
# ---------
|
|
# Returns the auto-increment column for the current
|
|
# table instance.
|
|
#
|
|
# $obj->ai($column);
|
|
# -------------------
|
|
# Sets the AUTO INCREMENT column.
|
|
#
|
|
my ($self, $ai) = @_;
|
|
ref $ai and return $self->fatal(BADARGS => "Argument to ->ai cannot be a reference");
|
|
$self->{schema}->{ai} = $ai if defined $ai;
|
|
return $self->{schema}->{ai}
|
|
}
|
|
END_OF_SUB
|
|
|
|
$COMPILE{search_driver} = __LINE__ . <<'END_OF_SUB';
|
|
sub search_driver {
|
|
# -----------------------------------------------------------
|
|
# $obj->search_driver;
|
|
# --------------------
|
|
# Returns the search driver column for the current
|
|
# table instance.
|
|
#
|
|
# can be 'INTERNAL', 'MYSQL', 'NONINDEXED'
|
|
#
|
|
# $obj->search_driver($column);
|
|
# -----------------------------
|
|
# Sets the Searching Driver column.
|
|
#
|
|
my ($self, $search_driver) = @_;
|
|
$search_driver and ref $search_driver and return $self->fatal(BADARGS => "Argument to ->search_driver must not be a reference");
|
|
$self->{schema}->{search_driver} = $search_driver if $search_driver;
|
|
if ( not defined $self->{schema}->{search_driver} ) {
|
|
my $indexer = $self->_get_indexer(1);
|
|
( ref $indexer ) =~ /::(\w+)::Indexer$/;
|
|
$self->{schema}->{search_driver} = $1;
|
|
}
|
|
return $self->{schema}->{search_driver};
|
|
}
|
|
END_OF_SUB
|
|
|
|
$COMPILE{index} = __LINE__ . <<'END_OF_SUB';
|
|
sub index {
|
|
# -----------------------------------------------------------
|
|
# $obj->index;
|
|
# ------------
|
|
# Returns a hash in list context and a hash ref
|
|
# in scalar context. This hash contains the index
|
|
# name as the keys and an array ref as the values.
|
|
# The array ref contains the fields that are part of
|
|
# the index that is the key.
|
|
#
|
|
# $obj->index($index_name, $col1, ..., $coln);
|
|
# -------------------------------------------------
|
|
# Sets an index called $index_name handling $col1,
|
|
# ..., $coln.
|
|
#
|
|
# $obj->index({
|
|
# $index1 => [field1, field2],
|
|
# $index2 => [field3, field4]
|
|
# });
|
|
# --------------------------------
|
|
# Sets indexes for this table specified by the key
|
|
# with the values as the fields.
|
|
#
|
|
my $self = shift;
|
|
if (@_ == 0) { return wantarray ? %{$self->{schema}->{index}} : $self->{schema}->{index} }
|
|
if (@_ == 1) {
|
|
my $arg = shift;
|
|
if (ref $arg eq 'HASH') {
|
|
$self->{schema}->{index} = $arg;
|
|
}
|
|
else {
|
|
return $self->fatal(BADARGS => 'Usage: $obj->index(INDEX_NAME, FIELD1, FIELD2 ...) or $obj->index(HASH_REF) or $obj->index')
|
|
}
|
|
}
|
|
else {
|
|
my $index_name = shift;
|
|
$self->{schema}->{index}->{$index_name} = [];
|
|
while (@_) {
|
|
my $arg = shift || last;
|
|
push @{$self->{schema}->{index}->{$index_name}}, (ref $arg eq 'ARRAY') ? $arg : [$arg];
|
|
}
|
|
}
|
|
|
|
for (keys %{$self->{schema}->{index}}) {
|
|
ref $self->{schema}->{index}->{$_} eq 'ARRAY' or return $self->fatal(BADARGS => "Index columns must be in the form of an array reference");
|
|
}
|
|
return wantarray ? %{$self->{schema}->{index}} : $self->{schema}->{index};
|
|
}
|
|
END_OF_SUB
|
|
|
|
sub subclass {
|
|
# -----------------------------------------------------------
|
|
# $obj->subclass;
|
|
# ---------------
|
|
# Returns the subclass for the current table.
|
|
# This subclass is what the objects are blessed
|
|
# into. This makes it easy to subclass per table object.
|
|
#
|
|
# $obj->subclass($subclass);
|
|
# ---------------------------
|
|
# Sets the subclass. $subclass should be a hash
|
|
# reference or a hash.
|
|
#
|
|
my $self = shift;
|
|
my $opt;
|
|
if (@_ == 0) { return wantarray ? %{$self->{schema}->{subclass}} : $self->{schema}->{subclass} }
|
|
elsif (@_ == 1 and ref $_[0] eq 'HASH') { $opt = shift }
|
|
elsif (defined $_[0] and @_ % 2 == 0) { $opt = {@_} }
|
|
else { return $self->fatal(BADARGS => 'Usage: $obj->subclass(HASH or HASH_REF)') }
|
|
|
|
for my $meth (qw/html relation table/) {
|
|
next unless exists $opt->{$meth};
|
|
if (ref $opt->{$meth} ne 'HASH') {
|
|
return $self->fatal(BADARGS => 'The hash that is passed into subclass() must be a hash of hashes');
|
|
}
|
|
my $val = {};
|
|
my $prefix = $self->{connect}->{PREFIX};
|
|
for (keys %{$opt->{$meth}}) {
|
|
my $v = $_;
|
|
if (length $prefix) {
|
|
unless (/^$prefix/) {
|
|
$v = $prefix . $v;
|
|
}
|
|
}
|
|
$val->{$meth}->{$v} = $opt->{$meth}->{$_};
|
|
}
|
|
$self->{schema}->{subclass}->{$meth} = $val->{$meth};
|
|
}
|
|
return 1;
|
|
}
|
|
|
|
sub unique {
|
|
# -----------------------------------------------------------
|
|
# $obj->unique;
|
|
# -------------
|
|
# Returns a hash in list context and a hash ref
|
|
# in scalar context. This hash contains the unique
|
|
# index names as the keys and array refs as the values.
|
|
# The array refs contain the fields that are part of
|
|
# the unique index.
|
|
#
|
|
# $obj->unique($index_name, $col1, ..., $coln);
|
|
# ---------------------------------------------
|
|
# Sets a unique index called $index_name handling $col1,
|
|
# ..., $coln.
|
|
#
|
|
# $obj->unique({
|
|
# $index1 => [field1, field2],
|
|
# $index2 => [field3, field4]
|
|
# });
|
|
# --------------------------------
|
|
# Sets uniques for this table specified by the key
|
|
# with the values as the fields.
|
|
#
|
|
my $self = shift;
|
|
if (@_ == 0) { return wantarray ? %{$self->{schema}->{unique}} : $self->{schema}->{unique} }
|
|
if (@_ == 1) {
|
|
my $arg = shift;
|
|
if (ref $arg eq 'HASH') {
|
|
$self->{schema}->{unique} = $arg;
|
|
}
|
|
else {
|
|
return $self->fatal(BADARGS => 'Usage: $obj->unique(INDEX_NAME, FIELD1, FIELD2 ...) or $obj->unique(HASH_REF) or $obj->unique')
|
|
}
|
|
}
|
|
else {
|
|
my $index_name = shift;
|
|
$self->{schema}->{unique}->{$index_name} = [];
|
|
while (@_) {
|
|
my $arg = shift || last;
|
|
push @{$self->{schema}->{unique}->{$index_name}}, (ref $arg eq 'ARRAY') ? $arg : [$arg];
|
|
}
|
|
}
|
|
|
|
for (keys %{$self->{schema}->{unique}}) {
|
|
ref $self->{schema}->{unique}->{$_} eq 'ARRAY' or return $self->fatal(BADARGS => "Index columns must be in the form of an array reference");
|
|
}
|
|
|
|
return wantarray ? %{$self->{schema}->{unique}} : $self->{schema}->{unique};
|
|
}
|
|
|
|
$COMPILE{fk} = __LINE__ . <<'END_OF_SUB';
|
|
sub fk {
|
|
# -----------------------------------------------------------
|
|
# $obj->fk;
|
|
# ---------
|
|
# Returns a hash in list context and a hash ref in scalar
|
|
# context. This hash ref contains the foreign table as the
|
|
# key and a hash ref as the value. The hash ref has keys as
|
|
# the field in the current table that relates to fields in
|
|
# the foreign table. The values are the fields in the foreign
|
|
# table that the fields in this table relate to.
|
|
#
|
|
# $obj->fk({
|
|
# RELATION_NAME => {
|
|
# SOURCE_FIELD_1 => TARGET_FIELD_2,
|
|
# ...
|
|
# SOURCE_FIELD_n => TARGET_FIELD_n
|
|
# }
|
|
# });
|
|
# ----------------------------------------------------------
|
|
# You can set all the relations for the table this way. This
# sets the source and target fields for the given relation
# name. Source and target fields must have the same type.
|
|
#
|
|
# $obj->fk(RELATION_NAME, { SOURCE_FIELD_1 => TARGET_FIELD });
|
|
# ------------------------------------------------------------------
|
|
# Sets the foreign key relations for one relation.
|
|
#
|
|
# This structure introduces a limitation: a table cannot
# define two separate foreign key relations to the same
# target table, which should really not be a problem.
|
|
#
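# Illustrative usage (a sketch; the 'Category' table and the column names
# are hypothetical). This declares that the local CategoryID column refers
# to the ID column of the Category table:
#
#   $table->fk({ Category => { CategoryID => 'ID' } });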
|
|
my $self = shift;
|
|
@_ or return wantarray ? %{$self->{schema}->{fk}} : $self->{schema}->{fk};
|
|
|
|
my %set;
|
|
if (@_ == 1) {
|
|
my $arg = shift;
|
|
if (ref $arg eq 'HASH') {
|
|
%set = %$arg;
|
|
}
|
|
else {
|
|
return $self->{schema}->{fk}->{$arg};
|
|
}
|
|
}
|
|
elsif (@_ == 2 and ref $_[1] eq 'HASH') {
|
|
%set = @_;
|
|
}
|
|
else {
|
|
return $self->fatal(BADARGS => 'Usage: $obj->fk(TABLE_NAME, HASH_REF or HASH_REF) or $obj->fk');
|
|
}
|
|
my $prefix = $self->{connect}->{PREFIX};
|
|
for my $table (keys %set) {
|
|
my $prefixed = $table;
|
|
$prefixed = $prefix . $prefixed if length $prefix and $table !~ /^\Q$prefix/;
|
|
$self->{schema}->{fk}->{$prefixed} = $set{$table};
|
|
}
|
|
|
|
# Make sure the arguments passed in were correct.
|
|
for my $ftable (keys %{$self->{schema}->{fk}}) {
|
|
ref $self->{schema}->{fk}->{$ftable} eq 'HASH' or return $self->fatal(BADARGS => "fk must contain a hash of hashes");
|
|
}
|
|
|
|
$self->_update_fk_tables or return;
|
|
|
|
return wantarray ? %{$self->{schema}->{fk}} : $self->{schema}->{fk};
|
|
}
|
|
END_OF_SUB
|
|
|
|
$COMPILE{fk_tables} = __LINE__ . <<'END_OF_SUB';
|
|
sub fk_tables {
|
|
# -----------------------------------------------------------
|
|
# Used to set the tables that reference this one.
|
|
#
|
|
my $self = shift;
|
|
if (@_ == 0) { return wantarray ? @{$self->{schema}->{fk_tables}} : $self->{schema}->{fk_tables} }
|
|
elsif (@_ == 1) {
|
|
my $arg = shift;
|
|
if (ref $arg eq 'ARRAY') {
|
|
$self->{schema}->{fk_tables} = [@$arg];
|
|
}
|
|
else {
|
|
@{$self->{schema}->{fk_tables}} = ($arg);
|
|
}
|
|
}
|
|
else {
|
|
@{$self->{schema}->{fk_tables}} = @_;
|
|
}
|
|
for (@{$self->{schema}->{fk_tables}}) {
|
|
if (ref $_) {
|
|
return $self->fatal(BADARGS => "Arguments to fk_table must be scalars");
|
|
}
|
|
}
|
|
my $prefix = $self->{connect}->{PREFIX};
|
|
for (@{$self->{schema}->{fk_tables}}) {
|
|
if (length $prefix) {
|
|
unless (/^$prefix/) {
|
|
$_ = $prefix . $_;
|
|
}
|
|
}
|
|
}
|
|
return wantarray ? @{$self->{schema}->{fk_tables}} : $self->{schema}->{fk_tables};
|
|
}
|
|
END_OF_SUB
|
|
|
|
$COMPILE{tree} = __LINE__ . <<'END_OF_SUB';
|
|
sub tree {
|
|
# -----------------------------------------------------------
|
|
# An accessor for the GT::SQL::Tree object associated with
|
|
# this table. Creating/dropping a tree is done through the
|
|
# table editor. If no tree exists, you get undef and a warning
|
|
# occurs.
|
|
my $self = shift;
|
|
return $self->warn(NOTREE => $self->name()) unless ($self->{schema}->{tree});
|
|
if ($self->{connect}->{obj_cache} and my $cached = $GT::SQL::OBJ_CACHE{"TREE\0$self->{name}\0$self->{connect}->{def_path}"}) {
|
|
$self->debug("Returning GT::SQL::Tree object for table $self->{name} from cache") if $self->{_debug};
|
|
return $cached;
|
|
}
|
|
|
|
require GT::SQL::Tree;
|
|
$self->debug("Creating new GT::SQL::Tree object for table " . $self->name()) if $self->{_debug};
|
|
my $tree = GT::SQL::Tree->new({
|
|
table => $self,
|
|
debug => $self->{_debug}
|
|
});
|
|
|
|
if ($self->{connect}->{obj_cache}) {
|
|
$GT::SQL::OBJ_CACHE{"TREE\0$self->{name}\0$self->{connect}->{def_path}"} = $tree;
|
|
}
|
|
|
|
return $tree;
|
|
}
|
|
END_OF_SUB
|
|
|
|
$COMPILE{check_schema} = __LINE__ . <<'END_OF_SUB';
|
|
sub check_schema {
|
|
# -----------------------------------------------------------
|
|
# Checks the current table schema for inconsistencies in the
|
|
# structure.
|
|
#
|
|
my $self = shift;
|
|
my %cols = %{$self->{schema}->{cols}};
|
|
|
|
# Go through each column and check them
|
|
for my $col (keys %cols) {
|
|
# Make sure we have a position field.
|
|
if (! exists $cols{$col}->{pos}) {
|
|
$self->debug("Trying to create a column that does not have a position field.") if $self->{_debug};
|
|
return $self->fatal(NOPOS => $col);
|
|
}
|
|
|
|
# Primary key cannot be a "text" or "blob" type and must be "not null".
|
|
if ($self->_is_pk($col)) {
|
|
unless ($self->{schema}->{cols}->{$col}->{not_null}) {
|
|
$self->debug("Trying to use a primary key without making it not null. Adding not_null to $col") if $self->{_debug};
|
|
$self->{schema}->{cols}->{$col}->{not_null} = 1;
|
|
}
|
|
if ($cols{$col}->{type} =~ /TEXT|BLOB/i) {
|
|
return $self->fatal(PKTEXT => $col);
|
|
}
|
|
}
|
|
|
|
# Unique must be "not null" and cannot be a "text" or "blob" type.
|
|
for (keys %{$self->{schema}->{unique}}) {
|
|
if (grep /^\Q$col\E$/, @{$self->{schema}->{unique}->{$_}}) {
|
|
unless ($self->{schema}->{cols}->{$col}->{not_null}) {
|
|
$self->debug("unique key $col is not NOT_NULL. Adding to NOT_NULL") if ($self->{_debug});
|
|
$self->{schema}->{cols}->{$col}->{not_null} = 1;
|
|
}
|
|
if ($cols{$col}->{type} =~ /TEXT|BLOB/i) {
|
|
return $self->fatal(UNIQTEXT => $col);
|
|
}
|
|
}
|
|
}
|
|
|
|
# Index must be "not null" and cannot be a "text" or "blob" type.
|
|
for (keys %{$self->{schema}->{index}}) {
|
|
if (grep /^\Q$col\E$/, @{$self->{schema}->{index}->{$_}}) {
|
|
unless ($self->_is_not_null($col)) {
|
|
$self->debug("index key $col is not NOT_NULL. Adding to NOT_NULL") if $self->{_debug};
|
|
$self->{schema}->{cols}->{$col}->{not_null} = 1;
|
|
}
|
|
if ($cols{$col}->{type} =~ /TEXT|BLOB/i) {
|
|
return $self->fatal(INDXQTEXT => $col) if $self->{_debug};
|
|
}
|
|
}
|
|
}
|
|
|
|
# Autoincrement must be an "INT" type and must be the only "PRIMARY KEY"
|
|
$self->{schema}->{ai} ||= '';
|
|
if ($col eq $self->{schema}->{ai}) {
|
|
if ($cols{$col}->{type} !~ /INT/i) {
|
|
return $self->fatal(AINOTPK => $col);
|
|
}
|
|
if (!$self->_is_pk($col) or @{$self->{schema}->{pk}} > 1) {
|
|
$self->debug("AUTO_INCREMENT column $col specified but is not the primary key. Making $col primary key.") if $self->{_debug};
|
|
@{$self->{schema}->{pk}} = ($col);
|
|
}
|
|
}
|
|
|
|
# File columns must point to existing directories where we have write access!
|
|
if ($cols{$col}->{form_type} and uc $cols{$col}->{form_type} eq 'FILE') {
|
|
$cols{$col}->{file_save_in} or return $self->fatal(NOFILESAVEIN => $col);
|
|
return $self->fatal(NODIRPRIV => $cols{$col}->{file_save_in})
|
|
unless -w $cols{$col}->{file_save_in};
|
|
}
|
|
}
|
|
|
|
# Circularity check
|
|
$self->_circularity_check or return undef;
|
|
|
|
return 1;
|
|
}
|
|
END_OF_SUB
|
|
|
|
$COMPILE{ordered_columns} = __LINE__ . <<'END_OF_SUB';
sub ordered_columns {
# -----------------------------------------------------------
# $obj->ordered_columns;
# ----------------------
# Returns the current table columns ordered by their "pos"
# attribute.
#
# Columns that have no pos specified are appended to the end
# of the result array in lexicographical order.
#
    my $self = shift;
    my @cols = ();
    my @append = ();
    my $cols = $self->{schema}->{cols};
    for my $col (sort {
        $cols->{$a}->{pos} && $cols->{$b}->{pos} ? $cols->{$a}->{pos} <=> $cols->{$b}->{pos} :
        $cols->{$a}->{pos} && !$cols->{$b}->{pos} ? -1 :
        $cols->{$b}->{pos} && !$cols->{$a}->{pos} ? 1 :
        ($a cmp $b)
    } keys %{$cols}) {
        push @cols, $col;
    }

    return @cols;
}
END_OF_SUB

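# Illustrative usage sketch (column names hypothetical): with pos values of
# 1 and 2 on ID and Title, and no pos on Notes, this would return
# ('ID', 'Title', 'Notes'):
#
#     my @ordered = $table->ordered_columns;
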
$COMPILE{all_indexes} = __LINE__ . <<'END_OF_SUB';
sub all_indexes {
# -----------------------------------------------------------
# $obj->all_indexes;
# ------------------
# Returns all the column names used by the indexes and the
# uniques (an array reference in scalar context).
#
    my $self = shift;
    my @keys = map { @$_ } values %{$self->unique}, values %{$self->index};
    return wantarray ? @keys : \@keys;
}
END_OF_SUB

$COMPILE{save_def} = __LINE__ . <<'END_OF_SUB';
sub save_def { shift->save_state(@_) }
END_OF_SUB

$COMPILE{save_state} = __LINE__ . <<'END_OF_SUB';
sub save_state {
# -----------------------------------------------------------
# $obj->save_state;
# ----------------------------
# Saves table structure in $self->{connect}->{def_path}/table.def, and
# deletes the table from the object cache.
#
    my $self = shift;
    $self->debug("Saving state for " . $self->name) if $self->{_debug} and $self->{_debug} > 1;
    $self->{schema}->save();
    $self->debug("State saved for " . $self->name) if $self->{_debug} and $self->{_debug} > 1;

    my $cache_key = join("\0", 'TABLE', $self->{name}, $self->{connect}->{def_path});
    delete $GT::SQL::OBJ_CACHE{$cache_key};

    return 1;
}
END_OF_SUB

$COMPILE{file_info} = __LINE__ . <<'END_OF_SUB';
sub file_info {
# -------------------------------------------------------------------
# $obj->file_info('ColumnName', $primary_key);
# ------------------------------
# Returns the file information associated with the column for the
# given primary key.
#
    my $self = shift;
    require GT::SQL::File;
    my $File = GT::SQL::File->new({ parent_table => $self, connect => $self->{connect} });
    return $File->file_info(@_);
}
END_OF_SUB

$COMPILE{file_rescan} = __LINE__ . <<'END_OF_SUB';
sub file_rescan {
# -------------------------------------------------------------------
    my $self = shift;
    require GT::SQL::File;
    my $File = GT::SQL::File->new({ parent_table => $self, connect => $self->{connect} });
    return $File->rescan();
}
END_OF_SUB

sub check_values {
# -------------------------------------------------------------------
# Checks to see that the values for an insert are legal to
# be inserted. Returns false on error, true on success.
#
    my ($self, $set) = @_;

    # Check to ensure the values are valid
    my %cols = %{$self->{schema}->{cols}};
    my $ai = $self->{schema}->{ai};
    for my $col (keys %$set) {
        next if ($ai and $ai eq $col);
        if (ref $set->{$col} eq 'ARRAY') {
            require GT::SQL::Display::HTML;
            $set->{$col} = join $GT::SQL::Display::HTML::INPUT_SEPARATOR, sort @{$set->{$col}};
        }
        $self->_check_value($col, $cols{$col}, $set->{$col});
    }
    if (ref $self->{_error} and @{$self->{_error}}) {
        $GT::SQL::error = join "\n", @{$self->{_error}};
        return;
    }
    return 1;
}

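# Illustrative sketch only (column name hypothetical): check_values() is what
# _check_insert() relies on for per-column validation, and a failed check
# leaves the messages in $GT::SQL::error:
#
#     unless ($table->check_values({ Title => 'foobar' })) {
#         warn $GT::SQL::error;
#     }
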
# -------------------------------------------------------------------------------------- #
#                                    PRIVATE FUNCTIONS                                    #
# -------------------------------------------------------------------------------------- #
$COMPILE{_update_fk_tables} = __LINE__ . <<'END_OF_SUB';
sub _update_fk_tables {
# -------------------------------------------------------------------
# Updates the fk_tables list of every table that this table
# references with a foreign key.
#
    my $self = shift;
    for my $table (keys %{$self->{schema}->{fk}}) {
        my $foreign_table = $table eq $self->{name}
            ? $self
            : ($self->new_table($table) or return $self->fatal(FKNOTABLE => $table, $GT::SQL::error));
        $foreign_table->_add_fk_table($self->{name})
            and $foreign_table->save_state();
    }
    return 1;
}
END_OF_SUB

$COMPILE{_add_fk_table} = __LINE__ . <<'END_OF_SUB';
sub _add_fk_table {
# -----------------------------------------------------------------------------
# Takes a foreign table name. The foreign table is added if it doesn't already
# exist in $self's fk_tables schema. Any duplicates are removed. This is to
# prevent the same table appearing several times in fk_tables. You still need
# to ->save_state() after calling this. Returns 1 if anything changed, undef
# otherwise.
#
    my ($self, $add) = @_;
    my %have = map { $_ => 1 } @{$self->{schema}->{fk_tables}};
    push @{$self->{schema}->{fk_tables}}, $add unless $have{$add};
    return $have{$add} ? undef : 1;
}
END_OF_SUB

$COMPILE{_circularity_check} = __LINE__ . <<'END_OF_SUB';
sub _circularity_check {
# -------------------------------------------------------------------
# This function loops through all the tables in the current
# database. If a circular reference is detected, then a
# warning is printed and FALSE is returned. If no circular
# references are detected, TRUE is returned.
#
    my $self = shift;
    my (%cols, @tables, %tables);

    return 1 unless keys %{$self->{schema}->{fk}}; # If there are no foreign keys there is nothing to do.

    my $name = $self->name;

    @tables = $name;
    $tables{$name}++;

    for (my $i = 0; $i < @tables; $i++) {
        return $self->fatal('CIRCULARLIMIT') if $i >= 100;

        my $table = $tables[$i];
        my $new = ($table eq $name) ? $self : $self->new_table($table) or return $self->fatal(FKNOTABLE => $table, $GT::SQL::error);
        for my $table_name (keys %{$new->{schema}->{fk}}) {
            my %this; # Allows for multiple fk's from the same table to the same key
            for my $column (keys %{$new->{schema}->{fk}->{$table_name}}) {
                my $tc = "$table: $table_name.$new->{schema}->{fk}->{$table_name}->{$column}";
                $self->debug("Found foreign key in $tc") if $self->{_debug};
                if (not $this{$tc}++ and $cols{$tc}++) {
                    $self->debug("$tc was already found!") if $self->{_debug};
                    return $self->warn(CIRCULAR => $tc);
                }
                splice @tables, $i + 1, 0, $table_name unless $tables{$table_name}++;
            }
        }
    }

    return 1;
}
END_OF_SUB


$COMPILE{_check_timestamp} = __LINE__ . <<'END_OF_SUB';
sub _check_timestamp {
# -------------------------------------------------------------------
# Won't modify a record if the passed in timestamp is older than
# what's in the database.
#
    my ($self, $keys, $set) = @_;

    # first check to see if we even need to look up the orig timestamp.
    my $auto = $self->time_check;
    return 1 unless ($auto);
    my $found = 0;
    for (keys %$auto) {
        exists $set->{$_} and ($found = 1); # should only be one timestamp.
    }
    return 1 unless ($found);

    # if we got here, then we do a search on the record and compare timestamps.
    my $pk = $self->{schema}->{pk};
    my $cond = GT::SQL::Condition->new;
    my @res;
    for my $key (@$pk) {
        $cond->add($key, "=", $keys->{$key});
    }
    for my $tmstmp (keys %$auto) {
        push @res, $tmstmp;
        $cond->add($tmstmp, ">", $set->{$tmstmp});
        delete $set->{$tmstmp};
    }
    my $sth = $self->select($cond, \@res) or return;
    if ($sth->fetchrow_arrayref) {
        return $self->warn('ALREADYCHANGED');
    }
    else {
        return 1;
    }
}
END_OF_SUB

sub _check_insert {
# -------------------------------------------------------------------
# Check to make sure an insert is properly set up.
#
    my ($self, $set, $cond) = @_;
    my @indexes;
    my %indx_hash = $self->unique;
    push @indexes, values %indx_hash if (keys %indx_hash);

    # Add the primary key to the list of uniques
    if (@{$self->{schema}->{pk}} and ! $self->{schema}->{ai}) {
        push @indexes, $self->{schema}->{pk};
    }

    # Check that columns that aren't in the insert are not not_null columns. This
    # check is done here rather than in _check_value() because _check_value() is
    # also used by update(). _check_value() will handle all other not_null cases.
    while (my ($c, $col) = each %{$self->{schema}->{cols}}) {
        next if exists $set->{$c};
        my $default = $col->{default};
        if ($col->{not_null} and                                            # Only check for not_null columns
            (not $self->{schema}->{ai} or $c ne $self->{schema}->{ai}) and  # But not the auto-increment field
            (not defined $default or $default eq '')) {                     # And only when there isn't a default
            $self->warn(NOTNULL => $col->{form_display} || $c);
        }
    }

    # Check that the unique columns are really unique.
    my $check = {};
    INDEX: for my $index (@indexes) {
        my $check = {};
        COL: for my $col (@$index) {
            next INDEX if ($col eq $self->{schema}->{ai});
            $check->{$col} = $set->{$col};
        }
        my $rows = $self->count($check);
        if ($rows) {
            $self->warn(UNIQUE => join(",", map $self->{schema}->{cols}->{$_}->{form_display} || $_, keys %$check), join(",", values %$check));
        }
    }
    # Check the values to make sure they are ok.
    $self->check_values($set);

    # Join the list of errors.
    my @errors = (ref($self->{_error}) and @{$self->{_error}}) ? @{$self->{_error}} : ();
    if (@errors) {
        $GT::SQL::error = join "\n", @errors;
        return;
    }
    return 1;
}

sub _check_update {
# -------------------------------------------------------------------
# Checks whether any of the columns being set belong to a unique
# index. If so, performs a select on the table to make sure the
# update would not violate a unique constraint; if it would, undef
# is returned and the error is set in the package error variable.
#
    my ($self, $set, $cond) = @_;

    # Turn off warnings here (too much work to remove uninitialized values from
    # returned data).
    local $^W = 0;

    # Ensure that columns that are NOT NULL have not been specified as null
    my %cols = %{$self->{schema}->{cols}};
    for my $col (keys %{$set}) {
        if (ref $set->{$col} eq 'ARRAY') {
            require GT::SQL::Display::HTML;
            $set->{$col} = join($GT::SQL::Display::HTML::INPUT_SEPARATOR, sort @{$set->{$col}});
        }
        $self->_check_value($col, $cols{$col}, $set->{$col}) or return;
    }
    my %indx_hash = $self->unique;
    my @indexes = values %indx_hash;

    # Add the primary key to the list of uniques
    my $pk = $self->{schema}->{pk};
    $pk = ref $pk ? $pk : [$pk];
    push @indexes, $pk unless $self->{schema}->{ai};

    # If there are no uniques, then return previous errors, or return 1.
    if (! @indexes) {
        if (ref $self->{_error} and @{$self->{_error}}) {
            $GT::SQL::error = join "\n", @{$self->{_error}};
            return;
        }
        else {
            return 1;
        }
    }
    # If the update isn't changing any unique columns, then there's no need to
    # perform the select later in the code.
    else {
        my $updates_unique;
        INDEX: for my $index (@indexes) {
            for (@$index) {
                if (exists $set->{$_}) {
                    $updates_unique = 1;
                    last INDEX;
                }
            }
        }
        return 1 unless $updates_unique;
    }

    # Only request what has changed plus the primary key and any uniques
    my %changes = ();
    for (keys %$set) { $changes{$_} = 1 }
    for (@$pk) { $changes{$_} = 1 }
    for my $index (@indexes) {
        for (@$index) {
            $changes{$_} = 1;
        }
    }

    # Fetch records to make sure we don't break a unique clause.
    my $sth = $self->select(keys(%changes), $cond) or return;
    my @marked = ();
    RECORD: while (my $rec = $sth->fetchrow_hashref) {

        # Go through all the indexes for this table
        for my $i (0 .. $#indexes) {

            # A hash to build the count query out of
            my $count_check = {};

            # If the record is different than the one in the database
            my $match = 0;
            for (@{$indexes[$i]}) {
                if (defined $set->{$_} and $set->{$_} ne $rec->{$_}) {
                    $match = 1;
                }
                $count_check->{$_} = $set->{$_};
            }

            # It was not different so we continue to the next set of uniques
            $match or next;

            # It was different so we need to make a count select to see if it is possible
            # to do this insert
            if ($self->count($count_check)) {

                # The count returned true so there was a duplicate record
                $self->warn(UNIQUE => join(',' => map $set->{$_}, @{$indexes[$i]}), join(',' => @{$indexes[$i]}));
                last RECORD;
            }
            else {
                # The count returned false so there was not a duplicate record,
                # so if the record is already marked we return false
                if ($marked[$i]) {
                    $self->warn(UNIQUE => join(',' => map $set->{$_}, @{$indexes[$i]}), join(',' => @{$indexes[$i]}));
                    last RECORD;
                }
                else {
                    # else we mark the record.
                    $marked[$i] = 1;
                }
            }
        }
    }

    # Everything should have gone fine, so return true - the record is
    # updatable.
    if (ref $self->{_error} and @{$self->{_error}}) {
        $GT::SQL::error = join "\n", @{$self->{_error}};
        return;
    }
    else {
        return 1;
    }
}

sub _check_value {
# -------------------------------------------------------------------
# Checks to see if a value is valid.
#
    my ($self, $name, $column, $value) = @_;

    my $regex = '';
    if ($column->{not_null} and not defined $value) {
        $self->warn(NOTNULL => $column->{form_display} || $name);
    }
    if ($column->{type} eq 'ENUM' and $value) {
        $regex = '^(?:' . join('|', map quotemeta, @{$column->{values}}) . ')$';
    }
    elsif (defined $value) {
        unless ($regex = $column->{regex}) {
            my $sign = $column->{unsigned} ? '\+' : '[+-]';
            if ($column->{type} eq 'INTEGER' or $column->{type} =~ /INT$/) {
                $regex = '^' . $sign . '?\d+$';
            }
            elsif ($column->{type} =~ /^(?:REAL|FLOAT|DOUBLE|DECIMAL)$/) {
                $regex = '^' . $sign . '?(?=\d|\.\d)\d*(\.\d*)?(?:[eE][+-]?\d+)?$';
            }
        }
    }

    if ($regex and not ref $value) {
        if (eval { $value !~ /$regex/ }) {
            $self->warn(ILLEGALVAL => $column->{form_display} || $name, $value);
        }
        elsif ($@) {
            $self->warn(REGEXFAIL => $regex);
        }
    }
    if (ref $self->{_error} and @{$self->{_error}}) {
        $GT::SQL::error = join "\n", @{$self->{_error}};
        return;
    }
    return 1;
}

sub _extract_where {
# -------------------------------------------------------------------
# Takes the user's input and extracts any hash refs or condition
# objects, merging them into a single Condition object that is
# returned. Also returns a flag indicating whether a condition
# object (rather than just hash refs) was passed in.
#
    my @args = @_;
    my $cond = GT::SQL::Condition->new;
    my $do_select = 0;
    for (@args) {
        if (ref eq "HASH") {
            while (my ($col, $val) = each %$_) {
                $cond->add($col => '=' => $val);
            }
        }
        elsif (ref eq 'GT::SQL::Condition') {
            $do_select = 1;
            $cond->add($_->clone);
        }
    }
    return ($cond, $do_select);
}

sub _build_cond {
# -------------------------------------------------------------------
# Builds a GT::SQL::Condition object for selects and deletes from
# input that may be a Condition object, a hash ref, or an array ref.
#
    my $self = shift;
    my $cond = shift;

    my $cols = $self->{schema}->{cols};

    if (ref $cond eq 'GT::SQL::Condition') {
        return $cond->clone;
    }
    elsif (ref $cond eq 'HASH') {
        my $tmp = new GT::SQL::Condition;
        for my $key (keys %{$cond}) {
            next unless exists $cols->{$key};
            if (ref $cond->{$key} eq 'ARRAY') {
                $tmp->add($key => IN => $cond->{$key});
            }
            elsif (defined $cond->{$key}) {
                $tmp->add($key => '=' => $cond->{$key});
            }
            else {
                $tmp->add($key => 'IS' => \'NULL');
            }
        }
        return $tmp;
    }
    elsif (ref $cond eq 'ARRAY') {
        my $tmp = new GT::SQL::Condition(@$cond);
        return $tmp->clone;
    }
    $self->fatal(BADARGS => "_build_cond takes only a condition, array ref, or hash ref. Not: '$cond'");
}

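# Illustrative sketch of the three accepted inputs (column names hypothetical):
#
#     $self->_build_cond({ Status => 'open', ID => [1, 2, 3] });  # hash ref: '=' or IN per key
#     $self->_build_cond([Status => '=' => 'open']);              # array ref handed to Condition->new
#     $self->_build_cond($condition);                             # existing Condition object is cloned
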
sub _build_set {
# -------------------------------------------------------------------
# Internal use. Builds the set options for the query.
#
    my $self = shift;
    my $cond = shift;

    my $cols = $self->{schema}->{cols};

    if (ref $cond eq 'GT::SQL::Condition') {
        return $cond;
    }
    elsif (ref $cond eq 'HASH') {
        my $tmp = new GT::SQL::Condition;
        $tmp->bool(',');
        for my $key (keys %{$cond}) {
            $tmp->add($key, "=", $cond->{$key}) if exists $cols->{$key};
        }
        return $tmp;
    }
    elsif (ref $cond eq 'ARRAY') {
        my $tmp = new GT::SQL::Condition (@{$cond}, ',');
        return $tmp;
    }
    $self->fatal(BADARGS => "_build_set takes only a condition, array ref, or hash ref. Not: '$cond'");
}

$COMPILE{_check_keys} = __LINE__ . <<'END_OF_SUB';
sub _check_keys {
# -------------------------------------------------------------------
# Checks to see if the arguments passed into
# delete contain the externally linked columns
#
    my ($self, $where) = @_;
    ref $where or return $self->fatal(BADARGS => '_check_keys');
    my $cond = ref $where eq 'HASH' ? $where : $where->as_hash;
    for ($self->fk_tables) {
        my $new_schema = $self->new_table($_) or return $self->fatal(FKNOTABLE => $_, $GT::SQL::error);
        my %hash = $new_schema->fk;
        my $name = $self->name;

        for (values %{$hash{$name}}) {
            return unless exists $cond->{$_};
        }
    }
    return 1;
}
END_OF_SUB

$COMPILE{_do_opt} = __LINE__ . <<'END_OF_SUB';
sub _do_opt {
# -------------------------------------------------------------------
# Does a select or delete based on the option
#
    my ($self, $opt, $sel_hashr, $table_name) = @_;
    my $new_me = $self->new_table($table_name) or return $self->fatal(FKNOTABLE => $table_name, $GT::SQL::error);
    if ($opt eq 'cascade') {
        my $cond;
        if ($self->{schema}->{tree} and keys %$sel_hashr > 1 and $self->tree->{tree}->name() eq $new_me->name()) {
            $cond = [];
            for (keys %$sel_hashr) {
                push @$cond, GT::SQL::Condition->new($_ => '=' => $sel_hashr->{$_});
            }
        }
        else {
            $cond = $sel_hashr;
        }
        if (ref $cond eq 'ARRAY') {
            for (@$cond) {
                $new_me->delete($_) or return;
            }
        }
        else {
            $new_me->delete($cond) or return;
        }
    }
    else {
        return $self->warn(DEPENDENCY => $table_name) if $new_me->count($sel_hashr);
    }
    return 1;
}
END_OF_SUB

$COMPILE{_delete_cond} = __LINE__ . <<'END_OF_SUB';
sub _delete_cond {
# -------------------------------------------------------------------
# Performs the delete based on a condition object
#
    my ($self, $where, $opt) = @_;
    my $cond = ref $where eq 'HASH' ? $where : $where->as_hash;
    for my $fktable (@{$self->fk_tables}) {
        my $new_schema = $self->new_table($fktable) or return $self->fatal(FKNOTABLE => $fktable, $GT::SQL::error);
        my %fk = $new_schema->fk;
        my $fk_href = $fk{$self->name};
        my $sel_hashr = {};
        while (my ($k, $v) = each %$fk_href) {
            $sel_hashr->{$k} = $cond->{$v} if exists $cond->{$v};
        }

        return $self->fatal(FKMISSING => $fktable, $self->name, $fktable) unless keys %$sel_hashr;
        $self->_do_opt($opt, $sel_hashr, $fktable) or return;
    }
    return 1;
}
END_OF_SUB

$COMPILE{_delete_select} = __LINE__ . <<'END_OF_SUB';
sub _delete_select {
# -------------------------------------------------------------------
# Performs the delete based on the cascade
# option
#
    my ($self, $sth, $opt) = @_;
    my $fk_del;
    my $data = $sth->fetchall_hashref;
    for my $fktable (@{$self->fk_tables}) {
        my $new_schema = $self->new_table($fktable) or return $self->fatal(FKNOTABLE => $fktable, $GT::SQL::error);
        my %fk = $new_schema->fk;
        my $fk_href = $fk{$self->name};
        my $sel_hashr = {};
        for my $row (@$data) {
            for my $fk (keys %$fk_href) {
                push @{$sel_hashr->{$fk}}, $row->{$fk_href->{$fk}};
            }
        }
        $self->_do_opt($opt, $sel_hashr, $fktable) or return if keys %$sel_hashr;
    }

    return 1;
}
END_OF_SUB

$COMPILE{_delete_cleanup} = __LINE__ . <<'END_OF_SUB';
sub _delete_cleanup {
# -------------------------------------------------------------------
# Performs the delete based on one to many relationship.
#
    my ($self, $where) = @_;

    # Get the SQL.
    my $sth = $self->select($where);

    my $rows = $sth->fetchall_arrayref();
    return 0 unless $rows and @$rows;

    $sth = $self->{driver}->delete($where) or return;

    my $name = $self->name;
    for my $fk_table ($self->fk_tables) {

        my $new_schema = $self->new_table($fk_table) or return $self->fatal(FKNOTABLE => $fk_table, $GT::SQL::error);
        my %fk = $new_schema->fk;
        my @ls = sort keys %{$fk{$name}};
        my $rel = $self->new_relation($fk_table, $self->name);
        my %cond;
        for my $col (@ls) {
            my $c = $fk{$name}->{$col};
            $cond{"$name.$c"} = undef;
            my @sel_limit = map $_->[$self->{schema}->{cols}->{$c}->{pos} - 1], @$rows;
            next unless @sel_limit;
            $cond{"$fk_table.$col"} = \@sel_limit;
        }
        my $sth = $rel->select('left_join', @ls, \%cond) or return;
        my $cols = $new_schema->cols;

        my $pk_vals = $sth->fetchall_arrayref;
        if (@ls > 1) {
            for my $row (@$pk_vals) {
                $new_schema->delete({ map { ($ls[$_] => $row->[$_]) } 0 .. $#ls }) or return;
            }
        }
        elsif (@ls == 1) {
            my @del = map $_->[0], @$pk_vals;
            $new_schema->delete({ $ls[0] => \@del }) if @del;
        }
    }
    return 1;
}
END_OF_SUB

# Returns a hash of all columns that have positive weights.
$COMPILE{_weight_cols} = __LINE__ . <<'END_OF_SUB';
sub _weight_cols {
    my $self = shift;
    return map {
        $self->{schema}->{cols}->{$_}->{weight}
            ? ($_ => $self->{schema}->{cols}->{$_}->{weight})
            : ()
    } keys %{$self->{schema}->{cols}};
}
END_OF_SUB

# Returns a hash of all columns that have a form_type of FILE.
$COMPILE{_file_cols} = __LINE__ . <<'END_OF_SUB';
sub _file_cols {
    my $self = shift;
    $self->{_file_cols} = {
        map {
            ($self->{schema}->{cols}->{$_}->{form_type} and uc $self->{schema}->{cols}->{$_}->{form_type} eq 'FILE')
                ? ($_ => $self->{schema}->{cols}->{$_})
                : ()
        } keys %{$self->{schema}->{cols}}
    } if !$self->{_file_cols} or shift;

    %{$self->{_file_cols}};
}
END_OF_SUB

# Returns true if first argument is a primary key.
$COMPILE{_is_pk} = __LINE__ . <<'END_OF_SUB';
sub _is_pk {
    for (@{$_[0]->{schema}->{pk}}) {
        return 1 if $_ eq $_[1];
    }
    return 0;
}
END_OF_SUB

$COMPILE{_is_fk} = __LINE__ . <<'END_OF_SUB';
sub _is_fk {
# -------------------------------------------------------------------
# Returns true if first argument is a foreign key.
#
    for (keys %{$_[0]->{schema}->{fk}}) {
        return 1 if exists $_[0]->{schema}->{fk}->{$_}->{$_[1]};
    }
    return 0;
}
END_OF_SUB

# Returns true if first argument is not null.
$COMPILE{_is_not_null} = __LINE__ . <<'END_OF_SUB';
sub _is_not_null {
    return(
        exists $_[0]->{schema}->{cols}->{$_[1]}->{not_null}
            and $_[0]->{schema}->{cols}->{$_[1]}->{not_null}
    );
}
END_OF_SUB

# Returns true if first argument is indexed.
$COMPILE{_is_indexed} = __LINE__ . <<'END_OF_SUB';
sub _is_indexed {
    my ($self, $col) = @_;
    for my $index_name (keys %{$self->{schema}->{index}}) {
        for my $index_col (@{$self->{schema}->{index}->{$index_name}}) {
            return 1 if $index_col eq $col;
        }
    }
    return 0;
}
END_OF_SUB

# Returns true if first argument is uniquely indexed.
$COMPILE{_is_unique} = __LINE__ . <<'END_OF_SUB';
sub _is_unique {
    my ($self, $col) = @_;
    for my $index_name (keys %{$self->{schema}->{unique}}) {
        for my $index_col (@{$self->{schema}->{unique}->{$index_name}}) {
            return 1 if $index_col eq $col;
        }
    }
    return 0;
}
END_OF_SUB

$COMPILE{_get_indexer} = __LINE__ . <<'END_OF_SUB';
sub _get_indexer {
#-------------------------------------------------------------------------------
    my $self = shift;
    $self->debug("CREATING GT::SQL::Indexer OBJECT") if ($self->{_debug} > 2);
    require GT::SQL::Search;
    my $indexer = GT::SQL::Search->load_indexer(
        table => $self
    );
    $indexer->debug_level($self->{_debug});
    return $indexer;
}
END_OF_SUB

1;

__END__

=head1 NAME

GT::SQL::Table - a Perl interface to manipulate a single SQL table.

=head1 SYNOPSIS

    my $sth = $table->select(Column3 => { Column => $value, Column2 => $value2 });
    $table->delete({ Column => $value });
    $table->insert({ Column1 => $val, Column2 => $value2 });
    $table->update({ SetCol => $val }, { WhereCol => $val2 });

=head1 DESCRIPTION

GT::SQL::Table provides methods to add, modify, delete and search over a single
SQL table.

The following methods are provided.

=head2 query, query_sth

C<query> provides a simple and powerful method to search a table. It takes as
input either a hash, hash ref or CGI object, making it especially useful for
searching from web forms.

    my $results = $db->query($in);

The return of C<query> is an arrayref of arrayrefs. C<query_sth> returns an STH
that you can fetch rows from.

Typical usage to go through the results is:

    my $results = $db->query({ Title => 'foobar' });
    if ($results) {
        for my $result (@$results) {
            ...
        }
    }

To specify what to search, you simply pass in column => search value. However,
you can also pass in a lot of options to enhance your search (a combined
example follows the list):

Find all rows with field_name = value:

    field_name => value

Find all rows with field_name > value:

    field_name => ">value"

Find all rows with field_name < value:

    field_name => "<value"

Find all rows with field_name > value:

    field_name-gt => value

Find all rows with field_name < value:

    field_name-lt => value

Find all rows where any field_name = value:

    keyword => value

Find all rows using indexed search (see weights):

    query => value

Set to 1 to use '=' comparison; 0/unspecified uses a LIKE '%val%' comparison:

    ww => 1

Search using LIKE for column 'Title' (valid opts are '=', '>', '<' or 'LIKE'):

    Title-opt => 'LIKE'

Set to 1 to OR match results; 0/unspecified AND matches results:

    ma => 1

Return a max of n results, defaults to 25:

    mh => n

Return page n of results:

    nh => n

Sort by 'Title' column:

    sb => 'Title'

Sort in ascending (ASC) or descending (DESC) order:

    so => 'ASC'

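For instance, the options can be combined freely; the column name below is
illustrative only:

    my $sth = $db->query_sth({
        Title => 'foobar',
        mh    => 10,
        nh    => 1,
        sb    => 'Title',
        so    => 'ASC',
    });
    while (my $row = $sth->fetchrow_hashref) {
        ...
    }
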
=head2 select

Select provides a way to implement almost any SQL SELECT statement.

An executed statement handle is returned that you can call the normal fetchrow,
fetchrow_array, fetchrow_hashref, etc on.

    my $sth = $obj->select;

is equivalent to "SELECT * FROM Table"

    my $sth = $obj->select({ Col => Val });

is equivalent to "SELECT * FROM Table WHERE Col = 'Val'".

    my $sth = $obj->select('Col2', 'Col3', { Col => "Val" });

is equivalent to "SELECT Col2,Col3 FROM Table WHERE Col = 'Val'".

So you can pass in a hash reference to represent the where clause, and a list
of the columns you want to select.

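The returned handle is then iterated like any other statement handle; the
column names here are illustrative only:

    my $sth = $obj->select('Col2', 'Col3', { Col => 'Val' });
    while (my $row = $sth->fetchrow_hashref) {
        print "$row->{Col2}: $row->{Col3}\n";
    }
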
If you need more complex where clauses, you should use a condition object
instead of a hash reference. See L<GT::SQL::Condition> for more information.

Notes:

=over 4

=item quoting in where

All arguments in the where clause are automatically quoted. If you don't want
quotes, you should pass in a scalar reference as in:

    my $sth = $obj->select({ Col => \"NOW()" });

which turns into "SELECT * FROM Table WHERE Col = NOW()".

=item quoting in select

Nothing in the select will be quoted, so to use functions, simply pass in what
you want:

    my $sth = $obj->select('COUNT(*)');

which turns into "SELECT COUNT(*) FROM Table".

=back

To specify LIMIT, or GROUP BY, or ORDER BY or other SELECT clauses that come
after the WHERE, you should use select_options below.

=head2 select_options

This method provides a way for you to specify select options such as LIMIT and
ORDER BY.

    $obj->select_options(@OPTIONS);

@OPTIONS should be a list of options you want appended to your next select.

For example,

    $obj->select_options('ORDER BY Foo', 'LIMIT 50');
    $obj->select;

would turn into "SELECT * FROM Table ORDER BY Foo LIMIT 50". To perform a
LIMIT with an OFFSET, you should specify something like:

    $obj->select_options('LIMIT 25 OFFSET 75');

You can alternatively use the equivalent MySQL-specific syntax:

    $obj->select_options('LIMIT 75, 25');

Both will be handled correctly regardless of the database type.

=head2 count

This method will allow you to count records based on a where clause.

    my $count = $obj->count($condition);

count() takes either a condition or a hash reference. If no argument is
provided, it is equivalent to "SELECT COUNT(*) FROM Table", or the total number
of rows.

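For example (the column name is illustrative only):

    my $total = $obj->count;                        # all rows in the table
    my $open  = $obj->count({ Status => 'open' });  # rows where Status = 'open'
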
=head2 hits

This method returns the number of hits from the last select query B<without>
the limit clause if there was one.

    $hits = $obj->hits;

For example, to get rows 20-30 of a query result, use:

    $obj->select_options("LIMIT 10 OFFSET 20");
    $obj->select({ Column => 'Foo' });

this translates into (in MySQL):

    SELECT * FROM Table WHERE Column = 'Foo' LIMIT 20, 10

To see the total number of results that the query would have retrieved without
any limit, you call:

    $hits = $obj->hits;

If the number of hits can be calculated, it will be returned to you without any
additional query. Otherwise, the following query will be performed
automatically, and the hit count returned to you:

    SELECT COUNT(*) FROM Table WHERE Column = 'Foo'

B<NOTE>: The hits() method _only_ applies to select queries. Most databases do
not provide enough information to get counts of rows affected for other types
of queries.

=head2 get

This method allows for a simple interface to retrieving records from the
table(s).

    my $rec_hash_ref = $obj->get($val);
    my $rec_hash_ref = $obj->get($val, 'HASH', ['col1', 'col2']);
    my $rec_array_ref = $obj->get($val, 'ARRAY');

The first argument is the primary key value of the record you want to retrieve.

The second argument is a format option. It can be either 'ARRAY' or 'HASH' and
determines whether you are returned a HASH reference or an ARRAY reference. The
default is 'HASH', and it is optional.

The last argument is a list of column names you want retrieved. C<get> defaults
to returning the entire record, but if you only need specific columns, you can
ask for the ones you want.

For example:

    my $employee = $emp_db->get('Alex');

would return a hash ref of the record whose primary key is equal to 'Alex'.

    my $emp_addr = $emp_db->get('Alex', 'HASH', ['City', 'State', 'ZipCode']);

would return a hash ref of only the three fields City, State, ZipCode for the
record whose primary key equals Alex.

=head2 add

Method to add an entry into the database. This method can take its arguments
in one of three ways.

    $obj->add($CGI_OBJECT);

-or-

    $obj->add({
        col1 => $val1,
        col2 => $val2,
        ...
    });

-or-

    $obj->add(
        col1 => $val1,
        col2 => $val2,
        ...
    );

This method can take a cgi object, a hash reference or a hash. The keys of the
hash should be the names of the columns and the values should be the values to
insert into the fields. A CGI object is treated no differently. If the table
has an auto_increment field, the value of the last inserted record will be
returned.

C<add> returns undef on failure. If successful, and the table has an
auto-increment field, the auto increment value is returned. If there is no
auto increment value, then 1 is returned. Any errors will be in
$GT::SQL::error.

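A minimal insert with error handling might look like this (column names are
illustrative only):

    my $id = $obj->add({ Title => 'foobar', Status => 'open' });
    defined $id or die "Add failed: $GT::SQL::error";
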
Passing in GT_SQL_SKIP_CHECK => 1 will have the table module skip any error
checking it should perform.

Passing in GT_SQL_SKIP_INDEX => 1 will not index the fields. You can also use
the C<indexing> method to do this.

=head2 insert

C<insert> is a lower level add. The main differences between C<add> and
C<insert> are that add performs a not null check, and add returns the id of the
just inserted value.

C<insert> does not perform a not null check. Also, insert returns the statement
handle used to do the insert (so you can call $sth->insert_id to get the auto
increment).

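For example (a sketch only; it assumes the table has an auto-increment column
and uses an illustrative column name):

    my $sth = $obj->insert({ Title => 'foobar' }) or die $GT::SQL::error;
    my $id  = $sth->insert_id;
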
=head2 insert_multiple

C<insert_multiple> will try to optimize the insertion of multiple rows with
simple values. Under MySQL, this uses MySQL's extended insert syntax:

    INSERT INTO Table (col1, col2, col3)
    VALUES ('val1', 'val2', 'val3'), ('val4', 'val5', 'val6'), ...

On other databases, it attempts to perform all insertions in a single
transaction, which will also usually yield performance benefits. Note,
however, that C<insert_multiple> should not be used for anything more complex
than basic column values - for example, inserting NULL to set the current date,
or using raw SQL by passing scalar references for values.

It takes at least two arguments - the first argument is an array ref of column
names, and the rest are array references of values. For example, to produce
the above example SQL code, you would call:

    $table->insert_multiple(
        ['col1', 'col2', 'col3'],
        ['val1', 'val2', 'val3'],
        ['val4', 'val5', 'val6'],
        ...
    );

=head2 modify

This method is designed for modifying a single entry in the table. It takes as
input a hash, hash ref or CGI object, which is assumed to represent a single
row with all fields intact.

C<modify> will then look for the primary key in the input and set all fields
for that row equal to what was passed in.

You need to pass in a complete record! If you just want to update one column,
you probably want to use C<update> instead, as doing:

    my $result = $obj->modify(column1 => 'Foo');

will blank out all the other fields and set just column1 to Foo.

C<modify> returns undef on failure, 1 on success. The error message will be
available in $GT::SQL::error.

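A common (illustrative) pattern is to fetch the full record, change the fields
you need, and pass the whole record back:

    my $rec = $obj->get($id) or die "No such record";
    $rec->{Status} = 'closed';    # hypothetical column
    $obj->modify($rec) or die $GT::SQL::error;
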
=head2 update

This method provides a more robust way to update multiple entries in the table.

    my $result = $obj->update(
        {
            col1 => $val1,
            col2 => $val2,
            ...
        },
        $condition
    );

-or-

    my $result = $obj->update(
        {
            col1 => $val1,
            col2 => $val2,
            ...
        },
        {
            col1 => $val1,
            col2 => $val2,
            ...
        }
    );

In both these cases the first argument is a hash reference with the column
names as the keys and the new values you want the columns to hold as the
values. The second argument can either be a condition object or a hash
reference. If it is a hash reference the keys will be used as the column names
and the values will be taken as the current column values for the where clause
to update the table.

    $obj->update({ Setme => 'NewValue'}, { WhereCol => 5 });

would set the column 'Setme' to 'NewValue' where the column 'WhereCol' is 5.
This translates to:

    UPDATE Table SET SetMe='NewValue' WHERE WhereCol = 5

If the second argument is a GT::SQL::Condition object the condition object will
be used to build the where clause with. Please see L<GT::SQL::Condition> for a
description of what you can do with a where clause.

    my $condition = GT::SQL::Condition->new('WhereCol', 'LIKE', 'Foo%');
    $obj->update({ Setme => 'Newvalue' }, $condition);

would translate to:

    UPDATE Table SET Setme = 'Newvalue' WHERE WhereCol LIKE 'Foo%'

The condition object allows much more complex where clauses, though.

C<update> returns undef on failure and a L<GT::SQL::Driver> statement
handle on success. The error message will be available in $GT::SQL::error.

Passing in GT_SQL_SKIP_CHECK => 1 as a third option to C<update> will have the
table module skip any error checking it should perform.

Passing in GT_SQL_SKIP_INDEX => 1 will not index the fields. You can also use
the C<indexing> method to do this.

=head2 delete

This method provides a robust interface to delete entries from your table(s)
using join and/or foreign key relations.

    my $result = $obj->delete($condition);

You can pass into C<delete> either a condition object to delete multiple
entries, or a scalar value to delete the row whose primary key equals the
value. If you have a multiple primary key, then you can pass in an array ref to
delete that row.

    my $result = $obj->delete({
        col1 => $val1,
        col2 => $val2,
        ...
    });

-or-

    $obj->delete($val);

-or-

    $obj->delete([$val1, $val2]);

C<delete> returns undef on failure, 1 on success. The error message will be
available in $GT::SQL::error.

=head2 delete_all

This method takes no arguments and will erase all entries from a table.

=head2 Table Properties

Table provides a lot of methods to access information about the table (a short
illustrative sketch follows the list):

=over 4

=item name

Provides the name of the table minus any prefix.

=item ai

Returns the name of the auto-increment field if any.

=item pk

Returns an array(ref) of primary key column names.

=item fk

Returns a hash of foreign key values.

=item fk_tables

Returns a list of tables with foreign keys pointing to this table.

=item index

Returns a hash ref of index name => array ref of column names that index uses.

=item unique

Returns a hash ref of unique index names => array ref of column names that
unique index uses.

=item B<all_indexes>

Returns the combined column lists of the indexes and the unique indexes.

=item cols

Returns a hash(ref) of column name => column definition.

=item default

Returns a hash(ref) of column name => default value.

=item size

Returns a hash(ref) of column name => size of column in SQL.

=item type

Returns a hash(ref) of column name => type of column in SQL.

=item form_display

Returns a hash(ref) of column name => name to display on auto generated forms
(think pretty name).

=item form_size

Returns a hash(ref) of column name => size of html form to generate.

=item form_type

Returns a hash(ref) of column name => type of html form to generate (checkbox,
select, text, etc).

=item form_names

Returns a hash(ref) of column name => array ref of form names. This is used for
multi option form elements like checkboxes and multi selects. The name is what
is displayed to the user and not entered in the database.

=item form_values

Returns a hash(ref) of column name => array ref of form values. Same as above,
but this is the value that actually gets entered.

=item time_check

Returns a hash(ref) of column name => time check on or off. If set, an update
that supplies a timestamp older than the stored value will be refused.

=item regex

Returns a hash(ref) of column name => regular expression that all input must
pass before being inserted.

=item pos

Returns a hash(ref) of column name => position in table.

=item not_null

Returns a hash(ref) of column name => not null (whether the field is allowed to
be null or not).

=back

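For instance (all names here are illustrative only):

    my $name = $table->name;    # table name without any prefix
    my @pk   = $table->pk;      # primary key column names
    my %cols = $table->cols;    # column name => definition hash
    print "auto-increment: ", ($table->ai || 'none'), "\n";
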
=head1 COPYRIGHT

Copyright (c) 2004 Gossamer Threads Inc. All Rights Reserved.
http://www.gossamer-threads.com/

=head1 VERSION

Revision: $Id: Table.pm,v 1.274 2008/09/17 19:35:24 brewt Exp $

=cut