[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Bacula-devel] libdbi backend to catalog database


Hello,

libdbi passes about 96%-98% of the regression tests (with batch insert
or normal insert) for mysql and postgresql.

The file attached is changes for files:
 src/cats/dbi.c implement batch insert
 src/cats/cats.h implement batch insert
 src/cats/sql_create.c #define to HAVE_DBI
 src/stored/bscan.c - fix in options -t

Kern, can you do a commit in trunk?

About the regression tests: attached to this email is a regress_trunk.patch
as well. It contains some modifications to all of regression/scripts/ to
include libdbi. This patch is needed to test the libdbi feature in Bacula.


If there is any problem with libdbi, I am here to solve it.

PS1: If anybody wants to run bacula with libdbi, you will need a CVS
version of libdbi and libdbi-drivers (because it has support for the
postgresql batch insert :)). A release of libdbi will come soon.

PS2: dbi.c needs some fixes and review. In the regression tests, when
bacula-dir is killed, many messages appear on the console:

Orphaned buffer:  localhost-dir    277 bytes buf=8211b60 allocated at dbi.c:1051
Orphaned buffer:  localhost-dir     53 bytes buf=81f3df8 allocated at dbi.c:1051
Orphaned buffer:  localhost-dir    200 bytes buf=81f55b0 allocated at dbi.c:1055
Orphaned buffer:  localhost-dir    200 bytes buf=81f5828 allocated at dbi.c:1055
Orphaned buffer:  localhost-dir    225 bytes buf=8211c98 allocated at dbi.c:1051
Orphaned buffer:  localhost-dir    277 bytes buf=8211d98 allocated at dbi.c:1051
Orphaned buffer:  localhost-dir     69 bytes buf=81f5ae0 allocated at dbi.c:1051
Orphaned buffer:  localhost-dir    200 bytes buf=81f5d18 allocated at dbi.c:1055
Orphaned buffer:  localhost-dir    200 bytes buf=81f5f90 allocated at dbi.c:1055

Any suggestions are welcome.

Thanks
Index: scripts/bacula-dir.conf.testrunscript.in
===================================================================
--- scripts/bacula-dir.conf.testrunscript.in	(revisão 6640)
+++ scripts/bacula-dir.conf.testrunscript.in	(cópia de trabalho)
@@ -203,6 +203,7 @@
 # Generic catalog service
 Catalog {
   Name = MyCatalog
+  @libdbi@
   dbname = regress; user = regress; password = ""
 }
 
Index: scripts/create_sed
===================================================================
--- scripts/create_sed	(revisão 6640)
+++ scripts/create_sed	(cópia de trabalho)
@@ -10,6 +10,8 @@
 
 # set the ports used by these tests
 BASEPORT=${BASEPORT:-8101}
+LIBDBI=${LIBDBI:- }
+libdbi=${LIBDBI}
 srcdir=${BACULA_SOURCE}
 dirport=${BASEPORT}
 fdport=`expr ${BASEPORT} '+' 1`
@@ -55,3 +57,5 @@
 echo "s%@os@%${os}%g" >>${out}
 echo "s%@bversion@%${bversion}%g" >>${out}
 echo "s%@bdate@%${bdate}%g" >>${out}
+echo "s%@libdbi@%${libdbi}%g" >>${out}
+
Index: scripts/bacula-dir.conf.regexwhere.in
===================================================================
--- scripts/bacula-dir.conf.regexwhere.in	(revisão 6640)
+++ scripts/bacula-dir.conf.regexwhere.in	(cópia de trabalho)
@@ -117,6 +117,7 @@
 # Generic catalog service
 Catalog {
   Name = MyCatalog
+  @libdbi@
   dbname = regress; user = regress; password = ""
 }
 
Index: scripts/testa-bacula-dir.conf.in
===================================================================
--- scripts/testa-bacula-dir.conf.in	(revisão 6640)
+++ scripts/testa-bacula-dir.conf.in	(cópia de trabalho)
@@ -102,6 +102,7 @@
 # Generic catalog service
 Catalog {
   Name = MyCatalog
+  @libdbi@
   dbname = regress; user = regress; password = ""
 }
 
Index: scripts/bacula-dir-fifo.conf.in
===================================================================
--- scripts/bacula-dir-fifo.conf.in	(revisão 6640)
+++ scripts/bacula-dir-fifo.conf.in	(cópia de trabalho)
@@ -161,6 +161,7 @@
 # Generic catalog service
 Catalog {
   Name = MyCatalog
+  @libdbi@
   dbname = regress; user = regress; password = ""
 }
 
Index: scripts/bacula-dir-tape.conf.in
===================================================================
--- scripts/bacula-dir-tape.conf.in	(revisão 6640)
+++ scripts/bacula-dir-tape.conf.in	(cópia de trabalho)
@@ -174,6 +174,7 @@
 # Generic catalog service
 Catalog {
   Name = MyCatalog
+  @libdbi@
   dbname = regress; user = regress; password = ""
 }
 
Index: scripts/bacula-dir-migration.conf.in
===================================================================
--- scripts/bacula-dir-migration.conf.in	(revisão 6640)
+++ scripts/bacula-dir-migration.conf.in	(cópia de trabalho)
@@ -174,6 +174,7 @@
 # Generic catalog service
 Catalog {
   Name = MyCatalog
+  @libdbi@
   dbname = regress; user = regress; password = ""
 }
 
Index: scripts/bacula-dir.conf.accurate.in
===================================================================
--- scripts/bacula-dir.conf.accurate.in	(revisão 6640)
+++ scripts/bacula-dir.conf.accurate.in	(cópia de trabalho)
@@ -122,6 +122,7 @@
 # Generic catalog service
 Catalog {
   Name = MyCatalog
+  @libdbi@
   dbname = regress; user = regress; password = ""
 }
 
Index: scripts/bacula-dir.conf.maxtime.in
===================================================================
--- scripts/bacula-dir.conf.maxtime.in	(revisão 6640)
+++ scripts/bacula-dir.conf.maxtime.in	(cópia de trabalho)
@@ -107,6 +107,7 @@
 # Generic catalog service
 Catalog {
   Name = MyCatalog
+  @libdbi@
   dbname = regress; user = regress; password = ""
 }
 
Index: scripts/new-test-bacula-dir.conf.in
===================================================================
--- scripts/new-test-bacula-dir.conf.in	(revisão 6640)
+++ scripts/new-test-bacula-dir.conf.in	(cópia de trabalho)
@@ -323,6 +323,7 @@
 # Generic catalog service
 Catalog {
   Name = MyCatalog
+  @libdbi@
   dbname = regress; user = regress; password = ""
 }
 
Index: scripts/bacula-dir.conf.errors.in
===================================================================
--- scripts/bacula-dir.conf.errors.in	(revisão 6640)
+++ scripts/bacula-dir.conf.errors.in	(cópia de trabalho)
@@ -204,6 +204,7 @@
 # Generic catalog service
 Catalog {
   Name = MyCatalog
+  @libdbi@
   dbname = regress; user = regress; password = ""
 }
 
Index: scripts/test-bacula-dir.conf.in
===================================================================
--- scripts/test-bacula-dir.conf.in	(revisão 6640)
+++ scripts/test-bacula-dir.conf.in	(cópia de trabalho)
@@ -260,6 +260,7 @@
 # Generic catalog service
 Catalog {
   Name = MyCatalog
+  @libdbi@
   dbname = regress; user = regress; password = ""
 }
 
Index: scripts/tls-auth-bacula-dir.conf.in
===================================================================
--- scripts/tls-auth-bacula-dir.conf.in	(revisão 6640)
+++ scripts/tls-auth-bacula-dir.conf.in	(cópia de trabalho)
@@ -338,6 +338,7 @@
 # Generic catalog service
 Catalog {
   Name = MyCatalog
+  @libdbi@
   dbname = regress; user = regress; password = ""
 }
 
Index: scripts/tls-bacula-dir.conf.in
===================================================================
--- scripts/tls-bacula-dir.conf.in	(revisão 6640)
+++ scripts/tls-bacula-dir.conf.in	(cópia de trabalho)
@@ -335,6 +335,7 @@
 # Generic catalog service
 Catalog {
   Name = MyCatalog
+  @libdbi@
   dbname = regress; user = regress; password = ""
 }
 
Index: scripts/multi-client-bacula-dir.conf.in
===================================================================
--- scripts/multi-client-bacula-dir.conf.in	(revisão 6640)
+++ scripts/multi-client-bacula-dir.conf.in	(cópia de trabalho)
@@ -324,6 +324,7 @@
 # Generic catalog service
 Catalog {
   Name = MyCatalog
+  @libdbi@
   dbname = regress; user = regress; password = ""
 }
 
Index: scripts/testb-bacula-dir.conf.in
===================================================================
--- scripts/testb-bacula-dir.conf.in	(revisão 6640)
+++ scripts/testb-bacula-dir.conf.in	(cópia de trabalho)
@@ -106,6 +106,7 @@
 # Generic catalog service
 Catalog {
   Name = MyCatalog
+  @libdbi@  
   dbname = regress; user = regress; password = ""
 }
 
Index: tests/accurate-test
===================================================================
--- tests/accurate-test	(revisão 6640)
+++ tests/accurate-test	(cópia de trabalho)
@@ -176,14 +176,14 @@
 stop_bacula
 
 cd ${cwd}/bin
-  ./drop_bacula_tables      >/dev/null 2>&1
-  ./make_bacula_tables      >/dev/null 2>&1
-  ./grant_bacula_privileges >/dev/null 2>&1
+  ./drop_bacula_tables -U regress     >/dev/null 2>&1
+  ./make_bacula_tables -U regress     >/dev/null 2>&1
+  ./grant_bacula_privileges -U regress >/dev/null 2>&1
 cd ..
 
 echo "volume=TestVolume001" >tmp/bscan.bsr
 
-${cwd}/bin/bscan -c ${cwd}/bin/bacula-sd.conf -n regress -u regress -m -s -b tmp/bscan.bsr FileStorage 2>&1 > ${cwd}/tmp/bscan.log
+${cwd}/bin/bscan -c ${cwd}/bin/bacula-sd.conf -h 127.0.0.1 -D dbi:postgresql -t 5432 -n regress -u regress -m -s -b tmp/bscan.bsr FileStorage 2>&1 > ${cwd}/tmp/bscan.log
 
 cat <<END_OF_DATA >${cwd}/tmp/bconcmds
 @$out ${cwd}/tmp/log1.out
Index: tests/bscan-test
===================================================================
--- tests/bscan-test	(revisão 6640)
+++ tests/bscan-test	(cópia de trabalho)
@@ -58,9 +58,9 @@
 echo "volume=TestVolume002" >>tmp/bscan.bsr
 
 if test "$debug" -eq 1 ; then
-  bin/bscan -w working -u regress -n regress -m -s -v -b tmp/bscan.bsr -c bin/bacula-sd.conf ${cwd}/tmp                   
+  bin/bscan -w working -D dbi:postgresql -t 5432 -h 127.0.0.1 -u regress -n regress -m -s -v -b tmp/bscan.bsr -c bin/bacula-sd.conf ${cwd}/tmp                   
 else
-  bin/bscan -w working -u regress -n regress -m -s -v -b tmp/bscan.bsr -c bin/bacula-sd.conf ${cwd}/tmp 2>&1 >tmp/log3.out
+  bin/bscan -w working -D dbi:postgresql -t 5432 -h 127.0.0.1 -u regress -n regress -m -s -v -b tmp/bscan.bsr -c bin/bacula-sd.conf ${cwd}/tmp 2>&1 >tmp/log3.out
 fi
 
 cat <<END_OF_DATA >tmp/bconcmds
Index: src/cats/dbi.c
===================================================================
--- src/cats/dbi.c	(revisão 6518)
+++ src/cats/dbi.c	(cópia de trabalho)
@@ -117,7 +117,8 @@
              bstrcmp(mdb->db_address, db_address) &&
              bstrcmp(mdb->db_driver, db_driver) &&
              mdb->db_port == db_port) { 
-            Dmsg3(100, "DB REopen %d %s %s\n", mdb->ref_count, db_driver, db_name);
+            Dmsg4(100, "DB REopen %d %s %s erro: %d\n", mdb->ref_count, db_driver, db_name, 
+                  dbi_conn_error(mdb->db, NULL));
             mdb->ref_count++;
             V(mutex);
             return mdb;                  /* already open */
@@ -200,16 +201,15 @@
       port = NULL;
    }
    
-   numdrivers = dbi_initialize(mdb->db_driverdir);
+   numdrivers = dbi_initialize_r(mdb->db_driverdir, &(mdb->instance));
    if (numdrivers < 0) {
-      dbi_shutdown();
       Mmsg2(&mdb->errmsg, _("Unable to locate the DBD drivers to DBI interface in: \n"
                                "db_driverdir=%s. It is probaly not found any drivers\n"),
                                mdb->db_driverdir,numdrivers);
       V(mutex);
       return 0;
    }
-   mdb->db = (void **)dbi_conn_new(mdb->db_driver);
+   mdb->db = (void **)dbi_conn_new_r(mdb->db_driver, mdb->instance);
    dbi_conn_set_option(mdb->db, "host", mdb->db_address); /* default = localhost */
    dbi_conn_set_option(mdb->db, "port", port);            /* default port */
    dbi_conn_set_option(mdb->db, "username", mdb->db_user);     /* login name */
@@ -287,8 +287,10 @@
    if (mdb->ref_count == 0) {
       qdchain(&mdb->bq);
       if (mdb->connected && mdb->db) {
-         sql_close(mdb);
+         //sql_close(mdb);
+         dbi_shutdown_r(mdb->instance);
          mdb->db = NULL;
+         mdb->instance = NULL;
       }
       rwl_destroy(&mdb->lock);
       free_pool_memory(mdb->errmsg);
@@ -313,13 +315,13 @@
       if (mdb->db_socket) {
          free(mdb->db_socket);
       }
-      dbi_shutdown();
+      if (mdb->db_driverdir) {
+         free(mdb->db_driverdir);
+      }
       if (mdb->db_driver) {
           free(mdb->db_driver);
       }
-      free(mdb);
-      
-      
+      free(mdb);            
    }   
    V(mutex);
 }
@@ -347,7 +349,7 @@
  *         the escaped output.
  * 
  * dbi_conn_quote_string_copy receives a pointer to pointer.
- * We need copy the value of pointer to snew. Because libdbi change the 
+ * We need copy the value of pointer to snew because libdbi change the 
  * pointer
  */
 void
@@ -357,14 +359,17 @@
    char *pnew;
    
    if (len == 0) {
-          snew[0] = 0; 
+      snew[0] = 0; 
    } else {
-          /* correct the size of old basead in len and copy new string to inew */
-          inew = (char *)malloc(sizeof(char) * len + 1);
-          bstrncpy(inew,old,len + 1);
-          /* escape the correct size of old */
-          dbi_conn_escape_string_copy(mdb->db, inew, &pnew);
-          /* copy the escaped string to snew */
+      /* correct the size of old basead in len
+       * and copy new string to inew
+       */
+      inew = (char *)malloc(sizeof(char) * len + 1);
+      bstrncpy(inew,old,len + 1);
+      /* escape the correct size of old */
+      dbi_conn_escape_string_copy(mdb->db, inew, &pnew);
+      free(inew);
+      /* copy the escaped string to snew */
       bstrncpy(snew, pnew, 2 * len + 1);   
    }
    
@@ -429,6 +434,15 @@
 
       if (mdb->row) {
          Dmsg0(500, "my_dbi_fetch_row freeing space\n");
+         Dmsg2(500, "my_dbi_free_row row: '%p' num_fields: '%d'\n", mdb->row, mdb->num_fields);
+         if (mdb->num_rows != 0) {
+            for(j = 0; j < mdb->num_fields; j++) {
+               Dmsg2(500, "my_dbi_free_row row '%p' '%d'\n", mdb->row[j], j);
+                  if(mdb->row[j]) {
+                     free(mdb->row[j]);
+                  }                  
+            } 
+         }
          free(mdb->row);
       }
       num_fields += 20;                  /* add a bit extra */
@@ -445,7 +459,7 @@
       // get each value from this row
       for (j = 0; j < mdb->num_fields; j++) {
          mdb->row[j] = my_dbi_getvalue(mdb->result, mdb->row_number, j);
-         Dmsg2(500, "my_dbi_fetch_row field '%d' has value '%s'\n", j, mdb->row[j]);
+         Dmsg3(500, "my_dbi_fetch_row field '%p' '%d' has value '%s'\n",mdb->row[j], j, mdb->row[j]);
       }
       // increment the row number for the next call
       mdb->row_number++;
@@ -467,6 +481,7 @@
    int max_length;
    int i;
    int this_length;
+   char *cbuf;
 
    max_length = 0;
    for (i = 0; i < mdb->num_rows; i++) {
@@ -474,7 +489,9 @@
           this_length = 4;        // "NULL"
       } else {
           // TODO: error
-          this_length = cstrlen(my_dbi_getvalue(mdb->result, i, field_num));
+         cbuf = my_dbi_getvalue(mdb->result, i, field_num);
+         this_length = cstrlen(cbuf);
+         free(cbuf);
       }
 
       if (max_length < this_length) {
@@ -553,24 +570,16 @@
       dbi_result_free(mdb->result);  /* hmm, someone forgot to free?? */
       mdb->result = NULL;
    }
-
-   //for (int i=0; i < 10; i++) {
-          
-      mdb->result = (void **)dbi_conn_query(mdb->db, query);
+         
+   mdb->result = (void **)dbi_conn_query(mdb->db, query);
       
-   //   if (mdb->result) {
-   //      break;
-   //   }
-   //   bmicrosleep(5, 0);
-   //}
-   if (mdb->result == NULL) {
+   if (!mdb->result) {
       Dmsg2(50, "Query failed: %s %p\n", query, mdb->result);      
       goto bail_out;
    }
-
-   //mdb->status = (dbi_error_flag)dbi_conn_error_flag(mdb->db);
-   mdb->status = DBI_ERROR_NONE;
    
+   mdb->status = (dbi_error_flag) dbi_conn_error(mdb->db, &errmsg);
+   
    if (mdb->status == DBI_ERROR_NONE) {
       Dmsg1(500, "we have a result\n", query);
 
@@ -591,9 +600,9 @@
    return mdb->status;
 
 bail_out:
-   mdb->status = dbi_conn_error_flag(mdb->db);
-   dbi_conn_error(mdb->db, &errmsg);
-   Dmsg4(500, "my_dbi_query we failed dbi error "
+   mdb->status = (dbi_error_flag) dbi_conn_error(mdb->db,&errmsg);
+   //dbi_conn_error(mdb->db, &errmsg);
+   Dmsg4(500, "my_dbi_query we failed dbi error: "
                    "'%s' '%p' '%d' flag '%d''\n", errmsg, mdb->result, mdb->result, mdb->status);
    dbi_result_free(mdb->result);
    mdb->result = NULL;
@@ -602,21 +611,27 @@
 }
 
 void my_dbi_free_result(B_DB *mdb)
-{
-   int i;
+{ 
    
-   db_lock(mdb);
-   //Dmsg2(500, "my_dbi_free_result started result '%p' '%p'\n", mdb->result, mdb->result);
-   if (mdb->result != NULL) {
-          i = dbi_result_free(mdb->result);
-      if(i == 0) {
-         mdb->result = NULL;
-         //Dmsg2(500, "my_dbi_free_result result '%p' '%d'\n", mdb->result, mdb->result);
-      }
-      
+   db_lock(mdb);  
+   int i = 0;
+   if (mdb->result) {
+      Dmsg1(500, "my_dbi_free_result result '%p'\n", mdb->result);
+      dbi_result_free(mdb->result);
    }
 
+   mdb->result = NULL;
+   
    if (mdb->row) {
+      Dmsg2(500, "my_dbi_free_result row: '%p' num_fields: '%d'\n", mdb->row, mdb->num_fields);
+      if (mdb->num_rows != 0) {
+         for(i = 0; i < mdb->num_fields; i++) {
+            Dmsg2(500, "my_dbi_free_result row '%p' '%d'\n", mdb->row[i], i);
+            if(mdb->row[i]) {
+               free(mdb->row[i]);
+            }
+         } 
+      }
       free(mdb->row);
       mdb->row = NULL;
    }
@@ -639,66 +654,116 @@
    return errmsg;
 }
 
-// TODO: make batch insert work with libdbi
 #ifdef HAVE_BATCH_FILE_INSERT
 
+/*
+ * This can be a bit strang but is the one way to do
+ * 
+ * Returns 1 if OK
+ *         0 if failed
+ */
 int my_dbi_batch_start(JCR *jcr, B_DB *mdb)
 {
    char *query = "COPY batch FROM STDIN";
+   
+   Dmsg0(500, "my_dbi_batch_start started\n");
+   
+   switch (mdb->db_type) {
+   case SQL_TYPE_MYSQL:
+      db_lock(mdb);
+      if (my_dbi_query(mdb,
+                              "CREATE TEMPORARY TABLE batch ("
+                                  "FileIndex integer,"
+                                  "JobId integer,"
+                                  "Path blob,"
+                                  "Name blob,"
+                                  "LStat tinyblob,"
+                                  "MD5 tinyblob)") == 1)
+      {
+         Dmsg0(500, "my_dbi_batch_start failed\n");
+         return 1;
+      }
+      db_unlock(mdb);
+      Dmsg0(500, "my_dbi_batch_start finishing\n");
+      return 1;
+      break;
+   case SQL_TYPE_POSTGRESQL:
+      
+      //query = "COPY batch FROM STDIN";
 
-   Dmsg0(500, "my_postgresql_batch_start started\n");
+      if (my_dbi_query(mdb,
+                              "CREATE TEMPORARY TABLE batch ("
+                                  "fileindex int,"
+                                  "jobid int,"
+                                  "path varchar,"
+                                  "name varchar,"
+                                  "lstat varchar,"
+                                  "md5 varchar)") == 1)
+      {
+         Dmsg0(500, "my_dbi_batch_start failed\n");
+         return 1;
+      }
+      
+      // We are starting a new query.  reset everything.
+      mdb->num_rows     = -1;
+      mdb->row_number   = -1;
+      mdb->field_number = -1;
 
-   if (my_postgresql_query(mdb,
-                           "CREATE TEMPORARY TABLE batch ("
-                               "fileindex int,"
-                               "jobid int,"
-                               "path varchar,"
-                               "name varchar,"
-                               "lstat varchar,"
-                               "md5 varchar)") == 1)
-   {
-      Dmsg0(500, "my_postgresql_batch_start failed\n");
-      return 1;
-   }
-   
-   // We are starting a new query.  reset everything.
-   mdb->num_rows     = -1;
-   mdb->row_number   = -1;
-   mdb->field_number = -1;
+      my_dbi_free_result(mdb);
 
-   my_postgresql_free_result(mdb);
+      for (int i=0; i < 10; i++) {
+         my_dbi_query(mdb, query);
+         if (mdb->result) {
+            break;
+         }
+         bmicrosleep(5, 0);
+      }
+      if (!mdb->result) {
+         Dmsg1(50, "Query failed: %s\n", query);
+         goto bail_out;
+      }
 
-   for (int i=0; i < 10; i++) {
-      mdb->result = PQexec(mdb->db, query);
-      if (mdb->result) {
-         break;
+      mdb->status = (dbi_error_flag)dbi_conn_error(mdb->db, NULL);
+      //mdb->status = DBI_ERROR_NONE;
+         
+      if (mdb->status == DBI_ERROR_NONE) {
+         // how many fields in the set?
+         mdb->num_fields = dbi_result_get_numfields(mdb->result);
+         mdb->num_rows   = dbi_result_get_numrows(mdb->result);
+         mdb->status = (dbi_error_flag) 1;
+      } else {
+         Dmsg1(50, "Result status failed: %s\n", query);
+         goto bail_out;
       }
-      bmicrosleep(5, 0);
-   }
-   if (!mdb->result) {
-      Dmsg1(50, "Query failed: %s\n", query);
-      goto bail_out;
-   }
 
-   mdb->status = PQresultStatus(mdb->result);
-   if (mdb->status == PGRES_COPY_IN) {
-      // how many fields in the set?
-      mdb->num_fields = (int) PQnfields(mdb->result);
-      mdb->num_rows   = 0;
-      mdb->status = 1;
-   } else {
-      Dmsg1(50, "Result status failed: %s\n", query);
-      goto bail_out;
+      Dmsg0(500, "my_postgresql_batch_start finishing\n");
+
+      return mdb->status;
+      break;
+   case SQL_TYPE_SQLITE:
+      db_lock(mdb);
+      if (my_dbi_query(mdb,
+                              "CREATE TEMPORARY TABLE batch ("
+                                  "FileIndex integer,"
+                                  "JobId integer,"
+                                  "Path blob,"
+                                  "Name blob,"
+                                  "LStat tinyblob,"
+                                  "MD5 tinyblob)") == 1)
+      {
+         Dmsg0(500, "my_dbi_batch_start failed\n");
+         goto bail_out;
+      }
+      db_unlock(mdb);
+      Dmsg0(500, "my_dbi_batch_start finishing\n");
+      return 1;
+      break;
    }
-
-   Dmsg0(500, "my_postgresql_batch_start finishing\n");
-
-   return mdb->status;
-
+   
 bail_out:
-   Mmsg1(&mdb->errmsg, _("error starting batch mode: %s"), PQerrorMessage(mdb->db));
-   mdb->status = 0;
-   PQclear(mdb->result);
+   Mmsg1(&mdb->errmsg, _("error starting batch mode: %s"), my_dbi_strerror(mdb));
+   mdb->status = (dbi_error_flag) 0;
+   my_dbi_free_result(mdb);
    mdb->result = NULL;
    return mdb->status;
 }
@@ -706,47 +771,75 @@
 /* set error to something to abort operation */
 int my_dbi_batch_end(JCR *jcr, B_DB *mdb, const char *error)
 {
-   int res;
-   int count=30;
-   Dmsg0(500, "my_postgresql_batch_end started\n");
+   int res = 0;
+   int count = 30;
+   int (*custom_function)(void*, const char*) = NULL;  
+   dbi_conn_t *myconn = (dbi_conn_t *)(mdb->db);
+   
+   Dmsg0(500, "my_dbi_batch_end started\n");
 
    if (!mdb) {                  /* no files ? */
       return 0;
    }
 
-   do { 
-      res = PQputCopyEnd(mdb->db, error);
-   } while (res == 0 && --count > 0);
+   switch (mdb->db_type) {
+   case SQL_TYPE_MYSQL:
+      if(mdb) {
+         mdb->status = (dbi_error_flag) 0;
+      }
+      break;
+   case SQL_TYPE_POSTGRESQL:
+      custom_function = (custom_function_end_t)dbi_driver_specific_function(dbi_conn_get_driver(mdb->db), "PQputCopyEnd");
 
-   if (res == 1) {
-      Dmsg0(500, "ok\n");
-      mdb->status = 1;
+                  
+      do { 
+         res = (*custom_function)(myconn->connection, error);         
+      } while (res == 0 && --count > 0);
+
+      if (res == 1) {
+         Dmsg0(500, "ok\n");
+         mdb->status = (dbi_error_flag) 1;
+      }
+         
+      if (res <= 0) {
+         Dmsg0(500, "we failed\n");
+         mdb->status = (dbi_error_flag) 0;
+         //Mmsg1(&mdb->errmsg, _("error ending batch mode: %s"), PQerrorMessage(mdb->db));
+       }
+      break;
+   case SQL_TYPE_SQLITE:
+      if(mdb) {
+         mdb->status = (dbi_error_flag) 0;
+      }
+      break;
    }
-   
-   if (res <= 0) {
-      Dmsg0(500, "we failed\n");
-      mdb->status = 0;
-      Mmsg1(&mdb->errmsg, _("error ending batch mode: %s"), PQerrorMessage(mdb->db));
-   }
-   
-   Dmsg0(500, "my_postgresql_batch_end finishing\n");
 
-   return mdb->status;
+   Dmsg0(500, "my_dbi_batch_end finishing\n");
+
+   return true;      
 }
 
+/* 
+ * This function is big and use a big switch.  
+ * In near future is better split in small functions
+ * and refactory.
+ * 
+ */
 int my_dbi_batch_insert(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
 {
    int res;
-   int count=30;
+   int count=30;   
+   dbi_conn_t *myconn = (dbi_conn_t *)(mdb->db);
+   int (*custom_function)(void*, const char*, int) = NULL;
+   char* (*custom_function_error)(void*) = NULL;
    size_t len;
    char *digest;
    char ed1[50];
 
-   mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1);
-   my_postgresql_copy_escape(mdb->esc_name, mdb->fname, mdb->fnl);
+   Dmsg0(500, "my_dbi_batch_insert started \n");
 
+   mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1);    
    mdb->esc_path = check_pool_memory_size(mdb->esc_path, mdb->pnl*2+1);
-   my_postgresql_copy_escape(mdb->esc_path, mdb->path, mdb->pnl);
 
    if (ar->Digest == NULL || ar->Digest[0] == 0) {
       digest = "0";
@@ -754,31 +847,131 @@
       digest = ar->Digest;
    }
 
-   len = Mmsg(mdb->cmd, "%u\t%s\t%s\t%s\t%s\t%s\n", 
-              ar->FileIndex, edit_int64(ar->JobId, ed1), mdb->esc_path, 
-              mdb->esc_name, ar->attr, digest);
+   switch (mdb->db_type) {
+   case SQL_TYPE_MYSQL:
+      db_escape_string(jcr, mdb, mdb->esc_name, mdb->fname, mdb->fnl);
+      db_escape_string(jcr, mdb, mdb->esc_path, mdb->path, mdb->pnl);
+      len = Mmsg(mdb->cmd, "INSERT INTO batch VALUES (%u,%s,'%s','%s','%s','%s')",
+                      ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->esc_path, 
+                      mdb->esc_name, ar->attr, digest);
+      
+      if (my_dbi_query(mdb,mdb->cmd) == 1)
+      {
+         Dmsg0(500, "my_dbi_batch_insert failed\n");
+         goto bail_out;
+      }
+      
+      Dmsg0(500, "my_dbi_batch_insert finishing\n");
+      
+      return 1;
+      break;
+   case SQL_TYPE_POSTGRESQL:
+      my_postgresql_copy_escape(mdb->esc_name, mdb->fname, mdb->fnl);
+      my_postgresql_copy_escape(mdb->esc_path, mdb->path, mdb->pnl);
+      len = Mmsg(mdb->cmd, "%u\t%s\t%s\t%s\t%s\t%s\n", 
+                     ar->FileIndex, edit_int64(ar->JobId, ed1), mdb->esc_path, 
+                     mdb->esc_name, ar->attr, digest);
+      
+      /* libdbi don't support CopyData and we need call a postgresql
+       * specific function to do this work
+       */
+      Dmsg2(500, "my_dbi_batch_insert :\n %s \ncmd_size: %d",mdb->cmd, len);
+      if ((custom_function = (custom_function_insert_t)dbi_driver_specific_function(dbi_conn_get_driver(mdb->db),
+            "PQputCopyData")) != NULL) {
+         do { 
+            res = (*custom_function)(myconn->connection, mdb->cmd, len);
+         } while (res == 0 && --count > 0);
 
-   do { 
-      res = PQputCopyData(mdb->db,
-                          mdb->cmd,
-                          len);
-   } while (res == 0 && --count > 0);
+         if (res == 1) {
+            Dmsg0(500, "ok\n");
+            mdb->changes++;
+            mdb->status = (dbi_error_flag) 1;
+         }
 
-   if (res == 1) {
-      Dmsg0(500, "ok\n");
-      mdb->changes++;
-      mdb->status = 1;
+         if (res <= 0) {
+            Dmsg0(500, "my_dbi_batch_insert failed\n");
+            goto bail_out;
+         }
+
+         Dmsg0(500, "my_dbi_batch_insert finishing\n");
+         return mdb->status;
+      } else {
+         // ensure to detect a PQerror 
+         custom_function_error = (custom_function_error_t)dbi_driver_specific_function(dbi_conn_get_driver(mdb->db), "PQerrorMessage");
+         Dmsg1(500, "my_dbi_batch_insert failed\n PQerrorMessage: %s", (*custom_function_error)(myconn->connection));
+         goto bail_out;
+      }
+      break;
+   case SQL_TYPE_SQLITE:
+      db_escape_string(jcr, mdb, mdb->esc_name, mdb->fname, mdb->fnl);
+      db_escape_string(jcr, mdb, mdb->esc_path, mdb->path, mdb->pnl);
+      len = Mmsg(mdb->cmd, "INSERT INTO batch VALUES (%u,%s,'%s','%s','%s','%s')",
+                      ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->esc_path, 
+                      mdb->esc_name, ar->attr, digest);
+      if (my_dbi_query(mdb,mdb->cmd) == 1)
+      {
+         Dmsg0(500, "my_dbi_batch_insert failed\n");
+         goto bail_out;
+      }
+      
+      Dmsg0(500, "my_dbi_batch_insert finishing\n");
+      
+      return 1;
+      break;
    }
+    
+bail_out:
+  Mmsg1(&mdb->errmsg, _("error inserting batch mode: %s"), my_dbi_strerror(mdb));
+  mdb->status = (dbi_error_flag) 0;
+  my_dbi_free_result(mdb);  
+  return mdb->status;
+}
 
-   if (res <= 0) {
-      Dmsg0(500, "we failed\n");
-      mdb->status = 0;
-      Mmsg1(&mdb->errmsg, _("error ending batch mode: %s"), PQerrorMessage(mdb->db));
+/*
+ * Escape strings so that PostgreSQL is happy on COPY
+ *
+ *   NOTE! len is the length of the old string. Your new
+ *         string must be long enough (max 2*old+1) to hold
+ *         the escaped output.
+ */
+char *my_postgresql_copy_escape(char *dest, char *src, size_t len)
+{
+   /* we have to escape \t, \n, \r, \ */
+   char c = '\0' ;
+
+   while (len > 0 && *src) {
+      switch (*src) {
+      case '\n':
+         c = 'n';
+         break;
+      case '\\':
+         c = '\\';
+         break;
+      case '\t':
+         c = 't';
+         break;
+      case '\r':
+         c = 'r';
+         break;
+      default:
+         c = '\0' ;
+      }
+
+      if (c) {
+         *dest = '\\';
+         dest++;
+         *dest = c;
+      } else {
+         *dest = *src;
+      }
+
+      len--;
+      src++;
+      dest++;
    }
 
-   Dmsg0(500, "my_postgresql_batch_insert finishing\n");
-
-   return mdb->status;
+   *dest = '\0';
+   return dest;
 }
 
 #endif /* HAVE_BATCH_FILE_INSERT */
@@ -823,9 +1016,8 @@
 char *my_dbi_getvalue(dbi_result *result, int row_number, unsigned int column_number) {
 
    /* TODO: This is very bad, need refactoring */
-   POOLMEM *buf = get_pool_memory(PM_FNAME);
-   //const unsigned char *bufb = (unsigned char *)malloc(sizeof(unsigned char) * 300);
-   //const unsigned char *bufb;
+   //POOLMEM *buf = get_pool_memory(PM_FNAME);
+   char *buf = NULL;
    const char *errmsg;
    const char *field_name;     
    unsigned short dbitype;
@@ -853,16 +1045,19 @@
       field_name = dbi_result_get_field_name(result, column_number);
       field_length = dbi_result_get_field_length(result, field_name);
       dbitype = dbi_result_get_field_type_idx(result,column_number);
-                
+      
       if(field_length) {
-          buf = check_pool_memory_size(buf, field_length + 1);
+         //buf = check_pool_memory_size(buf, field_length + 1);
+         buf = (char *)malloc(sizeof(char *) * field_length + 1);
       } else {
-          buf = check_pool_memory_size(buf, 50);
+         /* if numbers */
+         //buf = check_pool_memory_size(buf, 50);
+         buf = (char *)malloc(sizeof(char *) * 50);
       }
       
-      Dmsg5(500, "my_dbi_getvalue result '%p' type '%d' \n field name '%s' "
-            "field_length '%d' field_length size '%d'\n", 
-            result, dbitype, field_name, field_length, sizeof_pool_memory(buf));
+      Dmsg4(500, "my_dbi_getvalue result '%p' type '%d' \n\tfield name '%s'\n\t"
+            "field_length '%d'\n", 
+            result, dbitype, field_name, field_length);
       
       switch (dbitype) {
       case DBI_TYPE_INTEGER:
@@ -873,7 +1068,7 @@
       case DBI_TYPE_STRING:
          if(field_length) {
             field_length = bsnprintf(buf, field_length + 1, "%s", 
-                  dbi_result_get_string(result, field_name));
+            dbi_result_get_string(result, field_name));
          } else {
             buf[0] = 0;
          }
@@ -913,6 +1108,7 @@
                 
    Dmsg3(500, "my_dbi_getvalue finish result '%p' num bytes '%d' data '%s'\n", 
       result, field_length, buf);
+   
    return buf;
 }
 
@@ -960,28 +1156,62 @@
 }
 
 #ifdef HAVE_BATCH_FILE_INSERT
-const char *my_dbi_batch_lock_path_query = 
-   "BEGIN; LOCK TABLE Path IN SHARE ROW EXCLUSIVE MODE";
+const char *my_dbi_batch_lock_path_query[3] = {
+   /* Mysql */
+   "LOCK TABLES Path write, batch write, Path as p write",
+   /* Postgresql */
+   "BEGIN; LOCK TABLE Path IN SHARE ROW EXCLUSIVE MODE",
+   /* SQLite */
+   "BEGIN"};  
 
+const char *my_dbi_batch_lock_filename_query[3] = {
+   /* Mysql */
+   "LOCK TABLES Filename write, batch write, Filename as f write",
+   /* Postgresql */
+   "BEGIN; LOCK TABLE Filename IN SHARE ROW EXCLUSIVE MODE",
+   /* SQLite */
+   "BEGIN"};
 
-const char *my_dbi_batch_lock_filename_query = 
-   "BEGIN; LOCK TABLE Filename IN SHARE ROW EXCLUSIVE MODE";
+const char *my_dbi_batch_unlock_tables_query[3] = {
+   /* Mysql */
+   "UNLOCK TABLES",
+   /* Postgresql */
+   "COMMIT",
+   /* SQLite */
+   "COMMIT"};
 
-const char *my_dbi_batch_unlock_tables_query = "COMMIT";
-
-const char *my_dbi_batch_fill_path_query = 
+const char *my_dbi_batch_fill_path_query[3] = {
+   /* Mysql */
    "INSERT INTO Path (Path) "
-    "SELECT a.Path FROM "
-     "(SELECT DISTINCT Path FROM batch) AS a "
-      "WHERE NOT EXISTS (SELECT Path FROM Path WHERE Path = a.Path) ";
+   "SELECT a.Path FROM " 
+   "(SELECT DISTINCT Path FROM batch) AS a WHERE NOT EXISTS "
+   "(SELECT Path FROM Path AS p WHERE p.Path = a.Path)",
+   /* Postgresql */
+   "INSERT INTO Path (Path) "
+   "SELECT a.Path FROM "
+   "(SELECT DISTINCT Path FROM batch) AS a "
+   "WHERE NOT EXISTS (SELECT Path FROM Path WHERE Path = a.Path) ",
+   /* SQLite */
+   "INSERT INTO Path (Path)" 
+   " SELECT DISTINCT Path FROM batch"
+   " EXCEPT SELECT Path FROM Path"};
 
-
-const char *my_dbi_batch_fill_filename_query = 
+const char *my_dbi_batch_fill_filename_query[3] = {
+   /* Mysql */
    "INSERT INTO Filename (Name) "
-    "SELECT a.Name FROM "
-     "(SELECT DISTINCT Name FROM batch) as a "
-      "WHERE NOT EXISTS "
-       "(SELECT Name FROM Filename WHERE Name = a.Name)";
+   "SELECT a.Name FROM " 
+   "(SELECT DISTINCT Name FROM batch) AS a WHERE NOT EXISTS "
+   "(SELECT Name FROM Filename AS f WHERE f.Name = a.Name)",
+   /* Postgresql */
+   "INSERT INTO Filename (Name) "
+   "SELECT a.Name FROM "
+   "(SELECT DISTINCT Name FROM batch) as a "
+   "WHERE NOT EXISTS "
+   "(SELECT Name FROM Filename WHERE Name = a.Name)",
+   /* SQLite */
+   "INSERT INTO Filename (Name)"
+   " SELECT DISTINCT Name FROM batch "
+   " EXCEPT SELECT Name FROM Filename"};
 #endif /* HAVE_BATCH_FILE_INSERT */
 
 #endif /* HAVE_DBI */
Index: src/cats/cats.h
===================================================================
--- src/cats/cats.h	(revisão 6518)
+++ src/cats/cats.h	(cópia de trabalho)
@@ -533,6 +533,10 @@
 
 #include <dbi/dbi.h>
 
+#ifdef HAVE_BATCH_FILE_INSERT
+#include <dbi/dbi-dev.h>
+#endif //HAVE_BATCH_FILE_INSERT
+
 #define IS_NUM(x)        ((x) == 1 || (x) == 2 )
 #define IS_NOT_NULL(x)   ((x) == (1 << 0))
 
@@ -557,6 +561,8 @@
    brwlock_t lock;                    /* transaction lock */
    dbi_conn *db;
    dbi_result *result;
+   dbi_inst instance;
+   // TODO: change dbi_error_flag to int for better compatibility with Bacula
    dbi_error_flag status;
    DBI_ROW row;
    DBI_FIELD *fields;
@@ -605,18 +611,21 @@
 char *             my_dbi_getvalue   (dbi_result *result, int row_number, unsigned int column_number);
 int                my_dbi_sql_insert_id(B_DB *mdb, char *table_name);
 
-// TODO: do batch insert in DBI
-//int my_dbi_batch_start(JCR *jcr, B_DB *mdb);
-//int my_dbi_batch_end(JCR *jcr, B_DB *mdb, const char *error);
-//typedef struct ATTR_DBR ATTR_DBR;
-//int my_dbi_batch_insert(JCR *jcr, B_DB *mdb, ATTR_DBR *ar);
-//char *my_dbi_copy_escape(char *dest, char *src, size_t len);
+int my_dbi_batch_start(JCR *jcr, B_DB *mdb);
+int my_dbi_batch_end(JCR *jcr, B_DB *mdb, const char *error);
+typedef struct ATTR_DBR ATTR_DBR;
+int my_dbi_batch_insert(JCR *jcr, B_DB *mdb, ATTR_DBR *ar);
+char *my_postgresql_copy_escape(char *dest, char *src, size_t len);
+// typedefs so libdbi can work with the PostgreSQL COPY batch insert
+typedef int (*custom_function_insert_t)(void*, const char*, int);   
+typedef char* (*custom_function_error_t)(void*);
+typedef int (*custom_function_end_t)(void*, const char*);
 
-//extern const char* my_dbi_batch_lock_path_query;
-//extern const char* my_dbi_batch_lock_filename_query;
-//extern const char* my_dbi_batch_unlock_tables_query;
-//extern const char* my_dbi_batch_fill_filename_query;
-//extern const char* my_dbi_batch_fill_path_query;
+extern const char* my_dbi_batch_lock_path_query[3];
+extern const char* my_dbi_batch_lock_filename_query[3];
+extern const char* my_dbi_batch_unlock_tables_query[3];
+extern const char* my_dbi_batch_fill_filename_query[3];
+extern const char* my_dbi_batch_fill_path_query[3];
 
 /* "Generic" names for easier conversion */
 #define sql_store_result(x)   (x)->result
@@ -633,15 +642,14 @@
 #define sql_field_seek(x, y)  my_dbi_field_seek((x), (y))
 #define sql_fetch_field(x)    my_dbi_fetch_field(x)
 #define sql_num_fields(x)     ((x)->num_fields)
-// TODO: do dbi batch insert
 #define sql_batch_start(x,y)    my_dbi_batch_start(x,y)   
 #define sql_batch_end(x,y,z)    my_dbi_batch_end(x,y,z)   
 #define sql_batch_insert(x,y,z) my_dbi_batch_insert(x,y,z)
-#define sql_batch_lock_path_query       my_dbi_batch_lock_path_query
-#define sql_batch_lock_filename_query   my_dbi_batch_lock_filename_query
-#define sql_batch_unlock_tables_query   my_dbi_batch_unlock_tables_query
-#define sql_batch_fill_filename_query   my_dbi_batch_fill_filename_query
-#define sql_batch_fill_path_query       my_dbi_batch_fill_path_query
+#define sql_batch_lock_path_query       my_dbi_batch_lock_path_query[db_type]
+#define sql_batch_lock_filename_query   my_dbi_batch_lock_filename_query[db_type]
+#define sql_batch_unlock_tables_query   my_dbi_batch_unlock_tables_query[db_type]
+#define sql_batch_fill_filename_query   my_dbi_batch_fill_filename_query[db_type]
+#define sql_batch_fill_path_query       my_dbi_batch_fill_path_query[db_type]
 
 #define SQL_ROW               DBI_ROW
 #define SQL_FIELD             DBI_FIELD
Index: src/cats/sql_create.c
===================================================================
--- src/cats/sql_create.c	(revisão 6518)
+++ src/cats/sql_create.c	(cópia de trabalho)
@@ -1,7 +1,7 @@
 /*
    Bacula® - The Network Backup Solution
 
-   Copyright (C) 2000-2007 Free Software Foundation Europe e.V.
+   Copyright (C) 2000-2008 Free Software Foundation Europe e.V.
 
    The main author of Bacula is Kern Sibbald, with contributions from
    many others, a complete list can be found in the file AUTHORS.
@@ -112,7 +112,7 @@
 bool
 db_create_jobmedia_record(JCR *jcr, B_DB *mdb, JOBMEDIA_DBR *jm)
 {
-   bool ok = true;;
+   bool ok = true;
    int count;
    char ed1[50], ed2[50];
 
@@ -162,8 +162,6 @@
    return ok;
 }
 
-
-
 /* Create Unique Pool record
  * Returns: false on failure
  *          true  on success
@@ -743,7 +741,11 @@
    Dmsg0(50, "sql_batch_end started\n");
 
    if (mdb) {
-      mdb->status = 0;
+#if HAVE_DBI
+      mdb->status = (dbi_error_flag) 0;
+#else 
+      mdb->status= 0;
+#endif
    }
    return true;
 }
@@ -967,8 +969,8 @@
 static int db_create_file_record(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
 {
    int stat;
-   static char *no_digest = "0";
-   char *digest;
+   static const char *no_digest = "0";
+   const char *digest;
 
    ASSERT(ar->JobId);
    ASSERT(ar->PathId);
Index: src/stored/bscan.c
===================================================================
--- src/stored/bscan.c	(revisão 6730)
+++ src/stored/bscan.c	(cópia de trabalho)
@@ -154,7 +154,7 @@
 
    OSDependentInit();
 
-   while ((ch = getopt(argc, argv, "b:c:dD:h:p:mn:pP:rsStu:vV:w:?")) != -1) {
+   while ((ch = getopt(argc, argv, "b:c:dD:h:p:mn:pP:rsSt:u:vV:w:?")) != -1) {
       switch (ch) {
       case 'S' :
          showProgress = true;
-------------------------------------------------------------------------
This SF.net email is sponsored by the 2008 JavaOne(SM) Conference 
Don't miss this year's exciting event. There's still time to save $100. 
Use priority code J8TL2D2. 
http://ad.doubleclick.net/clk;198757673;13503038;p?http://java.sun.com/javaone
_______________________________________________
Bacula-devel mailing list
Bacula-devel@xxxxxxxxxxxxxxxxxxxxx
https://lists.sourceforge.net/lists/listinfo/bacula-devel


This mailing list archive is a service of Copilotco.