[Python-checkins] python/dist/src/Lib/bsddb __init__.py,1.2,1.3 dbobj.py,1.2,1.3 dbrecio.py,1.1,1.2 dbshelve.py,1.3,1.4 dbtables.py,1.5,1.6 dbutils.py,1.4,1.5
bwarsaw@users.sourceforge.net
Mon, 30 Dec 2002 12:52:10 -0800
- Previous message: [Python-checkins] python/dist/src/Doc/texinputs python.sty,1.100,1.101
- Next message: [Python-checkins] python/dist/src/Lib/bsddb/test test_all.py,NONE,1.1 __init__.py,1.1,1.2 test_associate.py,1.2,1.3 test_basics.py,1.2,1.3 test_compat.py,1.2,1.3 test_dbobj.py,1.2,1.3 test_dbshelve.py,1.3,1.4 test_dbtables.py,1.3,1.4 test_env_close.py,1.2,1.3 test_get_none.py,1.2,1.3 test_lock.py,1.1,1.2 test_misc.py,1.1,1.2 test_queue.py,1.1,1.2 test_recno.py,1.2,1.3 test_thread.py,1.2,1.3
- Messages sorted by:
[ date ]
[ thread ]
[ subject ]
[ author ]
Update of /cvsroot/python/python/dist/src/Lib/bsddb
In directory sc8-pr-cvs1:/tmp/cvs-serv32373/Lib/bsddb
Modified Files:
__init__.py dbobj.py dbrecio.py dbshelve.py dbtables.py
dbutils.py
Log Message:
Port BerkeleyDB 4.1 support from the pybsddb project. bsddb is now at
version 4.1.1 and works with up to BerkeleyDB 4.1.25.
Index: __init__.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/bsddb/__init__.py,v
retrieving revision 1.2
retrieving revision 1.3
diff -C2 -d -r1.2 -r1.3
*** __init__.py 24 Nov 2002 08:26:01 -0000 1.2
--- __init__.py 30 Dec 2002 20:52:07 -0000 1.3
***************
*** 34,45 ****
! """
! This package initialization module provides a compatibility interface
! that should enable bsddb3 to be a near drop-in replacement for the original
! old bsddb module. The functions and classes provided here are all
! wrappers around the new functionality provided in the bsddb3.db module.
!
! People interested in the more advanced capabilites of Berkeley DB 3.x
! should use the bsddb3.db module directly.
"""
--- 34,38 ----
! """Support for BerkeleyDB 3.1 through 4.1.
"""
***************
*** 56,60 ****
__version__ = _db.__version__
! error = _db.DBError # So bsddb3.error will mean something...
#----------------------------------------------------------------------
--- 49,53 ----
__version__ = _db.__version__
! error = _db.DBError # So bsddb.error will mean something...
#----------------------------------------------------------------------
Index: dbobj.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/bsddb/dbobj.py,v
retrieving revision 1.2
retrieving revision 1.3
diff -C2 -d -r1.2 -r1.3
*** dbobj.py 24 Nov 2002 02:35:34 -0000 1.2
--- dbobj.py 30 Dec 2002 20:52:07 -0000 1.3
***************
*** 18,22 ****
import db
-
class DBEnv:
def __init__(self, *args, **kwargs):
--- 18,21 ----
***************
*** 78,81 ****
--- 77,88 ----
return apply(self._cobj.set_get_returns_none, args, kwargs)
+ if db.version() >= (4,1):
+ def dbremove(self, *args, **kwargs):
+ return apply(self._cobj.dbremove, args, kwargs)
+ def dbrename(self, *args, **kwargs):
+ return apply(self._cobj.dbrename, args, kwargs)
+ def set_encrypt(self, *args, **kwargs):
+ return apply(self._cobj.set_encrypt, args, kwargs)
+
class DB:
***************
*** 176,177 ****
--- 183,189 ----
def set_get_returns_none(self, *args, **kwargs):
return apply(self._cobj.set_get_returns_none, args, kwargs)
+
+ if db.version() >= (4,1):
+ def set_encrypt(self, *args, **kwargs):
+ return apply(self._cobj.set_encrypt, args, kwargs)
+
Index: dbrecio.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/bsddb/dbrecio.py,v
retrieving revision 1.1
retrieving revision 1.2
diff -C2 -d -r1.1 -r1.2
*** dbrecio.py 19 Nov 2002 08:09:52 -0000 1.1
--- dbrecio.py 30 Dec 2002 20:52:07 -0000 1.2
***************
*** 1,5 ****
"""
! File-like objects that read from or write to a bsddb3 record.
This implements (nearly) all stdio methods.
--- 1,5 ----
"""
! File-like objects that read from or write to a bsddb record.
This implements (nearly) all stdio methods.
Index: dbshelve.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/bsddb/dbshelve.py,v
retrieving revision 1.3
retrieving revision 1.4
diff -C2 -d -r1.3 -r1.4
*** dbshelve.py 24 Nov 2002 02:35:34 -0000 1.3
--- dbshelve.py 30 Dec 2002 20:52:07 -0000 1.4
***************
*** 24,29 ****
#------------------------------------------------------------------------
! """
! Manage shelves of pickled objects using bsddb3 database files for the
storage.
"""
--- 24,28 ----
#------------------------------------------------------------------------
! """Manage shelves of pickled objects using bsddb database files for the
storage.
"""
***************
*** 44,48 ****
and data is a pickleable object:
! from bsddb3 import dbshelve
db = dbshelve.open(filename)
--- 43,47 ----
and data is a pickleable object:
! from bsddb import dbshelve
db = dbshelve.open(filename)
***************
*** 64,68 ****
flags = db.DB_TRUNCATE | db.DB_CREATE
else:
! raise error, "flags should be one of 'r', 'w', 'c' or 'n' or use the bsddb3.db.DB_* flags"
d = DBShelf(dbenv)
--- 63,67 ----
flags = db.DB_TRUNCATE | db.DB_CREATE
else:
! raise error, "flags should be one of 'r', 'w', 'c' or 'n' or use the bsddb.db.DB_* flags"
d = DBShelf(dbenv)
***************
*** 74,78 ****
class DBShelf:
"""
! A shelf to hold pickled objects, built upon a bsddb3 DB object. It
automatically pickles/unpickles data objects going to/from the DB.
"""
--- 73,77 ----
class DBShelf:
"""
! A shelf to hold pickled objects, built upon a bsddb DB object. It
automatically pickles/unpickles data objects going to/from the DB.
"""
***************
*** 287,288 ****
--- 286,290 ----
#---------------------------------------------------------------------------
+
+
+
Index: dbtables.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/bsddb/dbtables.py,v
retrieving revision 1.5
retrieving revision 1.6
diff -C2 -d -r1.5 -r1.6
*** dbtables.py 2 Dec 2002 16:17:46 -0000 1.5
--- dbtables.py 30 Dec 2002 20:52:07 -0000 1.6
***************
*** 114,150 ****
class bsdTableDB :
!
! # Save close() from bombing out if __init__() failed
! db = None
! env = None
!
! def __init__(self, filename, dbhome, create=0, truncate=0, mode=0600, recover=0, dbflags=0) :
"""bsdTableDB.open(filename, dbhome, create=0, truncate=0, mode=0600)
Open database name in the dbhome BerkeleyDB directory.
Use keyword arguments when calling this constructor.
"""
myflags = DB_THREAD
! if create :
! myflags = myflags | DB_CREATE
! flagsforenv = DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN | dbflags
! if recover :
flagsforenv = flagsforenv | DB_RECOVER
self.env = DBEnv()
! self.env.set_lk_detect(DB_LOCK_DEFAULT) # enable auto deadlock avoidance
self.env.open(dbhome, myflags | flagsforenv)
! if truncate :
! myflags = myflags | DB_TRUNCATE
self.db = DB(self.env)
! self.db.set_flags(DB_DUP) # allow duplicate entries [warning: be careful w/ metadata]
! self.db.open(filename, DB_BTREE, myflags, mode)
!
self.dbfilename = filename
-
# Initialize the table names list if this is a new database
! if not self.db.has_key(_table_names_key) :
! self.db.put(_table_names_key, pickle.dumps([], 1))
!
# TODO verify more of the database's metadata?
-
self.__tablecolumns = {}
--- 114,159 ----
class bsdTableDB :
! def __init__(self, filename, dbhome, create=0, truncate=0, mode=0600,
! recover=0, dbflags=0) :
"""bsdTableDB.open(filename, dbhome, create=0, truncate=0, mode=0600)
Open database name in the dbhome BerkeleyDB directory.
Use keyword arguments when calling this constructor.
"""
+ self.db = None
myflags = DB_THREAD
! if create:
! myflags |= DB_CREATE
! flagsforenv = (DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_LOG |
! DB_INIT_TXN | dbflags)
! # DB_AUTO_COMMIT isn't a valid flag for env.open()
! try:
! dbflags |= DB_AUTO_COMMIT
! except AttributeError:
! pass
! if recover:
flagsforenv = flagsforenv | DB_RECOVER
self.env = DBEnv()
! # enable auto deadlock avoidance
! self.env.set_lk_detect(DB_LOCK_DEFAULT)
self.env.open(dbhome, myflags | flagsforenv)
! if truncate:
! myflags |= DB_TRUNCATE
self.db = DB(self.env)
! # allow duplicate entries [warning: be careful w/ metadata]
! self.db.set_flags(DB_DUP)
! self.db.open(filename, DB_BTREE, dbflags | myflags, mode)
self.dbfilename = filename
# Initialize the table names list if this is a new database
! txn = self.env.txn_begin()
! try:
! if not self.db.has_key(_table_names_key, txn):
! self.db.put(_table_names_key, pickle.dumps([], 1), txn=txn)
! # Yes, bare except
! except:
! txn.abort()
! raise
! else:
! txn.commit()
# TODO verify more of the database's metadata?
self.__tablecolumns = {}
***************
*** 190,194 ****
! def CreateTable(self, table, columns) :
"""CreateTable(table, columns) - Create a new table in the database
raises TableDBError if it already exists or for other DB errors.
--- 199,203 ----
! def CreateTable(self, table, columns):
"""CreateTable(table, columns) - Create a new table in the database
raises TableDBError if it already exists or for other DB errors.
***************
*** 199,210 ****
# checking sanity of the table and column names here on
# table creation will prevent problems elsewhere.
! if contains_metastrings(table) :
! raise ValueError, "bad table name: contains reserved metastrings"
for column in columns :
! if contains_metastrings(column) :
! raise ValueError, "bad column name: contains reserved metastrings"
columnlist_key = _columns_key(table)
! if self.db.has_key(columnlist_key) :
raise TableAlreadyExists, "table already exists"
--- 208,221 ----
# checking sanity of the table and column names here on
# table creation will prevent problems elsewhere.
! if contains_metastrings(table):
! raise ValueError(
! "bad table name: contains reserved metastrings")
for column in columns :
! if contains_metastrings(column):
! raise ValueError(
! "bad column name: contains reserved metastrings")
columnlist_key = _columns_key(table)
! if self.db.has_key(columnlist_key):
raise TableAlreadyExists, "table already exists"
***************
*** 214,220 ****
# add the table name to the tablelist
! tablelist = pickle.loads(self.db.get(_table_names_key, txn=txn, flags=DB_RMW))
tablelist.append(table)
! self.db.delete(_table_names_key, txn) # delete 1st, incase we opened with DB_DUP
self.db.put(_table_names_key, pickle.dumps(tablelist, 1), txn=txn)
--- 225,233 ----
# add the table name to the tablelist
! tablelist = pickle.loads(self.db.get(_table_names_key, txn=txn,
! flags=DB_RMW))
tablelist.append(table)
! # delete 1st, in case we opened with DB_DUP
! self.db.delete(_table_names_key, txn)
self.db.put(_table_names_key, pickle.dumps(tablelist, 1), txn=txn)
***************
*** 229,233 ****
def ListTableColumns(self, table):
! """Return a list of columns in the given table. [] if the table doesn't exist.
"""
assert type(table) == type('')
--- 242,247 ----
def ListTableColumns(self, table):
! """Return a list of columns in the given table.
! [] if the table doesn't exist.
"""
assert type(table) == type('')
***************
*** 253,257 ****
def CreateOrExtendTable(self, table, columns):
! """CreateOrExtendTable(table, columns) - Create a new table in the database.
If a table of this name already exists, extend it to have any
additional columns present in the given list as well as
--- 267,273 ----
def CreateOrExtendTable(self, table, columns):
! """CreateOrExtendTable(table, columns)
!
! - Create a new table in the database.
If a table of this name already exists, extend it to have any
additional columns present in the given list as well as
***************
*** 269,279 ****
# load the current column list
! oldcolumnlist = pickle.loads(self.db.get(columnlist_key, txn=txn, flags=DB_RMW))
! # create a hash table for fast lookups of column names in the loop below
oldcolumnhash = {}
for c in oldcolumnlist:
oldcolumnhash[c] = c
! # create a new column list containing both the old and new column names
newcolumnlist = copy.copy(oldcolumnlist)
for c in columns:
--- 285,298 ----
# load the current column list
! oldcolumnlist = pickle.loads(
! self.db.get(columnlist_key, txn=txn, flags=DB_RMW))
! # create a hash table for fast lookups of column names in the
! # loop below
oldcolumnhash = {}
for c in oldcolumnlist:
oldcolumnhash[c] = c
! # create a new column list containing both the old and new
! # column names
newcolumnlist = copy.copy(oldcolumnlist)
for c in columns:
***************
*** 285,289 ****
# delete the old one first since we opened with DB_DUP
self.db.delete(columnlist_key, txn)
! self.db.put(columnlist_key, pickle.dumps(newcolumnlist, 1), txn=txn)
txn.commit()
--- 304,310 ----
# delete the old one first since we opened with DB_DUP
self.db.delete(columnlist_key, txn)
! self.db.put(columnlist_key,
! pickle.dumps(newcolumnlist, 1),
! txn=txn)
txn.commit()
***************
*** 308,312 ****
self.__tablecolumns[table] = pickle.loads(tcolpickles)
! def __new_rowid(self, table, txn=None) :
"""Create a new unique row identifier"""
unique = 0
--- 329,333 ----
self.__tablecolumns[table] = pickle.loads(tcolpickles)
! def __new_rowid(self, table, txn) :
"""Create a new unique row identifier"""
unique = 0
***************
*** 322,327 ****
# Guarantee uniqueness by adding this key to the database
try:
! self.db.put(_rowid_key(table, newid), None, txn=txn, flags=DB_NOOVERWRITE)
! except DBKeyExistsError:
pass
else:
--- 343,349 ----
# Guarantee uniqueness by adding this key to the database
try:
! self.db.put(_rowid_key(table, newid), None, txn=txn,
! flags=DB_NOOVERWRITE)
! except DBKeyExistError:
pass
else:
***************
*** 348,354 ****
# get a unique row identifier for this row
- rowid = self.__new_rowid(table)
-
txn = self.env.txn_begin()
# insert the row values into the table database
--- 370,375 ----
# get a unique row identifier for this row
txn = self.env.txn_begin()
+ rowid = self.__new_rowid(table, txn=txn)
# insert the row values into the table database
***************
*** 361,368 ****
except DBError, dberror:
! if txn :
txn.abort()
self.db.delete(_rowid_key(table, rowid))
! raise TableDBError, dberror[1]
--- 382,394 ----
except DBError, dberror:
! # WIBNI we could just abort the txn and re-raise the exception?
! # But no, because TableDBError is not related to DBError via
! # inheritance, so it would be backwards incompatible. Do the next
! # best thing.
! info = sys.exc_info()
! if txn:
txn.abort()
self.db.delete(_rowid_key(table, rowid))
! raise TableDBError, dberror[1], info[2]
***************
*** 389,399 ****
# modify the requested column
try:
! dataitem = self.db.get(_data_key(table, column, rowid), txn)
! self.db.delete(_data_key(table, column, rowid), txn)
except DBNotFoundError:
! dataitem = None # XXXXXXX row key somehow didn't exist, assume no error
dataitem = mappings[column](dataitem)
if dataitem <> None:
! self.db.put(_data_key(table, column, rowid), dataitem, txn=txn)
txn.commit()
txn = None
--- 415,433 ----
# modify the requested column
try:
! dataitem = self.db.get(
! _data_key(table, column, rowid),
! txn)
! self.db.delete(
! _data_key(table, column, rowid),
! txn)
except DBNotFoundError:
! # XXXXXXX row key somehow didn't exist, assume no
! # error
! dataitem = None
dataitem = mappings[column](dataitem)
if dataitem <> None:
! self.db.put(
! _data_key(table, column, rowid),
! dataitem, txn=txn)
txn.commit()
txn = None
***************
*** 426,437 ****
# delete the data key
try:
! self.db.delete(_data_key(table, column, rowid), txn)
except DBNotFoundError:
! pass # XXXXXXX column may not exist, assume no error
try:
self.db.delete(_rowid_key(table, rowid), txn)
except DBNotFoundError:
! pass # XXXXXXX row key somehow didn't exist, assume no error
txn.commit()
txn = None
--- 460,474 ----
# delete the data key
try:
! self.db.delete(_data_key(table, column, rowid),
! txn)
except DBNotFoundError:
! # XXXXXXX column may not exist, assume no error
! pass
try:
self.db.delete(_rowid_key(table, rowid), txn)
except DBNotFoundError:
! # XXXXXXX row key somehow didn't exist, assume no error
! pass
txn.commit()
txn = None
***************
*** 491,495 ****
rejected_rowids = {} # keys are rowids that do not match
! # attempt to sort the conditions in such a way as to minimize full column lookups
def cmp_conditions(atuple, btuple):
a = atuple[1]
--- 528,533 ----
rejected_rowids = {} # keys are rowids that do not match
! # attempt to sort the conditions in such a way as to minimize full
! # column lookups
def cmp_conditions(atuple, btuple):
a = atuple[1]
***************
*** 497,503 ****
if type(a) == type(b) :
if isinstance(a, PrefixCond) and isinstance(b, PrefixCond):
! return cmp(len(b.prefix), len(a.prefix)) # longest prefix first
if isinstance(a, LikeCond) and isinstance(b, LikeCond):
! return cmp(len(b.likestr), len(a.likestr)) # longest likestr first
return 0
if isinstance(a, ExactCond):
--- 535,543 ----
if type(a) == type(b) :
if isinstance(a, PrefixCond) and isinstance(b, PrefixCond):
! # longest prefix first
! return cmp(len(b.prefix), len(a.prefix))
if isinstance(a, LikeCond) and isinstance(b, LikeCond):
! # longest likestr first
! return cmp(len(b.likestr), len(a.likestr))
return 0
if isinstance(a, ExactCond):
***************
*** 566,570 ****
continue
try:
! rowdata[column] = self.db.get(_data_key(table, column, rowid))
except DBError, dberror:
if dberror[0] != DB_NOTFOUND :
--- 606,611 ----
continue
try:
! rowdata[column] = self.db.get(
! _data_key(table, column, rowid))
except DBError, dberror:
if dberror[0] != DB_NOTFOUND :
***************
*** 615,624 ****
# delete the tablename from the table name list
! tablelist = pickle.loads(self.db.get(_table_names_key, txn=txn, flags=DB_RMW))
try:
tablelist.remove(table)
except ValueError:
! pass # hmm, it wasn't there, oh well, that's what we want.
! self.db.delete(_table_names_key, txn) # delete 1st, incase we opened with DB_DUP
self.db.put(_table_names_key, pickle.dumps(tablelist, 1), txn=txn)
--- 656,668 ----
# delete the tablename from the table name list
! tablelist = pickle.loads(
! self.db.get(_table_names_key, txn=txn, flags=DB_RMW))
try:
tablelist.remove(table)
except ValueError:
! # hmm, it wasn't there, oh well, that's what we want.
! pass
! # delete 1st, incase we opened with DB_DUP
! self.db.delete(_table_names_key, txn)
self.db.put(_table_names_key, pickle.dumps(tablelist, 1), txn=txn)
Index: dbutils.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/bsddb/dbutils.py,v
retrieving revision 1.4
retrieving revision 1.5
diff -C2 -d -r1.4 -r1.5
*** dbutils.py 2 Dec 2002 16:08:54 -0000 1.4
--- dbutils.py 30 Dec 2002 20:52:08 -0000 1.5
***************
*** 23,39 ****
#
# import the time.sleep function in a namespace safe way to allow
! # "from bsddb3.db import *"
#
! from time import sleep
! _sleep = sleep
! del sleep
! import _bsddb
! _deadlock_MinSleepTime = 1.0/64 # always sleep at least N seconds between retrys
! _deadlock_MaxSleepTime = 3.14159 # never sleep more than N seconds between retrys
- _deadlock_VerboseFile = None # Assign a file object to this for a "sleeping"
- # message to be written to it each retry
def DeadlockWrap(function, *_args, **_kwargs):
--- 23,41 ----
#
# import the time.sleep function in a namespace safe way to allow
! # "from bsddb.db import *"
#
! from time import sleep as _sleep
! from bsddb import _db
! # always sleep at least N seconds between retrys
! _deadlock_MinSleepTime = 1.0/64
! # never sleep more than N seconds between retrys
! _deadlock_MaxSleepTime = 3.14159
!
! # Assign a file object to this for a "sleeping" message to be written to it
! # each retry
! _deadlock_VerboseFile = None
def DeadlockWrap(function, *_args, **_kwargs):
***************
*** 58,71 ****
while 1:
try:
! return apply(function, _args, _kwargs)
! except _bsddb.DBLockDeadlockError:
if _deadlock_VerboseFile:
! _deadlock_VerboseFile.write('dbutils.DeadlockWrap: sleeping %1.3f\n' % sleeptime)
_sleep(sleeptime)
# exponential backoff in the sleep time
! sleeptime = sleeptime * 2
! if sleeptime > _deadlock_MaxSleepTime :
sleeptime = _deadlock_MaxSleepTime
! max_retries = max_retries - 1
if max_retries == -1:
raise
--- 60,74 ----
while 1:
try:
! return function(*_args, **_kwargs)
! except _db.DBLockDeadlockError:
if _deadlock_VerboseFile:
! _deadlock_VerboseFile.write(
! 'dbutils.DeadlockWrap: sleeping %1.3f\n' % sleeptime)
_sleep(sleeptime)
# exponential backoff in the sleep time
! sleeptime *= 2
! if sleeptime > _deadlock_MaxSleepTime:
sleeptime = _deadlock_MaxSleepTime
! max_retries -= 1
if max_retries == -1:
raise
- Previous message: [Python-checkins] python/dist/src/Doc/texinputs python.sty,1.100,1.101
- Next message: [Python-checkins] python/dist/src/Lib/bsddb/test test_all.py,NONE,1.1 __init__.py,1.1,1.2 test_associate.py,1.2,1.3 test_basics.py,1.2,1.3 test_compat.py,1.2,1.3 test_dbobj.py,1.2,1.3 test_dbshelve.py,1.3,1.4 test_dbtables.py,1.3,1.4 test_env_close.py,1.2,1.3 test_get_none.py,1.2,1.3 test_lock.py,1.1,1.2 test_misc.py,1.1,1.2 test_queue.py,1.1,1.2 test_recno.py,1.2,1.3 test_thread.py,1.2,1.3
- Messages sorted by:
[ date ]
[ thread ]
[ subject ]
[ author ]