[Python-3000-checkins] r64122 - in python/branches/py3k: Doc/includes/mp_benchmarks.py Doc/includes/mp_distributing.py Doc/includes/mp_newtype.py Doc/includes/mp_pool.py Doc/includes/mp_synchronize.py Doc/includes/mp_webserver.py Doc/includes/mp_workers.py Doc/library/multiprocessing.rst Doc/library/someos.rst Lib/multiprocessing Lib/multiprocessing/__init__.py Lib/multiprocessing/connection.py Lib/multiprocessing/dummy/__init__.py Lib/multiprocessing/dummy/connection.py Lib/multiprocessing/forking.py Lib/multiprocessing/heap.py Lib/multiprocessing/managers.py Lib/multiprocessing/pool.py Lib/multiprocessing/process.py Lib/multiprocessing/queues.py Lib/multiprocessing/reduction.py Lib/multiprocessing/sharedctypes.py Lib/multiprocessing/synchronize.py Lib/multiprocessing/util.py Lib/test/test_multiprocessing.py Modules/_multiprocessing Modules/_multiprocessing/connection.h Modules/_multiprocessing/multiprocessing.c setup.py

benjamin.peterson python-3000-checkins@python.org
Wed Jun 11 18:44:06 CEST 2008


Author: benjamin.peterson
Date: Wed Jun 11 18:44:04 2008
New Revision: 64122

Log:
Merged revisions 64104,64117 via svnmerge from 
svn+ssh://pythondev@svn.python.org/python/trunk

........
  r64104 | benjamin.peterson | 2008-06-10 21:40:25 -0500 (Tue, 10 Jun 2008) | 2 lines
  
  add the multiprocessing package to fulfill PEP 371
........
  r64117 | benjamin.peterson | 2008-06-11 07:26:31 -0500 (Wed, 11 Jun 2008) | 2 lines
  
  fix import of multiprocessing by juggling imports
........
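
The multiprocessing package added here implements PEP 371: a threading-style
API backed by processes.  As a quick orientation (not part of this changeset;
the worker function is illustrative), a minimal usage sketch of the merged
API looks like:

    from multiprocessing import Process, Queue, current_process

    def worker(q):
        # runs in a child process; report the auto-assigned process name
        q.put('hello from ' + current_process().get_name())

    if __name__ == '__main__':
        q = Queue()
        p = Process(target=worker, args=(q,))
        p.start()
        print(q.get())    # e.g. 'hello from Process-1'
        p.join()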


Added:
   python/branches/py3k/Doc/includes/mp_benchmarks.py
      - copied unchanged from r64104, /python/trunk/Doc/includes/mp_benchmarks.py
   python/branches/py3k/Doc/includes/mp_distributing.py
      - copied, changed from r64104, /python/trunk/Doc/includes/mp_distributing.py
   python/branches/py3k/Doc/includes/mp_newtype.py
      - copied unchanged from r64104, /python/trunk/Doc/includes/mp_newtype.py
   python/branches/py3k/Doc/includes/mp_pool.py
      - copied unchanged from r64104, /python/trunk/Doc/includes/mp_pool.py
   python/branches/py3k/Doc/includes/mp_synchronize.py
      - copied unchanged from r64104, /python/trunk/Doc/includes/mp_synchronize.py
   python/branches/py3k/Doc/includes/mp_webserver.py
      - copied unchanged from r64104, /python/trunk/Doc/includes/mp_webserver.py
   python/branches/py3k/Doc/includes/mp_workers.py
      - copied unchanged from r64104, /python/trunk/Doc/includes/mp_workers.py
   python/branches/py3k/Doc/library/multiprocessing.rst
      - copied unchanged from r64104, /python/trunk/Doc/library/multiprocessing.rst
   python/branches/py3k/Lib/multiprocessing/
      - copied from r64104, /python/trunk/Lib/multiprocessing/
   python/branches/py3k/Lib/test/test_multiprocessing.py
      - copied, changed from r64104, /python/trunk/Lib/test/test_multiprocessing.py
   python/branches/py3k/Modules/_multiprocessing/
      - copied from r64104, /python/trunk/Modules/_multiprocessing/
Modified:
   python/branches/py3k/   (props changed)
   python/branches/py3k/Doc/library/someos.rst
   python/branches/py3k/Lib/multiprocessing/__init__.py
   python/branches/py3k/Lib/multiprocessing/connection.py
   python/branches/py3k/Lib/multiprocessing/dummy/__init__.py
   python/branches/py3k/Lib/multiprocessing/dummy/connection.py
   python/branches/py3k/Lib/multiprocessing/forking.py
   python/branches/py3k/Lib/multiprocessing/heap.py
   python/branches/py3k/Lib/multiprocessing/managers.py
   python/branches/py3k/Lib/multiprocessing/pool.py
   python/branches/py3k/Lib/multiprocessing/process.py
   python/branches/py3k/Lib/multiprocessing/queues.py
   python/branches/py3k/Lib/multiprocessing/reduction.py
   python/branches/py3k/Lib/multiprocessing/sharedctypes.py
   python/branches/py3k/Lib/multiprocessing/synchronize.py
   python/branches/py3k/Lib/multiprocessing/util.py
   python/branches/py3k/Modules/_multiprocessing/connection.h
   python/branches/py3k/Modules/_multiprocessing/multiprocessing.c
   python/branches/py3k/setup.py

Copied: python/branches/py3k/Doc/includes/mp_distributing.py (from r64104, /python/trunk/Doc/includes/mp_distributing.py)
==============================================================================
--- /python/trunk/Doc/includes/mp_distributing.py	(original)
+++ python/branches/py3k/Doc/includes/mp_distributing.py	Wed Jun 11 18:44:04 2008
@@ -1,362 +1,362 @@
-#
-# Module to allow spawning of processes on a foreign host
-#
-# Depends on `multiprocessing` package -- tested with `processing-0.60`
-#
-
-__all__ = ['Cluster', 'Host', 'get_logger', 'current_process']
-
-#
-# Imports
-#
-
-import sys
-import os
-import tarfile
-import shutil
-import subprocess
-import logging
-import itertools
-import Queue
-
-try:
-    import cPickle as pickle
-except ImportError:
-    import pickle
-
-from multiprocessing import Process, current_process, cpu_count
-from multiprocessing import util, managers, connection, forking, pool
-
-#
-# Logging
-#
-
-def get_logger():
-    return _logger
-
-_logger = logging.getLogger('distributing')
-_logger.propagate = 0
-
-util.fix_up_logger(_logger)
-_formatter = logging.Formatter(util.DEFAULT_LOGGING_FORMAT)
-_handler = logging.StreamHandler()
-_handler.setFormatter(_formatter)
-_logger.addHandler(_handler)
-
-info = _logger.info
-debug = _logger.debug
-
-#
-# Get number of cpus
-#
-
-try:
-    slot_count = cpu_count()
-except NotImplementedError:
-    slot_count = 1
-        
-#
-# Manager type which spawns subprocesses
-#
-
-class HostManager(managers.SyncManager):
-    '''
-    Manager type used for spawning processes on a (presumably) foreign host
-    '''    
-    def __init__(self, address, authkey):
-        managers.SyncManager.__init__(self, address, authkey)
-        self._name = 'Host-unknown'
-
-    def Process(self, group=None, target=None, name=None, args=(), kwargs={}):
-        if hasattr(sys.modules['__main__'], '__file__'):
-            main_path = os.path.basename(sys.modules['__main__'].__file__)
-        else:
-            main_path = None
-        data = pickle.dumps((target, args, kwargs))
-        p = self._RemoteProcess(data, main_path)
-        if name is None:
-            temp = self._name.split('Host-')[-1] + '/Process-%s'
-            name = temp % ':'.join(map(str, p.get_identity()))
-        p.set_name(name)
-        return p
-
-    @classmethod
-    def from_address(cls, address, authkey):
-        manager = cls(address, authkey)
-        managers.transact(address, authkey, 'dummy')
-        manager._state.value = managers.State.STARTED
-        manager._name = 'Host-%s:%s' % manager.address
-        manager.shutdown = util.Finalize(
-            manager, HostManager._finalize_host,
-            args=(manager._address, manager._authkey, manager._name),
-            exitpriority=-10
-            )
-        return manager
-
-    @staticmethod
-    def _finalize_host(address, authkey, name):
-        managers.transact(address, authkey, 'shutdown')
-        
-    def __repr__(self):
-        return '<Host(%s)>' % self._name
-
-#
-# Process subclass representing a process on (possibly) a remote machine
-#
-
-class RemoteProcess(Process):
-    '''
-    Represents a process started on a remote host
-    '''
-    def __init__(self, data, main_path):
-        assert not main_path or os.path.basename(main_path) == main_path
-        Process.__init__(self)
-        self._data = data
-        self._main_path = main_path
-        
-    def _bootstrap(self):
-        forking.prepare({'main_path': self._main_path})
-        self._target, self._args, self._kwargs = pickle.loads(self._data)
-        return Process._bootstrap(self)
-        
-    def get_identity(self):
-        return self._identity
-
-HostManager.register('_RemoteProcess', RemoteProcess)
-
-#
-# A Pool class that uses a cluster
-#
-
-class DistributedPool(pool.Pool):
-    
-    def __init__(self, cluster, processes=None, initializer=None, initargs=()):
-        self._cluster = cluster
-        self.Process = cluster.Process
-        pool.Pool.__init__(self, processes or len(cluster),
-                           initializer, initargs)
-        
-    def _setup_queues(self):
-        self._inqueue = self._cluster._SettableQueue()
-        self._outqueue = self._cluster._SettableQueue()
-        self._quick_put = self._inqueue.put
-        self._quick_get = self._outqueue.get
-
-    @staticmethod
-    def _help_stuff_finish(inqueue, task_handler, size):
-        inqueue.set_contents([None] * size)
-
-#
-# Manager type which starts host managers on other machines
-#
-
-def LocalProcess(**kwds):
-    p = Process(**kwds)
-    p.set_name('localhost/' + p.get_name())
-    return p
-
-class Cluster(managers.SyncManager):
-    '''
-    Represents a collection of slots running on various hosts.
-    
-    `Cluster` is a subclass of `SyncManager` so it allows creation of
-    various types of shared objects.
-    '''
-    def __init__(self, hostlist, modules):
-        managers.SyncManager.__init__(self, address=('localhost', 0))
-        self._hostlist = hostlist
-        self._modules = modules
-        if __name__ not in modules:
-            modules.append(__name__)
-        files = [sys.modules[name].__file__ for name in modules]
-        for i, file in enumerate(files):
-            if file.endswith('.pyc') or file.endswith('.pyo'):
-                files[i] = file[:-4] + '.py'
-        self._files = [os.path.abspath(file) for file in files]
-        
-    def start(self):
-        managers.SyncManager.start(self)
-        
-        l = connection.Listener(family='AF_INET', authkey=self._authkey)
-        
-        for i, host in enumerate(self._hostlist):
-            host._start_manager(i, self._authkey, l.address, self._files)
-
-        for host in self._hostlist:
-            if host.hostname != 'localhost':
-                conn = l.accept()
-                i, address, cpus = conn.recv()
-                conn.close()
-                other_host = self._hostlist[i]
-                other_host.manager = HostManager.from_address(address,
-                                                              self._authkey)
-                other_host.slots = other_host.slots or cpus
-                other_host.Process = other_host.manager.Process
-            else:
-                host.slots = host.slots or slot_count
-                host.Process = LocalProcess
-
-        self._slotlist = [
-            Slot(host) for host in self._hostlist for i in range(host.slots)
-            ]
-        self._slot_iterator = itertools.cycle(self._slotlist)
-        self._base_shutdown = self.shutdown
-        del self.shutdown
-        
-    def shutdown(self):
-        for host in self._hostlist:
-            if host.hostname != 'localhost':
-                host.manager.shutdown()
-        self._base_shutdown()
-        
-    def Process(self, group=None, target=None, name=None, args=(), kwargs={}):
-        slot = self._slot_iterator.next()
-        return slot.Process(
-            group=group, target=target, name=name, args=args, kwargs=kwargs
-            )
-
-    def Pool(self, processes=None, initializer=None, initargs=()):
-        return DistributedPool(self, processes, initializer, initargs)
-    
-    def __getitem__(self, i):
-        return self._slotlist[i]
-
-    def __len__(self):
-        return len(self._slotlist)
-
-    def __iter__(self):
-        return iter(self._slotlist)
-
-#
-# Queue subclass used by distributed pool
-#
-
-class SettableQueue(Queue.Queue):
-    def empty(self):
-        return not self.queue
-    def full(self):
-        return self.maxsize > 0 and len(self.queue) == self.maxsize
-    def set_contents(self, contents):
-        # length of contents must be at least as large as the number of
-        # threads which have potentially called get()
-        self.not_empty.acquire()
-        try:
-            self.queue.clear()
-            self.queue.extend(contents)
-            self.not_empty.notifyAll()
-        finally:
-            self.not_empty.release()
-            
-Cluster.register('_SettableQueue', SettableQueue)
-
-#
-# Class representing a notional cpu in the cluster
-#
-
-class Slot(object):
-    def __init__(self, host):
-        self.host = host
-        self.Process = host.Process
-
-#
-# Host
-#
-
-class Host(object):
-    '''
-    Represents a host to use as a node in a cluster.
-
-    `hostname` gives the name of the host.  If hostname is not
-    "localhost" then ssh is used to log in to the host.  To log in as
-    a different user use a host name of the form
-    "username at somewhere.org"
-
-    `slots` is used to specify the number of slots for processes on
-    the host.  This affects how often processes will be allocated to
-    this host.  Normally this should be equal to the number of cpus on
-    that host.
-    '''
-    def __init__(self, hostname, slots=None):
-        self.hostname = hostname
-        self.slots = slots
-        
-    def _start_manager(self, index, authkey, address, files):
-        if self.hostname != 'localhost':
-            tempdir = copy_to_remote_temporary_directory(self.hostname, files)
-            debug('startup files copied to %s:%s', self.hostname, tempdir)
-            p = subprocess.Popen(
-                ['ssh', self.hostname, 'python', '-c',
-                 '"import os; os.chdir(%r); '
-                 'from distributing import main; main()"' % tempdir],
-                stdin=subprocess.PIPE
-                )
-            data = dict(
-                name='BootstrappingHost', index=index,
-                dist_log_level=_logger.getEffectiveLevel(),
-                dir=tempdir, authkey=str(authkey), parent_address=address
-                )
-            pickle.dump(data, p.stdin, pickle.HIGHEST_PROTOCOL)
-            p.stdin.close()
-
-#
-# Copy files to remote directory, returning name of directory
-#
-
-unzip_code = '''"
-import tempfile, os, sys, tarfile
-tempdir = tempfile.mkdtemp(prefix='distrib-')
-os.chdir(tempdir)
-tf = tarfile.open(fileobj=sys.stdin, mode='r|gz')
-for ti in tf:
-    tf.extract(ti)
-print tempdir
-"'''
-
-def copy_to_remote_temporary_directory(host, files):
-    p = subprocess.Popen(
-        ['ssh', host, 'python', '-c', unzip_code],
-        stdout=subprocess.PIPE, stdin=subprocess.PIPE
-        )
-    tf = tarfile.open(fileobj=p.stdin, mode='w|gz')
-    for name in files:
-        tf.add(name, os.path.basename(name))
-    tf.close()
-    p.stdin.close()
-    return p.stdout.read().rstrip()
-
-#
-# Code which runs a host manager
-#
-
-def main():   
-    # get data from parent over stdin
-    data = pickle.load(sys.stdin)
-    sys.stdin.close()
-
-    # set some stuff
-    _logger.setLevel(data['dist_log_level'])
-    forking.prepare(data)
-    
-    # create server for a `HostManager` object
-    server = managers.Server(HostManager._registry, ('', 0), data['authkey'])
-    current_process()._server = server
-    
-    # report server address and number of cpus back to parent
-    conn = connection.Client(data['parent_address'], authkey=data['authkey'])
-    conn.send((data['index'], server.address, slot_count))
-    conn.close()
-    
-    # set name etc
-    current_process().set_name('Host-%s:%s' % server.address)
-    util._run_after_forkers()
-    
-    # register a cleanup function
-    def cleanup(directory):
-        debug('removing directory %s', directory)
-        shutil.rmtree(directory)
-        debug('shutting down host manager')
-    util.Finalize(None, cleanup, args=[data['dir']], exitpriority=0)
-    
-    # start host manager
-    debug('remote host manager starting in %s', data['dir'])
-    server.serve_forever()
+#
+# Module to allow spawning of processes on a foreign host
+#
+# Depends on `multiprocessing` package -- tested with `processing-0.60`
+#
+
+__all__ = ['Cluster', 'Host', 'get_logger', 'current_process']
+
+#
+# Imports
+#
+
+import sys
+import os
+import tarfile
+import shutil
+import subprocess
+import logging
+import itertools
+import Queue
+
+try:
+    import cPickle as pickle
+except ImportError:
+    import pickle
+
+from multiprocessing import Process, current_process, cpu_count
+from multiprocessing import util, managers, connection, forking, pool
+
+#
+# Logging
+#
+
+def get_logger():
+    return _logger
+
+_logger = logging.getLogger('distributing')
+_logger.propagate = 0
+
+util.fix_up_logger(_logger)
+_formatter = logging.Formatter(util.DEFAULT_LOGGING_FORMAT)
+_handler = logging.StreamHandler()
+_handler.setFormatter(_formatter)
+_logger.addHandler(_handler)
+
+info = _logger.info
+debug = _logger.debug
+
+#
+# Get number of cpus
+#
+
+try:
+    slot_count = cpu_count()
+except NotImplementedError:
+    slot_count = 1
+
+#
+# Manager type which spawns subprocesses
+#
+
+class HostManager(managers.SyncManager):
+    '''
+    Manager type used for spawning processes on a (presumably) foreign host
+    '''
+    def __init__(self, address, authkey):
+        managers.SyncManager.__init__(self, address, authkey)
+        self._name = 'Host-unknown'
+
+    def Process(self, group=None, target=None, name=None, args=(), kwargs={}):
+        if hasattr(sys.modules['__main__'], '__file__'):
+            main_path = os.path.basename(sys.modules['__main__'].__file__)
+        else:
+            main_path = None
+        data = pickle.dumps((target, args, kwargs))
+        p = self._RemoteProcess(data, main_path)
+        if name is None:
+            temp = self._name.split('Host-')[-1] + '/Process-%s'
+            name = temp % ':'.join(map(str, p.get_identity()))
+        p.set_name(name)
+        return p
+
+    @classmethod
+    def from_address(cls, address, authkey):
+        manager = cls(address, authkey)
+        managers.transact(address, authkey, 'dummy')
+        manager._state.value = managers.State.STARTED
+        manager._name = 'Host-%s:%s' % manager.address
+        manager.shutdown = util.Finalize(
+            manager, HostManager._finalize_host,
+            args=(manager._address, manager._authkey, manager._name),
+            exitpriority=-10
+            )
+        return manager
+
+    @staticmethod
+    def _finalize_host(address, authkey, name):
+        managers.transact(address, authkey, 'shutdown')
+
+    def __repr__(self):
+        return '<Host(%s)>' % self._name
+
+#
+# Process subclass representing a process on (possibly) a remote machine
+#
+
+class RemoteProcess(Process):
+    '''
+    Represents a process started on a remote host
+    '''
+    def __init__(self, data, main_path):
+        assert not main_path or os.path.basename(main_path) == main_path
+        Process.__init__(self)
+        self._data = data
+        self._main_path = main_path
+
+    def _bootstrap(self):
+        forking.prepare({'main_path': self._main_path})
+        self._target, self._args, self._kwargs = pickle.loads(self._data)
+        return Process._bootstrap(self)
+
+    def get_identity(self):
+        return self._identity
+
+HostManager.register('_RemoteProcess', RemoteProcess)
+
+#
+# A Pool class that uses a cluster
+#
+
+class DistributedPool(pool.Pool):
+
+    def __init__(self, cluster, processes=None, initializer=None, initargs=()):
+        self._cluster = cluster
+        self.Process = cluster.Process
+        pool.Pool.__init__(self, processes or len(cluster),
+                           initializer, initargs)
+
+    def _setup_queues(self):
+        self._inqueue = self._cluster._SettableQueue()
+        self._outqueue = self._cluster._SettableQueue()
+        self._quick_put = self._inqueue.put
+        self._quick_get = self._outqueue.get
+
+    @staticmethod
+    def _help_stuff_finish(inqueue, task_handler, size):
+        inqueue.set_contents([None] * size)
+
+#
+# Manager type which starts host managers on other machines
+#
+
+def LocalProcess(**kwds):
+    p = Process(**kwds)
+    p.set_name('localhost/' + p.get_name())
+    return p
+
+class Cluster(managers.SyncManager):
+    '''
+    Represents a collection of slots running on various hosts.
+
+    `Cluster` is a subclass of `SyncManager` so it allows creation of
+    various types of shared objects.
+    '''
+    def __init__(self, hostlist, modules):
+        managers.SyncManager.__init__(self, address=('localhost', 0))
+        self._hostlist = hostlist
+        self._modules = modules
+        if __name__ not in modules:
+            modules.append(__name__)
+        files = [sys.modules[name].__file__ for name in modules]
+        for i, file in enumerate(files):
+            if file.endswith('.pyc') or file.endswith('.pyo'):
+                files[i] = file[:-4] + '.py'
+        self._files = [os.path.abspath(file) for file in files]
+
+    def start(self):
+        managers.SyncManager.start(self)
+
+        l = connection.Listener(family='AF_INET', authkey=self._authkey)
+
+        for i, host in enumerate(self._hostlist):
+            host._start_manager(i, self._authkey, l.address, self._files)
+
+        for host in self._hostlist:
+            if host.hostname != 'localhost':
+                conn = l.accept()
+                i, address, cpus = conn.recv()
+                conn.close()
+                other_host = self._hostlist[i]
+                other_host.manager = HostManager.from_address(address,
+                                                              self._authkey)
+                other_host.slots = other_host.slots or cpus
+                other_host.Process = other_host.manager.Process
+            else:
+                host.slots = host.slots or slot_count
+                host.Process = LocalProcess
+
+        self._slotlist = [
+            Slot(host) for host in self._hostlist for i in range(host.slots)
+            ]
+        self._slot_iterator = itertools.cycle(self._slotlist)
+        self._base_shutdown = self.shutdown
+        del self.shutdown
+
+    def shutdown(self):
+        for host in self._hostlist:
+            if host.hostname != 'localhost':
+                host.manager.shutdown()
+        self._base_shutdown()
+
+    def Process(self, group=None, target=None, name=None, args=(), kwargs={}):
+        slot = self._slot_iterator.next()
+        return slot.Process(
+            group=group, target=target, name=name, args=args, kwargs=kwargs
+            )
+
+    def Pool(self, processes=None, initializer=None, initargs=()):
+        return DistributedPool(self, processes, initializer, initargs)
+
+    def __getitem__(self, i):
+        return self._slotlist[i]
+
+    def __len__(self):
+        return len(self._slotlist)
+
+    def __iter__(self):
+        return iter(self._slotlist)
+
+#
+# Queue subclass used by distributed pool
+#
+
+class SettableQueue(Queue.Queue):
+    def empty(self):
+        return not self.queue
+    def full(self):
+        return self.maxsize > 0 and len(self.queue) == self.maxsize
+    def set_contents(self, contents):
+        # length of contents must be at least as large as the number of
+        # threads which have potentially called get()
+        self.not_empty.acquire()
+        try:
+            self.queue.clear()
+            self.queue.extend(contents)
+            self.not_empty.notifyAll()
+        finally:
+            self.not_empty.release()
+
+Cluster.register('_SettableQueue', SettableQueue)
+
+#
+# Class representing a notional cpu in the cluster
+#
+
+class Slot(object):
+    def __init__(self, host):
+        self.host = host
+        self.Process = host.Process
+
+#
+# Host
+#
+
+class Host(object):
+    '''
+    Represents a host to use as a node in a cluster.
+
+    `hostname` gives the name of the host.  If hostname is not
+    "localhost" then ssh is used to log in to the host.  To log in as
+    a different user use a host name of the form
+    "username at somewhere.org"
+
+    `slots` is used to specify the number of slots for processes on
+    the host.  This affects how often processes will be allocated to
+    this host.  Normally this should be equal to the number of cpus on
+    that host.
+    '''
+    def __init__(self, hostname, slots=None):
+        self.hostname = hostname
+        self.slots = slots
+
+    def _start_manager(self, index, authkey, address, files):
+        if self.hostname != 'localhost':
+            tempdir = copy_to_remote_temporary_directory(self.hostname, files)
+            debug('startup files copied to %s:%s', self.hostname, tempdir)
+            p = subprocess.Popen(
+                ['ssh', self.hostname, 'python', '-c',
+                 '"import os; os.chdir(%r); '
+                 'from distributing import main; main()"' % tempdir],
+                stdin=subprocess.PIPE
+                )
+            data = dict(
+                name='BootstrappingHost', index=index,
+                dist_log_level=_logger.getEffectiveLevel(),
+                dir=tempdir, authkey=str(authkey), parent_address=address
+                )
+            pickle.dump(data, p.stdin, pickle.HIGHEST_PROTOCOL)
+            p.stdin.close()
+
+#
+# Copy files to remote directory, returning name of directory
+#
+
+unzip_code = '''"
+import tempfile, os, sys, tarfile
+tempdir = tempfile.mkdtemp(prefix='distrib-')
+os.chdir(tempdir)
+tf = tarfile.open(fileobj=sys.stdin, mode='r|gz')
+for ti in tf:
+    tf.extract(ti)
+print tempdir
+"'''
+
+def copy_to_remote_temporary_directory(host, files):
+    p = subprocess.Popen(
+        ['ssh', host, 'python', '-c', unzip_code],
+        stdout=subprocess.PIPE, stdin=subprocess.PIPE
+        )
+    tf = tarfile.open(fileobj=p.stdin, mode='w|gz')
+    for name in files:
+        tf.add(name, os.path.basename(name))
+    tf.close()
+    p.stdin.close()
+    return p.stdout.read().rstrip()
+
+#
+# Code which runs a host manager
+#
+
+def main():
+    # get data from parent over stdin
+    data = pickle.load(sys.stdin)
+    sys.stdin.close()
+
+    # set some stuff
+    _logger.setLevel(data['dist_log_level'])
+    forking.prepare(data)
+
+    # create server for a `HostManager` object
+    server = managers.Server(HostManager._registry, ('', 0), data['authkey'])
+    current_process()._server = server
+
+    # report server address and number of cpus back to parent
+    conn = connection.Client(data['parent_address'], authkey=data['authkey'])
+    conn.send((data['index'], server.address, slot_count))
+    conn.close()
+
+    # set name etc
+    current_process().set_name('Host-%s:%s' % server.address)
+    util._run_after_forkers()
+
+    # register a cleanup function
+    def cleanup(directory):
+        debug('removing directory %s', directory)
+        shutil.rmtree(directory)
+        debug('shutting down host manager')
+    util.Finalize(None, cleanup, args=[data['dir']], exitpriority=0)
+
+    # start host manager
+    debug('remote host manager starting in %s', data['dir'])
+    server.serve_forever()

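The mp_distributing module above is driven from a separate script.  A
hypothetical sketch follows (the remote host name, slot counts and module
list are placeholders; it assumes mp_distributing.py is importable and the
remote host is reachable over ssh):

    import mp_distributing

    def compute(x):
        return x * x

    if __name__ == '__main__':
        cluster = mp_distributing.Cluster(
            [mp_distributing.Host('localhost', slots=2),
             mp_distributing.Host('user@remote.example.org', slots=4)],
            ['__main__']
            )
        cluster.start()
        pool = cluster.Pool(6)    # a DistributedPool spread over both hosts
        print(pool.map(compute, range(10)))
        cluster.shutdown()
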
Modified: python/branches/py3k/Doc/library/someos.rst
==============================================================================
--- python/branches/py3k/Doc/library/someos.rst	(original)
+++ python/branches/py3k/Doc/library/someos.rst	Wed Jun 11 18:44:04 2008
@@ -15,9 +15,9 @@
 
    select.rst
    threading.rst
-   dummy_threading.rst
    _thread.rst
    _dummy_thread.rst
+   multiprocessing.rst
    mmap.rst
    readline.rst
    rlcompleter.rst

Modified: python/branches/py3k/Lib/multiprocessing/__init__.py
==============================================================================
--- /python/trunk/Lib/multiprocessing/__init__.py	(original)
+++ python/branches/py3k/Lib/multiprocessing/__init__.py	Wed Jun 11 18:44:04 2008
@@ -1,269 +1,270 @@
-#
-# Package analogous to 'threading.py' but using processes
-#
-# multiprocessing/__init__.py
-#
-# This package is intended to duplicate the functionality (and much of
-# the API) of threading.py but uses processes instead of threads.  A
-# subpackage 'multiprocessing.dummy' has the same API but is a simple
-# wrapper for 'threading'.
-#
-# Try calling `multiprocessing.doc.main()` to read the html
-# documentation in a web browser.
-#
-#
-# Copyright (c) 2006-2008, R Oudkerk
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# 1. Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in the
-#    documentation and/or other materials provided with the distribution.
-# 3. Neither the name of author nor the names of any contributors may be
-#    used to endorse or promote products derived from this software
-#    without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-#
-
-__version__ = '0.70a1'
-
-__all__ = [
-    'Process', 'current_process', 'active_children', 'freeze_support',
-    'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',
-    'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
-    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
-    'Event', 'Queue', 'JoinableQueue', 'Pool', 'Value', 'Array',
-    'RawValue', 'RawArray'
-    ]
-
-__author__ = 'R. Oudkerk (r.m.oudkerk at gmail.com)'
-
-#
-# Imports
-#
-
-import os
-import sys
-
-import _multiprocessing
-from multiprocessing.process import Process, current_process, active_children
-
-#
-# Exceptions
-#
-
-class ProcessError(Exception):
-    pass
-    
-class BufferTooShort(ProcessError):
-    pass
-    
-class TimeoutError(ProcessError):
-    pass
-
-class AuthenticationError(ProcessError):
-    pass
-
-#
-# Definitions not depending on native semaphores
-#
-
-def Manager():
-    '''
-    Returns a manager associated with a running server process
-
-    The manager's methods such as `Lock()`, `Condition()` and `Queue()`
-    can be used to create shared objects.
-    '''
-    from multiprocessing.managers import SyncManager
-    m = SyncManager()
-    m.start()
-    return m
-
-def Pipe(duplex=True):
-    '''
-    Returns two connection objects connected by a pipe
-    '''
-    from multiprocessing.connection import Pipe
-    return Pipe(duplex)
-
-def cpu_count():
-    '''
-    Returns the number of CPUs in the system
-    '''
-    if sys.platform == 'win32':
-        try:
-            num = int(os.environ['NUMBER_OF_PROCESSORS'])
-        except (ValueError, KeyError):
-            num = 0
-    elif sys.platform == 'darwin':
-        try:
-            num = int(os.popen('sysctl -n hw.ncpu').read())
-        except ValueError:
-            num = 0
-    else:
-        try:
-            num = os.sysconf('SC_NPROCESSORS_ONLN')
-        except (ValueError, OSError, AttributeError):
-            num = 0
-        
-    if num >= 1:
-        return num
-    else:
-        raise NotImplementedError('cannot determine number of cpus')
-
-def freeze_support():
-    '''
-    Check whether this is a fake forked process in a frozen executable.
-    If so then run code specified by commandline and exit.
-    '''
-    if sys.platform == 'win32' and getattr(sys, 'frozen', False):
-        from multiprocessing.forking import freeze_support
-        freeze_support()
-
-def get_logger():
-    '''
-    Return package logger -- if it does not already exist then it is created
-    '''
-    from multiprocessing.util import get_logger
-    return get_logger()
-
-def log_to_stderr(level=None):
-    '''
-    Turn on logging and add a handler which prints to stderr
-    '''
-    from multiprocessing.util import log_to_stderr
-    return log_to_stderr(level)
-    
-def allow_connection_pickling():
-    '''
-    Install support for sending connections and sockets between processes
-    '''
-    from multiprocessing import reduction
-    
-#
-# Definitions depending on native semaphores
-#
-
-def Lock():
-    '''
-    Returns a non-recursive lock object
-    '''
-    from multiprocessing.synchronize import Lock
-    return Lock()
-
-def RLock():
-    '''
-    Returns a recursive lock object
-    '''
-    from multiprocessing.synchronize import RLock
-    return RLock()
-
-def Condition(lock=None):
-    '''
-    Returns a condition object
-    '''
-    from multiprocessing.synchronize import Condition
-    return Condition(lock)
-
-def Semaphore(value=1):
-    '''
-    Returns a semaphore object
-    '''
-    from multiprocessing.synchronize import Semaphore
-    return Semaphore(value)
-
-def BoundedSemaphore(value=1):
-    '''
-    Returns a bounded semaphore object
-    '''
-    from multiprocessing.synchronize import BoundedSemaphore
-    return BoundedSemaphore(value)
-
-def Event():
-    '''
-    Returns an event object
-    '''
-    from multiprocessing.synchronize import Event
-    return Event()
-
-def Queue(maxsize=0):
-    '''
-    Returns a queue object
-    '''
-    from multiprocessing.queues import Queue
-    return Queue(maxsize)
-
-def JoinableQueue(maxsize=0):
-    '''
-    Returns a joinable queue object
-    '''
-    from multiprocessing.queues import JoinableQueue
-    return JoinableQueue(maxsize)
-
-def Pool(processes=None, initializer=None, initargs=()):
-    '''
-    Returns a process pool object
-    '''
-    from multiprocessing.pool import Pool
-    return Pool(processes, initializer, initargs)
-
-def RawValue(typecode_or_type, *args):
-    '''
-    Returns a shared object
-    '''
-    from multiprocessing.sharedctypes import RawValue
-    return RawValue(typecode_or_type, *args)
-
-def RawArray(typecode_or_type, size_or_initializer):
-    '''
-    Returns a shared array
-    '''
-    from multiprocessing.sharedctypes import RawArray
-    return RawArray(typecode_or_type, size_or_initializer)
-
-def Value(typecode_or_type, *args, **kwds):
-    '''
-    Returns a synchronized shared object
-    '''
-    from multiprocessing.sharedctypes import Value
-    return Value(typecode_or_type, *args, **kwds)
-
-def Array(typecode_or_type, size_or_initializer, **kwds):
-    '''
-    Returns a synchronized shared array
-    '''
-    from multiprocessing.sharedctypes import Array
-    return Array(typecode_or_type, size_or_initializer, **kwds)
-
-#
-#
-#
-
-if sys.platform == 'win32':
-
-    def set_executable(executable):
-        '''
-        Sets the path to a python.exe or pythonw.exe binary used to run
-        child processes on Windows instead of sys.executable.
-        Useful for people embedding Python. 
-        '''
-        from multiprocessing.forking import set_executable
-        set_executable(executable)
-
-    __all__ += ['set_executable']
+#
+# Package analogous to 'threading.py' but using processes
+#
+# multiprocessing/__init__.py
+#
+# This package is intended to duplicate the functionality (and much of
+# the API) of threading.py but uses processes instead of threads.  A
+# subpackage 'multiprocessing.dummy' has the same API but is a simple
+# wrapper for 'threading'.
+#
+# Try calling `multiprocessing.doc.main()` to read the html
+# documentation in a web browser.
+#
+#
+# Copyright (c) 2006-2008, R Oudkerk
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. Neither the name of author nor the names of any contributors may be
+#    used to endorse or promote products derived from this software
+#    without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+__version__ = '0.70a1'
+
+__all__ = [
+    'Process', 'current_process', 'active_children', 'freeze_support',
+    'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',
+    'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
+    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
+    'Event', 'Queue', 'JoinableQueue', 'Pool', 'Value', 'Array',
+    'RawValue', 'RawArray'
+    ]
+
+__author__ = 'R. Oudkerk (r.m.oudkerk at gmail.com)'
+
+#
+# Imports
+#
+
+import os
+import sys
+
+from multiprocessing.process import Process, current_process, active_children
+
+#
+# Exceptions
+#
+
+class ProcessError(Exception):
+    pass
+
+class BufferTooShort(ProcessError):
+    pass
+
+class TimeoutError(ProcessError):
+    pass
+
+class AuthenticationError(ProcessError):
+    pass
+
+import _multiprocessing
+
+#
+# Definitions not depending on native semaphores
+#
+
+def Manager():
+    '''
+    Returns a manager associated with a running server process
+
+    The manager's methods such as `Lock()`, `Condition()` and `Queue()`
+    can be used to create shared objects.
+    '''
+    from multiprocessing.managers import SyncManager
+    m = SyncManager()
+    m.start()
+    return m
+
+def Pipe(duplex=True):
+    '''
+    Returns two connection objects connected by a pipe
+    '''
+    from multiprocessing.connection import Pipe
+    return Pipe(duplex)
+
+def cpu_count():
+    '''
+    Returns the number of CPUs in the system
+    '''
+    if sys.platform == 'win32':
+        try:
+            num = int(os.environ['NUMBER_OF_PROCESSORS'])
+        except (ValueError, KeyError):
+            num = 0
+    elif sys.platform == 'darwin':
+        try:
+            num = int(os.popen('sysctl -n hw.ncpu').read())
+        except ValueError:
+            num = 0
+    else:
+        try:
+            num = os.sysconf('SC_NPROCESSORS_ONLN')
+        except (ValueError, OSError, AttributeError):
+            num = 0
+
+    if num >= 1:
+        return num
+    else:
+        raise NotImplementedError('cannot determine number of cpus')
+
+def freeze_support():
+    '''
+    Check whether this is a fake forked process in a frozen executable.
+    If so then run code specified by commandline and exit.
+    '''
+    if sys.platform == 'win32' and getattr(sys, 'frozen', False):
+        from multiprocessing.forking import freeze_support
+        freeze_support()
+
+def get_logger():
+    '''
+    Return package logger -- if it does not already exist then it is created
+    '''
+    from multiprocessing.util import get_logger
+    return get_logger()
+
+def log_to_stderr(level=None):
+    '''
+    Turn on logging and add a handler which prints to stderr
+    '''
+    from multiprocessing.util import log_to_stderr
+    return log_to_stderr(level)
+
+def allow_connection_pickling():
+    '''
+    Install support for sending connections and sockets between processes
+    '''
+    from multiprocessing import reduction
+
+#
+# Definitions depending on native semaphores
+#
+
+def Lock():
+    '''
+    Returns a non-recursive lock object
+    '''
+    from multiprocessing.synchronize import Lock
+    return Lock()
+
+def RLock():
+    '''
+    Returns a recursive lock object
+    '''
+    from multiprocessing.synchronize import RLock
+    return RLock()
+
+def Condition(lock=None):
+    '''
+    Returns a condition object
+    '''
+    from multiprocessing.synchronize import Condition
+    return Condition(lock)
+
+def Semaphore(value=1):
+    '''
+    Returns a semaphore object
+    '''
+    from multiprocessing.synchronize import Semaphore
+    return Semaphore(value)
+
+def BoundedSemaphore(value=1):
+    '''
+    Returns a bounded semaphore object
+    '''
+    from multiprocessing.synchronize import BoundedSemaphore
+    return BoundedSemaphore(value)
+
+def Event():
+    '''
+    Returns an event object
+    '''
+    from multiprocessing.synchronize import Event
+    return Event()
+
+def Queue(maxsize=0):
+    '''
+    Returns a queue object
+    '''
+    from multiprocessing.queues import Queue
+    return Queue(maxsize)
+
+def JoinableQueue(maxsize=0):
+    '''
+    Returns a joinable queue object
+    '''
+    from multiprocessing.queues import JoinableQueue
+    return JoinableQueue(maxsize)
+
+def Pool(processes=None, initializer=None, initargs=()):
+    '''
+    Returns a process pool object
+    '''
+    from multiprocessing.pool import Pool
+    return Pool(processes, initializer, initargs)
+
+def RawValue(typecode_or_type, *args):
+    '''
+    Returns a shared object
+    '''
+    from multiprocessing.sharedctypes import RawValue
+    return RawValue(typecode_or_type, *args)
+
+def RawArray(typecode_or_type, size_or_initializer):
+    '''
+    Returns a shared array
+    '''
+    from multiprocessing.sharedctypes import RawArray
+    return RawArray(typecode_or_type, size_or_initializer)
+
+def Value(typecode_or_type, *args, **kwds):
+    '''
+    Returns a synchronized shared object
+    '''
+    from multiprocessing.sharedctypes import Value
+    return Value(typecode_or_type, *args, **kwds)
+
+def Array(typecode_or_type, size_or_initializer, **kwds):
+    '''
+    Returns a synchronized shared array
+    '''
+    from multiprocessing.sharedctypes import Array
+    return Array(typecode_or_type, size_or_initializer, **kwds)
+
+#
+#
+#
+
+if sys.platform == 'win32':
+
+    def set_executable(executable):
+        '''
+        Sets the path to a python.exe or pythonw.exe binary used to run
+        child processes on Windows instead of sys.executable.
+        Useful for people embedding Python.
+        '''
+        from multiprocessing.forking import set_executable
+        set_executable(executable)
+
+    __all__ += ['set_executable']

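Every factory function in __init__.py above imports its implementation
submodule only when first called, which keeps `import multiprocessing` itself
light.  A minimal sketch of the resulting top-level API (not part of this
changeset):

    import multiprocessing

    def square(x):
        return x * x

    if __name__ == '__main__':
        lock = multiprocessing.Lock()             # lazily imports .synchronize
        pool = multiprocessing.Pool(processes=2)  # lazily imports .pool
        print(pool.map(square, [1, 2, 3]))        # -> [1, 4, 9]
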
Modified: python/branches/py3k/Lib/multiprocessing/connection.py
==============================================================================
--- /python/trunk/Lib/multiprocessing/connection.py	(original)
+++ python/branches/py3k/Lib/multiprocessing/connection.py	Wed Jun 11 18:44:04 2008
@@ -1,425 +1,425 @@
-#
-# A higher level module for using sockets (or Windows named pipes)
-#
-# multiprocessing/connection.py
-#
-# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
-#
-
-__all__ = [ 'Client', 'Listener', 'Pipe' ]
-
-import os
-import sys
-import socket
-import time
-import tempfile
-import itertools
-
-import _multiprocessing
-from multiprocessing import current_process
-from multiprocessing.util import get_temp_dir, Finalize, sub_debug, debug
-from multiprocessing.forking import duplicate, close
-
-
-#
-#
-#
-
-BUFSIZE = 8192
-
-_mmap_counter = itertools.count()
-
-default_family = 'AF_INET'
-families = ['AF_INET']
-
-if hasattr(socket, 'AF_UNIX'):
-    default_family = 'AF_UNIX'
-    families += ['AF_UNIX']
-
-if sys.platform == 'win32':
-    default_family = 'AF_PIPE'
-    families += ['AF_PIPE']
-
-#
-#
-#
-
-def arbitrary_address(family):
-    '''
-    Return an arbitrary free address for the given family
-    '''
-    if family == 'AF_INET':
-        return ('localhost', 0)
-    elif family == 'AF_UNIX':        
-        return tempfile.mktemp(prefix='listener-', dir=get_temp_dir())
-    elif family == 'AF_PIPE':
-        return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %
-                               (os.getpid(), _mmap_counter.next()))
-    else:
-        raise ValueError('unrecognized family')
-
-
-def address_type(address):
-    '''
-    Return the type of the address
-
-    This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE'
-    '''
-    if type(address) == tuple:
-        return 'AF_INET'
-    elif type(address) is str and address.startswith('\\\\'):
-        return 'AF_PIPE'
-    elif type(address) is str:
-        return 'AF_UNIX'
-    else:
-        raise ValueError('address type of %r unrecognized' % address)
-
-#
-# Public functions
-#
-
-class Listener(object):
-    '''
-    Returns a listener object.
-
-    This is a wrapper for a bound socket which is 'listening' for
-    connections, or for a Windows named pipe.
-    '''
-    def __init__(self, address=None, family=None, backlog=1, authkey=None):
-        family = family or (address and address_type(address)) \
-                 or default_family
-        address = address or arbitrary_address(family)
-
-        if family == 'AF_PIPE':
-            self._listener = PipeListener(address, backlog)
-        else:
-            self._listener = SocketListener(address, family, backlog)
-
-        if authkey is not None and not isinstance(authkey, bytes):
-            raise TypeError, 'authkey should be a byte string'
-
-        self._authkey = authkey
-
-    def accept(self):
-        '''
-        Accept a connection on the bound socket or named pipe of `self`.
-
-        Returns a `Connection` object.
-        '''
-        c = self._listener.accept()
-        if self._authkey:
-            deliver_challenge(c, self._authkey)
-            answer_challenge(c, self._authkey)
-        return c
-
-    def close(self):
-        '''
-        Close the bound socket or named pipe of `self`.
-        '''
-        return self._listener.close()
-
-    address = property(lambda self: self._listener._address)
-    last_accepted = property(lambda self: self._listener._last_accepted)
-
-
-def Client(address, family=None, authkey=None):
-    '''
-    Returns a connection to the address of a `Listener`
-    '''
-    family = family or address_type(address)
-    if family == 'AF_PIPE':
-        c = PipeClient(address)
-    else:
-        c = SocketClient(address)
-
-    if authkey is not None and not isinstance(authkey, bytes):
-        raise TypeError, 'authkey should be a byte string'
-
-    if authkey is not None:
-        answer_challenge(c, authkey)
-        deliver_challenge(c, authkey)
-
-    return c
-
-
-if sys.platform != 'win32':
-
-    def Pipe(duplex=True):
-        '''
-        Returns a pair of connection objects at either end of a pipe
-        '''
-        if duplex:
-            s1, s2 = socket.socketpair()
-            c1 = _multiprocessing.Connection(os.dup(s1.fileno()))
-            c2 = _multiprocessing.Connection(os.dup(s2.fileno()))
-            s1.close()
-            s2.close()
-        else:
-            fd1, fd2 = os.pipe()
-            c1 = _multiprocessing.Connection(fd1, writable=False)
-            c2 = _multiprocessing.Connection(fd2, readable=False)
-
-        return c1, c2
-    
-else:
-
-    from ._multiprocessing import win32
-
-    def Pipe(duplex=True):
-        '''
-        Returns a pair of connection objects at either end of a pipe
-        '''
-        address = arbitrary_address('AF_PIPE')
-        if duplex:
-            openmode = win32.PIPE_ACCESS_DUPLEX
-            access = win32.GENERIC_READ | win32.GENERIC_WRITE
-            obsize, ibsize = BUFSIZE, BUFSIZE
-        else:
-            openmode = win32.PIPE_ACCESS_INBOUND
-            access = win32.GENERIC_WRITE
-            obsize, ibsize = 0, BUFSIZE
-
-        h1 = win32.CreateNamedPipe(
-            address, openmode,
-            win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
-            win32.PIPE_WAIT,
-            1, obsize, ibsize, win32.NMPWAIT_WAIT_FOREVER, win32.NULL
-            )
-        h2 = win32.CreateFile(
-            address, access, 0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL
-            )
-        win32.SetNamedPipeHandleState(
-            h2, win32.PIPE_READMODE_MESSAGE, None, None
-            )
-
-        try:
-            win32.ConnectNamedPipe(h1, win32.NULL)
-        except WindowsError, e:
-            if e.args[0] != win32.ERROR_PIPE_CONNECTED:
-                raise
-
-        c1 = _multiprocessing.PipeConnection(h1, writable=duplex)
-        c2 = _multiprocessing.PipeConnection(h2, readable=duplex)
-        
-        return c1, c2
-
-#
-# Definitions for connections based on sockets
-#
-
-class SocketListener(object):
-    '''
-    Representation of a socket which is bound to an address and listening
-    '''
-    def __init__(self, address, family, backlog=1):
-        self._socket = socket.socket(getattr(socket, family))
-        self._socket.bind(address)
-        self._socket.listen(backlog)
-        address = self._socket.getsockname()
-        if type(address) is tuple:
-            address = (socket.getfqdn(address[0]),) + address[1:]
-        self._address = address
-        self._family = family
-        self._last_accepted = None
-
-        sub_debug('listener bound to address %r', self._address)
-
-        if family == 'AF_UNIX':
-            self._unlink = Finalize(
-                self, os.unlink, args=(self._address,), exitpriority=0
-                )
-        else:
-            self._unlink = None
-
-    def accept(self):
-        s, self._last_accepted = self._socket.accept()
-        fd = duplicate(s.fileno())
-        conn = _multiprocessing.Connection(fd)
-        s.close()
-        return conn
-
-    def close(self):
-        self._socket.close()
-        if self._unlink is not None:
-            self._unlink()
-
-
-def SocketClient(address):
-    '''
-    Return a connection object connected to the socket given by `address`
-    '''
-    family = address_type(address)
-    s = socket.socket( getattr(socket, family) )
-
-    while 1:
-        try:
-            s.connect(address)
-        except socket.error, e:
-            if e.args[0] != 10061:    # 10061 => connection refused
-                debug('failed to connect to address %s', address)
-                raise
-            time.sleep(0.01)
-        else:
-            break
-    else:
-        raise
-
-    fd = duplicate(s.fileno())
-    conn = _multiprocessing.Connection(fd)
-    s.close()
-    return conn
-
-#
-# Definitions for connections based on named pipes
-#
-
-if sys.platform == 'win32':
-
-    class PipeListener(object):
-        '''
-        Representation of a named pipe
-        '''
-        def __init__(self, address, backlog=None):
-            self._address = address
-            handle = win32.CreateNamedPipe(
-                address, win32.PIPE_ACCESS_DUPLEX,
-                win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
-                win32.PIPE_WAIT,
-                win32.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
-                win32.NMPWAIT_WAIT_FOREVER, win32.NULL
-                )
-            self._handle_queue = [handle]
-            self._last_accepted = None
-            
-            sub_debug('listener created with address=%r', self._address)
-
-            self.close = Finalize(
-                self, PipeListener._finalize_pipe_listener,
-                args=(self._handle_queue, self._address), exitpriority=0
-                )
-            
-        def accept(self):
-            newhandle = win32.CreateNamedPipe(
-                self._address, win32.PIPE_ACCESS_DUPLEX,
-                win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
-                win32.PIPE_WAIT,
-                win32.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
-                win32.NMPWAIT_WAIT_FOREVER, win32.NULL
-                )
-            self._handle_queue.append(newhandle)
-            handle = self._handle_queue.pop(0)
-            try:
-                win32.ConnectNamedPipe(handle, win32.NULL)
-            except WindowsError, e:
-                if e.args[0] != win32.ERROR_PIPE_CONNECTED:
-                    raise
-            return _multiprocessing.PipeConnection(handle)
-
-        @staticmethod
-        def _finalize_pipe_listener(queue, address):
-            sub_debug('closing listener with address=%r', address)
-            for handle in queue:
-                close(handle)
-        
-    def PipeClient(address):
-        '''
-        Return a connection object connected to the pipe given by `address`
-        '''
-        while 1:
-            try:
-                win32.WaitNamedPipe(address, 1000)
-                h = win32.CreateFile(
-                    address, win32.GENERIC_READ | win32.GENERIC_WRITE,
-                    0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL
-                    )
-            except WindowsError, e:
-                if e.args[0] not in (win32.ERROR_SEM_TIMEOUT,
-                                     win32.ERROR_PIPE_BUSY):
-                    raise
-            else:
-                break
-        else:
-            raise
-
-        win32.SetNamedPipeHandleState(
-            h, win32.PIPE_READMODE_MESSAGE, None, None
-            )
-        return _multiprocessing.PipeConnection(h)
-
-#
-# Authentication stuff
-#
-
-MESSAGE_LENGTH = 20
-
-CHALLENGE = '#CHALLENGE#'
-WELCOME = '#WELCOME#'
-FAILURE = '#FAILURE#'
-
-if sys.version_info >= (3, 0):         # XXX can use bytes literals in 2.6/3.0
-    CHALLENGE = CHALLENGE.encode('ascii')
-    WELCOME = WELCOME.encode('ascii')
-    FAILURE = FAILURE.encode('ascii')
-
-def deliver_challenge(connection, authkey):
-    import hmac
-    assert isinstance(authkey, bytes)
-    message = os.urandom(MESSAGE_LENGTH)
-    connection.send_bytes(CHALLENGE + message)
-    digest = hmac.new(authkey, message).digest()
-    response = connection.recv_bytes(256)        # reject large message
-    if response == digest:
-        connection.send_bytes(WELCOME)
-    else:
-        connection.send_bytes(FAILURE)
-        raise AuthenticationError('digest received was wrong')
-
-def answer_challenge(connection, authkey):
-    import hmac
-    assert isinstance(authkey, bytes)
-    message = connection.recv_bytes(256)         # reject large message
-    assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message
-    message = message[len(CHALLENGE):]
-    digest = hmac.new(authkey, message).digest()
-    connection.send_bytes(digest)
-    response = connection.recv_bytes(256)        # reject large message
-    if response != WELCOME:
-        raise AuthenticationError('digest sent was rejected')
-
-#
-# Support for using xmlrpclib for serialization
-#
-
-class ConnectionWrapper(object):
-    def __init__(self, conn, dumps, loads):
-        self._conn = conn
-        self._dumps = dumps
-        self._loads = loads
-        for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
-            obj = getattr(conn, attr)
-            setattr(self, attr, obj)            
-    def send(self, obj):
-        s = self._dumps(obj)
-        self._conn.send_bytes(s)
-    def recv(self):
-        s = self._conn.recv_bytes()
-        return self._loads(s)
-
-def _xml_dumps(obj):
-    return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf8')
-
-def _xml_loads(s):
-    (obj,), method = xmlrpclib.loads(s.decode('utf8'))
-    return obj
-
-class XmlListener(Listener):
-    def accept(self):
-        global xmlrpclib
-        import xmlrpclib
-        obj = Listener.accept(self)
-        return ConnectionWrapper(obj, _xml_dumps, _xml_loads)
-
-def XmlClient(*args, **kwds):
-    global xmlrpclib
-    import xmlrpclib
-    return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads)
+#
+# A higher level module for using sockets (or Windows named pipes)
+#
+# multiprocessing/connection.py
+#
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
+#
+
+__all__ = [ 'Client', 'Listener', 'Pipe' ]
+
+import os
+import sys
+import socket
+import time
+import tempfile
+import itertools
+
+import _multiprocessing
+from multiprocessing import current_process
+from multiprocessing.util import get_temp_dir, Finalize, sub_debug, debug
+from multiprocessing.forking import duplicate, close
+
+
+#
+#
+#
+
+BUFSIZE = 8192
+
+_mmap_counter = itertools.count()
+
+default_family = 'AF_INET'
+families = ['AF_INET']
+
+if hasattr(socket, 'AF_UNIX'):
+    default_family = 'AF_UNIX'
+    families += ['AF_UNIX']
+
+if sys.platform == 'win32':
+    default_family = 'AF_PIPE'
+    families += ['AF_PIPE']
+
+#
+#
+#
+
+def arbitrary_address(family):
+    '''
+    Return an arbitrary free address for the given family
+    '''
+    if family == 'AF_INET':
+        return ('localhost', 0)
+    elif family == 'AF_UNIX':
+        return tempfile.mktemp(prefix='listener-', dir=get_temp_dir())
+    elif family == 'AF_PIPE':
+        return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %
+                               (os.getpid(), next(_mmap_counter)))
+    else:
+        raise ValueError('unrecognized family')
+
+
+def address_type(address):
+    '''
+    Return the type of the address
+
+    This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE'
+    '''
+    if type(address) == tuple:
+        return 'AF_INET'
+    elif type(address) is str and address.startswith('\\\\'):
+        return 'AF_PIPE'
+    elif type(address) is str:
+        return 'AF_UNIX'
+    else:
+        raise ValueError('address type of %r unrecognized' % address)
+
+#
+# Public functions
+#
+
+class Listener(object):
+    '''
+    Returns a listener object.
+
+    This is a wrapper for a bound socket which is 'listening' for
+    connections, or for a Windows named pipe.
+    '''
+    def __init__(self, address=None, family=None, backlog=1, authkey=None):
+        family = family or (address and address_type(address)) \
+                 or default_family
+        address = address or arbitrary_address(family)
+
+        if family == 'AF_PIPE':
+            self._listener = PipeListener(address, backlog)
+        else:
+            self._listener = SocketListener(address, family, backlog)
+
+        if authkey is not None and not isinstance(authkey, bytes):
+            raise TypeError('authkey should be a byte string')
+
+        self._authkey = authkey
+
+    def accept(self):
+        '''
+        Accept a connection on the bound socket or named pipe of `self`.
+
+        Returns a `Connection` object.
+        '''
+        c = self._listener.accept()
+        if self._authkey:
+            deliver_challenge(c, self._authkey)
+            answer_challenge(c, self._authkey)
+        return c
+
+    def close(self):
+        '''
+        Close the bound socket or named pipe of `self`.
+        '''
+        return self._listener.close()
+
+    address = property(lambda self: self._listener._address)
+    last_accepted = property(lambda self: self._listener._last_accepted)
+
+
+def Client(address, family=None, authkey=None):
+    '''
+    Returns a connection to the address of a `Listener`
+    '''
+    family = family or address_type(address)
+    if family == 'AF_PIPE':
+        c = PipeClient(address)
+    else:
+        c = SocketClient(address)
+
+    if authkey is not None and not isinstance(authkey, bytes):
+        raise TypeError('authkey should be a byte string')
+
+    if authkey is not None:
+        answer_challenge(c, authkey)
+        deliver_challenge(c, authkey)
+
+    return c
+
+
+if sys.platform != 'win32':
+
+    def Pipe(duplex=True):
+        '''
+        Returns pair of connection objects at either end of a pipe
+        '''
+        if duplex:
+            s1, s2 = socket.socketpair()
+            c1 = _multiprocessing.Connection(os.dup(s1.fileno()))
+            c2 = _multiprocessing.Connection(os.dup(s2.fileno()))
+            s1.close()
+            s2.close()
+        else:
+            fd1, fd2 = os.pipe()
+            c1 = _multiprocessing.Connection(fd1, writable=False)
+            c2 = _multiprocessing.Connection(fd2, readable=False)
+
+        return c1, c2
+
+else:
+
+    from ._multiprocessing import win32
+
+    def Pipe(duplex=True):
+        '''
+        Returns pair of connection objects at either end of a pipe
+        '''
+        address = arbitrary_address('AF_PIPE')
+        if duplex:
+            openmode = win32.PIPE_ACCESS_DUPLEX
+            access = win32.GENERIC_READ | win32.GENERIC_WRITE
+            obsize, ibsize = BUFSIZE, BUFSIZE
+        else:
+            openmode = win32.PIPE_ACCESS_INBOUND
+            access = win32.GENERIC_WRITE
+            obsize, ibsize = 0, BUFSIZE
+
+        h1 = win32.CreateNamedPipe(
+            address, openmode,
+            win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
+            win32.PIPE_WAIT,
+            1, obsize, ibsize, win32.NMPWAIT_WAIT_FOREVER, win32.NULL
+            )
+        h2 = win32.CreateFile(
+            address, access, 0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL
+            )
+        win32.SetNamedPipeHandleState(
+            h2, win32.PIPE_READMODE_MESSAGE, None, None
+            )
+
+        try:
+            win32.ConnectNamedPipe(h1, win32.NULL)
+        except WindowsError as e:
+            if e.args[0] != win32.ERROR_PIPE_CONNECTED:
+                raise
+
+        c1 = _multiprocessing.PipeConnection(h1, writable=duplex)
+        c2 = _multiprocessing.PipeConnection(h2, readable=duplex)
+
+        return c1, c2
+
+#
+# Definitions for connections based on sockets
+#
+
+class SocketListener(object):
+    '''
+    Representation of a socket which is bound to an address and listening
+    '''
+    def __init__(self, address, family, backlog=1):
+        self._socket = socket.socket(getattr(socket, family))
+        self._socket.bind(address)
+        self._socket.listen(backlog)
+        address = self._socket.getsockname()
+        if type(address) is tuple:
+            address = (socket.getfqdn(address[0]),) + address[1:]
+        self._address = address
+        self._family = family
+        self._last_accepted = None
+
+        sub_debug('listener bound to address %r', self._address)
+
+        if family == 'AF_UNIX':
+            self._unlink = Finalize(
+                self, os.unlink, args=(self._address,), exitpriority=0
+                )
+        else:
+            self._unlink = None
+
+    def accept(self):
+        s, self._last_accepted = self._socket.accept()
+        fd = duplicate(s.fileno())
+        conn = _multiprocessing.Connection(fd)
+        s.close()
+        return conn
+
+    def close(self):
+        self._socket.close()
+        if self._unlink is not None:
+            self._unlink()
+
+
+def SocketClient(address):
+    '''
+    Return a connection object connected to the socket given by `address`
+    '''
+    family = address_type(address)
+    s = socket.socket( getattr(socket, family) )
+
+    while 1:
+        try:
+            s.connect(address)
+        except socket.error as e:
+            if e.args[0] != 10061:    # 10061 => connection refused
+                debug('failed to connect to address %s', address)
+                raise
+            time.sleep(0.01)
+        else:
+            break
+    else:
+        raise
+
+    fd = duplicate(s.fileno())
+    conn = _multiprocessing.Connection(fd)
+    s.close()
+    return conn
+
+#
+# Definitions for connections based on named pipes
+#
+
+if sys.platform == 'win32':
+
+    class PipeListener(object):
+        '''
+        Representation of a named pipe
+        '''
+        def __init__(self, address, backlog=None):
+            self._address = address
+            handle = win32.CreateNamedPipe(
+                address, win32.PIPE_ACCESS_DUPLEX,
+                win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
+                win32.PIPE_WAIT,
+                win32.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
+                win32.NMPWAIT_WAIT_FOREVER, win32.NULL
+                )
+            self._handle_queue = [handle]
+            self._last_accepted = None
+
+            sub_debug('listener created with address=%r', self._address)
+
+            self.close = Finalize(
+                self, PipeListener._finalize_pipe_listener,
+                args=(self._handle_queue, self._address), exitpriority=0
+                )
+
+        def accept(self):
+            newhandle = win32.CreateNamedPipe(
+                self._address, win32.PIPE_ACCESS_DUPLEX,
+                win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
+                win32.PIPE_WAIT,
+                win32.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
+                win32.NMPWAIT_WAIT_FOREVER, win32.NULL
+                )
+            self._handle_queue.append(newhandle)
+            handle = self._handle_queue.pop(0)
+            try:
+                win32.ConnectNamedPipe(handle, win32.NULL)
+            except WindowsError as e:
+                if e.args[0] != win32.ERROR_PIPE_CONNECTED:
+                    raise
+            return _multiprocessing.PipeConnection(handle)
+
+        @staticmethod
+        def _finalize_pipe_listener(queue, address):
+            sub_debug('closing listener with address=%r', address)
+            for handle in queue:
+                close(handle)
+
+    def PipeClient(address):
+        '''
+        Return a connection object connected to the pipe given by `address`
+        '''
+        while 1:
+            try:
+                win32.WaitNamedPipe(address, 1000)
+                h = win32.CreateFile(
+                    address, win32.GENERIC_READ | win32.GENERIC_WRITE,
+                    0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL
+                    )
+            except WindowsError as e:
+                if e.args[0] not in (win32.ERROR_SEM_TIMEOUT,
+                                     win32.ERROR_PIPE_BUSY):
+                    raise
+            else:
+                break
+        else:
+            raise
+
+        win32.SetNamedPipeHandleState(
+            h, win32.PIPE_READMODE_MESSAGE, None, None
+            )
+        return _multiprocessing.PipeConnection(h)
+
+#
+# Authentication stuff
+#
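+# The handshake below: the server sends CHALLENGE plus MESSAGE_LENGTH
+# random bytes, the client replies with hmac(authkey, message), and the
+# server answers WELCOME on a match or FAILURE (and raises) otherwise.
+#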
+
+MESSAGE_LENGTH = 20
+
+CHALLENGE = '#CHALLENGE#'
+WELCOME = '#WELCOME#'
+FAILURE = '#FAILURE#'
+
+if sys.version_info >= (3, 0):         # XXX can use bytes literals in 2.6/3.0
+    CHALLENGE = CHALLENGE.encode('ascii')
+    WELCOME = WELCOME.encode('ascii')
+    FAILURE = FAILURE.encode('ascii')
+
+def deliver_challenge(connection, authkey):
+    import hmac
+    assert isinstance(authkey, bytes)
+    message = os.urandom(MESSAGE_LENGTH)
+    connection.send_bytes(CHALLENGE + message)
+    digest = hmac.new(authkey, message).digest()
+    response = connection.recv_bytes(256)        # reject large message
+    if response == digest:
+        connection.send_bytes(WELCOME)
+    else:
+        connection.send_bytes(FAILURE)
+        raise AuthenticationError('digest received was wrong')
+
+def answer_challenge(connection, authkey):
+    import hmac
+    assert isinstance(authkey, bytes)
+    message = connection.recv_bytes(256)         # reject large message
+    assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message
+    message = message[len(CHALLENGE):]
+    digest = hmac.new(authkey, message).digest()
+    connection.send_bytes(digest)
+    response = connection.recv_bytes(256)        # reject large message
+    if response != WELCOME:
+        raise AuthenticationError('digest sent was rejected')
+
+#
+# Support for using xmlrpclib for serialization
+#
+
+class ConnectionWrapper(object):
+    def __init__(self, conn, dumps, loads):
+        self._conn = conn
+        self._dumps = dumps
+        self._loads = loads
+        for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
+            obj = getattr(conn, attr)
+            setattr(self, attr, obj)
+    def send(self, obj):
+        s = self._dumps(obj)
+        self._conn.send_bytes(s)
+    def recv(self):
+        s = self._conn.recv_bytes()
+        return self._loads(s)
+
+def _xml_dumps(obj):
+    return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf8')
+
+def _xml_loads(s):
+    (obj,), method = xmlrpclib.loads(s.decode('utf8'))
+    return obj
+
+class XmlListener(Listener):
+    def accept(self):
+        global xmlrpclib
+        import xmlrpc.client as xmlrpclib
+        obj = Listener.accept(self)
+        return ConnectionWrapper(obj, _xml_dumps, _xml_loads)
+
+def XmlClient(*args, **kwds):
+    global xmlrpclib
+    import xmlrpc.client as xmlrpclib
+    return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads)
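
A minimal usage sketch of the Listener/Client API added above: the
authkey argument drives the HMAC handshake, and the helper thread is
only so one script can play both ends (it is not part of the API).

    from multiprocessing.connection import Listener, Client
    import threading

    def serve(listener):
        conn = listener.accept()       # runs the challenge/response
        conn.send('hello')
        conn.close()

    listener = Listener(('localhost', 0), authkey=b'secret')
    t = threading.Thread(target=serve, args=(listener,))
    t.start()
    client = Client(listener.address, authkey=b'secret')
    print(client.recv())               # prints 'hello'
    client.close()
    t.join()
    listener.close()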

Modified: python/branches/py3k/Lib/multiprocessing/dummy/__init__.py
==============================================================================
--- /python/trunk/Lib/multiprocessing/dummy/__init__.py	(original)
+++ python/branches/py3k/Lib/multiprocessing/dummy/__init__.py	Wed Jun 11 18:44:04 2008
@@ -1,143 +1,143 @@
-#
-# Support for the API of the multiprocessing package using threads
-#
-# multiprocessing/dummy/__init__.py
-#
-# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
-#
-
-__all__ = [
-    'Process', 'current_process', 'active_children', 'freeze_support',
-    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
-    'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
-    ]
-
-#
-# Imports
-#
-
-import threading
-import sys
-import weakref
-import array
-import itertools
-
-from multiprocessing import TimeoutError, cpu_count
-from multiprocessing.dummy.connection import Pipe
-from threading import Lock, RLock, Semaphore, BoundedSemaphore
-from threading import Event
-from Queue import Queue
-
-#
-#
-#
-
-class DummyProcess(threading.Thread):
-
-    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
-        threading.Thread.__init__(self, group, target, name, args, kwargs)
-        self._pid = None
-        self._children = weakref.WeakKeyDictionary()
-        self._start_called = False
-        self._parent = current_process()
-
-    def start(self):
-        assert self._parent is current_process()
-        self._start_called = True
-        self._parent._children[self] = None
-        threading.Thread.start(self)
-
-    def get_exitcode(self):
-        if self._start_called and not self.isAlive():
-            return 0
-        else:
-            return None
-
-    # XXX
-    if sys.version_info < (3, 0):
-        is_alive = threading.Thread.isAlive.im_func
-        get_name = threading.Thread.getName.im_func
-        set_name = threading.Thread.setName.im_func
-        is_daemon = threading.Thread.isDaemon.im_func
-        set_daemon = threading.Thread.setDaemon.im_func
-    else:
-        is_alive = threading.Thread.isAlive
-        get_name = threading.Thread.getName
-        set_name = threading.Thread.setName
-        is_daemon = threading.Thread.isDaemon
-        set_daemon = threading.Thread.setDaemon
-
-#
-#
-#
-        
-class Condition(threading._Condition):
-    # XXX
-    if sys.version_info < (3, 0):
-        notify_all = threading._Condition.notifyAll.im_func
-    else:
-        notify_all = threading._Condition.notifyAll
-
-#
-#
-#
-
-Process = DummyProcess
-current_process = threading.currentThread
-current_process()._children = weakref.WeakKeyDictionary()
-
-def active_children():
-    children = current_process()._children
-    for p in list(children):
-        if not p.isAlive():
-            children.pop(p, None)
-    return list(children)
-
-def freeze_support():
-    pass
-
-#
-#
-#
-
-class Namespace(object):
-    def __init__(self, **kwds):
-        self.__dict__.update(kwds)
-    def __repr__(self):
-        items = self.__dict__.items()
-        temp = []
-        for name, value in items:
-            if not name.startswith('_'):
-                temp.append('%s=%r' % (name, value))
-        temp.sort()
-        return 'Namespace(%s)' % str.join(', ', temp)
-
-dict = dict
-list = list
-
-def Array(typecode, sequence, lock=True):
-    return array.array(typecode, sequence)
-
-class Value(object):
-    def __init__(self, typecode, value, lock=True):
-        self._typecode = typecode
-        self._value = value
-    def _get(self):
-        return self._value
-    def _set(self, value):
-        self._value = value
-    value = property(_get, _set)
-    def __repr__(self):
-        return '<%r(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
-
-def Manager():
-    return sys.modules[__name__]
-
-def shutdown():
-    pass
-
-def Pool(processes=None, initializer=None, initargs=()):
-    from multiprocessing.pool import ThreadPool
-    return ThreadPool(processes, initializer, initargs)
-
-JoinableQueue = Queue
+#
+# Support for the API of the multiprocessing package using threads
+#
+# multiprocessing/dummy/__init__.py
+#
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
+#
+
+__all__ = [
+    'Process', 'current_process', 'active_children', 'freeze_support',
+    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
+    'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
+    ]
+
+#
+# Imports
+#
+
+import threading
+import sys
+import weakref
+import array
+import itertools
+
+from multiprocessing import TimeoutError, cpu_count
+from multiprocessing.dummy.connection import Pipe
+from threading import Lock, RLock, Semaphore, BoundedSemaphore
+from threading import Event
+from queue import Queue
+
+#
+#
+#
+
+class DummyProcess(threading.Thread):
+
+    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
+        threading.Thread.__init__(self, group, target, name, args, kwargs)
+        self._pid = None
+        self._children = weakref.WeakKeyDictionary()
+        self._start_called = False
+        self._parent = current_process()
+
+    def start(self):
+        assert self._parent is current_process()
+        self._start_called = True
+        self._parent._children[self] = None
+        threading.Thread.start(self)
+
+    def get_exitcode(self):
+        if self._start_called and not self.isAlive():
+            return 0
+        else:
+            return None
+
+    # XXX
+    if sys.version_info < (3, 0):
+        is_alive = threading.Thread.isAlive.__func__
+        get_name = threading.Thread.getName.__func__
+        set_name = threading.Thread.setName.__func__
+        is_daemon = threading.Thread.isDaemon.__func__
+        set_daemon = threading.Thread.setDaemon.__func__
+    else:
+        is_alive = threading.Thread.isAlive
+        get_name = threading.Thread.getName
+        set_name = threading.Thread.setName
+        is_daemon = threading.Thread.isDaemon
+        set_daemon = threading.Thread.setDaemon
+
+#
+#
+#
+
+class Condition(threading._Condition):
+    # XXX
+    if sys.version_info < (3, 0):
+        notify_all = threading._Condition.notifyAll.__func__
+    else:
+        notify_all = threading._Condition.notifyAll
+
+#
+#
+#
+
+Process = DummyProcess
+current_process = threading.currentThread
+current_process()._children = weakref.WeakKeyDictionary()
+
+def active_children():
+    children = current_process()._children
+    for p in list(children):
+        if not p.isAlive():
+            children.pop(p, None)
+    return list(children)
+
+def freeze_support():
+    pass
+
+#
+#
+#
+
+class Namespace(object):
+    def __init__(self, **kwds):
+        self.__dict__.update(kwds)
+    def __repr__(self):
+        items = list(self.__dict__.items())
+        temp = []
+        for name, value in items:
+            if not name.startswith('_'):
+                temp.append('%s=%r' % (name, value))
+        temp.sort()
+        return 'Namespace(%s)' % str.join(', ', temp)
+
+dict = dict
+list = list
+
+def Array(typecode, sequence, lock=True):
+    return array.array(typecode, sequence)
+
+class Value(object):
+    def __init__(self, typecode, value, lock=True):
+        self._typecode = typecode
+        self._value = value
+    def _get(self):
+        return self._value
+    def _set(self, value):
+        self._value = value
+    value = property(_get, _set)
+    def __repr__(self):
+        return '<%r(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
+
+def Manager():
+    return sys.modules[__name__]
+
+def shutdown():
+    pass
+
+def Pool(processes=None, initializer=None, initargs=()):
+    from multiprocessing.pool import ThreadPool
+    return ThreadPool(processes, initializer, initargs)
+
+JoinableQueue = Queue
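
A quick sketch of the threads-backed API above. Since dummy.Pool just
returns the ThreadPool from multiprocessing.pool, the familiar Pool
interface applies unchanged:

    from multiprocessing import dummy

    def square(x):
        return x * x

    pool = dummy.Pool(4)               # four worker threads
    print(pool.map(square, range(10)))
    pool.close()
    pool.join()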

Modified: python/branches/py3k/Lib/multiprocessing/dummy/connection.py
==============================================================================
--- /python/trunk/Lib/multiprocessing/dummy/connection.py	(original)
+++ python/branches/py3k/Lib/multiprocessing/dummy/connection.py	Wed Jun 11 18:44:04 2008
@@ -1,61 +1,61 @@
-#
-# Analogue of `multiprocessing.connection` which uses queues instead of sockets
-#
-# multiprocessing/dummy/connection.py
-#
-# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
-#
-
-__all__ = [ 'Client', 'Listener', 'Pipe' ]
-
-from Queue import Queue
-
-
-families = [None]
-
-
-class Listener(object):
-
-    def __init__(self, address=None, family=None, backlog=1):
-        self._backlog_queue = Queue(backlog)
-
-    def accept(self):
-        return Connection(*self._backlog_queue.get())
-
-    def close(self):
-        self._backlog_queue = None
-
-    address = property(lambda self: self._backlog_queue)
-
-
-def Client(address):
-    _in, _out = Queue(), Queue()
-    address.put((_out, _in))
-    return Connection(_in, _out)
-
-
-def Pipe(duplex=True):
-    a, b = Queue(), Queue()
-    return Connection(a, b), Connection(b, a)
-
-
-class Connection(object):
-
-    def __init__(self, _in, _out):
-        self._out = _out
-        self._in = _in
-        self.send = self.send_bytes = _out.put
-        self.recv = self.recv_bytes = _in.get
-
-    def poll(self, timeout=0.0):
-        if self._in.qsize() > 0:
-            return True
-        if timeout <= 0.0:
-            return False
-        self._in.not_empty.acquire()
-        self._in.not_empty.wait(timeout)
-        self._in.not_empty.release()
-        return self._in.qsize() > 0
-
-    def close(self):
-        pass
+#
+# Analogue of `multiprocessing.connection` which uses queues instead of sockets
+#
+# multiprocessing/dummy/connection.py
+#
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
+#
+
+__all__ = [ 'Client', 'Listener', 'Pipe' ]
+
+from queue import Queue
+
+
+families = [None]
+
+
+class Listener(object):
+
+    def __init__(self, address=None, family=None, backlog=1):
+        self._backlog_queue = Queue(backlog)
+
+    def accept(self):
+        return Connection(*self._backlog_queue.get())
+
+    def close(self):
+        self._backlog_queue = None
+
+    address = property(lambda self: self._backlog_queue)
+
+
+def Client(address):
+    _in, _out = Queue(), Queue()
+    address.put((_out, _in))
+    return Connection(_in, _out)
+
+
+def Pipe(duplex=True):
+    a, b = Queue(), Queue()
+    return Connection(a, b), Connection(b, a)
+
+
+class Connection(object):
+
+    def __init__(self, _in, _out):
+        self._out = _out
+        self._in = _in
+        self.send = self.send_bytes = _out.put
+        self.recv = self.recv_bytes = _in.get
+
+    def poll(self, timeout=0.0):
+        if self._in.qsize() > 0:
+            return True
+        if timeout <= 0.0:
+            return False
+        self._in.not_empty.acquire()
+        self._in.not_empty.wait(timeout)
+        self._in.not_empty.release()
+        return self._in.qsize() > 0
+
+    def close(self):
+        pass
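
For simple in-process traffic the queue-backed Pipe above behaves like
its socket-based cousin, e.g.:

    from multiprocessing.dummy.connection import Pipe

    a, b = Pipe()
    a.send('ping')
    print(b.recv())                    # prints 'ping'
    b.send('pong')
    print(a.recv())                    # prints 'pong'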

Modified: python/branches/py3k/Lib/multiprocessing/forking.py
==============================================================================
--- /python/trunk/Lib/multiprocessing/forking.py	(original)
+++ python/branches/py3k/Lib/multiprocessing/forking.py	Wed Jun 11 18:44:04 2008
@@ -1,429 +1,429 @@
-#
-# Module for starting a process object using os.fork() or CreateProcess()
-#
-# multiprocessing/forking.py
-#
-# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
-#
-
-import os
-import sys
-import signal
-
-from multiprocessing import util, process
-
-__all__ = ['Popen', 'assert_spawning', 'exit', 'duplicate', 'close']
-
-#
-# Check that the current thread is spawning a child process
-#
-
-def assert_spawning(self):
-    if not Popen.thread_is_spawning():
-        raise RuntimeError(
-            '%s objects should only be shared between processes'
-            ' through inheritance' % type(self).__name__
-            )
-
-#
-# Unix
-#
-
-if sys.platform != 'win32':
-    import time
-
-    exit = os._exit
-    duplicate = os.dup
-    close = os.close
-
-    #
-    # We define a Popen class similar to the one from subprocess, but
-    # whose constructor takes a process object as its argument.
-    #
-
-    class Popen(object):
-
-        def __init__(self, process_obj):
-            sys.stdout.flush()
-            sys.stderr.flush()
-            self.returncode = None
-
-            self.pid = os.fork()
-            if self.pid == 0:
-                if 'random' in sys.modules:
-                    import random
-                    random.seed()
-                code = process_obj._bootstrap()
-                sys.stdout.flush()
-                sys.stderr.flush()
-                os._exit(code)
-
-        def poll(self, flag=os.WNOHANG):
-            if self.returncode is None:
-                pid, sts = os.waitpid(self.pid, flag)
-                if pid == self.pid:
-                    if os.WIFSIGNALED(sts):
-                        self.returncode = -os.WTERMSIG(sts)
-                    else:
-                        assert os.WIFEXITED(sts)
-                        self.returncode = os.WEXITSTATUS(sts)
-            return self.returncode
-
-        def wait(self, timeout=None):
-            if timeout is None:
-                return self.poll(0)
-            deadline = time.time() + timeout
-            delay = 0.0005
-            while 1:
-                res = self.poll()
-                if res is not None:
-                    break
-                remaining = deadline - time.time()
-                if remaining <= 0:
-                    break
-                delay = min(delay * 2, remaining, 0.05)
-                time.sleep(delay)
-            return res
-
-        def terminate(self):
-            if self.returncode is None:
-                try:
-                    os.kill(self.pid, signal.SIGTERM)
-                except OSError, e:
-                    if self.wait(timeout=0.1) is None:
-                        raise
-                    
-        @staticmethod
-        def thread_is_spawning():
-            return False
-
-#
-# Windows
-#
-
-else:
-    import thread
-    import msvcrt
-    import _subprocess
-    import copy_reg
-    import time
-    
-    from ._multiprocessing import win32, Connection, PipeConnection
-    from .util import Finalize
-    
-    try:
-        from cPickle import dump, load, HIGHEST_PROTOCOL
-    except ImportError:
-        from pickle import dump, load, HIGHEST_PROTOCOL
-
-    #
-    #
-    #
-
-    TERMINATE = 0x10000
-    WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
-
-    exit = win32.ExitProcess
-    close = win32.CloseHandle
-
-    #
-    # _python_exe is the assumed path to the python executable.
-    # People embedding Python want to modify it.
-    #
-
-    if sys.executable.lower().endswith('pythonservice.exe'):
-        _python_exe = os.path.join(sys.exec_prefix, 'python.exe')
-    else:
-        _python_exe = sys.executable
-
-    def set_executable(exe):
-        global _python_exe
-        _python_exe = exe
-
-    #
-    #
-    #
-
-    def duplicate(handle, target_process=None, inheritable=False):
-        if target_process is None:
-            target_process = _subprocess.GetCurrentProcess()
-        return _subprocess.DuplicateHandle(
-            _subprocess.GetCurrentProcess(), handle, target_process,
-            0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS
-            ).Detach()
-
-    #
-    # We define a Popen class similar to the one from subprocess, but
-    # whose constructor takes a process object as its argument.
-    #
-
-    class Popen(object):
-        '''
-        Start a subprocess to run the code of a process object
-        '''
-        _tls = thread._local()
-
-        def __init__(self, process_obj):
-            # create pipe for communication with child
-            rfd, wfd = os.pipe()
-
-            # get handle for read end of the pipe and make it inheritable
-            rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True)
-            os.close(rfd)
-
-            # start process
-            cmd = get_command_line() + [rhandle]
-            cmd = ' '.join('"%s"' % x for x in cmd)
-            hp, ht, pid, tid = _subprocess.CreateProcess(
-                _python_exe, cmd, None, None, 1, 0, None, None, None
-                )
-            ht.Close()
-            close(rhandle)
-
-            # set attributes of self
-            self.pid = pid
-            self.returncode = None
-            self._handle = hp
-
-            # send information to child
-            prep_data = get_preparation_data(process_obj._name)
-            to_child = os.fdopen(wfd, 'wb')
-            Popen._tls.process_handle = int(hp)
-            try:
-                dump(prep_data, to_child, HIGHEST_PROTOCOL)
-                dump(process_obj, to_child, HIGHEST_PROTOCOL)
-            finally:
-                del Popen._tls.process_handle
-                to_child.close()
-
-        @staticmethod
-        def thread_is_spawning():
-            return getattr(Popen._tls, 'process_handle', None) is not None
-
-        @staticmethod
-        def duplicate_for_child(handle):
-            return duplicate(handle, Popen._tls.process_handle)
-
-        def wait(self, timeout=None):
-            if self.returncode is None:
-                if timeout is None:
-                    msecs = _subprocess.INFINITE
-                else:
-                    msecs = max(0, int(timeout * 1000 + 0.5))
-
-                res = _subprocess.WaitForSingleObject(int(self._handle), msecs)
-                if res == _subprocess.WAIT_OBJECT_0:
-                    code = _subprocess.GetExitCodeProcess(self._handle)
-                    if code == TERMINATE:
-                        code = -signal.SIGTERM
-                    self.returncode = code
-                    
-            return self.returncode
-
-        def poll(self):
-            return self.wait(timeout=0)
-
-        def terminate(self):
-            if self.returncode is None:
-                try:
-                    _subprocess.TerminateProcess(int(self._handle), TERMINATE)
-                except WindowsError:
-                    if self.wait(timeout=0.1) is None:
-                        raise
-        
-    #
-    #
-    #
-
-    def is_forking(argv):
-        '''
-        Return whether commandline indicates we are forking
-        '''
-        if len(argv) >= 2 and argv[1] == '--multiprocessing-fork':
-            assert len(argv) == 3
-            return True
-        else:
-            return False
-
-
-    def freeze_support():
-        '''
-        Run code for process object if this in not the main process
-        '''
-        if is_forking(sys.argv):
-            main()
-            sys.exit()
-
-
-    def get_command_line():
-        '''
-        Returns prefix of command line used for spawning a child process
-        '''
-        if process.current_process()._identity==() and is_forking(sys.argv):
-            raise RuntimeError('''
-            Attempt to start a new process before the current process
-            has finished its bootstrapping phase.
-
-            This probably means that you are on Windows and you have
-            forgotten to use the proper idiom in the main module:
-
-                if __name__ == '__main__':
-                    freeze_support()
-                    ...
-
-            The "freeze_support()" line can be omitted if the program
-            is not going to be frozen to produce a Windows executable.''')
-
-        if getattr(sys, 'frozen', False):
-            return [sys.executable, '--multiprocessing-fork']
-        else:
-            prog = 'from multiprocessing.forking import main; main()'
-            return [_python_exe, '-c', prog, '--multiprocessing-fork']
-
-
-    def main():
-        '''
-        Run code specifed by data received over pipe
-        '''
-        assert is_forking(sys.argv)
-
-        handle = int(sys.argv[-1])
-        fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
-        from_parent = os.fdopen(fd, 'rb')
-
-        process.current_process()._inheriting = True
-        preparation_data = load(from_parent)
-        prepare(preparation_data)
-        self = load(from_parent)
-        process.current_process()._inheriting = False
-
-        from_parent.close()
-
-        exitcode = self._bootstrap()
-        exit(exitcode)
-
-
-    def get_preparation_data(name):
-        '''
-        Return info about parent needed by child to unpickle process object
-        '''
-        from .util import _logger, _log_to_stderr
-        
-        d = dict(
-            name=name,
-            sys_path=sys.path,
-            sys_argv=sys.argv,
-            log_to_stderr=_log_to_stderr,
-            orig_dir=process.ORIGINAL_DIR,
-            authkey=process.current_process().get_authkey(),
-            )
-        
-        if _logger is not None:
-            d['log_level'] = _logger.getEffectiveLevel()
-
-        if not WINEXE:
-            main_path = getattr(sys.modules['__main__'], '__file__', None)
-            if not main_path and sys.argv[0] not in ('', '-c'):
-                main_path = sys.argv[0]
-            if main_path is not None:
-                if not os.path.isabs(main_path) and \
-                                          process.ORIGINAL_DIR is not None:
-                    main_path = os.path.join(process.ORIGINAL_DIR, main_path)
-                d['main_path'] = os.path.normpath(main_path)
-
-        return d
-
-    #
-    # Make (Pipe)Connection picklable
-    #
-    
-    def reduce_connection(conn):
-        if not Popen.thread_is_spawning():
-            raise RuntimeError(
-                'By default %s objects can only be shared between processes\n'
-                'using inheritance' % type(conn).__name__
-                )
-        return type(conn), (Popen.duplicate_for_child(conn.fileno()),
-                            conn.readable, conn.writable)
-    
-    copy_reg.pickle(Connection, reduce_connection)
-    copy_reg.pickle(PipeConnection, reduce_connection)
-
-
-#
-# Prepare current process
-#
-
-old_main_modules = []
-
-def prepare(data):
-    '''
-    Try to get current process ready to unpickle process object
-    '''
-    old_main_modules.append(sys.modules['__main__'])
-
-    if 'name' in data:
-        process.current_process().set_name(data['name'])
-
-    if 'authkey' in data:
-        process.current_process()._authkey = data['authkey']
-    
-    if 'log_to_stderr' in data and data['log_to_stderr']:
-        util.log_to_stderr()
-
-    if 'log_level' in data:
-        util.get_logger().setLevel(data['log_level'])
-
-    if 'sys_path' in data:
-        sys.path = data['sys_path']
-
-    if 'sys_argv' in data:
-        sys.argv = data['sys_argv']
-
-    if 'dir' in data:
-        os.chdir(data['dir'])
-
-    if 'orig_dir' in data:
-        process.ORIGINAL_DIR = data['orig_dir']
-
-    if 'main_path' in data:
-        main_path = data['main_path']
-        main_name = os.path.splitext(os.path.basename(main_path))[0]
-        if main_name == '__init__':
-            main_name = os.path.basename(os.path.dirname(main_path))
-
-        if main_name != 'ipython':
-            import imp
-
-            if main_path is None:
-                dirs = None
-            elif os.path.basename(main_path).startswith('__init__.py'):
-                dirs = [os.path.dirname(os.path.dirname(main_path))]
-            else:
-                dirs = [os.path.dirname(main_path)]
-
-            assert main_name not in sys.modules, main_name
-            file, path_name, etc = imp.find_module(main_name, dirs)
-            try:
-                # We would like to do "imp.load_module('__main__', ...)"
-                # here.  However, that would cause 'if __name__ ==
-                # "__main__"' clauses to be executed.
-                main_module = imp.load_module(
-                    '__parents_main__', file, path_name, etc
-                    )
-            finally:
-                if file:
-                    file.close()
-
-            sys.modules['__main__'] = main_module
-            main_module.__name__ = '__main__'
-
-            # Try to make the potentially picklable objects in
-            # sys.modules['__main__'] realize they are in the main
-            # module -- somewhat ugly.
-            for obj in main_module.__dict__.values():
-                try:
-                    if obj.__module__ == '__parents_main__':
-                        obj.__module__ = '__main__'
-                except Exception:
-                    pass
+#
+# Module for starting a process object using os.fork() or CreateProcess()
+#
+# multiprocessing/forking.py
+#
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
+#
+
+import os
+import sys
+import signal
+
+from multiprocessing import util, process
+
+__all__ = ['Popen', 'assert_spawning', 'exit', 'duplicate', 'close']
+
+#
+# Check that the current thread is spawning a child process
+#
+
+def assert_spawning(self):
+    if not Popen.thread_is_spawning():
+        raise RuntimeError(
+            '%s objects should only be shared between processes'
+            ' through inheritance' % type(self).__name__
+            )
+
+#
+# Unix
+#
+
+if sys.platform != 'win32':
+    import time
+
+    exit = os._exit
+    duplicate = os.dup
+    close = os.close
+
+    #
+    # We define a Popen class similar to the one from subprocess, but
+    # whose constructor takes a process object as its argument.
+    #
+
+    class Popen(object):
+
+        def __init__(self, process_obj):
+            sys.stdout.flush()
+            sys.stderr.flush()
+            self.returncode = None
+
+            self.pid = os.fork()
+            if self.pid == 0:
+                if 'random' in sys.modules:
+                    import random
+                    random.seed()
+                code = process_obj._bootstrap()
+                sys.stdout.flush()
+                sys.stderr.flush()
+                os._exit(code)
+
+        def poll(self, flag=os.WNOHANG):
+            if self.returncode is None:
+                pid, sts = os.waitpid(self.pid, flag)
+                if pid == self.pid:
+                    if os.WIFSIGNALED(sts):
+                        self.returncode = -os.WTERMSIG(sts)
+                    else:
+                        assert os.WIFEXITED(sts)
+                        self.returncode = os.WEXITSTATUS(sts)
+            return self.returncode
+
+        def wait(self, timeout=None):
+            if timeout is None:
+                return self.poll(0)
+            deadline = time.time() + timeout
+            delay = 0.0005
+            while 1:
+                res = self.poll()
+                if res is not None:
+                    break
+                remaining = deadline - time.time()
+                if remaining <= 0:
+                    break
+                delay = min(delay * 2, remaining, 0.05)
+                time.sleep(delay)
+            return res
+
+        def terminate(self):
+            if self.returncode is None:
+                try:
+                    os.kill(self.pid, signal.SIGTERM)
+                except OSError as e:
+                    if self.wait(timeout=0.1) is None:
+                        raise
+
+        @staticmethod
+        def thread_is_spawning():
+            return False
+
+#
+# Windows
+#
+
+else:
+    import _thread
+    import msvcrt
+    import _subprocess
+    import copyreg
+    import time
+
+    from ._multiprocessing import win32, Connection, PipeConnection
+    from .util import Finalize
+
+    try:
+        from cPickle import dump, load, HIGHEST_PROTOCOL
+    except ImportError:
+        from pickle import dump, load, HIGHEST_PROTOCOL
+
+    #
+    #
+    #
+
+    TERMINATE = 0x10000
+    WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
+
+    exit = win32.ExitProcess
+    close = win32.CloseHandle
+
+    #
+    # _python_exe is the assumed path to the python executable.
+    # People embedding Python want to modify it.
+    #
+
+    if sys.executable.lower().endswith('pythonservice.exe'):
+        _python_exe = os.path.join(sys.exec_prefix, 'python.exe')
+    else:
+        _python_exe = sys.executable
+
+    def set_executable(exe):
+        global _python_exe
+        _python_exe = exe
+
+    #
+    #
+    #
+
+    def duplicate(handle, target_process=None, inheritable=False):
+        if target_process is None:
+            target_process = _subprocess.GetCurrentProcess()
+        return _subprocess.DuplicateHandle(
+            _subprocess.GetCurrentProcess(), handle, target_process,
+            0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS
+            ).Detach()
+
+    #
+    # We define a Popen class similar to the one from subprocess, but
+    # whose constructor takes a process object as its argument.
+    #
+
+    class Popen(object):
+        '''
+        Start a subprocess to run the code of a process object
+        '''
+        _tls = _thread._local()
+
+        def __init__(self, process_obj):
+            # create pipe for communication with child
+            rfd, wfd = os.pipe()
+
+            # get handle for read end of the pipe and make it inheritable
+            rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True)
+            os.close(rfd)
+
+            # start process
+            cmd = get_command_line() + [rhandle]
+            cmd = ' '.join('"%s"' % x for x in cmd)
+            hp, ht, pid, tid = _subprocess.CreateProcess(
+                _python_exe, cmd, None, None, 1, 0, None, None, None
+                )
+            ht.Close()
+            close(rhandle)
+
+            # set attributes of self
+            self.pid = pid
+            self.returncode = None
+            self._handle = hp
+
+            # send information to child
+            prep_data = get_preparation_data(process_obj._name)
+            to_child = os.fdopen(wfd, 'wb')
+            Popen._tls.process_handle = int(hp)
+            try:
+                dump(prep_data, to_child, HIGHEST_PROTOCOL)
+                dump(process_obj, to_child, HIGHEST_PROTOCOL)
+            finally:
+                del Popen._tls.process_handle
+                to_child.close()
+
+        @staticmethod
+        def thread_is_spawning():
+            return getattr(Popen._tls, 'process_handle', None) is not None
+
+        @staticmethod
+        def duplicate_for_child(handle):
+            return duplicate(handle, Popen._tls.process_handle)
+
+        def wait(self, timeout=None):
+            if self.returncode is None:
+                if timeout is None:
+                    msecs = _subprocess.INFINITE
+                else:
+                    msecs = max(0, int(timeout * 1000 + 0.5))
+
+                res = _subprocess.WaitForSingleObject(int(self._handle), msecs)
+                if res == _subprocess.WAIT_OBJECT_0:
+                    code = _subprocess.GetExitCodeProcess(self._handle)
+                    if code == TERMINATE:
+                        code = -signal.SIGTERM
+                    self.returncode = code
+
+            return self.returncode
+
+        def poll(self):
+            return self.wait(timeout=0)
+
+        def terminate(self):
+            if self.returncode is None:
+                try:
+                    _subprocess.TerminateProcess(int(self._handle), TERMINATE)
+                except WindowsError:
+                    if self.wait(timeout=0.1) is None:
+                        raise
+
+    #
+    #
+    #
+
+    def is_forking(argv):
+        '''
+        Return whether the command line indicates we are forking
+        '''
+        if len(argv) >= 2 and argv[1] == '--multiprocessing-fork':
+            assert len(argv) == 3
+            return True
+        else:
+            return False
+
+
+    def freeze_support():
+        '''
+        Run code for process object if this is not the main process
+        '''
+        if is_forking(sys.argv):
+            main()
+            sys.exit()
+
+
+    def get_command_line():
+        '''
+        Returns prefix of command line used for spawning a child process
+        '''
+        if process.current_process()._identity==() and is_forking(sys.argv):
+            raise RuntimeError('''
+            Attempt to start a new process before the current process
+            has finished its bootstrapping phase.
+
+            This probably means that you are on Windows and you have
+            forgotten to use the proper idiom in the main module:
+
+                if __name__ == '__main__':
+                    freeze_support()
+                    ...
+
+            The "freeze_support()" line can be omitted if the program
+            is not going to be frozen to produce a Windows executable.''')
+
+        if getattr(sys, 'frozen', False):
+            return [sys.executable, '--multiprocessing-fork']
+        else:
+            prog = 'from multiprocessing.forking import main; main()'
+            return [_python_exe, '-c', prog, '--multiprocessing-fork']
+
+
+    def main():
+        '''
+        Run code specified by data received over pipe
+        '''
+        assert is_forking(sys.argv)
+
+        handle = int(sys.argv[-1])
+        fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
+        from_parent = os.fdopen(fd, 'rb')
+
+        process.current_process()._inheriting = True
+        preparation_data = load(from_parent)
+        prepare(preparation_data)
+        self = load(from_parent)
+        process.current_process()._inheriting = False
+
+        from_parent.close()
+
+        exitcode = self._bootstrap()
+        exit(exitcode)
+
+
+    def get_preparation_data(name):
+        '''
+        Return info about parent needed by child to unpickle process object
+        '''
+        from .util import _logger, _log_to_stderr
+
+        d = dict(
+            name=name,
+            sys_path=sys.path,
+            sys_argv=sys.argv,
+            log_to_stderr=_log_to_stderr,
+            orig_dir=process.ORIGINAL_DIR,
+            authkey=process.current_process().get_authkey(),
+            )
+
+        if _logger is not None:
+            d['log_level'] = _logger.getEffectiveLevel()
+
+        if not WINEXE:
+            main_path = getattr(sys.modules['__main__'], '__file__', None)
+            if not main_path and sys.argv[0] not in ('', '-c'):
+                main_path = sys.argv[0]
+            if main_path is not None:
+                if not os.path.isabs(main_path) and \
+                                          process.ORIGINAL_DIR is not None:
+                    main_path = os.path.join(process.ORIGINAL_DIR, main_path)
+                d['main_path'] = os.path.normpath(main_path)
+
+        return d
+
+    #
+    # Make (Pipe)Connection picklable
+    #
+
+    def reduce_connection(conn):
+        if not Popen.thread_is_spawning():
+            raise RuntimeError(
+                'By default %s objects can only be shared between processes\n'
+                'using inheritance' % type(conn).__name__
+                )
+        return type(conn), (Popen.duplicate_for_child(conn.fileno()),
+                            conn.readable, conn.writable)
+
+    copyreg.pickle(Connection, reduce_connection)
+    copyreg.pickle(PipeConnection, reduce_connection)
+
+
+#
+# Prepare current process
+#
+
+old_main_modules = []
+
+def prepare(data):
+    '''
+    Try to get current process ready to unpickle process object
+    '''
+    old_main_modules.append(sys.modules['__main__'])
+
+    if 'name' in data:
+        process.current_process().set_name(data['name'])
+
+    if 'authkey' in data:
+        process.current_process()._authkey = data['authkey']
+
+    if 'log_to_stderr' in data and data['log_to_stderr']:
+        util.log_to_stderr()
+
+    if 'log_level' in data:
+        util.get_logger().setLevel(data['log_level'])
+
+    if 'sys_path' in data:
+        sys.path = data['sys_path']
+
+    if 'sys_argv' in data:
+        sys.argv = data['sys_argv']
+
+    if 'dir' in data:
+        os.chdir(data['dir'])
+
+    if 'orig_dir' in data:
+        process.ORIGINAL_DIR = data['orig_dir']
+
+    if 'main_path' in data:
+        main_path = data['main_path']
+        main_name = os.path.splitext(os.path.basename(main_path))[0]
+        if main_name == '__init__':
+            main_name = os.path.basename(os.path.dirname(main_path))
+
+        if main_name != 'ipython':
+            import imp
+
+            if main_path is None:
+                dirs = None
+            elif os.path.basename(main_path).startswith('__init__.py'):
+                dirs = [os.path.dirname(os.path.dirname(main_path))]
+            else:
+                dirs = [os.path.dirname(main_path)]
+
+            assert main_name not in sys.modules, main_name
+            file, path_name, etc = imp.find_module(main_name, dirs)
+            try:
+                # We would like to do "imp.load_module('__main__', ...)"
+                # here.  However, that would cause 'if __name__ ==
+                # "__main__"' clauses to be executed.
+                main_module = imp.load_module(
+                    '__parents_main__', file, path_name, etc
+                    )
+            finally:
+                if file:
+                    file.close()
+
+            sys.modules['__main__'] = main_module
+            main_module.__name__ = '__main__'
+
+            # Try to make the potentially picklable objects in
+            # sys.modules['__main__'] realize they are in the main
+            # module -- somewhat ugly.
+            for obj in list(main_module.__dict__.values()):
+                try:
+                    if obj.__module__ == '__parents_main__':
+                        obj.__module__ = '__main__'
+                except Exception:
+                    pass
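
The RuntimeError text in get_command_line() spells out the idiom that
Windows (and frozen executables) require of the main module; filled in
with an illustrative worker function, it looks like this:

    from multiprocessing import Process, freeze_support

    def worker():
        print('hello from the child')

    if __name__ == '__main__':
        freeze_support()       # no-op unless frozen on Windows
        p = Process(target=worker)
        p.start()
        p.join()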

Modified: python/branches/py3k/Lib/multiprocessing/heap.py
==============================================================================
--- /python/trunk/Lib/multiprocessing/heap.py	(original)
+++ python/branches/py3k/Lib/multiprocessing/heap.py	Wed Jun 11 18:44:04 2008
@@ -34,7 +34,7 @@
 
         def __init__(self, size):
             self.size = size
-            self.name = 'pym-%d-%d' % (os.getpid(), Arena._counter.next())
+            self.name = 'pym-%d-%d' % (os.getpid(), next(Arena._counter))
             self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
             assert win32.GetLastError() == 0, 'tagname already in use'
             self._state = (self.size, self.name)
@@ -161,7 +161,7 @@
 
     def malloc(self, size):
         # return a block of right size (possibly rounded up)
-        assert 0 <= size < sys.maxint
+        assert 0 <= size < sys.maxsize
         if os.getpid() != self._lastpid:
             self.__init__()                     # reinitialize after fork
         self._lock.acquire()
@@ -186,7 +186,7 @@
     _heap = Heap()
 
     def __init__(self, size):
-        assert 0 <= size < sys.maxint
+        assert 0 <= size < sys.maxsize
         block = BufferWrapper._heap.malloc(size)
         self._state = (block, size)
         Finalize(self, BufferWrapper._heap.free, args=(block,))
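
Both heap.py hunks are routine py3k ports: iterators lose their .next()
method in favour of the next() builtin, and sys.maxint is gone (ints
are unbounded), with sys.maxsize as the closest stand-in:

    import itertools
    import sys

    counter = itertools.count()
    assert next(counter) == 0          # was counter.next() in 2.x
    assert 0 <= 8192 < sys.maxsize     # was sys.maxint in 2.x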

Modified: python/branches/py3k/Lib/multiprocessing/managers.py
==============================================================================
--- /python/trunk/Lib/multiprocessing/managers.py	(original)
+++ python/branches/py3k/Lib/multiprocessing/managers.py	Wed Jun 11 18:44:04 2008
@@ -1,1092 +1,1092 @@
-#
-# Module providing the `SyncManager` class for dealing
-# with shared objects
-#
-# multiprocessing/managers.py
-#
-# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
-#
-
-__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
-
-#
-# Imports
-#
-
-import os
-import sys
-import weakref
-import threading
-import array
-import copy_reg
-import Queue
-
-from traceback import format_exc
-from multiprocessing import Process, current_process, active_children, Pool, util, connection
-from multiprocessing.process import AuthenticationString
-from multiprocessing.forking import exit, Popen, assert_spawning
-from multiprocessing.util import Finalize, info
-
-try:
-    from cPickle import PicklingError
-except ImportError:
-    from pickle import PicklingError
-
-#
-#
-#
-
-try:
-    bytes
-except NameError:
-    bytes = str                  # XXX not needed in Py2.6 and Py3.0
-    
-#
-# Register some things for pickling
-#
-
-def reduce_array(a):
-    return array.array, (a.typecode, a.tostring())
-copy_reg.pickle(array.array, reduce_array)
-
-view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
-if view_types[0] is not list:       # XXX only needed in Py3.0
-    def rebuild_as_list(obj):
-        return list, (list(obj),)
-    for view_type in view_types:
-        copy_reg.pickle(view_type, rebuild_as_list)
-    
-#
-# Type for identifying shared objects
-#
-
-class Token(object):
-    '''
-    Type to uniquely indentify a shared object
-    '''
-    __slots__ = ('typeid', 'address', 'id')
-
-    def __init__(self, typeid, address, id):
-        (self.typeid, self.address, self.id) = (typeid, address, id)
-
-    def __getstate__(self):
-        return (self.typeid, self.address, self.id)
-
-    def __setstate__(self, state):
-        (self.typeid, self.address, self.id) = state
-
-    def __repr__(self):
-        return 'Token(typeid=%r, address=%r, id=%r)' % \
-               (self.typeid, self.address, self.id)
-
-#
-# Function for communication with a manager's server process
-#
-
-def dispatch(c, id, methodname, args=(), kwds={}):
-    '''
-    Send a message to manager using connection `c` and return response
-    '''
-    c.send((id, methodname, args, kwds))
-    kind, result = c.recv()
-    if kind == '#RETURN':
-        return result
-    raise convert_to_error(kind, result)
-
-def convert_to_error(kind, result):
-    if kind == '#ERROR':
-        return result
-    elif kind == '#TRACEBACK':
-        assert type(result) is str
-        return  RemoteError(result)
-    elif kind == '#UNSERIALIZABLE':
-        assert type(result) is str
-        return RemoteError('Unserializable message: %s\n' % result)
-    else:
-        return ValueError('Unrecognized message type')
-        
-class RemoteError(Exception):
-    def __str__(self):
-        return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
-
-#
-# Functions for finding the method names of an object
-#
-
-def all_methods(obj):
-    '''
-    Return a list of names of methods of `obj`
-    '''
-    temp = []
-    for name in dir(obj):
-        func = getattr(obj, name)
-        if hasattr(func, '__call__'):
-            temp.append(name)
-    return temp
-
-def public_methods(obj):
-    '''
-    Return a list of names of methods of `obj` which do not start with '_'
-    '''
-    return [name for name in all_methods(obj) if name[0] != '_']
-
-#
-# Server which is run in a process controlled by a manager
-#
-
-class Server(object):
-    '''
-    Server class which runs in a process controlled by a manager object
-    '''
-    public = ['shutdown', 'create', 'accept_connection', 'get_methods',
-              'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
-
-    def __init__(self, registry, address, authkey, serializer):
-        assert isinstance(authkey, bytes)
-        self.registry = registry
-        self.authkey = AuthenticationString(authkey)
-        Listener, Client = listener_client[serializer]
-
-        # do authentication later
-        self.listener = Listener(address=address, backlog=5)
-        self.address = self.listener.address
-
-        self.id_to_obj = {0: (None, ())}
-        self.id_to_refcount = {}
-        self.mutex = threading.RLock()
-        self.stop = 0
-
-    def serve_forever(self):
-        '''
-        Run the server forever
-        '''
-        current_process()._manager_server = self
-        try:
-            try:
-                while 1:
-                    try:
-                        c = self.listener.accept()
-                    except (OSError, IOError):
-                        continue
-                    t = threading.Thread(target=self.handle_request, args=(c,))
-                    t.setDaemon(True)
-                    t.start()
-            except (KeyboardInterrupt, SystemExit):
-                pass
-        finally:
-            self.stop = 999
-            self.listener.close()
-
-    def handle_request(self, c):
-        '''
-        Handle a new connection
-        '''
-        funcname = result = request = None
-        try:
-            connection.deliver_challenge(c, self.authkey)
-            connection.answer_challenge(c, self.authkey)
-            request = c.recv()
-            ignore, funcname, args, kwds = request
-            assert funcname in self.public, '%r unrecognized' % funcname
-            func = getattr(self, funcname)
-        except Exception:
-            msg = ('#TRACEBACK', format_exc())
-        else:
-            try:
-                result = func(c, *args, **kwds)
-            except Exception:
-                msg = ('#TRACEBACK', format_exc())
-            else:
-                msg = ('#RETURN', result)
-        try:
-            c.send(msg)
-        except Exception, e:
-            try:
-                c.send(('#TRACEBACK', format_exc()))
-            except Exception:
-                pass
-            util.info('Failure to send message: %r', msg)
-            util.info(' ... request was %r', request)
-            util.info(' ... exception was %r', e)
-
-        c.close()
-
-    def serve_client(self, conn):
-        '''
-        Handle requests from the proxies in a particular process/thread
-        '''
-        util.debug('starting server thread to service %r',
-                   threading.currentThread().getName())
-
-        recv = conn.recv
-        send = conn.send
-        id_to_obj = self.id_to_obj
-
-        while not self.stop:
-
-            try:
-                methodname = obj = None
-                request = recv()
-                ident, methodname, args, kwds = request
-                obj, exposed, gettypeid = id_to_obj[ident]
-
-                if methodname not in exposed:
-                    raise AttributeError(
-                        'method %r of %r object is not in exposed=%r' %
-                        (methodname, type(obj), exposed)
-                        )
-
-                function = getattr(obj, methodname)
-
-                try:
-                    res = function(*args, **kwds)
-                except Exception, e:
-                    msg = ('#ERROR', e)
-                else:
-                    typeid = gettypeid and gettypeid.get(methodname, None)
-                    if typeid:
-                        rident, rexposed = self.create(conn, typeid, res)
-                        token = Token(typeid, self.address, rident)
-                        msg = ('#PROXY', (rexposed, token))
-                    else:
-                        msg = ('#RETURN', res)
-
-            except AttributeError:
-                if methodname is None:
-                    msg = ('#TRACEBACK', format_exc())
-                else:
-                    try:
-                        fallback_func = self.fallback_mapping[methodname]
-                        result = fallback_func(
-                            self, conn, ident, obj, *args, **kwds
-                            )
-                        msg = ('#RETURN', result)
-                    except Exception:
-                        msg = ('#TRACEBACK', format_exc())
-
-            except EOFError:
-                util.debug('got EOF -- exiting thread serving %r',
-                           threading.currentThread().getName())
-                sys.exit(0)
-
-            except Exception:
-                msg = ('#TRACEBACK', format_exc())
-
-            try:
-                try:
-                    send(msg)
-                except Exception, e:
-                    send(('#UNSERIALIZABLE', repr(msg)))
-            except Exception, e:
-                util.info('exception in thread serving %r',
-                        threading.currentThread().getName())
-                util.info(' ... message was %r', msg)
-                util.info(' ... exception was %r', e)
-                conn.close()
-                sys.exit(1)
-
-    def fallback_getvalue(self, conn, ident, obj):
-        return obj
-
-    def fallback_str(self, conn, ident, obj):
-        return str(obj)
-
-    def fallback_repr(self, conn, ident, obj):
-        return repr(obj)
-
-    fallback_mapping = {
-        '__str__':fallback_str,
-        '__repr__':fallback_repr,
-        '#GETVALUE':fallback_getvalue
-        }
-
-    def dummy(self, c):
-        pass
-
-    def debug_info(self, c):
-        '''
-        Return some info --- useful to spot problems with refcounting
-        '''
-        self.mutex.acquire()
-        try:
-            result = []
-            keys = self.id_to_obj.keys()
-            keys.sort()
-            for ident in keys:
-                if ident != 0:
-                    result.append('  %s:       refcount=%s\n    %s' %
-                                  (ident, self.id_to_refcount[ident],
-                                   str(self.id_to_obj[ident][0])[:75]))
-            return '\n'.join(result)
-        finally:
-            self.mutex.release()
-
-    def number_of_objects(self, c):
-        '''
-        Number of shared objects
-        '''
-        return len(self.id_to_obj) - 1      # don't count ident=0
-
-    def shutdown(self, c):
-        '''
-        Shutdown this process
-        '''
-        try:
-            try:
-                util.debug('manager received shutdown message')
-                c.send(('#RETURN', None))
-
-                if sys.stdout != sys.__stdout__:
-                    util.debug('resetting stdout, stderr')
-                    sys.stdout = sys.__stdout__
-                    sys.stderr = sys.__stderr__
-                    
-                util._run_finalizers(0)
-
-                for p in active_children():
-                    util.debug('terminating a child process of manager')
-                    p.terminate()
-
-                for p in active_children():
-                    util.debug('terminating a child process of manager')
-                    p.join()
-
-                util._run_finalizers()
-                util.info('manager exiting with exitcode 0')
-            except:
-                import traceback
-                traceback.print_exc()
-        finally:
-            exit(0)
-            
-    def create(self, c, typeid, *args, **kwds):
-        '''
-        Create a new shared object and return its id
-        '''
-        self.mutex.acquire()
-        try:
-            callable, exposed, method_to_typeid, proxytype = \
-                      self.registry[typeid]
-            
-            if callable is None:
-                assert len(args) == 1 and not kwds
-                obj = args[0]
-            else:
-                obj = callable(*args, **kwds)
-
-            if exposed is None:
-                exposed = public_methods(obj)
-            if method_to_typeid is not None:
-                assert type(method_to_typeid) is dict
-                exposed = list(exposed) + list(method_to_typeid)
-
-            ident = '%x' % id(obj)  # convert to string because xmlrpclib
-                                    # only has 32 bit signed integers
-            util.debug('%r callable returned object with id %r', typeid, ident)
-
-            self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
-            if ident not in self.id_to_refcount:
-                self.id_to_refcount[ident] = None
-            return ident, tuple(exposed)
-        finally:
-            self.mutex.release()
-
-    def get_methods(self, c, token):
-        '''
-        Return the methods of the shared object indicated by token
-        '''
-        return tuple(self.id_to_obj[token.id][1])
-
-    def accept_connection(self, c, name):
-        '''
-        Spawn a new thread to serve this connection
-        '''
-        threading.currentThread().setName(name)
-        c.send(('#RETURN', None))
-        self.serve_client(c)
-
-    def incref(self, c, ident):
-        self.mutex.acquire()
-        try:
-            try:
-                self.id_to_refcount[ident] += 1
-            except TypeError:
-                assert self.id_to_refcount[ident] is None
-                self.id_to_refcount[ident] = 1
-        finally:
-            self.mutex.release()
-
-    def decref(self, c, ident):
-        self.mutex.acquire()
-        try:
-            assert self.id_to_refcount[ident] >= 1
-            self.id_to_refcount[ident] -= 1
-            if self.id_to_refcount[ident] == 0:
-                del self.id_to_obj[ident], self.id_to_refcount[ident]
-                util.debug('disposing of obj with id %r', ident)
-        finally:
-            self.mutex.release()
-
-#
-# Class to represent state of a manager
-#
-
-class State(object):
-    __slots__ = ['value']
-    INITIAL = 0
-    STARTED = 1
-    SHUTDOWN = 2
-
-#
-# Mapping from serializer name to Listener and Client types
-#
-
-listener_client = {
-    'pickle' : (connection.Listener, connection.Client),
-    'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
-    }
-
-#
-# Definition of BaseManager
-#
-
-class BaseManager(object):
-    '''
-    Base class for managers
-    '''
-    _registry = {}
-    _Server = Server
-    
-    def __init__(self, address=None, authkey=None, serializer='pickle'):
-        if authkey is None:
-            authkey = current_process().get_authkey()
-        self._address = address     # XXX not final address if eg ('', 0)
-        self._authkey = AuthenticationString(authkey)
-        self._state = State()
-        self._state.value = State.INITIAL
-        self._serializer = serializer
-        self._Listener, self._Client = listener_client[serializer]
-
-    def __reduce__(self):
-        return type(self).from_address, \
-               (self._address, self._authkey, self._serializer)
-
-    def get_server(self):
-        '''
-        Return server object with serve_forever() method and address attribute
-        '''
-        assert self._state.value == State.INITIAL
-        return Server(self._registry, self._address,
-                      self._authkey, self._serializer)
-
-    def connect(self):
-        '''
-        Connect manager object to the server process
-        '''
-        Listener, Client = listener_client[self._serializer]
-        conn = Client(self._address, authkey=self._authkey)
-        dispatch(conn, None, 'dummy')
-        self._state.value = State.STARTED
-        
-    def start(self):
-        '''
-        Spawn a server process for this manager object
-        '''
-        assert self._state.value == State.INITIAL
-
-        # pipe over which we will retrieve address of server
-        reader, writer = connection.Pipe(duplex=False)
-
-        # spawn process which runs a server
-        self._process = Process(
-            target=type(self)._run_server,
-            args=(self._registry, self._address, self._authkey,
-                  self._serializer, writer),
-            )
-        ident = ':'.join(str(i) for i in self._process._identity)
-        self._process.set_name(type(self).__name__  + '-' + ident)
-        self._process.start()
-
-        # get address of server
-        writer.close()
-        self._address = reader.recv()
-        reader.close()
-
-        # register a finalizer
-        self._state.value = State.STARTED
-        self.shutdown = util.Finalize(
-            self, type(self)._finalize_manager,
-            args=(self._process, self._address, self._authkey,
-                  self._state, self._Client),
-            exitpriority=0
-            )
-
-    @classmethod
-    def _run_server(cls, registry, address, authkey, serializer, writer):
-        '''
-        Create a server, report its address and run it
-        '''
-        # create server
-        server = cls._Server(registry, address, authkey, serializer)
-
-        # inform parent process of the server's address
-        writer.send(server.address)
-        writer.close()
-
-        # run the manager
-        util.info('manager serving at %r', server.address)
-        server.serve_forever()
-
-    def _create(self, typeid, *args, **kwds):
-        '''
-        Create a new shared object; return the token and exposed tuple
-        '''
-        assert self._state.value == State.STARTED, 'server not yet started'
-        conn = self._Client(self._address, authkey=self._authkey)
-        try:
-            id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
-        finally:
-            conn.close()
-        return Token(typeid, self._address, id), exposed
-
-    def join(self, timeout=None):
-        '''
-        Join the manager process (if it has been spawned)
-        '''
-        self._process.join(timeout)
-
-    def _debug_info(self):
-        '''
-        Return some info about the server's shared objects and connections
-        '''
-        conn = self._Client(self._address, authkey=self._authkey)
-        try:
-            return dispatch(conn, None, 'debug_info')
-        finally:
-            conn.close()
-
-    def _number_of_objects(self):
-        '''
-        Return the number of shared objects
-        '''
-        conn = self._Client(self._address, authkey=self._authkey)
-        try:        
-            return dispatch(conn, None, 'number_of_objects')
-        finally:
-            conn.close()        
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        self.shutdown()
-
-    @staticmethod
-    def _finalize_manager(process, address, authkey, state, _Client):
-        '''
-        Shutdown the manager process; will be registered as a finalizer
-        '''
-        if process.is_alive():
-            util.info('sending shutdown message to manager')
-            try:
-                conn = _Client(address, authkey=authkey)
-                try:
-                    dispatch(conn, None, 'shutdown')
-                finally:
-                    conn.close()
-            except Exception:
-                pass
-
-            process.join(timeout=0.2)
-            if process.is_alive():
-                util.info('manager still alive')
-                if hasattr(process, 'terminate'):
-                    util.info('trying to `terminate()` manager process')
-                    process.terminate()
-                    process.join(timeout=0.1)
-                    if process.is_alive():
-                        util.info('manager still alive after terminate')
-
-        state.value = State.SHUTDOWN
-        try:
-            del BaseProxy._address_to_local[address]
-        except KeyError:
-            pass
-        
-    address = property(lambda self: self._address)
-
-    @classmethod
-    def register(cls, typeid, callable=None, proxytype=None, exposed=None,
-                 method_to_typeid=None, create_method=True):
-        '''
-        Register a typeid with the manager type
-        '''
-        if '_registry' not in cls.__dict__:
-            cls._registry = cls._registry.copy()
-
-        if proxytype is None:
-            proxytype = AutoProxy
-
-        exposed = exposed or getattr(proxytype, '_exposed_', None)
-
-        method_to_typeid = method_to_typeid or \
-                           getattr(proxytype, '_method_to_typeid_', None)
-
-        if method_to_typeid:
-            for key, value in method_to_typeid.items():
-                assert type(key) is str, '%r is not a string' % key
-                assert type(value) is str, '%r is not a string' % value
-
-        cls._registry[typeid] = (
-            callable, exposed, method_to_typeid, proxytype
-            )
-        
-        if create_method:
-            def temp(self, *args, **kwds):
-                util.debug('requesting creation of a shared %r object', typeid)
-                token, exp = self._create(typeid, *args, **kwds)
-                proxy = proxytype(
-                    token, self._serializer, manager=self,
-                    authkey=self._authkey, exposed=exp
-                    )
-                return proxy
-            temp.__name__ = typeid
-            setattr(cls, typeid, temp)
-
-#
-# Subclass of set which gets cleared after a fork
-#
-
-class ProcessLocalSet(set):
-    def __init__(self):
-        util.register_after_fork(self, lambda obj: obj.clear())
-    def __reduce__(self):
-        return type(self), ()
-
-#
-# Definition of BaseProxy
-#
-
-class BaseProxy(object):
-    '''
-    A base for proxies of shared objects
-    '''
-    _address_to_local = {}
-    _mutex = util.ForkAwareThreadLock()
-
-    def __init__(self, token, serializer, manager=None,
-                 authkey=None, exposed=None, incref=True):
-        BaseProxy._mutex.acquire()
-        try:
-            tls_idset = BaseProxy._address_to_local.get(token.address, None)
-            if tls_idset is None:
-                tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
-                BaseProxy._address_to_local[token.address] = tls_idset
-        finally:
-            BaseProxy._mutex.release()
-
-        # self._tls is used to record the connection used by this
-        # thread to communicate with the manager at token.address
-        self._tls = tls_idset[0]
-
-        # self._idset is used to record the identities of all shared
-        # objects for which the current process owns references and
-        # which are in the manager at token.address
-        self._idset = tls_idset[1]
-
-        self._token = token
-        self._id = self._token.id
-        self._manager = manager
-        self._serializer = serializer
-        self._Client = listener_client[serializer][1]
-
-        if authkey is not None:
-            self._authkey = AuthenticationString(authkey)
-        elif self._manager is not None:
-            self._authkey = self._manager._authkey
-        else:
-            self._authkey = current_process().get_authkey()
-
-        if incref:
-            self._incref()
-            
-        util.register_after_fork(self, BaseProxy._after_fork)
-        
-    def _connect(self):
-        util.debug('making connection to manager')
-        name = current_process().get_name()
-        if threading.currentThread().getName() != 'MainThread':
-            name += '|' + threading.currentThread().getName()
-        conn = self._Client(self._token.address, authkey=self._authkey)
-        dispatch(conn, None, 'accept_connection', (name,))
-        self._tls.connection = conn
-        
-    def _callmethod(self, methodname, args=(), kwds={}):
-        '''
-        Try to call a method of the referent and return a copy of the result
-        '''
-        try:
-            conn = self._tls.connection
-        except AttributeError:
-            util.debug('thread %r does not own a connection',
-                       threading.currentThread().getName())
-            self._connect()
-            conn = self._tls.connection
-
-        conn.send((self._id, methodname, args, kwds))
-        kind, result = conn.recv()
-        
-        if kind == '#RETURN':
-            return result
-        elif kind == '#PROXY':
-            exposed, token = result
-            proxytype = self._manager._registry[token.typeid][-1]
-            return proxytype(
-                token, self._serializer, manager=self._manager,
-                authkey=self._authkey, exposed=exposed
-                )
-        raise convert_to_error(kind, result)
-
-    def _getvalue(self):
-        '''
-        Get a copy of the value of the referent
-        '''
-        return self._callmethod('#GETVALUE')
-
-    def _incref(self):
-        conn = self._Client(self._token.address, authkey=self._authkey)
-        dispatch(conn, None, 'incref', (self._id,))
-        util.debug('INCREF %r', self._token.id)
-
-        self._idset.add(self._id)
-
-        state = self._manager and self._manager._state
-
-        self._close = util.Finalize(
-            self, BaseProxy._decref,
-            args=(self._token, self._authkey, state,
-                  self._tls, self._idset, self._Client),
-            exitpriority=10
-            )
-
-    @staticmethod
-    def _decref(token, authkey, state, tls, idset, _Client):
-        idset.discard(token.id)
-
-        # check whether manager is still alive
-        if state is None or state.value == State.STARTED:
-            # tell manager this process no longer cares about referent
-            try:
-                util.debug('DECREF %r', token.id)
-                conn = _Client(token.address, authkey=authkey)
-                dispatch(conn, None, 'decref', (token.id,))
-            except Exception, e:
-                util.debug('... decref failed %s', e)
-
-        else:
-            util.debug('DECREF %r -- manager already shutdown', token.id)
-
-        # check whether we can close this thread's connection because
-        # the process owns no more references to objects for this manager
-        if not idset and hasattr(tls, 'connection'):
-            util.debug('thread %r has no more proxies so closing conn',
-                       threading.currentThread().getName())
-            tls.connection.close()
-            del tls.connection
-            
-    def _after_fork(self):
-        self._manager = None
-        try:
-            self._incref()
-        except Exception, e:
-            # the proxy may just be for a manager which has shutdown
-            util.info('incref failed: %s' % e)
-
-    def __reduce__(self):
-        kwds = {}
-        if Popen.thread_is_spawning():
-            kwds['authkey'] = self._authkey
-        
-        if getattr(self, '_isauto', False):
-            kwds['exposed'] = self._exposed_
-            return (RebuildProxy,
-                    (AutoProxy, self._token, self._serializer, kwds))
-        else:
-            return (RebuildProxy,
-                    (type(self), self._token, self._serializer, kwds))
-
-    def __deepcopy__(self, memo):
-        return self._getvalue()
-    
-    def __repr__(self):
-        return '<%s object, typeid %r at %s>' % \
-               (type(self).__name__, self._token.typeid, '0x%x' % id(self))
-
-    def __str__(self):
-        '''
-        Return representation of the referent (or a fall-back if that fails)
-        '''
-        try:
-            return self._callmethod('__repr__')
-        except Exception:
-            return repr(self)[:-1] + "; '__str__()' failed>"
-
-#
-# Function used for unpickling
-#
-
-def RebuildProxy(func, token, serializer, kwds):
-    '''
-    Function used for unpickling proxy objects.
-
-    If possible the shared object is returned, or otherwise a proxy for it.
-    '''
-    server = getattr(current_process(), '_manager_server', None)
-    
-    if server and server.address == token.address:
-        return server.id_to_obj[token.id][0]
-    else:
-        incref = (
-            kwds.pop('incref', True) and
-            not getattr(current_process(), '_inheriting', False)
-            )
-        return func(token, serializer, incref=incref, **kwds)
-
-#
-# Functions to create proxies and proxy types
-#
-
-def MakeProxyType(name, exposed, _cache={}):
-    '''
-    Return a proxy type whose methods are given by `exposed`
-    '''
-    exposed = tuple(exposed)
-    try:
-        return _cache[(name, exposed)]
-    except KeyError:
-        pass
-
-    dic = {}
-
-    for meth in exposed:
-        exec '''def %s(self, *args, **kwds):
-        return self._callmethod(%r, args, kwds)''' % (meth, meth) in dic
-
-    ProxyType = type(name, (BaseProxy,), dic)
-    ProxyType._exposed_ = exposed
-    _cache[(name, exposed)] = ProxyType
-    return ProxyType
-
-
-def AutoProxy(token, serializer, manager=None, authkey=None,
-              exposed=None, incref=True):
-    '''
-    Return an auto-proxy for `token`
-    '''
-    _Client = listener_client[serializer][1]
-    
-    if exposed is None:
-        conn = _Client(token.address, authkey=authkey)
-        try:
-            exposed = dispatch(conn, None, 'get_methods', (token,))
-        finally:
-            conn.close()
-
-    if authkey is None and manager is not None:
-        authkey = manager._authkey
-    if authkey is None:
-        authkey = current_process().get_authkey()
-
-    ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
-    proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
-                      incref=incref)
-    proxy._isauto = True
-    return proxy
-
-#
-# Types/callables which we will register with SyncManager
-#
-
-class Namespace(object):
-    def __init__(self, **kwds):
-        self.__dict__.update(kwds)
-    def __repr__(self):
-        items = self.__dict__.items()
-        temp = []
-        for name, value in items:
-            if not name.startswith('_'):
-                temp.append('%s=%r' % (name, value))
-        temp.sort()
-        return 'Namespace(%s)' % str.join(', ', temp)
-
-class Value(object):
-    def __init__(self, typecode, value, lock=True):
-        self._typecode = typecode
-        self._value = value
-    def get(self):
-        return self._value
-    def set(self, value):
-        self._value = value
-    def __repr__(self):
-        return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
-    value = property(get, set)
-
-def Array(typecode, sequence, lock=True):
-    return array.array(typecode, sequence)
-
-#
-# Proxy types used by SyncManager
-#
-
-class IteratorProxy(BaseProxy):
-    # XXX remove methods for Py3.0 and Py2.6
-    _exposed_ = ('__next__', 'next', 'send', 'throw', 'close')
-    def __iter__(self):
-        return self
-    def __next__(self, *args):
-        return self._callmethod('__next__', args)
-    def next(self, *args):
-        return self._callmethod('next', args)
-    def send(self, *args):
-        return self._callmethod('send', args)
-    def throw(self, *args):
-        return self._callmethod('throw', args)
-    def close(self, *args):
-        return self._callmethod('close', args)
-
-
-class AcquirerProxy(BaseProxy):
-    _exposed_ = ('acquire', 'release')
-    def acquire(self, blocking=True):
-        return self._callmethod('acquire', (blocking,))
-    def release(self):
-        return self._callmethod('release')
-    def __enter__(self):
-        return self._callmethod('acquire')
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        return self._callmethod('release')
-
-
-class ConditionProxy(AcquirerProxy):
-    # XXX will Condition.notifyAll() name be available in Py3.0?
-    _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notifyAll')
-    def wait(self, timeout=None):
-        return self._callmethod('wait', (timeout,))
-    def notify(self):
-        return self._callmethod('notify')
-    def notify_all(self):
-        return self._callmethod('notifyAll')
-
-class EventProxy(BaseProxy):
-    # XXX will Event.isSet name be available in Py3.0?
-    _exposed_ = ('isSet', 'set', 'clear', 'wait')
-    def is_set(self):
-        return self._callmethod('isSet')
-    def set(self):
-        return self._callmethod('set')
-    def clear(self):
-        return self._callmethod('clear')
-    def wait(self, timeout=None):
-        return self._callmethod('wait', (timeout,))
-
-class NamespaceProxy(BaseProxy):
-    _exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
-    def __getattr__(self, key):
-        if key[0] == '_':
-            return object.__getattribute__(self, key)
-        callmethod = object.__getattribute__(self, '_callmethod')
-        return callmethod('__getattribute__', (key,))    
-    def __setattr__(self, key, value):
-        if key[0] == '_':
-            return object.__setattr__(self, key, value)
-        callmethod = object.__getattribute__(self, '_callmethod')
-        return callmethod('__setattr__', (key, value))
-    def __delattr__(self, key):
-        if key[0] == '_':
-            return object.__delattr__(self, key)
-        callmethod = object.__getattribute__(self, '_callmethod')
-        return callmethod('__delattr__', (key,))
-
-    
-class ValueProxy(BaseProxy):
-    _exposed_ = ('get', 'set')
-    def get(self):
-        return self._callmethod('get')
-    def set(self, value):
-        return self._callmethod('set', (value,))
-    value = property(get, set)
-
-
-BaseListProxy = MakeProxyType('BaseListProxy', (
-    '__add__', '__contains__', '__delitem__', '__delslice__',
-    '__getitem__', '__getslice__', '__len__', '__mul__',
-    '__reversed__', '__rmul__', '__setitem__', '__setslice__',
-    'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
-    'reverse', 'sort', '__imul__'
-    ))                  # XXX __getslice__ and __setslice__ unneeded in Py3.0
-class ListProxy(BaseListProxy):
-    def __iadd__(self, value):
-        self._callmethod('extend', (value,))
-        return self
-    def __imul__(self, value):
-        self._callmethod('__imul__', (value,))
-        return self
-
-
-DictProxy = MakeProxyType('DictProxy', (
-    '__contains__', '__delitem__', '__getitem__', '__len__',
-    '__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
-    'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
-    ))
-
-
-ArrayProxy = MakeProxyType('ArrayProxy', (
-    '__len__', '__getitem__', '__setitem__', '__getslice__', '__setslice__'
-    ))                  # XXX __getslice__ and __setslice__ unneeded in Py3.0
-
-
-PoolProxy = MakeProxyType('PoolProxy', (
-    'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
-    'map', 'map_async', 'terminate'
-    ))
-PoolProxy._method_to_typeid_ = {
-    'apply_async': 'AsyncResult',
-    'map_async': 'AsyncResult',
-    'imap': 'Iterator',
-    'imap_unordered': 'Iterator'
-    }
-
-#
-# Definition of SyncManager
-#
-
-class SyncManager(BaseManager):
-    '''
-    Subclass of `BaseManager` which supports a number of shared object types.
-    
-    The types registered are those intended for the synchronization
-    of threads, plus `dict`, `list` and `Namespace`.
-    
-    The `multiprocessing.Manager()` function creates started instances of
-    this class.
-    '''
-
-SyncManager.register('Queue', Queue.Queue)
-SyncManager.register('JoinableQueue', Queue.Queue)
-SyncManager.register('Event', threading.Event, EventProxy)
-SyncManager.register('Lock', threading.Lock, AcquirerProxy)
-SyncManager.register('RLock', threading.RLock, AcquirerProxy)
-SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
-SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
-                     AcquirerProxy)
-SyncManager.register('Condition', threading.Condition, ConditionProxy)
-SyncManager.register('Pool', Pool, PoolProxy)
-SyncManager.register('list', list, ListProxy)
-SyncManager.register('dict', dict, DictProxy)
-SyncManager.register('Value', Value, ValueProxy)
-SyncManager.register('Array', Array, ArrayProxy)
-SyncManager.register('Namespace', Namespace, NamespaceProxy)
-
-# types returned by methods of PoolProxy
-SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
-SyncManager.register('AsyncResult', create_method=False)
+#
+# Module providing the `SyncManager` class for dealing
+# with shared objects
+#
+# multiprocessing/managers.py
+#
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
+#
+
+__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
+
+#
+# Imports
+#
+
+import os
+import sys
+import weakref
+import threading
+import array
+import copyreg
+import queue
+
+from traceback import format_exc
+from multiprocessing import Process, current_process, active_children, Pool, util, connection
+from multiprocessing.process import AuthenticationString
+from multiprocessing.forking import exit, Popen, assert_spawning
+from multiprocessing.util import Finalize, info
+
+try:
+    from cPickle import PicklingError
+except ImportError:
+    from pickle import PicklingError
+
+#
+#
+#
+
+try:
+    bytes
+except NameError:
+    bytes = str                  # XXX not needed in Py2.6 and Py3.0
+
+#
+# Register some things for pickling
+#
+
+def reduce_array(a):
+    return array.array, (a.typecode, a.tostring())
+copyreg.pickle(array.array, reduce_array)
+
+view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
+if view_types[0] is not list:       # XXX only needed in Py3.0
+    def rebuild_as_list(obj):
+        return list, (list(obj),)
+    for view_type in view_types:
+        copyreg.pickle(view_type, rebuild_as_list)
+
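[The two hooks above reduce array.array instances and Py3.0 dict views to
(constructor, args) pairs so they can cross a connection.  A minimal sketch
of the same copyreg idiom, using a hypothetical Point class:

    import copyreg, pickle

    class Point(object):                     # hypothetical example type
        def __init__(self, x, y):
            self.x, self.y = x, y

    def reduce_point(p):
        # pickle stores (callable, args) and calls Point(x, y) on load
        return Point, (p.x, p.y)

    copyreg.pickle(Point, reduce_point)
    p2 = pickle.loads(pickle.dumps(Point(1, 2)))
    assert (p2.x, p2.y) == (1, 2)
]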
+#
+# Type for identifying shared objects
+#
+
+class Token(object):
+    '''
+    Type to uniquely identify a shared object
+    '''
+    __slots__ = ('typeid', 'address', 'id')
+
+    def __init__(self, typeid, address, id):
+        (self.typeid, self.address, self.id) = (typeid, address, id)
+
+    def __getstate__(self):
+        return (self.typeid, self.address, self.id)
+
+    def __setstate__(self, state):
+        (self.typeid, self.address, self.id) = state
+
+    def __repr__(self):
+        return 'Token(typeid=%r, address=%r, id=%r)' % \
+               (self.typeid, self.address, self.id)
+
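[Because Token implements __getstate__/__setstate__ over its __slots__,
tokens can be shipped between processes.  A quick round-trip check (the
address and id here are made up):

    import pickle
    from multiprocessing.managers import Token

    t = Token('dict', ('127.0.0.1', 50000), '1a2b3c')
    t2 = pickle.loads(pickle.dumps(t))
    assert (t2.typeid, t2.address, t2.id) == (t.typeid, t.address, t.id)
]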
+#
+# Function for communication with a manager's server process
+#
+
+def dispatch(c, id, methodname, args=(), kwds={}):
+    '''
+    Send a message to manager using connection `c` and return response
+    '''
+    c.send((id, methodname, args, kwds))
+    kind, result = c.recv()
+    if kind == '#RETURN':
+        return result
+    raise convert_to_error(kind, result)
+
+def convert_to_error(kind, result):
+    if kind == '#ERROR':
+        return result
+    elif kind == '#TRACEBACK':
+        assert type(result) is str
+        return RemoteError(result)
+    elif kind == '#UNSERIALIZABLE':
+        assert type(result) is str
+        return RemoteError('Unserializable message: %s\n' % result)
+    else:
+        return ValueError('Unrecognized message type')
+
+class RemoteError(Exception):
+    def __str__(self):
+        return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
+
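[dispatch() fixes the wire protocol: each request is the 4-tuple
(id, methodname, args, kwds) and each reply is a (kind, result) pair, with
kinds '#RETURN', '#ERROR', '#TRACEBACK' and '#UNSERIALIZABLE' (plus
'#PROXY', handled separately in BaseProxy._callmethod).  A sketch that
exercises it against a fake one-shot server over a plain Pipe:

    import threading
    from multiprocessing.connection import Pipe
    from multiprocessing.managers import dispatch

    server_end, client_end = Pipe()

    def fake_server():
        # read one request and answer it with a '#RETURN' reply
        ident, methodname, args, kwds = server_end.recv()
        server_end.send(('#RETURN', (methodname,) + args))

    threading.Thread(target=fake_server).start()
    print(dispatch(client_end, None, 'echo', ('hello',)))  # ('echo', 'hello')
]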
+#
+# Functions for finding the method names of an object
+#
+
+def all_methods(obj):
+    '''
+    Return a list of names of methods of `obj`
+    '''
+    temp = []
+    for name in dir(obj):
+        func = getattr(obj, name)
+        if hasattr(func, '__call__'):
+            temp.append(name)
+    return temp
+
+def public_methods(obj):
+    '''
+    Return a list of names of methods of `obj` which do not start with '_'
+    '''
+    return [name for name in all_methods(obj) if name[0] != '_']
+
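[public_methods() is what Server.create() falls back on when a typeid was
registered without an explicit exposed list; underscore-prefixed attributes
stay hidden.  For example:

    from multiprocessing.managers import public_methods

    class Counter(object):                 # hypothetical shared type
        def __init__(self):
            self._n = 0                    # leading '_' keeps this private
        def increment(self):
            self._n += 1
        def value(self):
            return self._n

    print(sorted(public_methods(Counter())))   # ['increment', 'value']
]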
+#
+# Server which is run in a process controlled by a manager
+#
+
+class Server(object):
+    '''
+    Server class which runs in a process controlled by a manager object
+    '''
+    public = ['shutdown', 'create', 'accept_connection', 'get_methods',
+              'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
+
+    def __init__(self, registry, address, authkey, serializer):
+        assert isinstance(authkey, bytes)
+        self.registry = registry
+        self.authkey = AuthenticationString(authkey)
+        Listener, Client = listener_client[serializer]
+
+        # do authentication later
+        self.listener = Listener(address=address, backlog=5)
+        self.address = self.listener.address
+
+        self.id_to_obj = {0: (None, ())}
+        self.id_to_refcount = {}
+        self.mutex = threading.RLock()
+        self.stop = 0
+
+    def serve_forever(self):
+        '''
+        Run the server forever
+        '''
+        current_process()._manager_server = self
+        try:
+            try:
+                while 1:
+                    try:
+                        c = self.listener.accept()
+                    except (OSError, IOError):
+                        continue
+                    t = threading.Thread(target=self.handle_request, args=(c,))
+                    t.setDaemon(True)
+                    t.start()
+            except (KeyboardInterrupt, SystemExit):
+                pass
+        finally:
+            self.stop = 999
+            self.listener.close()
+
+    def handle_request(self, c):
+        '''
+        Handle a new connection
+        '''
+        funcname = result = request = None
+        try:
+            connection.deliver_challenge(c, self.authkey)
+            connection.answer_challenge(c, self.authkey)
+            request = c.recv()
+            ignore, funcname, args, kwds = request
+            assert funcname in self.public, '%r unrecognized' % funcname
+            func = getattr(self, funcname)
+        except Exception:
+            msg = ('#TRACEBACK', format_exc())
+        else:
+            try:
+                result = func(c, *args, **kwds)
+            except Exception:
+                msg = ('#TRACEBACK', format_exc())
+            else:
+                msg = ('#RETURN', result)
+        try:
+            c.send(msg)
+        except Exception as e:
+            try:
+                c.send(('#TRACEBACK', format_exc()))
+            except Exception:
+                pass
+            util.info('Failure to send message: %r', msg)
+            util.info(' ... request was %r', request)
+            util.info(' ... exception was %r', e)
+
+        c.close()
+
+    def serve_client(self, conn):
+        '''
+        Handle requests from the proxies in a particular process/thread
+        '''
+        util.debug('starting server thread to service %r',
+                   threading.currentThread().getName())
+
+        recv = conn.recv
+        send = conn.send
+        id_to_obj = self.id_to_obj
+
+        while not self.stop:
+
+            try:
+                methodname = obj = None
+                request = recv()
+                ident, methodname, args, kwds = request
+                obj, exposed, gettypeid = id_to_obj[ident]
+
+                if methodname not in exposed:
+                    raise AttributeError(
+                        'method %r of %r object is not in exposed=%r' %
+                        (methodname, type(obj), exposed)
+                        )
+
+                function = getattr(obj, methodname)
+
+                try:
+                    res = function(*args, **kwds)
+                except Exception as e:
+                    msg = ('#ERROR', e)
+                else:
+                    typeid = gettypeid and gettypeid.get(methodname, None)
+                    if typeid:
+                        rident, rexposed = self.create(conn, typeid, res)
+                        token = Token(typeid, self.address, rident)
+                        msg = ('#PROXY', (rexposed, token))
+                    else:
+                        msg = ('#RETURN', res)
+
+            except AttributeError:
+                if methodname is None:
+                    msg = ('#TRACEBACK', format_exc())
+                else:
+                    try:
+                        fallback_func = self.fallback_mapping[methodname]
+                        result = fallback_func(
+                            self, conn, ident, obj, *args, **kwds
+                            )
+                        msg = ('#RETURN', result)
+                    except Exception:
+                        msg = ('#TRACEBACK', format_exc())
+
+            except EOFError:
+                util.debug('got EOF -- exiting thread serving %r',
+                           threading.currentThread().getName())
+                sys.exit(0)
+
+            except Exception:
+                msg = ('#TRACEBACK', format_exc())
+
+            try:
+                try:
+                    send(msg)
+                except Exception as e:
+                    send(('#UNSERIALIZABLE', repr(msg)))
+            except Exception as e:
+                util.info('exception in thread serving %r',
+                        threading.currentThread().getName())
+                util.info(' ... message was %r', msg)
+                util.info(' ... exception was %r', e)
+                conn.close()
+                sys.exit(1)
+
+    def fallback_getvalue(self, conn, ident, obj):
+        return obj
+
+    def fallback_str(self, conn, ident, obj):
+        return str(obj)
+
+    def fallback_repr(self, conn, ident, obj):
+        return repr(obj)
+
+    fallback_mapping = {
+        '__str__':fallback_str,
+        '__repr__':fallback_repr,
+        '#GETVALUE':fallback_getvalue
+        }
+
+    def dummy(self, c):
+        pass
+
+    def debug_info(self, c):
+        '''
+        Return some info --- useful to spot problems with refcounting
+        '''
+        self.mutex.acquire()
+        try:
+            result = []
+            keys = list(self.id_to_obj.keys())
+            keys.sort()
+            for ident in keys:
+                if ident != 0:
+                    result.append('  %s:       refcount=%s\n    %s' %
+                                  (ident, self.id_to_refcount[ident],
+                                   str(self.id_to_obj[ident][0])[:75]))
+            return '\n'.join(result)
+        finally:
+            self.mutex.release()
+
+    def number_of_objects(self, c):
+        '''
+        Number of shared objects
+        '''
+        return len(self.id_to_obj) - 1      # don't count ident=0
+
+    def shutdown(self, c):
+        '''
+        Shutdown this process
+        '''
+        try:
+            try:
+                util.debug('manager received shutdown message')
+                c.send(('#RETURN', None))
+
+                if sys.stdout != sys.__stdout__:
+                    util.debug('resetting stdout, stderr')
+                    sys.stdout = sys.__stdout__
+                    sys.stderr = sys.__stderr__
+
+                util._run_finalizers(0)
+
+                for p in active_children():
+                    util.debug('terminating a child process of manager')
+                    p.terminate()
+
+                for p in active_children():
+                    util.debug('terminating a child process of manager')
+                    p.join()
+
+                util._run_finalizers()
+                util.info('manager exiting with exitcode 0')
+            except:
+                import traceback
+                traceback.print_exc()
+        finally:
+            exit(0)
+
+    def create(self, c, typeid, *args, **kwds):
+        '''
+        Create a new shared object and return its id
+        '''
+        self.mutex.acquire()
+        try:
+            callable, exposed, method_to_typeid, proxytype = \
+                      self.registry[typeid]
+
+            if callable is None:
+                assert len(args) == 1 and not kwds
+                obj = args[0]
+            else:
+                obj = callable(*args, **kwds)
+
+            if exposed is None:
+                exposed = public_methods(obj)
+            if method_to_typeid is not None:
+                assert type(method_to_typeid) is dict
+                exposed = list(exposed) + list(method_to_typeid)
+
+            ident = '%x' % id(obj)  # convert to string because xmlrpclib
+                                    # only has 32 bit signed integers
+            util.debug('%r callable returned object with id %r', typeid, ident)
+
+            self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
+            if ident not in self.id_to_refcount:
+                self.id_to_refcount[ident] = None
+            return ident, tuple(exposed)
+        finally:
+            self.mutex.release()
+
+    def get_methods(self, c, token):
+        '''
+        Return the methods of the shared object indicated by token
+        '''
+        return tuple(self.id_to_obj[token.id][1])
+
+    def accept_connection(self, c, name):
+        '''
+        Spawn a new thread to serve this connection
+        '''
+        threading.currentThread().setName(name)
+        c.send(('#RETURN', None))
+        self.serve_client(c)
+
+    def incref(self, c, ident):
+        self.mutex.acquire()
+        try:
+            try:
+                self.id_to_refcount[ident] += 1
+            except TypeError:
+                assert self.id_to_refcount[ident] is None
+                self.id_to_refcount[ident] = 1
+        finally:
+            self.mutex.release()
+
+    def decref(self, c, ident):
+        self.mutex.acquire()
+        try:
+            assert self.id_to_refcount[ident] >= 1
+            self.id_to_refcount[ident] -= 1
+            if self.id_to_refcount[ident] == 0:
+                del self.id_to_obj[ident], self.id_to_refcount[ident]
+                util.debug('disposing of obj with id %r', ident)
+        finally:
+            self.mutex.release()
+
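[Server is normally spawned behind the scenes by BaseManager.start(); for a
long-lived foreground server the equivalent by hand is roughly the
following (class name and address are illustrative):

    from multiprocessing.managers import BaseManager

    class MyManager(BaseManager):          # hypothetical subclass
        pass
    MyManager.register('list', list)

    m = MyManager(address=('127.0.0.1', 50000), authkey=b'secret')
    server = m.get_server()                # a Server instance as defined above
    server.serve_forever()                 # blocks; clients connect() to it
]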
+#
+# Class to represent state of a manager
+#
+
+class State(object):
+    __slots__ = ['value']
+    INITIAL = 0
+    STARTED = 1
+    SHUTDOWN = 2
+
+#
+# Mapping from serializer name to Listener and Client types
+#
+
+listener_client = {
+    'pickle' : (connection.Listener, connection.Client),
+    'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
+    }
+
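[The serializer argument is just a key into this table, so switching a
manager from binary pickle framing to XML-RPC framing is a one-word change
(sketch):

    from multiprocessing.managers import BaseManager

    # same manager, but connections use XmlListener/XmlClient under the hood
    m = BaseManager(address=('127.0.0.1', 50001), authkey=b'secret',
                    serializer='xmlrpclib')
]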
+#
+# Definition of BaseManager
+#
+
+class BaseManager(object):
+    '''
+    Base class for managers
+    '''
+    _registry = {}
+    _Server = Server
+
+    def __init__(self, address=None, authkey=None, serializer='pickle'):
+        if authkey is None:
+            authkey = current_process().get_authkey()
+        self._address = address     # XXX not final address if eg ('', 0)
+        self._authkey = AuthenticationString(authkey)
+        self._state = State()
+        self._state.value = State.INITIAL
+        self._serializer = serializer
+        self._Listener, self._Client = listener_client[serializer]
+
+    def __reduce__(self):
+        return type(self).from_address, \
+               (self._address, self._authkey, self._serializer)
+
+    def get_server(self):
+        '''
+        Return server object with serve_forever() method and address attribute
+        '''
+        assert self._state.value == State.INITIAL
+        return Server(self._registry, self._address,
+                      self._authkey, self._serializer)
+
+    def connect(self):
+        '''
+        Connect manager object to the server process
+        '''
+        Listener, Client = listener_client[self._serializer]
+        conn = Client(self._address, authkey=self._authkey)
+        dispatch(conn, None, 'dummy')
+        self._state.value = State.STARTED
+
+    def start(self):
+        '''
+        Spawn a server process for this manager object
+        '''
+        assert self._state.value == State.INITIAL
+
+        # pipe over which we will retrieve address of server
+        reader, writer = connection.Pipe(duplex=False)
+
+        # spawn process which runs a server
+        self._process = Process(
+            target=type(self)._run_server,
+            args=(self._registry, self._address, self._authkey,
+                  self._serializer, writer),
+            )
+        ident = ':'.join(str(i) for i in self._process._identity)
+        self._process.set_name(type(self).__name__  + '-' + ident)
+        self._process.start()
+
+        # get address of server
+        writer.close()
+        self._address = reader.recv()
+        reader.close()
+
+        # register a finalizer
+        self._state.value = State.STARTED
+        self.shutdown = util.Finalize(
+            self, type(self)._finalize_manager,
+            args=(self._process, self._address, self._authkey,
+                  self._state, self._Client),
+            exitpriority=0
+            )
+
+    @classmethod
+    def _run_server(cls, registry, address, authkey, serializer, writer):
+        '''
+        Create a server, report its address and run it
+        '''
+        # create server
+        server = cls._Server(registry, address, authkey, serializer)
+
+        # inform parent process of the server's address
+        writer.send(server.address)
+        writer.close()
+
+        # run the manager
+        util.info('manager serving at %r', server.address)
+        server.serve_forever()
+
+    def _create(self, typeid, *args, **kwds):
+        '''
+        Create a new shared object; return the token and exposed tuple
+        '''
+        assert self._state.value == State.STARTED, 'server not yet started'
+        conn = self._Client(self._address, authkey=self._authkey)
+        try:
+            id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
+        finally:
+            conn.close()
+        return Token(typeid, self._address, id), exposed
+
+    def join(self, timeout=None):
+        '''
+        Join the manager process (if it has been spawned)
+        '''
+        self._process.join(timeout)
+
+    def _debug_info(self):
+        '''
+        Return some info about the server's shared objects and connections
+        '''
+        conn = self._Client(self._address, authkey=self._authkey)
+        try:
+            return dispatch(conn, None, 'debug_info')
+        finally:
+            conn.close()
+
+    def _number_of_objects(self):
+        '''
+        Return the number of shared objects
+        '''
+        conn = self._Client(self._address, authkey=self._authkey)
+        try:
+            return dispatch(conn, None, 'number_of_objects')
+        finally:
+            conn.close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.shutdown()
+
+    @staticmethod
+    def _finalize_manager(process, address, authkey, state, _Client):
+        '''
+        Shutdown the manager process; will be registered as a finalizer
+        '''
+        if process.is_alive():
+            util.info('sending shutdown message to manager')
+            try:
+                conn = _Client(address, authkey=authkey)
+                try:
+                    dispatch(conn, None, 'shutdown')
+                finally:
+                    conn.close()
+            except Exception:
+                pass
+
+            process.join(timeout=0.2)
+            if process.is_alive():
+                util.info('manager still alive')
+                if hasattr(process, 'terminate'):
+                    util.info('trying to `terminate()` manager process')
+                    process.terminate()
+                    process.join(timeout=0.1)
+                    if process.is_alive():
+                        util.info('manager still alive after terminate')
+
+        state.value = State.SHUTDOWN
+        try:
+            del BaseProxy._address_to_local[address]
+        except KeyError:
+            pass
+
+    address = property(lambda self: self._address)
+
+    @classmethod
+    def register(cls, typeid, callable=None, proxytype=None, exposed=None,
+                 method_to_typeid=None, create_method=True):
+        '''
+        Register a typeid with the manager type
+        '''
+        if '_registry' not in cls.__dict__:
+            cls._registry = cls._registry.copy()
+
+        if proxytype is None:
+            proxytype = AutoProxy
+
+        exposed = exposed or getattr(proxytype, '_exposed_', None)
+
+        method_to_typeid = method_to_typeid or \
+                           getattr(proxytype, '_method_to_typeid_', None)
+
+        if method_to_typeid:
+            for key, value in list(method_to_typeid.items()):
+                assert type(key) is str, '%r is not a string' % key
+                assert type(value) is str, '%r is not a string' % value
+
+        cls._registry[typeid] = (
+            callable, exposed, method_to_typeid, proxytype
+            )
+
+        if create_method:
+            def temp(self, *args, **kwds):
+                util.debug('requesting creation of a shared %r object', typeid)
+                token, exp = self._create(typeid, *args, **kwds)
+                proxy = proxytype(
+                    token, self._serializer, manager=self,
+                    authkey=self._authkey, exposed=exp
+                    )
+                return proxy
+            temp.__name__ = typeid
+            setattr(cls, typeid, temp)
+
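[Putting BaseManager together: register() records the typeid and, because
create_method defaults to True, also synthesizes a same-named factory
method that calls _create() and wraps the returned token in a proxy.
Typical end-to-end use (subclass and typeid names are made up):

    import queue
    from multiprocessing.managers import BaseManager

    class QueueManager(BaseManager):       # hypothetical subclass
        pass
    QueueManager.register('make_queue', callable=queue.Queue)

    if __name__ == '__main__':
        m = QueueManager(address=('127.0.0.1', 50002), authkey=b'abc')
        m.start()                          # spawns the server process
        q = m.make_queue()                 # AutoProxy around a new Queue
        q.put('hello')
        print(q.get())                     # 'hello'
        m.shutdown()                       # the Finalize installed by start()
]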
+#
+# Subclass of set which gets cleared after a fork
+#
+
+class ProcessLocalSet(set):
+    def __init__(self):
+        util.register_after_fork(self, lambda obj: obj.clear())
+    def __reduce__(self):
+        return type(self), ()
+
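[The after-fork hook means a child process starts with an empty copy of the
set, so reference ids owned by the parent are never decref'ed again from
the child.  For instance, on a fork-based platform:

    import multiprocessing
    from multiprocessing.managers import ProcessLocalSet

    s = ProcessLocalSet()
    s.add('deadbeef')

    def show():
        print(len(s))                      # 0 -- cleared by the fork hook

    if __name__ == '__main__':
        p = multiprocessing.Process(target=show)
        p.start(); p.join()
        print(len(s))                      # still 1 in the parent
]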
+#
+# Definition of BaseProxy
+#
+
+class BaseProxy(object):
+    '''
+    A base for proxies of shared objects
+    '''
+    _address_to_local = {}
+    _mutex = util.ForkAwareThreadLock()
+
+    def __init__(self, token, serializer, manager=None,
+                 authkey=None, exposed=None, incref=True):
+        BaseProxy._mutex.acquire()
+        try:
+            tls_idset = BaseProxy._address_to_local.get(token.address, None)
+            if tls_idset is None:
+                tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
+                BaseProxy._address_to_local[token.address] = tls_idset
+        finally:
+            BaseProxy._mutex.release()
+
+        # self._tls is used to record the connection used by this
+        # thread to communicate with the manager at token.address
+        self._tls = tls_idset[0]
+
+        # self._idset is used to record the identities of all shared
+        # objects for which the current process owns references and
+        # which are in the manager at token.address
+        self._idset = tls_idset[1]
+
+        self._token = token
+        self._id = self._token.id
+        self._manager = manager
+        self._serializer = serializer
+        self._Client = listener_client[serializer][1]
+
+        if authkey is not None:
+            self._authkey = AuthenticationString(authkey)
+        elif self._manager is not None:
+            self._authkey = self._manager._authkey
+        else:
+            self._authkey = current_process().get_authkey()
+
+        if incref:
+            self._incref()
+
+        util.register_after_fork(self, BaseProxy._after_fork)
+
+    def _connect(self):
+        util.debug('making connection to manager')
+        name = current_process().get_name()
+        if threading.currentThread().getName() != 'MainThread':
+            name += '|' + threading.currentThread().getName()
+        conn = self._Client(self._token.address, authkey=self._authkey)
+        dispatch(conn, None, 'accept_connection', (name,))
+        self._tls.connection = conn
+
+    def _callmethod(self, methodname, args=(), kwds={}):
+        '''
+        Try to call a method of the referent and return a copy of the result
+        '''
+        try:
+            conn = self._tls.connection
+        except AttributeError:
+            util.debug('thread %r does not own a connection',
+                       threading.currentThread().getName())
+            self._connect()
+            conn = self._tls.connection
+
+        conn.send((self._id, methodname, args, kwds))
+        kind, result = conn.recv()
+
+        if kind == '#RETURN':
+            return result
+        elif kind == '#PROXY':
+            exposed, token = result
+            proxytype = self._manager._registry[token.typeid][-1]
+            return proxytype(
+                token, self._serializer, manager=self._manager,
+                authkey=self._authkey, exposed=exposed
+                )
+        raise convert_to_error(kind, result)
+
+    def _getvalue(self):
+        '''
+        Get a copy of the value of the referent
+        '''
+        return self._callmethod('#GETVALUE')
+
+    def _incref(self):
+        conn = self._Client(self._token.address, authkey=self._authkey)
+        dispatch(conn, None, 'incref', (self._id,))
+        util.debug('INCREF %r', self._token.id)
+
+        self._idset.add(self._id)
+
+        state = self._manager and self._manager._state
+
+        self._close = util.Finalize(
+            self, BaseProxy._decref,
+            args=(self._token, self._authkey, state,
+                  self._tls, self._idset, self._Client),
+            exitpriority=10
+            )
+
+    @staticmethod
+    def _decref(token, authkey, state, tls, idset, _Client):
+        idset.discard(token.id)
+
+        # check whether manager is still alive
+        if state is None or state.value == State.STARTED:
+            # tell manager this process no longer cares about referent
+            try:
+                util.debug('DECREF %r', token.id)
+                conn = _Client(token.address, authkey=authkey)
+                dispatch(conn, None, 'decref', (token.id,))
+            except Exception as e:
+                util.debug('... decref failed %s', e)
+
+        else:
+            util.debug('DECREF %r -- manager already shutdown', token.id)
+
+        # check whether we can close this thread's connection because
+        # the process owns no more references to objects for this manager
+        if not idset and hasattr(tls, 'connection'):
+            util.debug('thread %r has no more proxies so closing conn',
+                       threading.currentThread().getName())
+            tls.connection.close()
+            del tls.connection
+
+    def _after_fork(self):
+        self._manager = None
+        try:
+            self._incref()
+        except Exception as e:
+            # the proxy may just be for a manager which has shutdown
+            util.info('incref failed: %s' % e)
+
+    def __reduce__(self):
+        kwds = {}
+        if Popen.thread_is_spawning():
+            kwds['authkey'] = self._authkey
+
+        if getattr(self, '_isauto', False):
+            kwds['exposed'] = self._exposed_
+            return (RebuildProxy,
+                    (AutoProxy, self._token, self._serializer, kwds))
+        else:
+            return (RebuildProxy,
+                    (type(self), self._token, self._serializer, kwds))
+
+    def __deepcopy__(self, memo):
+        return self._getvalue()
+
+    def __repr__(self):
+        return '<%s object, typeid %r at %s>' % \
+               (type(self).__name__, self._token.typeid, '0x%x' % id(self))
+
+    def __str__(self):
+        '''
+        Return representation of the referent (or a fall-back if that fails)
+        '''
+        try:
+            return self._callmethod('__repr__')
+        except Exception:
+            return repr(self)[:-1] + "; '__str__()' failed>"
+
+#
+# Function used for unpickling
+#
+
+def RebuildProxy(func, token, serializer, kwds):
+    '''
+    Function used for unpickling proxy objects.
+
+    If possible the shared object is returned, or otherwise a proxy for it.
+    '''
+    server = getattr(current_process(), '_manager_server', None)
+
+    if server and server.address == token.address:
+        return server.id_to_obj[token.id][0]
+    else:
+        incref = (
+            kwds.pop('incref', True) and
+            not getattr(current_process(), '_inheriting', False)
+            )
+        return func(token, serializer, incref=incref, **kwds)
+
+#
+# Functions to create proxies and proxy types
+#
+
+def MakeProxyType(name, exposed, _cache={}):
+    '''
+    Return a proxy type whose methods are given by `exposed`
+    '''
+    exposed = tuple(exposed)
+    try:
+        return _cache[(name, exposed)]
+    except KeyError:
+        pass
+
+    dic = {}
+
+    for meth in exposed:
+        exec('''def %s(self, *args, **kwds):
+        return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)
+
+    ProxyType = type(name, (BaseProxy,), dic)
+    ProxyType._exposed_ = exposed
+    _cache[(name, exposed)] = ProxyType
+    return ProxyType
+
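
Since `MakeProxyType` builds each method with `exec`, the generated class is
just a `BaseProxy` subclass that forwards every exposed name to `_callmethod`.
As an illustrative sketch (the `CounterProxy` name and its methods are
hypothetical), a call such as `MakeProxyType('CounterProxy', ('increment',
'value'))` yields roughly:

    class CounterProxy(BaseProxy):       # assumes BaseProxy defined above
        _exposed_ = ('increment', 'value')
        def increment(self, *args, **kwds):
            return self._callmethod('increment', args, kwds)
        def value(self, *args, **kwds):
            return self._callmethod('value', args, kwds)
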
+
+def AutoProxy(token, serializer, manager=None, authkey=None,
+              exposed=None, incref=True):
+    '''
+    Return an auto-proxy for `token`
+    '''
+    _Client = listener_client[serializer][1]
+
+    if exposed is None:
+        conn = _Client(token.address, authkey=authkey)
+        try:
+            exposed = dispatch(conn, None, 'get_methods', (token,))
+        finally:
+            conn.close()
+
+    if authkey is None and manager is not None:
+        authkey = manager._authkey
+    if authkey is None:
+        authkey = current_process().get_authkey()
+
+    ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
+    proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
+                      incref=incref)
+    proxy._isauto = True
+    return proxy
+
+#
+# Types/callables which we will register with SyncManager
+#
+
+class Namespace(object):
+    def __init__(self, **kwds):
+        self.__dict__.update(kwds)
+    def __repr__(self):
+        items = list(self.__dict__.items())
+        temp = []
+        for name, value in items:
+            if not name.startswith('_'):
+                temp.append('%s=%r' % (name, value))
+        temp.sort()
+        return 'Namespace(%s)' % ', '.join(temp)
+
+class Value(object):
+    def __init__(self, typecode, value, lock=True):
+        self._typecode = typecode
+        self._value = value
+    def get(self):
+        return self._value
+    def set(self, value):
+        self._value = value
+    def __repr__(self):
+        return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
+    value = property(get, set)
+
+def Array(typecode, sequence, lock=True):
+    return array.array(typecode, sequence)
+
+#
+# Proxy types used by SyncManager
+#
+
+class IteratorProxy(BaseProxy):
+    # XXX remove methods for Py3.0 and Py2.6
+    _exposed_ = ('__next__', 'next', 'send', 'throw', 'close')
+    def __iter__(self):
+        return self
+    def __next__(self, *args):
+        return self._callmethod('__next__', args)
+    def next(self, *args):
+        return self._callmethod('next', args)
+    def send(self, *args):
+        return self._callmethod('send', args)
+    def throw(self, *args):
+        return self._callmethod('throw', args)
+    def close(self, *args):
+        return self._callmethod('close', args)
+
+
+class AcquirerProxy(BaseProxy):
+    _exposed_ = ('acquire', 'release')
+    def acquire(self, blocking=True):
+        return self._callmethod('acquire', (blocking,))
+    def release(self):
+        return self._callmethod('release')
+    def __enter__(self):
+        return self._callmethod('acquire')
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        return self._callmethod('release')
+
+
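
Because `__enter__` and `__exit__` forward to the server's `acquire()` and
`release()`, a proxied lock can be used as a context manager from any process.
A minimal usage sketch (assuming `Manager()` as registered later in this file):

    from multiprocessing import Manager

    manager = Manager()        # started SyncManager
    lock = manager.Lock()      # AcquirerProxy around a threading.Lock in the server
    with lock:                 # __enter__ -> _callmethod('acquire')
        pass                   # critical section; __exit__ -> _callmethod('release')
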
+class ConditionProxy(AcquirerProxy):
+    # XXX will Condition.notifyAll() name be available in Py3.0?
+    _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notifyAll')
+    def wait(self, timeout=None):
+        return self._callmethod('wait', (timeout,))
+    def notify(self):
+        return self._callmethod('notify')
+    def notify_all(self):
+        return self._callmethod('notifyAll')
+
+class EventProxy(BaseProxy):
+    # XXX will Event.isSet name be available in Py3.0?
+    _exposed_ = ('isSet', 'set', 'clear', 'wait')
+    def is_set(self):
+        return self._callmethod('isSet')
+    def set(self):
+        return self._callmethod('set')
+    def clear(self):
+        return self._callmethod('clear')
+    def wait(self, timeout=None):
+        return self._callmethod('wait', (timeout,))
+
+class NamespaceProxy(BaseProxy):
+    _exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
+    def __getattr__(self, key):
+        if key[0] == '_':
+            return object.__getattribute__(self, key)
+        callmethod = object.__getattribute__(self, '_callmethod')
+        return callmethod('__getattribute__', (key,))
+    def __setattr__(self, key, value):
+        if key[0] == '_':
+            return object.__setattr__(self, key, value)
+        callmethod = object.__getattribute__(self, '_callmethod')
+        return callmethod('__setattr__', (key, value))
+    def __delattr__(self, key):
+        if key[0] == '_':
+            return object.__delattr__(self, key)
+        callmethod = object.__getattribute__(self, '_callmethod')
+        return callmethod('__delattr__', (key,))
+
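
Only names that do not start with an underscore are forwarded, so
proxy-internal attributes like `_token` stay local while everything else
round-trips through the manager. A short sketch (assuming `manager` is a
started `SyncManager`):

    ns = manager.Namespace()   # NamespaceProxy
    ns.x = 10                  # _callmethod('__setattr__', ('x', 10))
    print(ns.x)                # _callmethod('__getattribute__', ('x',))
    del ns.x                   # _callmethod('__delattr__', ('x',))
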
+
+class ValueProxy(BaseProxy):
+    _exposed_ = ('get', 'set')
+    def get(self):
+        return self._callmethod('get')
+    def set(self, value):
+        return self._callmethod('set', (value,))
+    value = property(get, set)
+
+
+BaseListProxy = MakeProxyType('BaseListProxy', (
+    '__add__', '__contains__', '__delitem__', '__delslice__',
+    '__getitem__', '__getslice__', '__len__', '__mul__',
+    '__reversed__', '__rmul__', '__setitem__', '__setslice__',
+    'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
+    'reverse', 'sort', '__imul__'
+    ))                  # XXX __getslice__ and __setslice__ unneeded in Py3.0
+class ListProxy(BaseListProxy):
+    def __iadd__(self, value):
+        self._callmethod('extend', (value,))
+        return self
+    def __imul__(self, value):
+        self._callmethod('__imul__', (value,))
+        return self
+
+
+DictProxy = MakeProxyType('DictProxy', (
+    '__contains__', '__delitem__', '__getitem__', '__len__',
+    '__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
+    'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
+    ))
+
+
+ArrayProxy = MakeProxyType('ArrayProxy', (
+    '__len__', '__getitem__', '__setitem__', '__getslice__', '__setslice__'
+    ))                  # XXX __getslice__ and __setslice__ unneeded in Py3.0
+
+
+PoolProxy = MakeProxyType('PoolProxy', (
+    'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
+    'map', 'map_async', 'terminate'
+    ))
+PoolProxy._method_to_typeid_ = {
+    'apply_async': 'AsyncResult',
+    'map_async': 'AsyncResult',
+    'imap': 'Iterator',
+    'imap_unordered': 'Iterator'
+    }
+
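
`_method_to_typeid_` tells the server that these four calls return new shared
objects rather than copyable values, so `_callmethod` receives a '#PROXY'
reply and hands back a proxy instead of a plain result. An illustrative
sketch:

    from multiprocessing import Manager

    manager = Manager()
    pool = manager.Pool(2)                  # PoolProxy
    res = pool.apply_async(abs, (-1,))      # '#PROXY' reply -> AsyncResult proxy
    print(res.get())                        # 1, fetched through the manager
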
+#
+# Definition of SyncManager
+#
+
+class SyncManager(BaseManager):
+    '''
+    Subclass of `BaseManager` which supports a number of shared object types.
+
+    The types registered are those intended for the synchronization
+    of threads, plus `dict`, `list` and `Namespace`.
+
+    The `multiprocessing.Manager()` function creates started instances of
+    this class.
+    '''
+
+SyncManager.register('Queue', queue.Queue)
+SyncManager.register('JoinableQueue', queue.Queue)
+SyncManager.register('Event', threading.Event, EventProxy)
+SyncManager.register('Lock', threading.Lock, AcquirerProxy)
+SyncManager.register('RLock', threading.RLock, AcquirerProxy)
+SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
+SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
+                     AcquirerProxy)
+SyncManager.register('Condition', threading.Condition, ConditionProxy)
+SyncManager.register('Pool', Pool, PoolProxy)
+SyncManager.register('list', list, ListProxy)
+SyncManager.register('dict', dict, DictProxy)
+SyncManager.register('Value', Value, ValueProxy)
+SyncManager.register('Array', Array, ArrayProxy)
+SyncManager.register('Namespace', Namespace, NamespaceProxy)
+
+# types returned by methods of PoolProxy
+SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
+SyncManager.register('AsyncResult', create_method=False)
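
Each `register()` call above gives a started manager a factory method of the
same name that returns the matching proxy type. A minimal sketch:

    from multiprocessing import Manager

    manager = Manager()              # started SyncManager
    d = manager.dict()               # DictProxy
    l = manager.list(range(3))       # ListProxy
    d['status'] = 'ok'
    l.append(3)
    print(d.get('status'), len(l))   # 'ok' 4, fetched from the server
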

Modified: python/branches/py3k/Lib/multiprocessing/pool.py
==============================================================================
--- /python/trunk/Lib/multiprocessing/pool.py	(original)
+++ python/branches/py3k/Lib/multiprocessing/pool.py	Wed Jun 11 18:44:04 2008
@@ -1,596 +1,596 @@
-#
-# Module providing the `Pool` class for managing a process pool
-#
-# multiprocessing/pool.py
-#
-# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
-#
-
-__all__ = ['Pool']
-
-#
-# Imports
-#
-
-import threading
-import Queue
-import itertools
-import collections
-import time
-
-from multiprocessing import Process, cpu_count, TimeoutError
-from multiprocessing.util import Finalize, debug
-
-#
-# Constants representing the state of a pool
-#
-
-RUN = 0
-CLOSE = 1
-TERMINATE = 2
-
-#
-# Miscellaneous
-#
-
-job_counter = itertools.count()
-
-def mapstar(args):
-    return map(*args)
-
-#
-# Code run by worker processes
-#
-
-def worker(inqueue, outqueue, initializer=None, initargs=()):
-    put = outqueue.put
-    get = inqueue.get
-    if hasattr(inqueue, '_writer'):
-        inqueue._writer.close()
-        outqueue._reader.close()
-
-    if initializer is not None:
-        initializer(*initargs)
-
-    while 1:
-        try:
-            task = get()
-        except (EOFError, IOError):
-            debug('worker got EOFError or IOError -- exiting')
-            break
-        
-        if task is None:
-            debug('worker got sentinel -- exiting')
-            break
-            
-        job, i, func, args, kwds = task
-        try:
-            result = (True, func(*args, **kwds))
-        except Exception, e:
-            result = (False, e)
-        put((job, i, result))
-    
-#
-# Class representing a process pool
-#
-
-class Pool(object):
-    '''
-    Class which supports an async version of the `apply()` builtin
-    '''
-    Process = Process
-
-    def __init__(self, processes=None, initializer=None, initargs=()):
-        self._setup_queues()
-        self._taskqueue = Queue.Queue()
-        self._cache = {}
-        self._state = RUN
-
-        if processes is None:
-            try:
-                processes = cpu_count()
-            except NotImplementedError:
-                processes = 1
-            
-        self._pool = []
-        for i in range(processes):
-            w = self.Process(
-                target=worker,
-                args=(self._inqueue, self._outqueue, initializer, initargs)
-                )
-            self._pool.append(w)
-            w.set_name(w.get_name().replace('Process', 'PoolWorker'))
-            w.set_daemon(True)
-            w.start()
-            
-        self._task_handler = threading.Thread(
-            target=Pool._handle_tasks,
-            args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
-            )
-        self._task_handler.setDaemon(True)
-        self._task_handler._state = RUN
-        self._task_handler.start()
-
-        self._result_handler = threading.Thread(
-            target=Pool._handle_results,
-            args=(self._outqueue, self._quick_get, self._cache)
-            )
-        self._result_handler.setDaemon(True)
-        self._result_handler._state = RUN
-        self._result_handler.start()
-
-        self._terminate = Finalize(
-            self, self._terminate_pool,
-            args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
-                  self._task_handler, self._result_handler, self._cache),
-            exitpriority=15
-            )
-
-    def _setup_queues(self):
-        from .queues import SimpleQueue
-        self._inqueue = SimpleQueue()
-        self._outqueue = SimpleQueue()
-        self._quick_put = self._inqueue._writer.send
-        self._quick_get = self._outqueue._reader.recv
-        
-    def apply(self, func, args=(), kwds={}):
-        '''
-        Equivalent of `apply()` builtin
-        '''
-        assert self._state == RUN
-        return self.apply_async(func, args, kwds).get()
-
-    def map(self, func, iterable, chunksize=None):
-        '''
-        Equivalent of `map()` builtin
-        '''
-        assert self._state == RUN
-        return self.map_async(func, iterable, chunksize).get()
-
-    def imap(self, func, iterable, chunksize=1):
-        '''
-        Equivalent of `itertools.imap()` -- can be MUCH slower than `Pool.map()`
-        '''
-        assert self._state == RUN
-        if chunksize == 1:
-            result = IMapIterator(self._cache)
-            self._taskqueue.put((((result._job, i, func, (x,), {})
-                         for i, x in enumerate(iterable)), result._set_length))
-            return result
-        else:
-            assert chunksize > 1
-            task_batches = Pool._get_tasks(func, iterable, chunksize)
-            result = IMapIterator(self._cache)
-            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
-                     for i, x in enumerate(task_batches)), result._set_length))
-            return (item for chunk in result for item in chunk)
-
-    def imap_unordered(self, func, iterable, chunksize=1):
-        '''
-        Like `imap()` method but ordering of results is arbitrary
-        '''
-        assert self._state == RUN
-        if chunksize == 1:
-            result = IMapUnorderedIterator(self._cache)
-            self._taskqueue.put((((result._job, i, func, (x,), {})
-                         for i, x in enumerate(iterable)), result._set_length))
-            return result
-        else:
-            assert chunksize > 1
-            task_batches = Pool._get_tasks(func, iterable, chunksize)
-            result = IMapUnorderedIterator(self._cache)
-            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
-                     for i, x in enumerate(task_batches)), result._set_length))
-            return (item for chunk in result for item in chunk)
-            
-    def apply_async(self, func, args=(), kwds={}, callback=None):
-        '''
-        Asynchronous equivalent of `apply()` builtin
-        '''
-        assert self._state == RUN
-        result = ApplyResult(self._cache, callback)
-        self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
-        return result
-
-    def map_async(self, func, iterable, chunksize=None, callback=None):
-        '''
-        Asynchronous equivalent of `map()` builtin
-        '''
-        assert self._state == RUN
-        if not hasattr(iterable, '__len__'):
-            iterable = list(iterable)
-        
-        if chunksize is None:
-            chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
-            if extra:
-                chunksize += 1
-                
-        task_batches = Pool._get_tasks(func, iterable, chunksize)
-        result = MapResult(self._cache, chunksize, len(iterable), callback)
-        self._taskqueue.put((((result._job, i, mapstar, (x,), {})
-                              for i, x in enumerate(task_batches)), None))
-        return result
-
-    @staticmethod
-    def _handle_tasks(taskqueue, put, outqueue, pool):
-        thread = threading.currentThread()
-
-        for taskseq, set_length in iter(taskqueue.get, None):
-            i = -1
-            for i, task in enumerate(taskseq):
-                if thread._state:
-                    debug('task handler found thread._state != RUN')
-                    break
-                try:
-                    put(task)
-                except IOError:
-                    debug('could not put task on queue')
-                    break
-            else:
-                if set_length:
-                    debug('doing set_length()')
-                    set_length(i+1)
-                continue
-            break
-        else:
-            debug('task handler got sentinel')
-            
-
-        try:
-            # tell result handler to finish when cache is empty
-            debug('task handler sending sentinel to result handler')
-            outqueue.put(None)
-            
-            # tell workers there is no more work
-            debug('task handler sending sentinel to workers')
-            for p in pool:
-                put(None)
-        except IOError:
-            debug('task handler got IOError when sending sentinels')
-
-        debug('task handler exiting')
-
-    @staticmethod
-    def _handle_results(outqueue, get, cache):
-        thread = threading.currentThread()
-
-        while 1:
-            try:
-                task = get()
-            except (IOError, EOFError):
-                debug('result handler got EOFError/IOError -- exiting')
-                return
-            
-            if thread._state:
-                assert thread._state == TERMINATE
-                debug('result handler found thread._state=TERMINATE')
-                break
-            
-            if task is None:
-                debug('result handler got sentinel')
-                break
-
-            job, i, obj = task
-            try:
-                cache[job]._set(i, obj)
-            except KeyError:
-                pass
-
-        while cache and thread._state != TERMINATE:
-            try:
-                task = get()
-            except (IOError, EOFError):
-                debug('result handler got EOFError/IOError -- exiting')
-                return
-
-            if task is None:
-                debug('result handler ignoring extra sentinel')
-                continue
-            job, i, obj = task
-            try:
-                cache[job]._set(i, obj)
-            except KeyError:
-                pass
-
-        if hasattr(outqueue, '_reader'):
-            debug('ensuring that outqueue is not full')
-            # If we don't make room available in outqueue then
-            # attempts to add the sentinel (None) to outqueue may
-            # block.  There is guaranteed to be no more than 2 sentinels.
-            try:
-                for i in range(10):
-                    if not outqueue._reader.poll():
-                        break
-                    get()
-            except (IOError, EOFError):
-                pass
-
-        debug('result handler exiting: len(cache)=%s, thread._state=%s',
-              len(cache), thread._state)
-
-    @staticmethod
-    def _get_tasks(func, it, size):
-        it = iter(it)
-        while 1:
-            x = tuple(itertools.islice(it, size))
-            if not x:
-                return
-            yield (func, x)
-
-    def __reduce__(self):
-        raise NotImplementedError(
-              'pool objects cannot be passed between processes or pickled'
-              )
-    
-    def close(self):
-        debug('closing pool')
-        if self._state == RUN:
-            self._state = CLOSE
-            self._taskqueue.put(None)
-
-    def terminate(self):
-        debug('terminating pool')
-        self._state = TERMINATE
-        self._terminate()
-
-    def join(self):
-        debug('joining pool')
-        assert self._state in (CLOSE, TERMINATE)
-        self._task_handler.join()
-        self._result_handler.join()
-        for p in self._pool:
-            p.join()
-
-    @staticmethod
-    def _help_stuff_finish(inqueue, task_handler, size):
-        # task_handler may be blocked trying to put items on inqueue
-        debug('removing tasks from inqueue until task handler finished')
-        inqueue._rlock.acquire()
-        while task_handler.isAlive() and inqueue._reader.poll():
-            inqueue._reader.recv()
-            time.sleep(0)
-
-    @classmethod
-    def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
-                        task_handler, result_handler, cache):
-        # this is guaranteed to only be called once
-        debug('finalizing pool')
-        
-        task_handler._state = TERMINATE
-        taskqueue.put(None)                 # sentinel
-
-        debug('helping task handler/workers to finish')
-        cls._help_stuff_finish(inqueue, task_handler, len(pool))
-
-        assert result_handler.isAlive() or len(cache) == 0
-        
-        result_handler._state = TERMINATE
-        outqueue.put(None)                  # sentinel
-
-        if pool and hasattr(pool[0], 'terminate'):
-            debug('terminating workers')
-            for p in pool:
-                p.terminate()
-
-        debug('joining task handler')
-        task_handler.join(1e100)
-
-        debug('joining result handler')
-        result_handler.join(1e100)
-
-        if pool and hasattr(pool[0], 'terminate'):
-            debug('joining pool workers')
-            for p in pool:
-                p.join()
-
-#
-# Class whose instances are returned by `Pool.apply_async()`
-#
-
-class ApplyResult(object):
-
-    def __init__(self, cache, callback):
-        self._cond = threading.Condition(threading.Lock())
-        self._job = job_counter.next()
-        self._cache = cache
-        self._ready = False
-        self._callback = callback
-        cache[self._job] = self
-        
-    def ready(self):
-        return self._ready
-    
-    def successful(self):
-        assert self._ready
-        return self._success
-    
-    def wait(self, timeout=None):
-        self._cond.acquire()
-        try:
-            if not self._ready:
-                self._cond.wait(timeout)
-        finally:
-            self._cond.release()
-
-    def get(self, timeout=None):
-        self.wait(timeout)
-        if not self._ready:
-            raise TimeoutError
-        if self._success:
-            return self._value
-        else:
-            raise self._value
-
-    def _set(self, i, obj):
-        self._success, self._value = obj
-        if self._callback and self._success:
-            self._callback(self._value)
-        self._cond.acquire()
-        try:
-            self._ready = True
-            self._cond.notify()
-        finally:
-            self._cond.release()
-        del self._cache[self._job]
-
-#
-# Class whose instances are returned by `Pool.map_async()`
-#
-
-class MapResult(ApplyResult):
-    
-    def __init__(self, cache, chunksize, length, callback):
-        ApplyResult.__init__(self, cache, callback)
-        self._success = True
-        self._value = [None] * length
-        self._chunksize = chunksize
-        if chunksize <= 0:
-            self._number_left = 0
-            self._ready = True
-        else:
-            self._number_left = length//chunksize + bool(length % chunksize)
-        
-    def _set(self, i, success_result):
-        success, result = success_result
-        if success:
-            self._value[i*self._chunksize:(i+1)*self._chunksize] = result
-            self._number_left -= 1
-            if self._number_left == 0:
-                if self._callback:
-                    self._callback(self._value)
-                del self._cache[self._job]
-                self._cond.acquire()
-                try:
-                    self._ready = True
-                    self._cond.notify()
-                finally:
-                    self._cond.release()
-
-        else:
-            self._success = False
-            self._value = result
-            del self._cache[self._job]
-            self._cond.acquire()
-            try:
-                self._ready = True
-                self._cond.notify()
-            finally:
-                self._cond.release()
-
-#
-# Class whose instances are returned by `Pool.imap()`
-#
-
-class IMapIterator(object):
-
-    def __init__(self, cache):
-        self._cond = threading.Condition(threading.Lock())
-        self._job = job_counter.next()
-        self._cache = cache
-        self._items = collections.deque()
-        self._index = 0
-        self._length = None
-        self._unsorted = {}
-        cache[self._job] = self
-        
-    def __iter__(self):
-        return self
-    
-    def next(self, timeout=None):
-        self._cond.acquire()
-        try:
-            try:
-                item = self._items.popleft()
-            except IndexError:
-                if self._index == self._length:
-                    raise StopIteration
-                self._cond.wait(timeout)
-                try:
-                    item = self._items.popleft()
-                except IndexError:
-                    if self._index == self._length:
-                        raise StopIteration
-                    raise TimeoutError
-        finally:
-            self._cond.release()
-
-        success, value = item
-        if success:
-            return value
-        raise value
-
-    __next__ = next                    # XXX
-    
-    def _set(self, i, obj):
-        self._cond.acquire()
-        try:
-            if self._index == i:
-                self._items.append(obj)
-                self._index += 1
-                while self._index in self._unsorted:
-                    obj = self._unsorted.pop(self._index)
-                    self._items.append(obj)
-                    self._index += 1
-                self._cond.notify()
-            else:
-                self._unsorted[i] = obj
-                
-            if self._index == self._length:
-                del self._cache[self._job]
-        finally:
-            self._cond.release()
-            
-    def _set_length(self, length):
-        self._cond.acquire()
-        try:
-            self._length = length
-            if self._index == self._length:
-                self._cond.notify()
-                del self._cache[self._job]
-        finally:
-            self._cond.release()
-
-#
-# Class whose instances are returned by `Pool.imap_unordered()`
-#
-
-class IMapUnorderedIterator(IMapIterator):
-
-    def _set(self, i, obj):
-        self._cond.acquire()
-        try:
-            self._items.append(obj)
-            self._index += 1
-            self._cond.notify()
-            if self._index == self._length:
-                del self._cache[self._job]
-        finally:
-            self._cond.release()
-
-#
-#
-#
-
-class ThreadPool(Pool):
-    
-    from .dummy import Process
-    
-    def __init__(self, processes=None, initializer=None, initargs=()):
-        Pool.__init__(self, processes, initializer, initargs)
-        
-    def _setup_queues(self):
-        self._inqueue = Queue.Queue()
-        self._outqueue = Queue.Queue()
-        self._quick_put = self._inqueue.put
-        self._quick_get = self._outqueue.get
-        
-    @staticmethod
-    def _help_stuff_finish(inqueue, task_handler, size):
-        # put sentinels at head of inqueue to make workers finish
-        inqueue.not_empty.acquire()
-        try:
-            inqueue.queue.clear()
-            inqueue.queue.extend([None] * size)
-            inqueue.not_empty.notifyAll()
-        finally:
-            inqueue.not_empty.release()
+#
+# Module providing the `Pool` class for managing a process pool
+#
+# multiprocessing/pool.py
+#
+# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
+#
+
+__all__ = ['Pool']
+
+#
+# Imports
+#
+
+import threading
+import queue
+import itertools
+import collections
+import time
+
+from multiprocessing import Process, cpu_count, TimeoutError
+from multiprocessing.util import Finalize, debug
+
+#
+# Constants representing the state of a pool
+#
+
+RUN = 0
+CLOSE = 1
+TERMINATE = 2
+
+#
+# Miscellaneous
+#
+
+job_counter = itertools.count()
+
+def mapstar(args):
+    return list(map(*args))
+
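
`mapstar` unpacks a `(func, items)` pair and materializes the map; this is
how the chunked variants below submit a whole batch as a single task:

    >>> mapstar((abs, (-1, -2, 3)))
    [1, 2, 3]
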
+#
+# Code run by worker processes
+#
+
+def worker(inqueue, outqueue, initializer=None, initargs=()):
+    put = outqueue.put
+    get = inqueue.get
+    if hasattr(inqueue, '_writer'):
+        inqueue._writer.close()
+        outqueue._reader.close()
+
+    if initializer is not None:
+        initializer(*initargs)
+
+    while 1:
+        try:
+            task = get()
+        except (EOFError, IOError):
+            debug('worker got EOFError or IOError -- exiting')
+            break
+
+        if task is None:
+            debug('worker got sentinel -- exiting')
+            break
+
+        job, i, func, args, kwds = task
+        try:
+            result = (True, func(*args, **kwds))
+        except Exception as e:
+            result = (False, e)
+        put((job, i, result))
+
+#
+# Class representing a process pool
+#
+
+class Pool(object):
+    '''
+    Class which supports an async version of the `apply()` builtin
+    '''
+    Process = Process
+
+    def __init__(self, processes=None, initializer=None, initargs=()):
+        self._setup_queues()
+        self._taskqueue = queue.Queue()
+        self._cache = {}
+        self._state = RUN
+
+        if processes is None:
+            try:
+                processes = cpu_count()
+            except NotImplementedError:
+                processes = 1
+
+        self._pool = []
+        for i in range(processes):
+            w = self.Process(
+                target=worker,
+                args=(self._inqueue, self._outqueue, initializer, initargs)
+                )
+            self._pool.append(w)
+            w.set_name(w.get_name().replace('Process', 'PoolWorker'))
+            w.set_daemon(True)
+            w.start()
+
+        self._task_handler = threading.Thread(
+            target=Pool._handle_tasks,
+            args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
+            )
+        self._task_handler.setDaemon(True)
+        self._task_handler._state = RUN
+        self._task_handler.start()
+
+        self._result_handler = threading.Thread(
+            target=Pool._handle_results,
+            args=(self._outqueue, self._quick_get, self._cache)
+            )
+        self._result_handler.setDaemon(True)
+        self._result_handler._state = RUN
+        self._result_handler.start()
+
+        self._terminate = Finalize(
+            self, self._terminate_pool,
+            args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
+                  self._task_handler, self._result_handler, self._cache),
+            exitpriority=15
+            )
+
+    def _setup_queues(self):
+        from .queues import SimpleQueue
+        self._inqueue = SimpleQueue()
+        self._outqueue = SimpleQueue()
+        self._quick_put = self._inqueue._writer.send
+        self._quick_get = self._outqueue._reader.recv
+
+    def apply(self, func, args=(), kwds={}):
+        '''
+        Equivalent of `apply()` builtin
+        '''
+        assert self._state == RUN
+        return self.apply_async(func, args, kwds).get()
+
+    def map(self, func, iterable, chunksize=None):
+        '''
+        Equivalent of `map()` builtin
+        '''
+        assert self._state == RUN
+        return self.map_async(func, iterable, chunksize).get()
+
+    def imap(self, func, iterable, chunksize=1):
+        '''
+        Equivalent of `itertools.imap()` -- can be MUCH slower than `Pool.map()`
+        '''
+        assert self._state == RUN
+        if chunksize == 1:
+            result = IMapIterator(self._cache)
+            self._taskqueue.put((((result._job, i, func, (x,), {})
+                         for i, x in enumerate(iterable)), result._set_length))
+            return result
+        else:
+            assert chunksize > 1
+            task_batches = Pool._get_tasks(func, iterable, chunksize)
+            result = IMapIterator(self._cache)
+            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
+                     for i, x in enumerate(task_batches)), result._set_length))
+            return (item for chunk in result for item in chunk)
+
+    def imap_unordered(self, func, iterable, chunksize=1):
+        '''
+        Like `imap()` method but ordering of results is arbitrary
+        '''
+        assert self._state == RUN
+        if chunksize == 1:
+            result = IMapUnorderedIterator(self._cache)
+            self._taskqueue.put((((result._job, i, func, (x,), {})
+                         for i, x in enumerate(iterable)), result._set_length))
+            return result
+        else:
+            assert chunksize > 1
+            task_batches = Pool._get_tasks(func, iterable, chunksize)
+            result = IMapUnorderedIterator(self._cache)
+            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
+                     for i, x in enumerate(task_batches)), result._set_length))
+            return (item for chunk in result for item in chunk)
+
+    def apply_async(self, func, args=(), kwds={}, callback=None):
+        '''
+        Asynchronous equivalent of `apply()` builtin
+        '''
+        assert self._state == RUN
+        result = ApplyResult(self._cache, callback)
+        self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
+        return result
+
+    def map_async(self, func, iterable, chunksize=None, callback=None):
+        '''
+        Asynchronous equivalent of `map()` builtin
+        '''
+        assert self._state == RUN
+        if not hasattr(iterable, '__len__'):
+            iterable = list(iterable)
+
+        if chunksize is None:
+            chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
+            if extra:
+                chunksize += 1
+
+        task_batches = Pool._get_tasks(func, iterable, chunksize)
+        result = MapResult(self._cache, chunksize, len(iterable), callback)
+        self._taskqueue.put((((result._job, i, mapstar, (x,), {})
+                              for i, x in enumerate(task_batches)), None))
+        return result
+
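
The default chunksize targets roughly four batches per worker; for example,
100 items on a 4-process pool give `divmod(100, 16) == (6, 4)`, and the
nonzero remainder bumps the chunksize to 7:

    >>> length, nworkers = 100, 4                  # illustrative numbers
    >>> chunksize, extra = divmod(length, nworkers * 4)
    >>> chunksize + bool(extra)
    7
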
+    @staticmethod
+    def _handle_tasks(taskqueue, put, outqueue, pool):
+        thread = threading.currentThread()
+
+        for taskseq, set_length in iter(taskqueue.get, None):
+            i = -1
+            for i, task in enumerate(taskseq):
+                if thread._state:
+                    debug('task handler found thread._state != RUN')
+                    break
+                try:
+                    put(task)
+                except IOError:
+                    debug('could not put task on queue')
+                    break
+            else:
+                if set_length:
+                    debug('doing set_length()')
+                    set_length(i+1)
+                continue
+            break
+        else:
+            debug('task handler got sentinel')
+
+
+        try:
+            # tell result handler to finish when cache is empty
+            debug('task handler sending sentinel to result handler')
+            outqueue.put(None)
+
+            # tell workers there is no more work
+            debug('task handler sending sentinel to workers')
+            for p in pool:
+                put(None)
+        except IOError:
+            debug('task handler got IOError when sending sentinels')
+
+        debug('task handler exiting')
+
+    @staticmethod
+    def _handle_results(outqueue, get, cache):
+        thread = threading.currentThread()
+
+        while 1:
+            try:
+                task = get()
+            except (IOError, EOFError):
+                debug('result handler got EOFError/IOError -- exiting')
+                return
+
+            if thread._state:
+                assert thread._state == TERMINATE
+                debug('result handler found thread._state=TERMINATE')
+                break
+
+            if task is None:
+                debug('result handler got sentinel')
+                break
+
+            job, i, obj = task
+            try:
+                cache[job]._set(i, obj)
+            except KeyError:
+                pass
+
+        while cache and thread._state != TERMINATE:
+            try:
+                task = get()
+            except (IOError, EOFError):
+                debug('result handler got EOFError/IOError -- exiting')
+                return
+
+            if task is None:
+                debug('result handler ignoring extra sentinel')
+                continue
+            job, i, obj = task
+            try:
+                cache[job]._set(i, obj)
+            except KeyError:
+                pass
+
+        if hasattr(outqueue, '_reader'):
+            debug('ensuring that outqueue is not full')
+            # If we don't make room available in outqueue then
+            # attempts to add the sentinel (None) to outqueue may
+            # block.  There is guaranteed to be no more than 2 sentinels.
+            try:
+                for i in range(10):
+                    if not outqueue._reader.poll():
+                        break
+                    get()
+            except (IOError, EOFError):
+                pass
+
+        debug('result handler exiting: len(cache)=%s, thread._state=%s',
+              len(cache), thread._state)
+
+    @staticmethod
+    def _get_tasks(func, it, size):
+        it = iter(it)
+        while 1:
+            x = tuple(itertools.islice(it, size))
+            if not x:
+                return
+            yield (func, x)
+
+    def __reduce__(self):
+        raise NotImplementedError(
+              'pool objects cannot be passed between processes or pickled'
+              )
+
+    def close(self):
+        debug('closing pool')
+        if self._state == RUN:
+            self._state = CLOSE
+            self._taskqueue.put(None)
+
+    def terminate(self):
+        debug('terminating pool')
+        self._state = TERMINATE
+        self._terminate()
+
+    def join(self):
+        debug('joining pool')
+        assert self._state in (CLOSE, TERMINATE)
+        self._task_handler.join()
+        self._result_handler.join()
+        for p in self._pool:
+            p.join()
+
+    @staticmethod
+    def _help_stuff_finish(inqueue, task_handler, size):
+        # task_handler may be blocked trying to put items on inqueue
+        debug('removing tasks from inqueue until task handler finished')
+        inqueue._rlock.acquire()
+        while task_handler.isAlive() and inqueue._reader.poll():
+            inqueue._reader.recv()
+            time.sleep(0)
+
+    @classmethod
+    def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
+                        task_handler, result_handler, cache):
+        # this is guaranteed to only be called once
+        debug('finalizing pool')
+
+        task_handler._state = TERMINATE
+        taskqueue.put(None)                 # sentinel
+
+        debug('helping task handler/workers to finish')
+        cls._help_stuff_finish(inqueue, task_handler, len(pool))
+
+        assert result_handler.isAlive() or len(cache) == 0
+
+        result_handler._state = TERMINATE
+        outqueue.put(None)                  # sentinel
+
+        if pool and hasattr(pool[0], 'terminate'):
+            debug('terminating workers')
+            for p in pool:
+                p.terminate()
+
+        debug('joining task handler')
+        task_handler.join(1e100)
+
+        debug('joining result handler')
+        result_handler.join(1e100)
+
+        if pool and hasattr(pool[0], 'terminate'):
+            debug('joining pool workers')
+            for p in pool:
+                p.join()
+
+#
+# Class whose instances are returned by `Pool.apply_async()`
+#
+
+class ApplyResult(object):
+
+    def __init__(self, cache, callback):
+        self._cond = threading.Condition(threading.Lock())
+        self._job = next(job_counter)
+        self._cache = cache
+        self._ready = False
+        self._callback = callback
+        cache[self._job] = self
+
+    def ready(self):
+        return self._ready
+
+    def successful(self):
+        assert self._ready
+        return self._success
+
+    def wait(self, timeout=None):
+        self._cond.acquire()
+        try:
+            if not self._ready:
+                self._cond.wait(timeout)
+        finally:
+            self._cond.release()
+
+    def get(self, timeout=None):
+        self.wait(timeout)
+        if not self._ready:
+            raise TimeoutError
+        if self._success:
+            return self._value
+        else:
+            raise self._value
+
+    def _set(self, i, obj):
+        self._success, self._value = obj
+        if self._callback and self._success:
+            self._callback(self._value)
+        self._cond.acquire()
+        try:
+            self._ready = True
+            self._cond.notify()
+        finally:
+            self._cond.release()
+        del self._cache[self._job]
+
+#
+# Class whose instances are returned by `Pool.map_async()`
+#
+
+class MapResult(ApplyResult):
+
+    def __init__(self, cache, chunksize, length, callback):
+        ApplyResult.__init__(self, cache, callback)
+        self._success = True
+        self._value = [None] * length
+        self._chunksize = chunksize
+        if chunksize <= 0:
+            self._number_left = 0
+            self._ready = True
+        else:
+            self._number_left = length//chunksize + bool(length % chunksize)
+
+    def _set(self, i, success_result):
+        success, result = success_result
+        if success:
+            self._value[i*self._chunksize:(i+1)*self._chunksize] = result
+            self._number_left -= 1
+            if self._number_left == 0:
+                if self._callback:
+                    self._callback(self._value)
+                del self._cache[self._job]
+                self._cond.acquire()
+                try:
+                    self._ready = True
+                    self._cond.notify()
+                finally:
+                    self._cond.release()
+
+        else:
+            self._success = False
+            self._value = result
+            del self._cache[self._job]
+            self._cond.acquire()
+            try:
+                self._ready = True
+                self._cond.notify()
+            finally:
+                self._cond.release()
+
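
Note that `_number_left` counts chunks rather than items, using ceiling
division, and each successful `_set()` splices one chunk back into `_value`
at its offset. For example:

    >>> length, chunksize = 10, 3
    >>> length // chunksize + bool(length % chunksize)   # chunks to wait for
    4
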
+#
+# Class whose instances are returned by `Pool.imap()`
+#
+
+class IMapIterator(object):
+
+    def __init__(self, cache):
+        self._cond = threading.Condition(threading.Lock())
+        self._job = next(job_counter)
+        self._cache = cache
+        self._items = collections.deque()
+        self._index = 0
+        self._length = None
+        self._unsorted = {}
+        cache[self._job] = self
+
+    def __iter__(self):
+        return self
+
+    def next(self, timeout=None):
+        self._cond.acquire()
+        try:
+            try:
+                item = self._items.popleft()
+            except IndexError:
+                if self._index == self._length:
+                    raise StopIteration
+                self._cond.wait(timeout)
+                try:
+                    item = self._items.popleft()
+                except IndexError:
+                    if self._index == self._length:
+                        raise StopIteration
+                    raise TimeoutError
+        finally:
+            self._cond.release()
+
+        success, value = item
+        if success:
+            return value
+        raise value
+
+    __next__ = next                    # XXX
+
+    def _set(self, i, obj):
+        self._cond.acquire()
+        try:
+            if self._index == i:
+                self._items.append(obj)
+                self._index += 1
+                while self._index in self._unsorted:
+                    obj = self._unsorted.pop(self._index)
+                    self._items.append(obj)
+                    self._index += 1
+                self._cond.notify()
+            else:
+                self._unsorted[i] = obj
+
+            if self._index == self._length:
+                del self._cache[self._job]
+        finally:
+            self._cond.release()
+
+    def _set_length(self, length):
+        self._cond.acquire()
+        try:
+            self._length = length
+            if self._index == self._length:
+                self._cond.notify()
+                del self._cache[self._job]
+        finally:
+            self._cond.release()
+
+#
+# Class whose instances are returned by `Pool.imap_unordered()`
+#
+
+class IMapUnorderedIterator(IMapIterator):
+
+    def _set(self, i, obj):
+        self._cond.acquire()
+        try:
+            self._items.append(obj)
+            self._index += 1
+            self._cond.notify()
+            if self._index == self._length:
+                del self._cache[self._job]
+        finally:
+            self._cond.release()
+
+#
+#
+#
+
+class ThreadPool(Pool):
+
+    from .dummy import Process
+
+    def __init__(self, processes=None, initializer=None, initargs=()):
+        Pool.__init__(self, processes, initializer, initargs)
+
+    def _setup_queues(self):
+        self._inqueue = queue.Queue()
+        self._outqueue = queue.Queue()
+        self._quick_put = self._inqueue.put
+        self._quick_get = self._outqueue.get
+
+    @staticmethod
+    def _help_stuff_finish(inqueue, task_handler, size):
+        # put sentinels at head of inqueue to make workers finish
+        inqueue.not_empty.acquire()
+        try:
+            inqueue.queue.clear()
+            inqueue.queue.extend([None] * size)
+            inqueue.not_empty.notifyAll()
+        finally:
+            inqueue.not_empty.release()
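
Taken together, a minimal end-to-end sketch of the `Pool` API defined above
(the `square` helper is hypothetical):

    from multiprocessing import Pool

    def square(x):                   # hypothetical task function
        return x * x

    if __name__ == '__main__':
        pool = Pool(processes=4)
        print(pool.map(square, range(10)))       # blocks until all chunks return
        res = pool.apply_async(square, (7,))
        print(res.get(timeout=1))                # 49
        pool.close()                             # no more tasks
        pool.join()                              # wait for workers to exit
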

Modified: python/branches/py3k/Lib/multiprocessing/process.py
==============================================================================
--- /python/trunk/Lib/multiprocessing/process.py	(original)
+++ python/branches/py3k/Lib/multiprocessing/process.py	Wed Jun 11 18:44:04 2008
@@ -1,302 +1,302 @@
-#
-# Module providing the `Process` class which emulates `threading.Thread`
-#
-# multiprocessing/process.py
-#
-# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
-#
-
-__all__ = ['Process', 'current_process', 'active_children']
-
-#
-# Imports
-#
-
-import os
-import sys
-import signal
-import itertools
-
-#
-#
-#
-
-try:
-    ORIGINAL_DIR = os.path.abspath(os.getcwd())
-except OSError:
-    ORIGINAL_DIR = None
-
-try:
-    bytes
-except NameError:
-    bytes = str                  # XXX not needed in Py2.6 and Py3.0
-
-#
-# Public functions
-#
-
-def current_process():
-    '''
-    Return process object representing the current process
-    '''
-    return _current_process
-
-def active_children():
-    '''
-    Return list of process objects corresponding to live child processes
-    '''
-    _cleanup()
-    return list(_current_process._children)
-    
-#
-#
-#
-
-def _cleanup():
-    # check for processes which have finished
-    for p in list(_current_process._children):
-        if p._popen.poll() is not None:
-            _current_process._children.discard(p)
-
-#
-# The `Process` class
-#
-
-class Process(object):
-    '''
-    Process objects represent activity that is run in a separate process
-
-    The class is analogous to `threading.Thread`
-    '''
-    _Popen = None
-    
-    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
-        assert group is None, 'group argument must be None for now'
-        count = _current_process._counter.next()
-        self._identity = _current_process._identity + (count,)
-        self._authkey = _current_process._authkey
-        self._daemonic = _current_process._daemonic
-        self._tempdir = _current_process._tempdir
-        self._parent_pid = os.getpid()
-        self._popen = None
-        self._target = target
-        self._args = tuple(args)
-        self._kwargs = dict(kwargs)
-        self._name = name or type(self).__name__ + '-' + \
-                     ':'.join(str(i) for i in self._identity)
-
-    def run(self):
-        '''
-        Method to be run in sub-process; can be overridden in sub-class
-        '''
-        if self._target:
-            self._target(*self._args, **self._kwargs)
-            
-    def start(self):
-        '''
-        Start child process
-        '''
-        assert self._popen is None, 'cannot start a process twice'
-        assert self._parent_pid == os.getpid(), \
-               'can only start a process object created by current process'
-        assert not _current_process._daemonic, \
-               'daemonic processes are not allowed to have children'
-        _cleanup()
-        if self._Popen is not None:
-            Popen = self._Popen
-        else:
-            from .forking import Popen
-        self._popen = Popen(self)
-        _current_process._children.add(self)
-
-    def terminate(self):
-        '''
-        Terminate process; sends SIGTERM signal or uses TerminateProcess()
-        '''
-        self._popen.terminate()
-        
-    def join(self, timeout=None):
-        '''
-        Wait until child process terminates
-        '''
-        assert self._parent_pid == os.getpid(), 'can only join a child process'
-        assert self._popen is not None, 'can only join a started process'
-        res = self._popen.wait(timeout)
-        if res is not None:
-            _current_process._children.discard(self)
-
-    def is_alive(self):
-        '''
-        Return whether process is alive
-        '''
-        if self is _current_process:
-            return True
-        assert self._parent_pid == os.getpid(), 'can only test a child process'
-        if self._popen is None:
-            return False
-        self._popen.poll()
-        return self._popen.returncode is None
-
-    def get_name(self):
-        '''
-        Return name of process
-        '''
-        return self._name
-
-    def set_name(self, name):
-        '''
-        Set name of process
-        '''
-        assert isinstance(name, str), 'name must be a string'
-        self._name = name
-
-    def is_daemon(self):
-        '''
-        Return whether process is a daemon
-        '''
-        return self._daemonic
-
-    def set_daemon(self, daemonic):
-        '''
-        Set whether process is a daemon
-        '''
-        assert self._popen is None, 'process has already started'
-        self._daemonic = daemonic
-
-    def get_authkey(self):
-        '''
-        Return authorization key of process
-        '''
-        return self._authkey
-
-    def set_authkey(self, authkey):
-        '''
-        Set authorization key of process
-        '''
-        self._authkey = AuthenticationString(authkey)
-
-    def get_exitcode(self):
-        '''
-        Return exit code of process or `None` if it has yet to stop
-        '''
-        if self._popen is None:
-            return self._popen
-        return self._popen.poll()
-
-    def get_ident(self):
-        '''
-        Return identifier (PID) of process or `None` if it has yet to start
-        '''
-        if self is _current_process:
-            return os.getpid()
-        else:
-            return self._popen and self._popen.pid
-
-    pid = property(get_ident)
-
-    def __repr__(self):
-        if self is _current_process:
-            status = 'started'
-        elif self._parent_pid != os.getpid():
-            status = 'unknown'
-        elif self._popen is None:
-            status = 'initial'
-        else:
-            if self._popen.poll() is not None:
-                status = self.get_exitcode()
-            else:
-                status = 'started'
-
-        if type(status) is int:
-            if status == 0:
-                status = 'stopped'
-            else:
-                status = 'stopped[%s]' % _exitcode_to_name.get(status, status)
-
-        return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,
-                                   status, self._daemonic and ' daemon' or '')
-
-    ##
-        
-    def _bootstrap(self):
-        from . import util
-        global _current_process
-        
-        try:
-            self._children = set()
-            self._counter = itertools.count(1)
-            try:
-                os.close(sys.stdin.fileno())
-            except (OSError, ValueError):
-                pass
-            _current_process = self
-            util._finalizer_registry.clear()
-            util._run_after_forkers()
-            util.info('child process calling self.run()')
-            try:
-                self.run()
-                exitcode = 0
-            finally:
-                util._exit_function()
-        except SystemExit, e:
-            if not e.args:
-                exitcode = 1
-            elif type(e.args[0]) is int:
-                exitcode = e.args[0]
-            else:
-                sys.stderr.write(e.args[0] + '\n')
-                sys.stderr.flush()
-                exitcode = 1
-        except:
-            exitcode = 1
-            import traceback
-            sys.stderr.write('Process %s:\n' % self.get_name())
-            sys.stderr.flush()
-            traceback.print_exc()
-
-        util.info('process exiting with exitcode %d' % exitcode)
-        return exitcode
-
-#
-# We subclass bytes to avoid accidental transmission of auth keys over network
-#
-
-class AuthenticationString(bytes):
-    def __reduce__(self):
-        from .forking import Popen
-        if not Popen.thread_is_spawning():
-            raise TypeError(
-                'Pickling an AuthenticationString object is '
-                'disallowed for security reasons'
-                )
-        return AuthenticationString, (bytes(self),)
-
-#
-# Create object representing the main process
-#
-
-class _MainProcess(Process):
-
-    def __init__(self):
-        self._identity = ()
-        self._daemonic = False
-        self._name = 'MainProcess'
-        self._parent_pid = None
-        self._popen = None
-        self._counter = itertools.count(1)
-        self._children = set()
-        self._authkey = AuthenticationString(os.urandom(32))
-        self._tempdir = None
-
-_current_process = _MainProcess()
-del _MainProcess
-
-#
-# Give names to some return codes
-#
-
-_exitcode_to_name = {}
-
-for name, signum in signal.__dict__.items():
-    if name[:3]=='SIG' and '_' not in name:
-        _exitcode_to_name[-signum] = name
+#
+# Module providing the `Process` class which emulates `threading.Thread`
+#
+# multiprocessing/process.py
+#
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
+#
+
+__all__ = ['Process', 'current_process', 'active_children']
+
+#
+# Imports
+#
+
+import os
+import sys
+import signal
+import itertools
+
+#
+#
+#
+
+try:
+    ORIGINAL_DIR = os.path.abspath(os.getcwd())
+except OSError:
+    ORIGINAL_DIR = None
+
+try:
+    bytes
+except NameError:
+    bytes = str                  # XXX not needed in Py2.6 and Py3.0
+
+#
+# Public functions
+#
+
+def current_process():
+    '''
+    Return process object representing the current process
+    '''
+    return _current_process
+
+def active_children():
+    '''
+    Return list of process objects corresponding to live child processes
+    '''
+    _cleanup()
+    return list(_current_process._children)
+
+#
+#
+#
+
+def _cleanup():
+    # check for processes which have finished
+    for p in list(_current_process._children):
+        if p._popen.poll() is not None:
+            _current_process._children.discard(p)
+
+#
+# The `Process` class
+#
+
+class Process(object):
+    '''
+    Process objects represent activity that is run in a separate process
+
+    The class is analogous to `threading.Thread`
+    '''
+    _Popen = None
+
+    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
+        assert group is None, 'group argument must be None for now'
+        count = next(_current_process._counter)
+        self._identity = _current_process._identity + (count,)
+        self._authkey = _current_process._authkey
+        self._daemonic = _current_process._daemonic
+        self._tempdir = _current_process._tempdir
+        self._parent_pid = os.getpid()
+        self._popen = None
+        self._target = target
+        self._args = tuple(args)
+        self._kwargs = dict(kwargs)
+        self._name = name or type(self).__name__ + '-' + \
+                     ':'.join(str(i) for i in self._identity)
+
+    def run(self):
+        '''
+        Method to be run in sub-process; can be overridden in sub-class
+        '''
+        if self._target:
+            self._target(*self._args, **self._kwargs)
+
+    def start(self):
+        '''
+        Start child process
+        '''
+        assert self._popen is None, 'cannot start a process twice'
+        assert self._parent_pid == os.getpid(), \
+               'can only start a process object created by current process'
+        assert not _current_process._daemonic, \
+               'daemonic processes are not allowed to have children'
+        _cleanup()
+        if self._Popen is not None:
+            Popen = self._Popen
+        else:
+            from .forking import Popen
+        self._popen = Popen(self)
+        _current_process._children.add(self)
+
+    def terminate(self):
+        '''
+        Terminate process; sends SIGTERM signal or uses TerminateProcess()
+        '''
+        self._popen.terminate()
+
+    def join(self, timeout=None):
+        '''
+        Wait until child process terminates
+        '''
+        assert self._parent_pid == os.getpid(), 'can only join a child process'
+        assert self._popen is not None, 'can only join a started process'
+        res = self._popen.wait(timeout)
+        if res is not None:
+            _current_process._children.discard(self)
+
+    def is_alive(self):
+        '''
+        Return whether process is alive
+        '''
+        if self is _current_process:
+            return True
+        assert self._parent_pid == os.getpid(), 'can only test a child process'
+        if self._popen is None:
+            return False
+        self._popen.poll()
+        return self._popen.returncode is None
+
+    def get_name(self):
+        '''
+        Return name of process
+        '''
+        return self._name
+
+    def set_name(self, name):
+        '''
+        Set name of process
+        '''
+        assert isinstance(name, str), 'name must be a string'
+        self._name = name
+
+    def is_daemon(self):
+        '''
+        Return whether process is a daemon
+        '''
+        return self._daemonic
+
+    def set_daemon(self, daemonic):
+        '''
+        Set whether process is a daemon
+        '''
+        assert self._popen is None, 'process has already started'
+        self._daemonic = daemonic
+
+    def get_authkey(self):
+        '''
+        Return authorization key of process
+        '''
+        return self._authkey
+
+    def set_authkey(self, authkey):
+        '''
+        Set authorization key of process
+        '''
+        self._authkey = AuthenticationString(authkey)
+
+    def get_exitcode(self):
+        '''
+        Return exit code of process or `None` if it has yet to stop
+        '''
+        if self._popen is None:
+            return None
+        return self._popen.poll()
+
+    def get_ident(self):
+        '''
+        Return identifier (PID) of process or `None` if it has yet to start
+        '''
+        if self is _current_process:
+            return os.getpid()
+        else:
+            return self._popen and self._popen.pid
+
+    pid = property(get_ident)
+
+    def __repr__(self):
+        if self is _current_process:
+            status = 'started'
+        elif self._parent_pid != os.getpid():
+            status = 'unknown'
+        elif self._popen is None:
+            status = 'initial'
+        else:
+            if self._popen.poll() is not None:
+                status = self.get_exitcode()
+            else:
+                status = 'started'
+
+        if type(status) is int:
+            if status == 0:
+                status = 'stopped'
+            else:
+                status = 'stopped[%s]' % _exitcode_to_name.get(status, status)
+
+        return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,
+                                   status, self._daemonic and ' daemon' or '')
+
+    ##
+
+    def _bootstrap(self):
+        from . import util
+        global _current_process
+
+        try:
+            self._children = set()
+            self._counter = itertools.count(1)
+            try:
+                os.close(sys.stdin.fileno())
+            except (OSError, ValueError):
+                pass
+            _current_process = self
+            util._finalizer_registry.clear()
+            util._run_after_forkers()
+            util.info('child process calling self.run()')
+            try:
+                self.run()
+                exitcode = 0
+            finally:
+                util._exit_function()
+        except SystemExit as e:
+            if not e.args:
+                exitcode = 1
+            elif type(e.args[0]) is int:
+                exitcode = e.args[0]
+            else:
+                sys.stderr.write(e.args[0] + '\n')
+                sys.stderr.flush()
+                exitcode = 1
+        except:
+            exitcode = 1
+            import traceback
+            sys.stderr.write('Process %s:\n' % self.get_name())
+            sys.stderr.flush()
+            traceback.print_exc()
+
+        util.info('process exiting with exitcode %d' % exitcode)
+        return exitcode
+
+#
+# We subclass bytes to avoid accidental transmission of auth keys over network
+#
+
+class AuthenticationString(bytes):
+    def __reduce__(self):
+        from .forking import Popen
+        if not Popen.thread_is_spawning():
+            raise TypeError(
+                'Pickling an AuthenticationString object is '
+                'disallowed for security reasons'
+                )
+        return AuthenticationString, (bytes(self),)
+
+#
+# Create object representing the main process
+#
+
+class _MainProcess(Process):
+
+    def __init__(self):
+        self._identity = ()
+        self._daemonic = False
+        self._name = 'MainProcess'
+        self._parent_pid = None
+        self._popen = None
+        self._counter = itertools.count(1)
+        self._children = set()
+        self._authkey = AuthenticationString(os.urandom(32))
+        self._tempdir = None
+
+_current_process = _MainProcess()
+del _MainProcess
+
+#
+# Give names to some return codes
+#
+
+_exitcode_to_name = {}
+
+for name, signum in list(signal.__dict__.items()):
+    if name[:3]=='SIG' and '_' not in name:
+        _exitcode_to_name[-signum] = name

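For reference, a minimal usage sketch of the accessor-style Process API added above; the `worker` function and its argument are illustrative, not part of the commit:

    import multiprocessing

    def worker(n):
        # runs in the child; current_process() returns the child's Process object
        print('running in', multiprocessing.current_process().get_name(), 'with', n)

    if __name__ == '__main__':
        p = multiprocessing.Process(target=worker, args=(3,))
        p.set_daemon(False)                     # must be called before start()
        p.start()
        p.join()                                # wait for the child to finish
        print('exit code:', p.get_exitcode())   # 0 after a clean exit

Note that start(), join() and is_alive() may only be called from the process that created the Process object, as the asserts above enforce.
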
Modified: python/branches/py3k/Lib/multiprocessing/queues.py
==============================================================================
--- /python/trunk/Lib/multiprocessing/queues.py	(original)
+++ python/branches/py3k/Lib/multiprocessing/queues.py	Wed Jun 11 18:44:04 2008
@@ -1,356 +1,356 @@
-#
-# Module implementing queues
-#
-# multiprocessing/queues.py
-#
-# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
-#
-
-__all__ = ['Queue', 'SimpleQueue']
-
-import sys
-import os
-import threading
-import collections
-import time
-import atexit
-import weakref
-
-from Queue import Empty, Full
-import _multiprocessing
-from multiprocessing import Pipe
-from multiprocessing.synchronize import Lock, BoundedSemaphore, Semaphore, Condition
-from multiprocessing.util import debug, info, Finalize, register_after_fork
-from multiprocessing.forking import assert_spawning
-
-#
-# Queue type using a pipe, buffer and thread
-#
-
-class Queue(object):
-
-    def __init__(self, maxsize=0):
-        if maxsize <= 0:
-            maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
-        self._maxsize = maxsize
-        self._reader, self._writer = Pipe(duplex=False)
-        self._rlock = Lock()
-        self._opid = os.getpid()
-        if sys.platform == 'win32':
-            self._wlock = None
-        else:
-            self._wlock = Lock()
-        self._sem = BoundedSemaphore(maxsize)
-            
-        self._after_fork()
-        
-        if sys.platform != 'win32':
-            register_after_fork(self, Queue._after_fork)
-
-    def __getstate__(self):
-        assert_spawning(self)
-        return (self._maxsize, self._reader, self._writer,
-                self._rlock, self._wlock, self._sem, self._opid)
-    
-    def __setstate__(self, state):
-        (self._maxsize, self._reader, self._writer,
-         self._rlock, self._wlock, self._sem, self._opid) = state
-        self._after_fork()
-        
-    def _after_fork(self):
-        debug('Queue._after_fork()')
-        self._notempty = threading.Condition(threading.Lock())
-        self._buffer = collections.deque()
-        self._thread = None
-        self._jointhread = None
-        self._joincancelled = False
-        self._closed = False
-        self._close = None
-        self._send = self._writer.send
-        self._recv = self._reader.recv
-        self._poll = self._reader.poll
-        
-    def put(self, obj, block=True, timeout=None):
-        assert not self._closed
-        if not self._sem.acquire(block, timeout):
-            raise Full
-
-        self._notempty.acquire()
-        try:
-            if self._thread is None:
-                self._start_thread()
-            self._buffer.append(obj)
-            self._notempty.notify()
-        finally:
-            self._notempty.release()
-
-    def get(self, block=True, timeout=None):
-        if block and timeout is None:
-            self._rlock.acquire()
-            try:
-                res = self._recv()
-                self._sem.release()
-                return res
-            finally:
-                self._rlock.release()
-                
-        else:
-            if block:
-                deadline = time.time() + timeout
-            if not self._rlock.acquire(block, timeout):
-                raise Empty
-            try:
-                if not self._poll(block and (deadline-time.time()) or 0.0):
-                    raise Empty
-                res = self._recv()
-                self._sem.release()
-                return res
-            finally:
-                self._rlock.release()
-
-    def qsize(self):
-        # Raises NotImplementError on Mac OSX because of broken sem_getvalue()
-        return self._maxsize - self._sem._semlock._get_value()
-
-    def empty(self):
-        return not self._poll()
-
-    def full(self):
-        return self._sem._semlock._is_zero()
-
-    def get_nowait(self):
-        return self.get(False)
-
-    def put_nowait(self, obj):
-        return self.put(obj, False)
-
-    def close(self):
-        self._closed = True
-        self._reader.close()
-        if self._close:
-            self._close()
-
-    def join_thread(self):
-        debug('Queue.join_thread()')
-        assert self._closed
-        if self._jointhread:
-            self._jointhread()
-    
-    def cancel_join_thread(self):
-        debug('Queue.cancel_join_thread()')
-        self._joincancelled = True
-        try:
-            self._jointhread.cancel()
-        except AttributeError:
-            pass
-
-    def _start_thread(self):
-        debug('Queue._start_thread()')
-        
-        # Start thread which transfers data from buffer to pipe
-        self._buffer.clear()
-        self._thread = threading.Thread(
-            target=Queue._feed,
-            args=(self._buffer, self._notempty, self._send,
-                  self._wlock, self._writer.close),
-            name='QueueFeederThread'
-            )
-        self._thread.setDaemon(True)
-
-        debug('doing self._thread.start()')
-        self._thread.start()
-        debug('... done self._thread.start()')
-
-        # On process exit we will wait for data to be flushed to pipe.
-        #
-        # However, if this process created the queue then all
-        # processes which use the queue will be descendants of this
-        # process.  Therefore waiting for the queue to be flushed
-        # is pointless once all the child processes have been joined.
-        created_by_this_process = (self._opid == os.getpid())
-        if not self._joincancelled and not created_by_this_process:
-            self._jointhread = Finalize(
-                self._thread, Queue._finalize_join,
-                [weakref.ref(self._thread)],
-                exitpriority=-5
-                )
-            
-        # Send sentinel to the thread queue object when garbage collected
-        self._close = Finalize(
-            self, Queue._finalize_close,
-            [self._buffer, self._notempty],
-            exitpriority=10
-            )
-        
-    @staticmethod
-    def _finalize_join(twr):
-        debug('joining queue thread')
-        thread = twr()
-        if thread is not None:
-            thread.join()
-            debug('... queue thread joined')
-        else:
-            debug('... queue thread already dead')
-            
-    @staticmethod
-    def _finalize_close(buffer, notempty):
-        debug('telling queue thread to quit')
-        notempty.acquire()
-        try:
-            buffer.append(_sentinel)
-            notempty.notify()
-        finally:
-            notempty.release()
-
-    @staticmethod
-    def _feed(buffer, notempty, send, writelock, close):
-        debug('starting thread to feed data to pipe')
-        from .util import is_exiting
-        
-        nacquire = notempty.acquire
-        nrelease = notempty.release
-        nwait = notempty.wait
-        bpopleft = buffer.popleft
-        sentinel = _sentinel
-        if sys.platform != 'win32':
-            wacquire = writelock.acquire
-            wrelease = writelock.release
-        else:
-            wacquire = None
-        
-        try:
-            while 1:
-                nacquire()
-                try:
-                    if not buffer:
-                        nwait()
-                finally:
-                    nrelease()
-                try:
-                    while 1:
-                        obj = bpopleft()
-                        if obj is sentinel:
-                            debug('feeder thread got sentinel -- exiting')
-                            close()
-                            return
-
-                        if wacquire is None:
-                            send(obj)
-                        else:
-                            wacquire()
-                            try:
-                                send(obj)
-                            finally:
-                                wrelease()
-                except IndexError:
-                    pass
-        except Exception, e:
-            # Since this runs in a daemon thread the resources it uses
-            # may be become unusable while the process is cleaning up.
-            # We ignore errors which happen after the process has
-            # started to cleanup.
-            try:
-                if is_exiting():
-                    info('error in queue thread: %s', e)
-                else:
-                    import traceback
-                    traceback.print_exc()
-            except Exception:
-                pass
-            
-_sentinel = object()
-
-#
-# A queue type which also supports join() and task_done() methods
-#
-# Note that if you do not call task_done() for each finished task then
-# eventually the counter's semaphore may overflow causing Bad Things
-# to happen.
-#
-
-class JoinableQueue(Queue):
-
-    def __init__(self, maxsize=0):
-        Queue.__init__(self, maxsize)
-        self._unfinished_tasks = Semaphore(0)
-        self._cond = Condition()
-        
-    def __getstate__(self):
-        return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)
-
-    def __setstate__(self, state):
-        Queue.__setstate__(self, state[:-2])
-        self._cond, self._unfinished_tasks = state[-2:]
-
-    def put(self, item, block=True, timeout=None):
-        Queue.put(self, item, block, timeout)
-        self._unfinished_tasks.release()
-        
-    def task_done(self):
-        self._cond.acquire()
-        try:
-            if not self._unfinished_tasks.acquire(False):
-                raise ValueError('task_done() called too many times')
-            if self._unfinished_tasks._semlock._is_zero():
-                self._cond.notify_all()
-        finally:
-            self._cond.release()
-            
-    def join(self):
-        self._cond.acquire()
-        try:
-            if not self._unfinished_tasks._semlock._is_zero():
-                self._cond.wait()
-        finally:
-            self._cond.release()
-
-#
-# Simplified Queue type -- really just a locked pipe
-#
-
-class SimpleQueue(object):
-
-    def __init__(self):
-        self._reader, self._writer = Pipe(duplex=False)
-        self._rlock = Lock()
-        if sys.platform == 'win32':
-            self._wlock = None
-        else:
-            self._wlock = Lock()
-        self._make_methods()
-
-    def empty(self):
-        return not self._reader.poll()
-
-    def __getstate__(self):
-        assert_spawning(self)
-        return (self._reader, self._writer, self._rlock, self._wlock)
-
-    def __setstate__(self, state):
-        (self._reader, self._writer, self._rlock, self._wlock) = state
-        self._make_methods()
-
-    def _make_methods(self):
-        recv = self._reader.recv
-        racquire, rrelease = self._rlock.acquire, self._rlock.release
-        def get():
-            racquire()
-            try:
-                return recv()
-            finally:
-                rrelease()
-        self.get = get
-
-        if self._wlock is None:
-            # writes to a message oriented win32 pipe are atomic
-            self.put = self._writer.send
-        else:
-            send = self._writer.send
-            wacquire, wrelease = self._wlock.acquire, self._wlock.release
-            def put(obj):
-                wacquire()
-                try:
-                    return send(obj)
-                finally:
-                    wrelease()
-            self.put = put
+#
+# Module implementing queues
+#
+# multiprocessing/queues.py
+#
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
+#
+
+__all__ = ['Queue', 'SimpleQueue']
+
+import sys
+import os
+import threading
+import collections
+import time
+import atexit
+import weakref
+
+from queue import Empty, Full
+import _multiprocessing
+from multiprocessing import Pipe
+from multiprocessing.synchronize import Lock, BoundedSemaphore, Semaphore, Condition
+from multiprocessing.util import debug, info, Finalize, register_after_fork
+from multiprocessing.forking import assert_spawning
+
+#
+# Queue type using a pipe, buffer and thread
+#
+
+class Queue(object):
+
+    def __init__(self, maxsize=0):
+        if maxsize <= 0:
+            maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
+        self._maxsize = maxsize
+        self._reader, self._writer = Pipe(duplex=False)
+        self._rlock = Lock()
+        self._opid = os.getpid()
+        if sys.platform == 'win32':
+            self._wlock = None
+        else:
+            self._wlock = Lock()
+        self._sem = BoundedSemaphore(maxsize)
+
+        self._after_fork()
+
+        if sys.platform != 'win32':
+            register_after_fork(self, Queue._after_fork)
+
+    def __getstate__(self):
+        assert_spawning(self)
+        return (self._maxsize, self._reader, self._writer,
+                self._rlock, self._wlock, self._sem, self._opid)
+
+    def __setstate__(self, state):
+        (self._maxsize, self._reader, self._writer,
+         self._rlock, self._wlock, self._sem, self._opid) = state
+        self._after_fork()
+
+    def _after_fork(self):
+        debug('Queue._after_fork()')
+        self._notempty = threading.Condition(threading.Lock())
+        self._buffer = collections.deque()
+        self._thread = None
+        self._jointhread = None
+        self._joincancelled = False
+        self._closed = False
+        self._close = None
+        self._send = self._writer.send
+        self._recv = self._reader.recv
+        self._poll = self._reader.poll
+
+    def put(self, obj, block=True, timeout=None):
+        assert not self._closed
+        if not self._sem.acquire(block, timeout):
+            raise Full
+
+        self._notempty.acquire()
+        try:
+            if self._thread is None:
+                self._start_thread()
+            self._buffer.append(obj)
+            self._notempty.notify()
+        finally:
+            self._notempty.release()
+
+    def get(self, block=True, timeout=None):
+        if block and timeout is None:
+            self._rlock.acquire()
+            try:
+                res = self._recv()
+                self._sem.release()
+                return res
+            finally:
+                self._rlock.release()
+
+        else:
+            if block:
+                deadline = time.time() + timeout
+            if not self._rlock.acquire(block, timeout):
+                raise Empty
+            try:
+                if not self._poll(block and (deadline-time.time()) or 0.0):
+                    raise Empty
+                res = self._recv()
+                self._sem.release()
+                return res
+            finally:
+                self._rlock.release()
+
+    def qsize(self):
+        # Raises NotImplementedError on Mac OS X because of a broken sem_getvalue()
+        return self._maxsize - self._sem._semlock._get_value()
+
+    def empty(self):
+        return not self._poll()
+
+    def full(self):
+        return self._sem._semlock._is_zero()
+
+    def get_nowait(self):
+        return self.get(False)
+
+    def put_nowait(self, obj):
+        return self.put(obj, False)
+
+    def close(self):
+        self._closed = True
+        self._reader.close()
+        if self._close:
+            self._close()
+
+    def join_thread(self):
+        debug('Queue.join_thread()')
+        assert self._closed
+        if self._jointhread:
+            self._jointhread()
+
+    def cancel_join_thread(self):
+        debug('Queue.cancel_join_thread()')
+        self._joincancelled = True
+        try:
+            self._jointhread.cancel()
+        except AttributeError:
+            pass
+
+    def _start_thread(self):
+        debug('Queue._start_thread()')
+
+        # Start thread which transfers data from buffer to pipe
+        self._buffer.clear()
+        self._thread = threading.Thread(
+            target=Queue._feed,
+            args=(self._buffer, self._notempty, self._send,
+                  self._wlock, self._writer.close),
+            name='QueueFeederThread'
+            )
+        self._thread.setDaemon(True)
+
+        debug('doing self._thread.start()')
+        self._thread.start()
+        debug('... done self._thread.start()')
+
+        # On process exit we will wait for data to be flushed to pipe.
+        #
+        # However, if this process created the queue then all
+        # processes which use the queue will be descendants of this
+        # process.  Therefore waiting for the queue to be flushed
+        # is pointless once all the child processes have been joined.
+        created_by_this_process = (self._opid == os.getpid())
+        if not self._joincancelled and not created_by_this_process:
+            self._jointhread = Finalize(
+                self._thread, Queue._finalize_join,
+                [weakref.ref(self._thread)],
+                exitpriority=-5
+                )
+
+        # Send sentinel to the feeder thread when this queue is garbage collected
+        self._close = Finalize(
+            self, Queue._finalize_close,
+            [self._buffer, self._notempty],
+            exitpriority=10
+            )
+
+    @staticmethod
+    def _finalize_join(twr):
+        debug('joining queue thread')
+        thread = twr()
+        if thread is not None:
+            thread.join()
+            debug('... queue thread joined')
+        else:
+            debug('... queue thread already dead')
+
+    @staticmethod
+    def _finalize_close(buffer, notempty):
+        debug('telling queue thread to quit')
+        notempty.acquire()
+        try:
+            buffer.append(_sentinel)
+            notempty.notify()
+        finally:
+            notempty.release()
+
+    @staticmethod
+    def _feed(buffer, notempty, send, writelock, close):
+        debug('starting thread to feed data to pipe')
+        from .util import is_exiting
+
+        nacquire = notempty.acquire
+        nrelease = notempty.release
+        nwait = notempty.wait
+        bpopleft = buffer.popleft
+        sentinel = _sentinel
+        if sys.platform != 'win32':
+            wacquire = writelock.acquire
+            wrelease = writelock.release
+        else:
+            wacquire = None
+
+        try:
+            while 1:
+                nacquire()
+                try:
+                    if not buffer:
+                        nwait()
+                finally:
+                    nrelease()
+                try:
+                    while 1:
+                        obj = bpopleft()
+                        if obj is sentinel:
+                            debug('feeder thread got sentinel -- exiting')
+                            close()
+                            return
+
+                        if wacquire is None:
+                            send(obj)
+                        else:
+                            wacquire()
+                            try:
+                                send(obj)
+                            finally:
+                                wrelease()
+                except IndexError:
+                    pass
+        except Exception as e:
+            # Since this runs in a daemon thread the resources it uses
+            # may become unusable while the process is cleaning up.
+            # We ignore errors which happen after the process has
+            # started to clean up.
+            try:
+                if is_exiting():
+                    info('error in queue thread: %s', e)
+                else:
+                    import traceback
+                    traceback.print_exc()
+            except Exception:
+                pass
+
+_sentinel = object()
+
+#
+# A queue type which also supports join() and task_done() methods
+#
+# Note that if you do not call task_done() for each finished task then
+# eventually the counter's semaphore may overflow causing Bad Things
+# to happen.
+#
+
+class JoinableQueue(Queue):
+
+    def __init__(self, maxsize=0):
+        Queue.__init__(self, maxsize)
+        self._unfinished_tasks = Semaphore(0)
+        self._cond = Condition()
+
+    def __getstate__(self):
+        return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)
+
+    def __setstate__(self, state):
+        Queue.__setstate__(self, state[:-2])
+        self._cond, self._unfinished_tasks = state[-2:]
+
+    def put(self, item, block=True, timeout=None):
+        Queue.put(self, item, block, timeout)
+        self._unfinished_tasks.release()
+
+    def task_done(self):
+        self._cond.acquire()
+        try:
+            if not self._unfinished_tasks.acquire(False):
+                raise ValueError('task_done() called too many times')
+            if self._unfinished_tasks._semlock._is_zero():
+                self._cond.notify_all()
+        finally:
+            self._cond.release()
+
+    def join(self):
+        self._cond.acquire()
+        try:
+            if not self._unfinished_tasks._semlock._is_zero():
+                self._cond.wait()
+        finally:
+            self._cond.release()
+
+#
+# Simplified Queue type -- really just a locked pipe
+#
+
+class SimpleQueue(object):
+
+    def __init__(self):
+        self._reader, self._writer = Pipe(duplex=False)
+        self._rlock = Lock()
+        if sys.platform == 'win32':
+            self._wlock = None
+        else:
+            self._wlock = Lock()
+        self._make_methods()
+
+    def empty(self):
+        return not self._reader.poll()
+
+    def __getstate__(self):
+        assert_spawning(self)
+        return (self._reader, self._writer, self._rlock, self._wlock)
+
+    def __setstate__(self, state):
+        (self._reader, self._writer, self._rlock, self._wlock) = state
+        self._make_methods()
+
+    def _make_methods(self):
+        recv = self._reader.recv
+        racquire, rrelease = self._rlock.acquire, self._rlock.release
+        def get():
+            racquire()
+            try:
+                return recv()
+            finally:
+                rrelease()
+        self.get = get
+
+        if self._wlock is None:
+            # writes to a message-oriented win32 pipe are atomic
+            self.put = self._writer.send
+        else:
+            send = self._writer.send
+            wacquire, wrelease = self._wlock.acquire, self._wlock.release
+            def put(obj):
+                wacquire()
+                try:
+                    return send(obj)
+                finally:
+                    wrelease()
+            self.put = put

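A minimal sketch of the JoinableQueue pattern that the task_done() warning above is about; the consumer function and the None sentinel are illustrative, not part of the commit:

    from multiprocessing import Process, JoinableQueue

    def consumer(q):
        while True:
            item = q.get()
            try:
                if item is None:       # sentinel: stop consuming
                    return
                print('processed', item)
            finally:
                q.task_done()          # exactly one call per completed get()

    if __name__ == '__main__':
        q = JoinableQueue()
        p = Process(target=consumer, args=(q,))
        p.start()
        for i in range(3):
            q.put(i)
        q.put(None)
        q.join()    # blocks until every put() has a matching task_done()
        p.join()
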
Modified: python/branches/py3k/Lib/multiprocessing/reduction.py
==============================================================================
--- /python/trunk/Lib/multiprocessing/reduction.py	(original)
+++ python/branches/py3k/Lib/multiprocessing/reduction.py	Wed Jun 11 18:44:04 2008
@@ -1,190 +1,190 @@
-#
-# Module to allow connection and socket objects to be transferred
-# between processes
-#
-# multiprocessing/reduction.py
-#
-# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
-#
-
-__all__ = []
-
-import os
-import sys
-import socket
-import threading
-import copy_reg
-
-import _multiprocessing
-from multiprocessing import current_process
-from multiprocessing.forking import Popen, duplicate, close
-from multiprocessing.util import register_after_fork, debug, sub_debug
-from multiprocessing.connection import Client, Listener
-
-
-#
-#
-#
-
-if not(sys.platform == 'win32' or hasattr(_multiprocessing, 'recvfd')):
-    raise ImportError('pickling of connections not supported')
-
-#
-# Platform specific definitions
-#
-
-if sys.platform == 'win32':
-    import _subprocess
-    from ._multiprocessing import win32
-    
-    def send_handle(conn, handle, destination_pid):
-        process_handle = win32.OpenProcess(
-            win32.PROCESS_ALL_ACCESS, False, destination_pid
-            )
-        try:
-            new_handle = duplicate(handle, process_handle)
-            conn.send(new_handle)
-        finally:
-            close(process_handle)
-            
-    def recv_handle(conn):
-        return conn.recv()
-
-else:
-    def send_handle(conn, handle, destination_pid):
-        _multiprocessing.sendfd(conn.fileno(), handle)
-        
-    def recv_handle(conn):
-        return _multiprocessing.recvfd(conn.fileno())
-
-#
-# Support for a per-process server thread which caches pickled handles
-#
-
-_cache = set()
-
-def _reset(obj):
-    global _lock, _listener, _cache
-    for h in _cache:
-        close(h)
-    _cache.clear()
-    _lock = threading.Lock()
-    _listener = None
-
-_reset(None)
-register_after_fork(_reset, _reset)
-
-def _get_listener():
-    global _listener
-
-    if _listener is None:
-        _lock.acquire()
-        try:
-            if _listener is None:
-                debug('starting listener and thread for sending handles')
-                _listener = Listener(authkey=current_process().get_authkey())
-                t = threading.Thread(target=_serve)
-                t.setDaemon(True)
-                t.start()
-        finally:
-            _lock.release()
-
-    return _listener
-
-def _serve():
-    from .util import is_exiting, sub_warning
-    
-    while 1:
-        try:
-            conn = _listener.accept()
-            handle_wanted, destination_pid = conn.recv()
-            _cache.remove(handle_wanted)
-            send_handle(conn, handle_wanted, destination_pid)
-            close(handle_wanted)
-            conn.close()
-        except:
-            if not is_exiting():
-                import traceback
-                sub_warning(
-                    'thread for sharing handles raised exception :\n' +
-                    '-'*79 + '\n' + traceback.format_exc() + '-'*79
-                    )
-    
-#
-# Functions to be used for pickling/unpickling objects with handles
-#
-
-def reduce_handle(handle):
-    if Popen.thread_is_spawning():
-        return (None, Popen.duplicate_for_child(handle), True)
-    dup_handle = duplicate(handle)
-    _cache.add(dup_handle)
-    sub_debug('reducing handle %d', handle)
-    return (_get_listener().address, dup_handle, False)
-
-def rebuild_handle(pickled_data):
-    address, handle, inherited = pickled_data
-    if inherited:
-        return handle
-    sub_debug('rebuilding handle %d', handle)
-    conn = Client(address, authkey=current_process().get_authkey())
-    conn.send((handle, os.getpid()))
-    new_handle = recv_handle(conn)
-    conn.close()
-    return new_handle
-
-#
-# Register `_multiprocessing.Connection` with `copy_reg`
-#
-
-def reduce_connection(conn):
-    rh = reduce_handle(conn.fileno())
-    return rebuild_connection, (rh, conn.readable, conn.writable)
-
-def rebuild_connection(reduced_handle, readable, writable):
-    handle = rebuild_handle(reduced_handle)
-    return _multiprocessing.Connection(
-        handle, readable=readable, writable=writable
-        )
-
-copy_reg.pickle(_multiprocessing.Connection, reduce_connection)
-
-#
-# Register `socket.socket` with `copy_reg`
-#
-
-def fromfd(fd, family, type_, proto=0):
-    s = socket.fromfd(fd, family, type_, proto)
-    if s.__class__ is not socket.socket:
-        s = socket.socket(_sock=s)
-    return s
-
-def reduce_socket(s):
-    reduced_handle = reduce_handle(s.fileno())
-    return rebuild_socket, (reduced_handle, s.family, s.type, s.proto)
-
-def rebuild_socket(reduced_handle, family, type_, proto):
-    fd = rebuild_handle(reduced_handle)
-    _sock = fromfd(fd, family, type_, proto)
-    close(fd)
-    return _sock
-
-copy_reg.pickle(socket.socket, reduce_socket)
-
-#
-# Register `_multiprocessing.PipeConnection` with `copy_reg`
-#
-
-if sys.platform == 'win32':
-    
-    def reduce_pipe_connection(conn):
-        rh = reduce_handle(conn.fileno())
-        return rebuild_pipe_connection, (rh, conn.readable, conn.writable)
-    
-    def rebuild_pipe_connection(reduced_handle, readable, writable):
-        handle = rebuild_handle(reduced_handle)
-        return _multiprocessing.PipeConnection(
-            handle, readable=readable, writable=writable
-            )
-    
-    copy_reg.pickle(_multiprocessing.PipeConnection, reduce_pipe_connection)
+#
+# Module to allow connection and socket objects to be transferred
+# between processes
+#
+# multiprocessing/reduction.py
+#
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
+#
+
+__all__ = []
+
+import os
+import sys
+import socket
+import threading
+import copyreg
+
+import _multiprocessing
+from multiprocessing import current_process
+from multiprocessing.forking import Popen, duplicate, close
+from multiprocessing.util import register_after_fork, debug, sub_debug
+from multiprocessing.connection import Client, Listener
+
+
+#
+#
+#
+
+if not(sys.platform == 'win32' or hasattr(_multiprocessing, 'recvfd')):
+    raise ImportError('pickling of connections not supported')
+
+#
+# Platform specific definitions
+#
+
+if sys.platform == 'win32':
+    import _subprocess
+    from ._multiprocessing import win32
+
+    def send_handle(conn, handle, destination_pid):
+        process_handle = win32.OpenProcess(
+            win32.PROCESS_ALL_ACCESS, False, destination_pid
+            )
+        try:
+            new_handle = duplicate(handle, process_handle)
+            conn.send(new_handle)
+        finally:
+            close(process_handle)
+
+    def recv_handle(conn):
+        return conn.recv()
+
+else:
+    def send_handle(conn, handle, destination_pid):
+        _multiprocessing.sendfd(conn.fileno(), handle)
+
+    def recv_handle(conn):
+        return _multiprocessing.recvfd(conn.fileno())
+
+#
+# Support for a per-process server thread which caches pickled handles
+#
+
+_cache = set()
+
+def _reset(obj):
+    global _lock, _listener, _cache
+    for h in _cache:
+        close(h)
+    _cache.clear()
+    _lock = threading.Lock()
+    _listener = None
+
+_reset(None)
+register_after_fork(_reset, _reset)
+
+def _get_listener():
+    global _listener
+
+    if _listener is None:
+        _lock.acquire()
+        try:
+            if _listener is None:
+                debug('starting listener and thread for sending handles')
+                _listener = Listener(authkey=current_process().get_authkey())
+                t = threading.Thread(target=_serve)
+                t.setDaemon(True)
+                t.start()
+        finally:
+            _lock.release()
+
+    return _listener
+
+def _serve():
+    from .util import is_exiting, sub_warning
+
+    while 1:
+        try:
+            conn = _listener.accept()
+            handle_wanted, destination_pid = conn.recv()
+            _cache.remove(handle_wanted)
+            send_handle(conn, handle_wanted, destination_pid)
+            close(handle_wanted)
+            conn.close()
+        except:
+            if not is_exiting():
+                import traceback
+                sub_warning(
+                    'thread for sharing handles raised exception:\n' +
+                    '-'*79 + '\n' + traceback.format_exc() + '-'*79
+                    )
+
+#
+# Functions to be used for pickling/unpickling objects with handles
+#
+
+def reduce_handle(handle):
+    if Popen.thread_is_spawning():
+        return (None, Popen.duplicate_for_child(handle), True)
+    dup_handle = duplicate(handle)
+    _cache.add(dup_handle)
+    sub_debug('reducing handle %d', handle)
+    return (_get_listener().address, dup_handle, False)
+
+def rebuild_handle(pickled_data):
+    address, handle, inherited = pickled_data
+    if inherited:
+        return handle
+    sub_debug('rebuilding handle %d', handle)
+    conn = Client(address, authkey=current_process().get_authkey())
+    conn.send((handle, os.getpid()))
+    new_handle = recv_handle(conn)
+    conn.close()
+    return new_handle
+
+#
+# Register `_multiprocessing.Connection` with `copy_reg`
+#
+
+def reduce_connection(conn):
+    rh = reduce_handle(conn.fileno())
+    return rebuild_connection, (rh, conn.readable, conn.writable)
+
+def rebuild_connection(reduced_handle, readable, writable):
+    handle = rebuild_handle(reduced_handle)
+    return _multiprocessing.Connection(
+        handle, readable=readable, writable=writable
+        )
+
+copyreg.pickle(_multiprocessing.Connection, reduce_connection)
+
+#
+# Register `socket.socket` with `copy_reg`
+#
+
+def fromfd(fd, family, type_, proto=0):
+    s = socket.fromfd(fd, family, type_, proto)
+    if s.__class__ is not socket.socket:
+        s = socket.socket(_sock=s)
+    return s
+
+def reduce_socket(s):
+    reduced_handle = reduce_handle(s.fileno())
+    return rebuild_socket, (reduced_handle, s.family, s.type, s.proto)
+
+def rebuild_socket(reduced_handle, family, type_, proto):
+    fd = rebuild_handle(reduced_handle)
+    _sock = fromfd(fd, family, type_, proto)
+    close(fd)
+    return _sock
+
+copyreg.pickle(socket.socket, reduce_socket)
+
+#
+# Register `_multiprocessing.PipeConnection` with `copy_reg`
+#
+
+if sys.platform == 'win32':
+
+    def reduce_pipe_connection(conn):
+        rh = reduce_handle(conn.fileno())
+        return rebuild_pipe_connection, (rh, conn.readable, conn.writable)
+
+    def rebuild_pipe_connection(reduced_handle, readable, writable):
+        handle = rebuild_handle(reduced_handle)
+        return _multiprocessing.PipeConnection(
+            handle, readable=readable, writable=writable
+            )
+
+    copyreg.pickle(_multiprocessing.PipeConnection, reduce_pipe_connection)

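A Unix-oriented sketch of the fd-passing helpers defined above (it assumes a platform where _multiprocessing.recvfd exists, which the ImportError guard checks for); the pipe contents are illustrative:

    import os
    from multiprocessing import Process, Pipe
    from multiprocessing.reduction import send_handle, recv_handle

    def child(conn):
        fd = recv_handle(conn)                 # descriptor duplicated into this process
        print('child read:', os.read(fd, 5))

    if __name__ == '__main__':
        lhs, rhs = Pipe()
        rfd, wfd = os.pipe()
        p = Process(target=child, args=(rhs,))
        p.start()
        os.write(wfd, b'hello')
        send_handle(lhs, rfd, p.pid)           # on win32 the pid selects the target process
        p.join()

The copyreg registrations serve the same purpose one level up: they let Connection and socket objects themselves be pickled, with reduce_handle()/rebuild_handle() moving the underlying handle through the per-process listener thread.
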
Modified: python/branches/py3k/Lib/multiprocessing/sharedctypes.py
==============================================================================
--- /python/trunk/Lib/multiprocessing/sharedctypes.py	(original)
+++ python/branches/py3k/Lib/multiprocessing/sharedctypes.py	Wed Jun 11 18:44:04 2008
@@ -1,234 +1,234 @@
-#
-# Module which supports allocation of ctypes objects from shared memory
-#
-# multiprocessing/sharedctypes.py
-#
-# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
-#
-
-import sys
-import ctypes
-import weakref
-import copy_reg
-
-from multiprocessing import heap, RLock
-from multiprocessing.forking import assert_spawning
-
-__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized']
-
-#
-#
-#
-
-typecode_to_type = {
-    'c': ctypes.c_char,  'u': ctypes.c_wchar,
-    'b': ctypes.c_byte,  'B': ctypes.c_ubyte,
-    'h': ctypes.c_short, 'H': ctypes.c_ushort,
-    'i': ctypes.c_int,   'I': ctypes.c_uint,
-    'l': ctypes.c_long,  'L': ctypes.c_ulong,
-    'f': ctypes.c_float, 'd': ctypes.c_double
-    }
-
-#
-#
-#
-
-def _new_value(type_):
-    size = ctypes.sizeof(type_)
-    wrapper = heap.BufferWrapper(size)
-    return rebuild_ctype(type_, wrapper, None)
-
-def RawValue(typecode_or_type, *args):
-    '''
-    Returns a ctypes object allocated from shared memory
-    '''
-    type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
-    obj = _new_value(type_)
-    ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
-    obj.__init__(*args)
-    return obj
-
-def RawArray(typecode_or_type, size_or_initializer):
-    '''
-    Returns a ctypes array allocated from shared memory
-    '''
-    type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
-    if isinstance(size_or_initializer, int):
-        type_ = type_ * size_or_initializer
-        return _new_value(type_)
-    else:
-        type_ = type_ * len(size_or_initializer)
-        result = _new_value(type_)
-        result.__init__(*size_or_initializer)
-        return result
-
-def Value(typecode_or_type, *args, **kwds):
-    '''
-    Return a synchronization wrapper for a Value
-    '''
-    lock = kwds.pop('lock', None)
-    if kwds:
-        raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys())
-    obj = RawValue(typecode_or_type, *args)
-    if lock is None:
-        lock = RLock()
-    assert hasattr(lock, 'acquire')
-    return synchronized(obj, lock)
-
-def Array(typecode_or_type, size_or_initializer, **kwds):
-    '''
-    Return a synchronization wrapper for a RawArray
-    '''
-    lock = kwds.pop('lock', None)
-    if kwds:
-        raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys())
-    obj = RawArray(typecode_or_type, size_or_initializer)
-    if lock is None:
-        lock = RLock()
-    assert hasattr(lock, 'acquire')
-    return synchronized(obj, lock)
-
-def copy(obj):
-    new_obj = _new_value(type(obj))
-    ctypes.pointer(new_obj)[0] = obj
-    return new_obj
-    
-def synchronized(obj, lock=None):
-    assert not isinstance(obj, SynchronizedBase), 'object already synchronized'
-    
-    if isinstance(obj, ctypes._SimpleCData):
-        return Synchronized(obj, lock)
-    elif isinstance(obj, ctypes.Array):
-        if obj._type_ is ctypes.c_char:
-            return SynchronizedString(obj, lock)
-        return SynchronizedArray(obj, lock)
-    else:
-        cls = type(obj)
-        try:
-            scls = class_cache[cls]
-        except KeyError:
-            names = [field[0] for field in cls._fields_]
-            d = dict((name, make_property(name)) for name in names)
-            classname = 'Synchronized' + cls.__name__
-            scls = class_cache[cls] = type(classname, (SynchronizedBase,), d)
-        return scls(obj, lock)
-
-#
-# Functions for pickling/unpickling
-#
-
-def reduce_ctype(obj):
-    assert_spawning(obj)
-    if isinstance(obj, ctypes.Array):
-        return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_)
-    else:
-        return rebuild_ctype, (type(obj), obj._wrapper, None)
-    
-def rebuild_ctype(type_, wrapper, length):
-    if length is not None:
-        type_ = type_ * length
-    if sys.platform == 'win32' and type_ not in copy_reg.dispatch_table:
-        copy_reg.pickle(type_, reduce_ctype)
-    obj = type_.from_address(wrapper.get_address())
-    obj._wrapper = wrapper
-    return obj
-
-#
-# Function to create properties
-#
-
-def make_property(name):
-    try:
-        return prop_cache[name]
-    except KeyError:
-        d = {}
-        exec template % ((name,)*7) in d
-        prop_cache[name] = d[name]
-        return d[name]
-
-template = '''
-def get%s(self):
-    self.acquire()
-    try:
-        return self._obj.%s
-    finally:
-        self.release()
-def set%s(self, value):
-    self.acquire()
-    try:
-        self._obj.%s = value
-    finally:
-        self.release()
-%s = property(get%s, set%s)
-'''
-
-prop_cache = {}
-class_cache = weakref.WeakKeyDictionary()
-
-#
-# Synchronized wrappers
-#
-
-class SynchronizedBase(object):
-    
-    def __init__(self, obj, lock=None):
-        self._obj = obj
-        self._lock = lock or RLock()
-        self.acquire = self._lock.acquire
-        self.release = self._lock.release
-
-    def __reduce__(self):
-        assert_spawning(self)
-        return synchronized, (self._obj, self._lock)
-    
-    def get_obj(self):
-        return self._obj
-    
-    def get_lock(self):
-        return self._lock
-    
-    def __repr__(self):
-        return '<%s wrapper for %s>' % (type(self).__name__, self._obj)
-    
-    
-class Synchronized(SynchronizedBase):
-    value = make_property('value')
-    
-    
-class SynchronizedArray(SynchronizedBase):
-    
-    def __len__(self):
-        return len(self._obj)
-    
-    def __getitem__(self, i):
-        self.acquire()
-        try:
-            return self._obj[i]
-        finally:
-            self.release()
-            
-    def __setitem__(self, i, value):
-        self.acquire()
-        try:
-            self._obj[i] = value
-        finally:
-            self.release()
-            
-    def __getslice__(self, start, stop):
-        self.acquire()
-        try:
-            return self._obj[start:stop]
-        finally:
-            self.release()
-            
-    def __setslice__(self, start, stop, values):
-        self.acquire()
-        try:
-            self._obj[start:stop] = values
-        finally:
-            self.release()
-            
-            
-class SynchronizedString(SynchronizedArray):
-    value = make_property('value')
-    raw = make_property('raw')
+#
+# Module which supports allocation of ctypes objects from shared memory
+#
+# multiprocessing/sharedctypes.py
+#
+# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
+#
+
+import sys
+import ctypes
+import weakref
+import copyreg
+
+from multiprocessing import heap, RLock
+from multiprocessing.forking import assert_spawning
+
+__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized']
+
+#
+#
+#
+
+typecode_to_type = {
+    'c': ctypes.c_char,  'u': ctypes.c_wchar,
+    'b': ctypes.c_byte,  'B': ctypes.c_ubyte,
+    'h': ctypes.c_short, 'H': ctypes.c_ushort,
+    'i': ctypes.c_int,   'I': ctypes.c_uint,
+    'l': ctypes.c_long,  'L': ctypes.c_ulong,
+    'f': ctypes.c_float, 'd': ctypes.c_double
+    }
+
+#
+#
+#
+
+def _new_value(type_):
+    size = ctypes.sizeof(type_)
+    wrapper = heap.BufferWrapper(size)
+    return rebuild_ctype(type_, wrapper, None)
+
+def RawValue(typecode_or_type, *args):
+    '''
+    Returns a ctypes object allocated from shared memory
+    '''
+    type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
+    obj = _new_value(type_)
+    ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
+    obj.__init__(*args)
+    return obj
+
+def RawArray(typecode_or_type, size_or_initializer):
+    '''
+    Returns a ctypes array allocated from shared memory
+    '''
+    type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
+    if isinstance(size_or_initializer, int):
+        type_ = type_ * size_or_initializer
+        return _new_value(type_)
+    else:
+        type_ = type_ * len(size_or_initializer)
+        result = _new_value(type_)
+        result.__init__(*size_or_initializer)
+        return result
+
+def Value(typecode_or_type, *args, **kwds):
+    '''
+    Return a synchronization wrapper for a Value
+    '''
+    lock = kwds.pop('lock', None)
+    if kwds:
+        raise ValueError('unrecognized keyword argument(s): %s' % list(kwds.keys()))
+    obj = RawValue(typecode_or_type, *args)
+    if lock is None:
+        lock = RLock()
+    assert hasattr(lock, 'acquire')
+    return synchronized(obj, lock)
+
+def Array(typecode_or_type, size_or_initializer, **kwds):
+    '''
+    Return a synchronization wrapper for a RawArray
+    '''
+    lock = kwds.pop('lock', None)
+    if kwds:
+        raise ValueError('unrecognized keyword argument(s): %s' % list(kwds.keys()))
+    obj = RawArray(typecode_or_type, size_or_initializer)
+    if lock is None:
+        lock = RLock()
+    assert hasattr(lock, 'acquire')
+    return synchronized(obj, lock)
+
+def copy(obj):
+    new_obj = _new_value(type(obj))
+    ctypes.pointer(new_obj)[0] = obj
+    return new_obj
+
+def synchronized(obj, lock=None):
+    assert not isinstance(obj, SynchronizedBase), 'object already synchronized'
+
+    if isinstance(obj, ctypes._SimpleCData):
+        return Synchronized(obj, lock)
+    elif isinstance(obj, ctypes.Array):
+        if obj._type_ is ctypes.c_char:
+            return SynchronizedString(obj, lock)
+        return SynchronizedArray(obj, lock)
+    else:
+        cls = type(obj)
+        try:
+            scls = class_cache[cls]
+        except KeyError:
+            names = [field[0] for field in cls._fields_]
+            d = dict((name, make_property(name)) for name in names)
+            classname = 'Synchronized' + cls.__name__
+            scls = class_cache[cls] = type(classname, (SynchronizedBase,), d)
+        return scls(obj, lock)
+
+#
+# Functions for pickling/unpickling
+#
+
+def reduce_ctype(obj):
+    assert_spawning(obj)
+    if isinstance(obj, ctypes.Array):
+        return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_)
+    else:
+        return rebuild_ctype, (type(obj), obj._wrapper, None)
+
+def rebuild_ctype(type_, wrapper, length):
+    if length is not None:
+        type_ = type_ * length
+    if sys.platform == 'win32' and type_ not in copyreg.dispatch_table:
+        copyreg.pickle(type_, reduce_ctype)
+    obj = type_.from_address(wrapper.get_address())
+    obj._wrapper = wrapper
+    return obj
+
+#
+# Function to create properties
+#
+
+def make_property(name):
+    try:
+        return prop_cache[name]
+    except KeyError:
+        d = {}
+        exec(template % ((name,)*7), d)
+        prop_cache[name] = d[name]
+        return d[name]
+
+template = '''
+def get%s(self):
+    self.acquire()
+    try:
+        return self._obj.%s
+    finally:
+        self.release()
+def set%s(self, value):
+    self.acquire()
+    try:
+        self._obj.%s = value
+    finally:
+        self.release()
+%s = property(get%s, set%s)
+'''
+
+prop_cache = {}
+class_cache = weakref.WeakKeyDictionary()
+
+#
+# Synchronized wrappers
+#
+
+class SynchronizedBase(object):
+
+    def __init__(self, obj, lock=None):
+        self._obj = obj
+        self._lock = lock or RLock()
+        self.acquire = self._lock.acquire
+        self.release = self._lock.release
+
+    def __reduce__(self):
+        assert_spawning(self)
+        return synchronized, (self._obj, self._lock)
+
+    def get_obj(self):
+        return self._obj
+
+    def get_lock(self):
+        return self._lock
+
+    def __repr__(self):
+        return '<%s wrapper for %s>' % (type(self).__name__, self._obj)
+
+
+class Synchronized(SynchronizedBase):
+    value = make_property('value')
+
+
+class SynchronizedArray(SynchronizedBase):
+
+    def __len__(self):
+        return len(self._obj)
+
+    def __getitem__(self, i):
+        self.acquire()
+        try:
+            return self._obj[i]
+        finally:
+            self.release()
+
+    def __setitem__(self, i, value):
+        self.acquire()
+        try:
+            self._obj[i] = value
+        finally:
+            self.release()
+
+    def __getslice__(self, start, stop):
+        self.acquire()
+        try:
+            return self._obj[start:stop]
+        finally:
+            self.release()
+
+    def __setslice__(self, start, stop, values):
+        self.acquire()
+        try:
+            self._obj[start:stop] = values
+        finally:
+            self.release()
+
+
+class SynchronizedString(SynchronizedArray):
+    value = make_property('value')
+    raw = make_property('raw')

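A minimal sketch of the synchronized wrappers defined above; the doubling loop is illustrative. Every element access on a synchronized array acquires and releases the shared lock individually, and the __getslice__/__setslice__ methods are Python 2 leftovers -- on py3k slicing goes through __getitem__/__setitem__ with a slice object, which ctypes arrays accept:

    from multiprocessing import Process
    from multiprocessing.sharedctypes import Value, Array

    def work(counter, data):
        counter.acquire()              # Synchronized re-exports the wrapped lock's methods
        try:
            counter.value += 1
        finally:
            counter.release()
        for i in range(len(data)):
            data[i] *= 2               # each item access takes the lock separately

    if __name__ == '__main__':
        counter = Value('i', 0)        # synchronized c_int; RLock created implicitly
        data = Array('d', [0.25, 0.5]) # synchronized array of c_double
        p = Process(target=work, args=(counter, data))
        p.start()
        p.join()
        print(counter.value, data[:])  # 1 [0.5, 1.0]
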
Modified: python/branches/py3k/Lib/multiprocessing/synchronize.py
==============================================================================
--- /python/trunk/Lib/multiprocessing/synchronize.py	(original)
+++ python/branches/py3k/Lib/multiprocessing/synchronize.py	Wed Jun 11 18:44:04 2008
@@ -1,294 +1,294 @@
-#
-# Module implementing synchronization primitives
-#
-# multiprocessing/synchronize.py
-#
-# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
-#
-
-__all__ = [
-    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event'
-    ]
-
-import threading
-import os
-import sys
-
-from time import time as _time, sleep as _sleep
-
-import _multiprocessing
-from multiprocessing.process import current_process
-from multiprocessing.util import Finalize, register_after_fork, debug
-from multiprocessing.forking import assert_spawning, Popen
-
-#
-# Constants
-#
-
-RECURSIVE_MUTEX, SEMAPHORE = range(2)
-SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX
-
-#
-# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`
-#
-
-class SemLock(object):
-
-    def __init__(self, kind, value, maxvalue):
-        sl = self._semlock = _multiprocessing.SemLock(kind, value, maxvalue)
-        debug('created semlock with handle %s' % sl.handle)
-        self._make_methods()
-        
-        if sys.platform != 'win32':
-            def _after_fork(obj):
-                obj._semlock._after_fork()
-            register_after_fork(self, _after_fork)
-
-    def _make_methods(self):
-        self.acquire = self._semlock.acquire
-        self.release = self._semlock.release
-        self.__enter__ = self._semlock.__enter__
-        self.__exit__ = self._semlock.__exit__
-
-    def __getstate__(self):
-        assert_spawning(self)
-        sl = self._semlock
-        return (Popen.duplicate_for_child(sl.handle), sl.kind, sl.maxvalue)
-
-    def __setstate__(self, state):
-        self._semlock = _multiprocessing.SemLock._rebuild(*state)
-        debug('recreated blocker with handle %r' % state[0])
-        self._make_methods()
-
-#
-# Semaphore
-#
-
-class Semaphore(SemLock):
-
-    def __init__(self, value=1):
-        SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX)
-
-    def get_value(self):
-        return self._semlock._get_value()
-
-    def __repr__(self):
-        try:
-            value = self._semlock._get_value()
-        except Exception:
-            value = 'unknown'
-        return '<Semaphore(value=%s)>' % value
-
-#
-# Bounded semaphore
-#
-
-class BoundedSemaphore(Semaphore):
-
-    def __init__(self, value=1):
-        SemLock.__init__(self, SEMAPHORE, value, value)
-
-    def __repr__(self):
-        try:
-            value = self._semlock._get_value()
-        except Exception:
-            value = 'unknown'
-        return '<BoundedSemaphore(value=%s, maxvalue=%s)>' % \
-               (value, self._semlock.maxvalue)
-
-#
-# Non-recursive lock
-#
-
-class Lock(SemLock):
-
-    def __init__(self):
-        SemLock.__init__(self, SEMAPHORE, 1, 1)
-
-    def __repr__(self):
-        try:
-            if self._semlock._is_mine():
-                name = current_process().get_name()
-                if threading.currentThread().getName() != 'MainThread':
-                    name += '|' + threading.currentThread().getName()
-            elif self._semlock._get_value() == 1:
-                name = 'None'
-            elif self._semlock._count() > 0:
-                name = 'SomeOtherThread'
-            else:
-                name = 'SomeOtherProcess'
-        except Exception:
-            name = 'unknown'
-        return '<Lock(owner=%s)>' % name
-
-#
-# Recursive lock
-#
-
-class RLock(SemLock):
-
-    def __init__(self):
-        SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1)
-        
-    def __repr__(self):
-        try:
-            if self._semlock._is_mine():
-                name = current_process().get_name()
-                if threading.currentThread().getName() != 'MainThread':
-                    name += '|' + threading.currentThread().getName()
-                count = self._semlock._count()
-            elif self._semlock._get_value() == 1:
-                name, count = 'None', 0
-            elif self._semlock._count() > 0:
-                name, count = 'SomeOtherThread', 'nonzero'
-            else:
-                name, count = 'SomeOtherProcess', 'nonzero'
-        except Exception:
-            name, count = 'unknown', 'unknown'
-        return '<RLock(%s, %s)>' % (name, count)
-
-#
-# Condition variable
-#
-
-class Condition(object):
-
-    def __init__(self, lock=None):
-        self._lock = lock or RLock()
-        self._sleeping_count = Semaphore(0)
-        self._woken_count = Semaphore(0)
-        self._wait_semaphore = Semaphore(0)
-        self._make_methods()
-
-    def __getstate__(self):
-        assert_spawning(self)
-        return (self._lock, self._sleeping_count,
-                self._woken_count, self._wait_semaphore)
-
-    def __setstate__(self, state):
-        (self._lock, self._sleeping_count,
-         self._woken_count, self._wait_semaphore) = state
-        self._make_methods()
-
-    def _make_methods(self):
-        self.acquire = self._lock.acquire
-        self.release = self._lock.release
-        self.__enter__ = self._lock.__enter__
-        self.__exit__ = self._lock.__exit__
-
-    def __repr__(self):
-        try:
-            num_waiters = (self._sleeping_count._semlock._get_value() -
-                           self._woken_count._semlock._get_value())
-        except Exception:
-            num_waiters = 'unkown'
-        return '<Condition(%s, %s)>' % (self._lock, num_waiters)
-
-    def wait(self, timeout=None):
-        assert self._lock._semlock._is_mine(), \
-               'must acquire() condition before using wait()'
-
-        # indicate that this thread is going to sleep
-        self._sleeping_count.release()
-
-        # release lock
-        count = self._lock._semlock._count()
-        for i in xrange(count):
-            self._lock.release()
-
-        try:
-            # wait for notification or timeout
-            self._wait_semaphore.acquire(True, timeout)
-        finally:
-            # indicate that this thread has woken
-            self._woken_count.release()
-
-            # reacquire lock
-            for i in xrange(count):
-                self._lock.acquire()
-
-    def notify(self):
-        assert self._lock._semlock._is_mine(), 'lock is not owned'
-        assert not self._wait_semaphore.acquire(False)
-        
-        # to take account of timeouts since last notify() we subtract
-        # woken_count from sleeping_count and rezero woken_count
-        while self._woken_count.acquire(False):
-            res = self._sleeping_count.acquire(False)
-            assert res
-            
-        if self._sleeping_count.acquire(False): # try grabbing a sleeper
-            self._wait_semaphore.release()      # wake up one sleeper
-            self._woken_count.acquire()         # wait for the sleeper to wake
-            
-            # rezero _wait_semaphore in case a timeout just happened
-            self._wait_semaphore.acquire(False)
-
-    def notify_all(self):
-        assert self._lock._semlock._is_mine(), 'lock is not owned'
-        assert not self._wait_semaphore.acquire(False)
-
-        # to take account of timeouts since last notify*() we subtract
-        # woken_count from sleeping_count and rezero woken_count
-        while self._woken_count.acquire(False):
-            res = self._sleeping_count.acquire(False)
-            assert res
-            
-        sleepers = 0
-        while self._sleeping_count.acquire(False):
-            self._wait_semaphore.release()        # wake up one sleeper
-            sleepers += 1
-
-        if sleepers:
-            for i in xrange(sleepers):
-                self._woken_count.acquire()       # wait for a sleeper to wake
-
-            # rezero wait_semaphore in case some timeouts just happened
-            while self._wait_semaphore.acquire(False):
-                pass
-
-#
-# Event
-#
-
-class Event(object):
-
-    def __init__(self):
-        self._cond = Condition(Lock())
-        self._flag = Semaphore(0)
-
-    def is_set(self):
-        self._cond.acquire()
-        try:
-            if self._flag.acquire(False):
-                self._flag.release()
-                return True
-            return False
-        finally:
-            self._cond.release()
-    
-    def set(self):
-        self._cond.acquire()
-        try:
-            self._flag.acquire(False)
-            self._flag.release()
-            self._cond.notify_all()
-        finally:
-            self._cond.release()
-
-    def clear(self):
-        self._cond.acquire()
-        try:
-            self._flag.acquire(False)
-        finally:
-            self._cond.release()
-
-    def wait(self, timeout=None):
-        self._cond.acquire()
-        try:
-            if self._flag.acquire(False):
-                self._flag.release()
-            else:
-                self._cond.wait(timeout)
-        finally:
-            self._cond.release()
+#
+# Module implementing synchronization primitives
+#
+# multiprocessing/synchronize.py
+#
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
+#
+
+__all__ = [
+    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event'
+    ]
+
+import threading
+import os
+import sys
+
+from time import time as _time, sleep as _sleep
+
+import _multiprocessing
+from multiprocessing.process import current_process
+from multiprocessing.util import Finalize, register_after_fork, debug
+from multiprocessing.forking import assert_spawning, Popen
+
+#
+# Constants
+#
+
+RECURSIVE_MUTEX, SEMAPHORE = range(2)
+SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX
+
+#
+# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`
+#
+
+class SemLock(object):
+
+    def __init__(self, kind, value, maxvalue):
+        sl = self._semlock = _multiprocessing.SemLock(kind, value, maxvalue)
+        debug('created semlock with handle %s' % sl.handle)
+        self._make_methods()
+
+        if sys.platform != 'win32':
+            def _after_fork(obj):
+                obj._semlock._after_fork()
+            register_after_fork(self, _after_fork)
+
+    def _make_methods(self):
+        self.acquire = self._semlock.acquire
+        self.release = self._semlock.release
+        self.__enter__ = self._semlock.__enter__
+        self.__exit__ = self._semlock.__exit__
+
+    def __getstate__(self):
+        assert_spawning(self)
+        sl = self._semlock
+        return (Popen.duplicate_for_child(sl.handle), sl.kind, sl.maxvalue)
+
+    def __setstate__(self, state):
+        self._semlock = _multiprocessing.SemLock._rebuild(*state)
+        debug('recreated blocker with handle %r' % state[0])
+        self._make_methods()
+
+#
+# Semaphore
+#
+
+class Semaphore(SemLock):
+
+    def __init__(self, value=1):
+        SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX)
+
+    def get_value(self):
+        return self._semlock._get_value()
+
+    def __repr__(self):
+        try:
+            value = self._semlock._get_value()
+        except Exception:
+            value = 'unknown'
+        return '<Semaphore(value=%s)>' % value
+
+#
+# Bounded semaphore
+#
+
+class BoundedSemaphore(Semaphore):
+
+    def __init__(self, value=1):
+        SemLock.__init__(self, SEMAPHORE, value, value)
+
+    def __repr__(self):
+        try:
+            value = self._semlock._get_value()
+        except Exception:
+            value = 'unknown'
+        return '<BoundedSemaphore(value=%s, maxvalue=%s)>' % \
+               (value, self._semlock.maxvalue)
+
+#
+# Non-recursive lock
+#
+
+class Lock(SemLock):
+
+    def __init__(self):
+        SemLock.__init__(self, SEMAPHORE, 1, 1)
+
+    def __repr__(self):
+        try:
+            if self._semlock._is_mine():
+                name = current_process().get_name()
+                if threading.currentThread().getName() != 'MainThread':
+                    name += '|' + threading.currentThread().getName()
+            elif self._semlock._get_value() == 1:
+                name = 'None'
+            elif self._semlock._count() > 0:
+                name = 'SomeOtherThread'
+            else:
+                name = 'SomeOtherProcess'
+        except Exception:
+            name = 'unknown'
+        return '<Lock(owner=%s)>' % name
+
+#
+# Recursive lock
+#
+
+class RLock(SemLock):
+
+    def __init__(self):
+        SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1)
+
+    def __repr__(self):
+        try:
+            if self._semlock._is_mine():
+                name = current_process().get_name()
+                if threading.currentThread().getName() != 'MainThread':
+                    name += '|' + threading.currentThread().getName()
+                count = self._semlock._count()
+            elif self._semlock._get_value() == 1:
+                name, count = 'None', 0
+            elif self._semlock._count() > 0:
+                name, count = 'SomeOtherThread', 'nonzero'
+            else:
+                name, count = 'SomeOtherProcess', 'nonzero'
+        except Exception:
+            name, count = 'unknown', 'unknown'
+        return '<RLock(%s, %s)>' % (name, count)
+
+#
+# Condition variable
+#
+
+class Condition(object):
+
+    def __init__(self, lock=None):
+        self._lock = lock or RLock()
+        self._sleeping_count = Semaphore(0)
+        self._woken_count = Semaphore(0)
+        self._wait_semaphore = Semaphore(0)
+        self._make_methods()
+
+    def __getstate__(self):
+        assert_spawning(self)
+        return (self._lock, self._sleeping_count,
+                self._woken_count, self._wait_semaphore)
+
+    def __setstate__(self, state):
+        (self._lock, self._sleeping_count,
+         self._woken_count, self._wait_semaphore) = state
+        self._make_methods()
+
+    def _make_methods(self):
+        self.acquire = self._lock.acquire
+        self.release = self._lock.release
+        self.__enter__ = self._lock.__enter__
+        self.__exit__ = self._lock.__exit__
+
+    def __repr__(self):
+        try:
+            num_waiters = (self._sleeping_count._semlock._get_value() -
+                           self._woken_count._semlock._get_value())
+        except Exception:
+            num_waiters = 'unknown'
+        return '<Condition(%s, %s)>' % (self._lock, num_waiters)
+
+    def wait(self, timeout=None):
+        assert self._lock._semlock._is_mine(), \
+               'must acquire() condition before using wait()'
+
+        # indicate that this thread is going to sleep
+        self._sleeping_count.release()
+
+        # release lock
+        count = self._lock._semlock._count()
+        for i in range(count):
+            self._lock.release()
+
+        try:
+            # wait for notification or timeout
+            self._wait_semaphore.acquire(True, timeout)
+        finally:
+            # indicate that this thread has woken
+            self._woken_count.release()
+
+            # reacquire lock
+            for i in range(count):
+                self._lock.acquire()
+
+    def notify(self):
+        assert self._lock._semlock._is_mine(), 'lock is not owned'
+        assert not self._wait_semaphore.acquire(False)
+
+        # to take account of timeouts since last notify() we subtract
+        # woken_count from sleeping_count and rezero woken_count
+        while self._woken_count.acquire(False):
+            res = self._sleeping_count.acquire(False)
+            assert res
+
+        if self._sleeping_count.acquire(False): # try grabbing a sleeper
+            self._wait_semaphore.release()      # wake up one sleeper
+            self._woken_count.acquire()         # wait for the sleeper to wake
+
+            # rezero _wait_semaphore in case a timeout just happened
+            self._wait_semaphore.acquire(False)
+
+    def notify_all(self):
+        assert self._lock._semlock._is_mine(), 'lock is not owned'
+        assert not self._wait_semaphore.acquire(False)
+
+        # to take account of timeouts since last notify*() we subtract
+        # woken_count from sleeping_count and rezero woken_count
+        while self._woken_count.acquire(False):
+            res = self._sleeping_count.acquire(False)
+            assert res
+
+        sleepers = 0
+        while self._sleeping_count.acquire(False):
+            self._wait_semaphore.release()        # wake up one sleeper
+            sleepers += 1
+
+        if sleepers:
+            for i in range(sleepers):
+                self._woken_count.acquire()       # wait for a sleeper to wake
+
+            # rezero wait_semaphore in case some timeouts just happened
+            while self._wait_semaphore.acquire(False):
+                pass
+
+#
+# Event
+#
+
+class Event(object):
+
+    def __init__(self):
+        self._cond = Condition(Lock())
+        self._flag = Semaphore(0)
+
+    def is_set(self):
+        self._cond.acquire()
+        try:
+            if self._flag.acquire(False):
+                self._flag.release()
+                return True
+            return False
+        finally:
+            self._cond.release()
+
+    def set(self):
+        self._cond.acquire()
+        try:
+            self._flag.acquire(False)
+            self._flag.release()
+            self._cond.notify_all()
+        finally:
+            self._cond.release()
+
+    def clear(self):
+        self._cond.acquire()
+        try:
+            self._flag.acquire(False)
+        finally:
+            self._cond.release()
+
+    def wait(self, timeout=None):
+        self._cond.acquire()
+        try:
+            if self._flag.acquire(False):
+                self._flag.release()
+            else:
+                self._cond.wait(timeout)
+        finally:
+            self._cond.release()

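The Condition above is built from an ordinary lock plus three counting
semaphores: wait() registers itself on _sleeping_count, releases the
lock as many times as it is held, blocks on _wait_semaphore and then
reacquires, while notify() releases _wait_semaphore once if it can
claim a registered sleeper. A minimal sketch of the usage pattern this
supports, not part of this commit (waiter and ready are illustrative
names):

    from multiprocessing import Condition, Process, Value

    def waiter(cond, ready):
        cond.acquire()
        try:
            # Re-check the predicate in a loop: wait() can also return
            # because a timeout elapsed, not only after a notify().
            while not ready.value:
                cond.wait()
            print('predicate became true')
        finally:
            cond.release()

    if __name__ == '__main__':
        cond = Condition()
        ready = Value('i', 0)          # shared flag guarded by cond
        p = Process(target=waiter, args=(cond, ready))
        p.start()
        cond.acquire()
        try:
            ready.value = 1
            cond.notify()              # wakes at most one sleeper
        finally:
            cond.release()
        p.join()

Because the flag is both set and tested while holding the same lock,
it does not matter whether the child reaches wait() before or after
the parent calls notify().
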
Modified: python/branches/py3k/Lib/multiprocessing/util.py
==============================================================================
--- /python/trunk/Lib/multiprocessing/util.py	(original)
+++ python/branches/py3k/Lib/multiprocessing/util.py	Wed Jun 11 18:44:04 2008
@@ -1,336 +1,336 @@
-#
-# Module providing various facilities to other parts of the package
-#
-# multiprocessing/util.py
-#
-# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
-#
-
-import itertools
-import weakref
-import copy_reg
-import atexit
-import threading        # we want threading to install it's
-                        # cleanup function before multiprocessing does
-
-from multiprocessing.process import current_process, active_children
-
-__all__ = [
-    'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
-    'log_to_stderr', 'get_temp_dir', 'register_after_fork',
-    'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal'
-    ]
-
-#
-# Logging
-#
-
-NOTSET = 0
-SUBDEBUG = 5
-DEBUG = 10
-INFO = 20
-SUBWARNING = 25
-
-LOGGER_NAME = 'multiprocessing'
-DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
-
-_logger = None
-_log_to_stderr = False
-
-def sub_debug(msg, *args):
-    if _logger:
-        _logger.log(SUBDEBUG, msg, *args)
-
-def debug(msg, *args):
-    if _logger:
-        _logger.log(DEBUG, msg, *args)
-
-def info(msg, *args):
-    if _logger:
-        _logger.log(INFO, msg, *args)
-
-def sub_warning(msg, *args):
-    if _logger:
-        _logger.log(SUBWARNING, msg, *args)
-
-def get_logger():
-    '''
-    Returns logger used by multiprocessing
-    '''
-    global _logger
-
-    if not _logger:
-        import logging, atexit
-
-        # XXX multiprocessing should cleanup before logging
-        if hasattr(atexit, 'unregister'):
-            atexit.unregister(_exit_function)
-            atexit.register(_exit_function)
-        else:
-            atexit._exithandlers.remove((_exit_function, (), {}))
-            atexit._exithandlers.append((_exit_function, (), {}))
-
-        _check_logger_class()
-        _logger = logging.getLogger(LOGGER_NAME)
-
-    return _logger
-
-def _check_logger_class():
-    '''
-    Make sure process name is recorded when loggers are used
-    '''
-    # XXX This function is unnecessary once logging is patched
-    import logging
-    if hasattr(logging, 'multiprocessing'):
-        return
-    
-    logging._acquireLock()
-    try:
-        OldLoggerClass = logging.getLoggerClass()
-        if not getattr(OldLoggerClass, '_process_aware', False):
-            class ProcessAwareLogger(OldLoggerClass):
-                _process_aware = True
-                def makeRecord(self, *args, **kwds):
-                    record = OldLoggerClass.makeRecord(self, *args, **kwds)
-                    record.processName = current_process()._name
-                    return record
-            logging.setLoggerClass(ProcessAwareLogger)
-    finally:
-        logging._releaseLock()
-
-def log_to_stderr(level=None):
-    '''
-    Turn on logging and add a handler which prints to stderr
-    '''
-    global _log_to_stderr
-    import logging
-    logger = get_logger()
-    formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
-    handler = logging.StreamHandler()
-    handler.setFormatter(formatter)
-    logger.addHandler(handler)
-    if level is not None:
-        logger.setLevel(level)
-    _log_to_stderr = True
-
-#
-# Function returning a temp directory which will be removed on exit
-#
-
-def get_temp_dir():
-    # get name of a temp directory which will be automatically cleaned up
-    if current_process()._tempdir is None:
-        import shutil, tempfile
-        tempdir = tempfile.mkdtemp(prefix='pymp-')
-        info('created temp directory %s', tempdir)
-        Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
-        current_process()._tempdir = tempdir
-    return current_process()._tempdir
-
-#
-# Support for reinitialization of objects when bootstrapping a child process
-#
-
-_afterfork_registry = weakref.WeakValueDictionary()
-_afterfork_counter = itertools.count()
-
-def _run_after_forkers():
-    items = list(_afterfork_registry.items())
-    items.sort()
-    for (index, ident, func), obj in items:
-        try:
-            func(obj)
-        except Exception, e:
-            info('after forker raised exception %s', e)
-
-def register_after_fork(obj, func):
-    _afterfork_registry[(_afterfork_counter.next(), id(obj), func)] = obj
-
-#
-# Finalization using weakrefs
-#
-
-_finalizer_registry = {}
-_finalizer_counter = itertools.count()
-
-
-class Finalize(object):
-    '''
-    Class which supports object finalization using weakrefs
-    '''
-    def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
-        assert exitpriority is None or type(exitpriority) is int
-
-        if obj is not None:
-            self._weakref = weakref.ref(obj, self)
-        else:
-            assert exitpriority is not None
-
-        self._callback = callback
-        self._args = args
-        self._kwargs = kwargs or {}
-        self._key = (exitpriority, _finalizer_counter.next())
-
-        _finalizer_registry[self._key] = self
-
-    def __call__(self, wr=None):
-        '''
-        Run the callback unless it has already been called or cancelled
-        '''
-        try:
-            del _finalizer_registry[self._key]
-        except KeyError:
-            sub_debug('finalizer no longer registered')
-        else:
-            sub_debug('finalizer calling %s with args %s and kwargs %s',
-                     self._callback, self._args, self._kwargs)
-            res = self._callback(*self._args, **self._kwargs)
-            self._weakref = self._callback = self._args = \
-                            self._kwargs = self._key = None
-            return res
-
-    def cancel(self):
-        '''
-        Cancel finalization of the object
-        '''
-        try:
-            del _finalizer_registry[self._key]
-        except KeyError:
-            pass
-        else:
-            self._weakref = self._callback = self._args = \
-                            self._kwargs = self._key = None
-
-    def still_active(self):
-        '''
-        Return whether this finalizer is still waiting to invoke callback
-        '''
-        return self._key in _finalizer_registry
-
-    def __repr__(self):
-        try:
-            obj = self._weakref()
-        except (AttributeError, TypeError):
-            obj = None
-
-        if obj is None:
-            return '<Finalize object, dead>'
-
-        x = '<Finalize object, callback=%s' % \
-            getattr(self._callback, '__name__', self._callback)
-        if self._args:
-            x += ', args=' + str(self._args)
-        if self._kwargs:
-            x += ', kwargs=' + str(self._kwargs)
-        if self._key[0] is not None:
-            x += ', exitprority=' + str(self._key[0])
-        return x + '>'
-
-
-def _run_finalizers(minpriority=None):
-    '''
-    Run all finalizers whose exit priority is not None and at least minpriority
-
-    Finalizers with highest priority are called first; finalizers with
-    the same priority will be called in reverse order of creation.
-    '''
-    if minpriority is None:
-        f = lambda p : p[0][0] is not None
-    else:
-        f = lambda p : p[0][0] is not None and p[0][0] >= minpriority
-
-    items = [x for x in _finalizer_registry.items() if f(x)]
-    items.sort(reverse=True)
-
-    for key, finalizer in items:
-        sub_debug('calling %s', finalizer)
-        try:
-            finalizer()
-        except Exception:
-            import traceback
-            traceback.print_exc()
-
-    if minpriority is None:
-        _finalizer_registry.clear()
-
-#
-# Clean up on exit
-#
-
-def is_exiting():
-    '''
-    Returns true if the process is shutting down
-    '''
-    return _exiting or _exiting is None
-
-_exiting = False
-
-def _exit_function():
-    global _exiting
-
-    info('process shutting down')
-    debug('running all "atexit" finalizers with priority >= 0')
-    _run_finalizers(0)
-
-    for p in active_children():
-        if p._daemonic:
-            info('calling terminate() for daemon %s', p.get_name())
-            p._popen.terminate()
-
-    for p in active_children():
-        info('calling join() for process %s', p.get_name())
-        p.join()
-
-    debug('running the remaining "atexit" finalizers')
-    _run_finalizers()
-
-atexit.register(_exit_function)
-
-#
-# Some fork aware types
-#
-
-class ForkAwareThreadLock(object):
-    def __init__(self):
-        self._lock = threading.Lock()
-        self.acquire = self._lock.acquire
-        self.release = self._lock.release
-        register_after_fork(self, ForkAwareThreadLock.__init__)
-
-class ForkAwareLocal(threading.local):
-    def __init__(self):
-        register_after_fork(self, lambda obj : obj.__dict__.clear())
-    def __reduce__(self):
-        return type(self), ()
-
-#
-# Try making some callable types picklable
-#
-
-def _reduce_method(m):
-    if m.im_self is None:
-        return getattr, (m.im_class, m.im_func.func_name)
-    else:
-        return getattr, (m.im_self, m.im_func.func_name)
-copy_reg.pickle(type(Finalize.__init__), _reduce_method)
-
-def _reduce_method_descriptor(m):
-    return getattr, (m.__objclass__, m.__name__)
-copy_reg.pickle(type(list.append), _reduce_method_descriptor)
-copy_reg.pickle(type(int.__add__), _reduce_method_descriptor)
-
-def _reduce_builtin_function_or_method(m):
-    return getattr, (m.__self__, m.__name__)
-copy_reg.pickle(type(list().append), _reduce_builtin_function_or_method)
-copy_reg.pickle(type(int().__add__), _reduce_builtin_function_or_method)
-
-try:
-    from functools import partial
-except ImportError:
-    pass
-else:
-    def _reduce_partial(p):
-        return _rebuild_partial, (p.func, p.args, p.keywords or {})
-    def _rebuild_partial(func, args, keywords):
-        return partial(func, *args, **keywords)
-    copy_reg.pickle(partial, _reduce_partial)
+#
+# Module providing various facilities to other parts of the package
+#
+# multiprocessing/util.py
+#
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
+#
+
+import itertools
+import weakref
+import copyreg
+import atexit
+import threading        # we want threading to install its
+                        # cleanup function before multiprocessing does
+
+from multiprocessing.process import current_process, active_children
+
+__all__ = [
+    'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
+    'log_to_stderr', 'get_temp_dir', 'register_after_fork',
+    'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal'
+    ]
+
+#
+# Logging
+#
+
+NOTSET = 0
+SUBDEBUG = 5
+DEBUG = 10
+INFO = 20
+SUBWARNING = 25
+
+LOGGER_NAME = 'multiprocessing'
+DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
+
+_logger = None
+_log_to_stderr = False
+
+def sub_debug(msg, *args):
+    if _logger:
+        _logger.log(SUBDEBUG, msg, *args)
+
+def debug(msg, *args):
+    if _logger:
+        _logger.log(DEBUG, msg, *args)
+
+def info(msg, *args):
+    if _logger:
+        _logger.log(INFO, msg, *args)
+
+def sub_warning(msg, *args):
+    if _logger:
+        _logger.log(SUBWARNING, msg, *args)
+
+def get_logger():
+    '''
+    Returns logger used by multiprocessing
+    '''
+    global _logger
+
+    if not _logger:
+        import logging, atexit
+
+        # XXX multiprocessing should clean up before logging
+        if hasattr(atexit, 'unregister'):
+            atexit.unregister(_exit_function)
+            atexit.register(_exit_function)
+        else:
+            atexit._exithandlers.remove((_exit_function, (), {}))
+            atexit._exithandlers.append((_exit_function, (), {}))
+
+        _check_logger_class()
+        _logger = logging.getLogger(LOGGER_NAME)
+
+    return _logger
+
+def _check_logger_class():
+    '''
+    Make sure process name is recorded when loggers are used
+    '''
+    # XXX This function is unnecessary once logging is patched
+    import logging
+    if hasattr(logging, 'multiprocessing'):
+        return
+
+    logging._acquireLock()
+    try:
+        OldLoggerClass = logging.getLoggerClass()
+        if not getattr(OldLoggerClass, '_process_aware', False):
+            class ProcessAwareLogger(OldLoggerClass):
+                _process_aware = True
+                def makeRecord(self, *args, **kwds):
+                    record = OldLoggerClass.makeRecord(self, *args, **kwds)
+                    record.processName = current_process()._name
+                    return record
+            logging.setLoggerClass(ProcessAwareLogger)
+    finally:
+        logging._releaseLock()
+
+def log_to_stderr(level=None):
+    '''
+    Turn on logging and add a handler which prints to stderr
+    '''
+    global _log_to_stderr
+    import logging
+    logger = get_logger()
+    formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
+    handler = logging.StreamHandler()
+    handler.setFormatter(formatter)
+    logger.addHandler(handler)
+    if level is not None:
+        logger.setLevel(level)
+    _log_to_stderr = True
+
+#
+# Function returning a temp directory which will be removed on exit
+#
+
+def get_temp_dir():
+    # get name of a temp directory which will be automatically cleaned up
+    if current_process()._tempdir is None:
+        import shutil, tempfile
+        tempdir = tempfile.mkdtemp(prefix='pymp-')
+        info('created temp directory %s', tempdir)
+        Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
+        current_process()._tempdir = tempdir
+    return current_process()._tempdir
+
+#
+# Support for reinitialization of objects when bootstrapping a child process
+#
+
+_afterfork_registry = weakref.WeakValueDictionary()
+_afterfork_counter = itertools.count()
+
+def _run_after_forkers():
+    items = list(_afterfork_registry.items())
+    items.sort()
+    for (index, ident, func), obj in items:
+        try:
+            func(obj)
+        except Exception as e:
+            info('after forker raised exception %s', e)
+
+def register_after_fork(obj, func):
+    _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj
+
+#
+# Finalization using weakrefs
+#
+
+_finalizer_registry = {}
+_finalizer_counter = itertools.count()
+
+
+class Finalize(object):
+    '''
+    Class which supports object finalization using weakrefs
+    '''
+    def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
+        assert exitpriority is None or type(exitpriority) is int
+
+        if obj is not None:
+            self._weakref = weakref.ref(obj, self)
+        else:
+            assert exitpriority is not None
+
+        self._callback = callback
+        self._args = args
+        self._kwargs = kwargs or {}
+        self._key = (exitpriority, next(_finalizer_counter))
+
+        _finalizer_registry[self._key] = self
+
+    def __call__(self, wr=None):
+        '''
+        Run the callback unless it has already been called or cancelled
+        '''
+        try:
+            del _finalizer_registry[self._key]
+        except KeyError:
+            sub_debug('finalizer no longer registered')
+        else:
+            sub_debug('finalizer calling %s with args %s and kwargs %s',
+                     self._callback, self._args, self._kwargs)
+            res = self._callback(*self._args, **self._kwargs)
+            self._weakref = self._callback = self._args = \
+                            self._kwargs = self._key = None
+            return res
+
+    def cancel(self):
+        '''
+        Cancel finalization of the object
+        '''
+        try:
+            del _finalizer_registry[self._key]
+        except KeyError:
+            pass
+        else:
+            self._weakref = self._callback = self._args = \
+                            self._kwargs = self._key = None
+
+    def still_active(self):
+        '''
+        Return whether this finalizer is still waiting to invoke callback
+        '''
+        return self._key in _finalizer_registry
+
+    def __repr__(self):
+        try:
+            obj = self._weakref()
+        except (AttributeError, TypeError):
+            obj = None
+
+        if obj is None:
+            return '<Finalize object, dead>'
+
+        x = '<Finalize object, callback=%s' % \
+            getattr(self._callback, '__name__', self._callback)
+        if self._args:
+            x += ', args=' + str(self._args)
+        if self._kwargs:
+            x += ', kwargs=' + str(self._kwargs)
+        if self._key[0] is not None:
+            x += ', exitpriority=' + str(self._key[0])
+        return x + '>'
+
+
+def _run_finalizers(minpriority=None):
+    '''
+    Run all finalizers whose exit priority is not None and at least minpriority
+
+    Finalizers with highest priority are called first; finalizers with
+    the same priority will be called in reverse order of creation.
+    '''
+    if minpriority is None:
+        f = lambda p : p[0][0] is not None
+    else:
+        f = lambda p : p[0][0] is not None and p[0][0] >= minpriority
+
+    items = [x for x in _finalizer_registry.items() if f(x)]
+    items.sort(reverse=True)
+
+    for key, finalizer in items:
+        sub_debug('calling %s', finalizer)
+        try:
+            finalizer()
+        except Exception:
+            import traceback
+            traceback.print_exc()
+
+    if minpriority is None:
+        _finalizer_registry.clear()
+
+#
+# Clean up on exit
+#
+
+def is_exiting():
+    '''
+    Returns true if the process is shutting down
+    '''
+    return _exiting or _exiting is None
+
+_exiting = False
+
+def _exit_function():
+    global _exiting
+
+    info('process shutting down')
+    debug('running all "atexit" finalizers with priority >= 0')
+    _run_finalizers(0)
+
+    for p in active_children():
+        if p._daemonic:
+            info('calling terminate() for daemon %s', p.get_name())
+            p._popen.terminate()
+
+    for p in active_children():
+        info('calling join() for process %s', p.get_name())
+        p.join()
+
+    debug('running the remaining "atexit" finalizers')
+    _run_finalizers()
+
+atexit.register(_exit_function)
+
+#
+# Some fork aware types
+#
+
+class ForkAwareThreadLock(object):
+    def __init__(self):
+        self._lock = threading.Lock()
+        self.acquire = self._lock.acquire
+        self.release = self._lock.release
+        register_after_fork(self, ForkAwareThreadLock.__init__)
+
+class ForkAwareLocal(threading.local):
+    def __init__(self):
+        register_after_fork(self, lambda obj : obj.__dict__.clear())
+    def __reduce__(self):
+        return type(self), ()
+
+#
+# Try making some callable types picklable
+#
+
+def _reduce_method(m):
+    if m.__self__ is None:
+        return getattr, (m.__class__, m.__func__.__name__)
+    else:
+        return getattr, (m.__self__, m.__func__.__name__)
+copyreg.pickle(type(Finalize.__init__), _reduce_method)
+
+def _reduce_method_descriptor(m):
+    return getattr, (m.__objclass__, m.__name__)
+copyreg.pickle(type(list.append), _reduce_method_descriptor)
+copyreg.pickle(type(int.__add__), _reduce_method_descriptor)
+
+def _reduce_builtin_function_or_method(m):
+    return getattr, (m.__self__, m.__name__)
+copyreg.pickle(type(list().append), _reduce_builtin_function_or_method)
+copyreg.pickle(type(int().__add__), _reduce_builtin_function_or_method)
+
+try:
+    from functools import partial
+except ImportError:
+    pass
+else:
+    def _reduce_partial(p):
+        return _rebuild_partial, (p.func, p.args, p.keywords or {})
+    def _rebuild_partial(func, args, keywords):
+        return partial(func, *args, **keywords)
+    copyreg.pickle(partial, _reduce_partial)

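Finalize pairs a weakref callback with the registry that
_exit_function() drains at shutdown: the callback runs when the
tracked object is garbage collected, or at interpreter exit in order
of descending exitpriority, whichever happens first (never both, since
__call__ deregisters the key). A minimal usage sketch, not part of
this commit (Resource is an illustrative class):

    from multiprocessing.util import Finalize

    class Resource(object):
        def __init__(self, path):
            self.path = path
            # Runs _cleanup(path) when self dies or at exit; the
            # non-negative exitpriority means _exit_function's first
            # _run_finalizers(0) pass already covers it.
            self._finalizer = Finalize(
                self, Resource._cleanup, args=(path,), exitpriority=10
                )

        @staticmethod
        def _cleanup(path):
            print('cleaning up %s' % path)

    r = Resource('/tmp/example')
    del r      # weakref fires: prints 'cleaning up /tmp/example'

get_temp_dir() above uses the same mechanism with obj=None and
exitpriority=-100, so the temporary directory is removed only after
all the priority >= 0 finalizers have run.
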
Copied: python/branches/py3k/Lib/test/test_multiprocessing.py (from r64104, /python/trunk/Lib/test/test_multiprocessing.py)
==============================================================================
--- /python/trunk/Lib/test/test_multiprocessing.py	(original)
+++ python/branches/py3k/Lib/test/test_multiprocessing.py	Wed Jun 11 18:44:04 2008
@@ -1,1791 +1,1791 @@
-#
-# Unit tests for the multiprocessing package
-#
-
-import unittest
-import threading
-import Queue
-import time
-import sys
-import os
-import gc
-import signal
-import array
-import copy
-import socket
-import random
-import logging
-
-import _multiprocessing
-import multiprocessing.dummy
-import multiprocessing.connection
-import multiprocessing.managers
-import multiprocessing.heap
-import multiprocessing.managers
-import multiprocessing.pool
-
-from multiprocessing import util
-
-#
-#
-#
-
-if sys.version_info >= (3, 0):
-    def latin(s):
-        return s.encode('latin')
-else:
-    latin = str
-
-try:
-    bytes
-except NameError:
-    bytes = str
-    def bytearray(seq):
-        return array.array('c', seq)
-
-#
-# Constants
-#
-
-LOG_LEVEL = util.SUBWARNING
-#LOG_LEVEL = logging.WARNING
-
-DELTA = 0.1
-CHECK_TIMINGS = False     # setting this True makes tests take a lot longer
-                          # and can sometimes cause some non-serious
-                          # failures because some calls block a bit
-                          # longer than expected
-if CHECK_TIMINGS:
-    TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
-else:
-    TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
-
-HAVE_GETVALUE = not getattr(_multiprocessing,
-                            'HAVE_BROKEN_SEM_GETVALUE', False)
-
-#
-# Creates a wrapper for a function which records the time it takes to finish
-#
-
-class TimingWrapper(object):
-
-    def __init__(self, func):
-        self.func = func
-        self.elapsed = None
-
-    def __call__(self, *args, **kwds):
-        t = time.time()
-        try:
-            return self.func(*args, **kwds)
-        finally:
-            self.elapsed = time.time() - t
-        
-#
-# Base class for test cases
-#
-
-class BaseTestCase(object):
-    
-    ALLOWED_TYPES = ('processes', 'manager', 'threads')
-
-    def assertTimingAlmostEqual(self, a, b):
-        if CHECK_TIMINGS:
-            self.assertAlmostEqual(a, b, 1)
-
-    def assertReturnsIfImplemented(self, value, func, *args):
-        try:
-            res = func(*args)
-        except NotImplementedError:
-            pass
-        else:
-            return self.assertEqual(value, res)
-
-#
-# Return the value of a semaphore
-#
-
-def get_value(self):
-    try:
-        return self.get_value()
-    except AttributeError:
-        try:
-            return self._Semaphore__value
-        except AttributeError:
-            try:
-                return self._value
-            except AttributeError:
-                raise NotImplementedError
-
-#
-# Testcases
-#
-
-class _TestProcess(BaseTestCase):
-    
-    ALLOWED_TYPES = ('processes', 'threads')
-    
-    def test_current(self):
-        if self.TYPE == 'threads':
-            return
-
-        current = self.current_process()
-        authkey = current.get_authkey()
-        
-        self.assertTrue(current.is_alive())
-        self.assertTrue(not current.is_daemon())        
-        self.assertTrue(isinstance(authkey, bytes))
-        self.assertTrue(len(authkey) > 0)
-        self.assertEqual(current.get_ident(), os.getpid())
-        self.assertEqual(current.get_exitcode(), None)
-
-    def _test(self, q, *args, **kwds):
-        current = self.current_process()
-        q.put(args)
-        q.put(kwds)
-        q.put(current.get_name())
-        if self.TYPE != 'threads':
-            q.put(bytes(current.get_authkey()))
-            q.put(current.pid)
-
-    def test_process(self):
-        q = self.Queue(1)
-        e = self.Event()
-        args = (q, 1, 2)
-        kwargs = {'hello':23, 'bye':2.54}
-        name = 'SomeProcess'
-        p = self.Process(
-            target=self._test, args=args, kwargs=kwargs, name=name
-            )
-        p.set_daemon(True)
-        current = self.current_process()
-
-        if self.TYPE != 'threads':
-            self.assertEquals(p.get_authkey(), current.get_authkey())
-        self.assertEquals(p.is_alive(), False)
-        self.assertEquals(p.is_daemon(), True)
-        self.assertTrue(p not in self.active_children())
-        self.assertTrue(type(self.active_children()) is list)
-        self.assertEqual(p.get_exitcode(), None)
-        
-        p.start()
-        
-        self.assertEquals(p.get_exitcode(), None)
-        self.assertEquals(p.is_alive(), True)
-        self.assertTrue(p in self.active_children())
-        
-        self.assertEquals(q.get(), args[1:])
-        self.assertEquals(q.get(), kwargs)
-        self.assertEquals(q.get(), p.get_name())
-        if self.TYPE != 'threads':
-            self.assertEquals(q.get(), current.get_authkey())
-            self.assertEquals(q.get(), p.pid)
-
-        p.join()
-
-        self.assertEquals(p.get_exitcode(), 0)
-        self.assertEquals(p.is_alive(), False)
-        self.assertTrue(p not in self.active_children())        
-
-    def _test_terminate(self):
-        time.sleep(1000)
-
-    def test_terminate(self):
-        if self.TYPE == 'threads':
-            return
-        
-        p = self.Process(target=self._test_terminate)
-        p.set_daemon(True)
-        p.start()
-
-        self.assertEqual(p.is_alive(), True)
-        self.assertTrue(p in self.active_children())
-        self.assertEqual(p.get_exitcode(), None)
-
-        p.terminate()
-
-        join = TimingWrapper(p.join)
-        self.assertEqual(join(), None)
-        self.assertTimingAlmostEqual(join.elapsed, 0.0)
-        
-        self.assertEqual(p.is_alive(), False)
-        self.assertTrue(p not in self.active_children())
-
-        p.join()
-
-        # XXX sometimes get p.get_exitcode() == 0 on Windows ...
-        #self.assertEqual(p.get_exitcode(), -signal.SIGTERM)
-
-    def test_cpu_count(self):
-        try:
-            cpus = multiprocessing.cpu_count()
-        except NotImplementedError:
-            cpus = 1
-        self.assertTrue(type(cpus) is int)
-        self.assertTrue(cpus >= 1)
-
-    def test_active_children(self):
-        self.assertEqual(type(self.active_children()), list)
-
-        p = self.Process(target=time.sleep, args=(DELTA,))
-        self.assertTrue(p not in self.active_children())
-        
-        p.start()
-        self.assertTrue(p in self.active_children())
-
-        p.join()
-        self.assertTrue(p not in self.active_children())
-
-    def _test_recursion(self, wconn, id):
-        from multiprocessing import forking
-        wconn.send(id)
-        if len(id) < 2:
-            for i in range(2):
-                p = self.Process(
-                    target=self._test_recursion, args=(wconn, id+[i])
-                    )
-                p.start()
-                p.join()
-
-    def test_recursion(self):
-        rconn, wconn = self.Pipe(duplex=False)
-        self._test_recursion(wconn, [])
-        
-        time.sleep(DELTA)
-        result = []
-        while rconn.poll():
-            result.append(rconn.recv())
-            
-        expected = [
-            [],
-              [0],
-                [0, 0],
-                [0, 1],
-              [1],
-                [1, 0],
-                [1, 1]
-            ]
-        self.assertEqual(result, expected)
-
-#
-#
-#
-
-class _UpperCaser(multiprocessing.Process):
-
-    def __init__(self):
-        multiprocessing.Process.__init__(self)
-        self.child_conn, self.parent_conn = multiprocessing.Pipe()
-
-    def run(self):
-        self.parent_conn.close()
-        for s in iter(self.child_conn.recv, None):
-            self.child_conn.send(s.upper())
-        self.child_conn.close()
-
-    def submit(self, s):
-        assert type(s) is str
-        self.parent_conn.send(s)
-        return self.parent_conn.recv()
-
-    def stop(self):
-        self.parent_conn.send(None)
-        self.parent_conn.close()
-        self.child_conn.close()
-
-class _TestSubclassingProcess(BaseTestCase):
-
-    ALLOWED_TYPES = ('processes',)
-
-    def test_subclassing(self):
-        uppercaser = _UpperCaser()
-        uppercaser.start()
-        self.assertEqual(uppercaser.submit('hello'), 'HELLO')
-        self.assertEqual(uppercaser.submit('world'), 'WORLD')
-        uppercaser.stop()
-        uppercaser.join()
-        
-#
-#
-#
-
-def queue_empty(q):
-    if hasattr(q, 'empty'):
-        return q.empty()
-    else:
-        return q.qsize() == 0
-
-def queue_full(q, maxsize):
-    if hasattr(q, 'full'):
-        return q.full()
-    else:
-        return q.qsize() == maxsize
-
-
-class _TestQueue(BaseTestCase):
-
-
-    def _test_put(self, queue, child_can_start, parent_can_continue):
-        child_can_start.wait()
-        for i in range(6):
-            queue.get()
-        parent_can_continue.set()
-
-    def test_put(self):
-        MAXSIZE = 6
-        queue = self.Queue(maxsize=MAXSIZE)
-        child_can_start = self.Event()
-        parent_can_continue = self.Event()
-
-        proc = self.Process(
-            target=self._test_put,
-            args=(queue, child_can_start, parent_can_continue)
-            )
-        proc.set_daemon(True)
-        proc.start()
-        
-        self.assertEqual(queue_empty(queue), True)
-        self.assertEqual(queue_full(queue, MAXSIZE), False)
-
-        queue.put(1)
-        queue.put(2, True)
-        queue.put(3, True, None)
-        queue.put(4, False)
-        queue.put(5, False, None)
-        queue.put_nowait(6)
-
-        # the values may be in buffer but not yet in pipe so sleep a bit
-        time.sleep(DELTA)     
-
-        self.assertEqual(queue_empty(queue), False)
-        self.assertEqual(queue_full(queue, MAXSIZE), True)
-
-        put = TimingWrapper(queue.put)
-        put_nowait = TimingWrapper(queue.put_nowait)
-
-        self.assertRaises(Queue.Full, put, 7, False)
-        self.assertTimingAlmostEqual(put.elapsed, 0)
-
-        self.assertRaises(Queue.Full, put, 7, False, None)
-        self.assertTimingAlmostEqual(put.elapsed, 0)
-
-        self.assertRaises(Queue.Full, put_nowait, 7)
-        self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
-
-        self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1)
-        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
-
-        self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2)
-        self.assertTimingAlmostEqual(put.elapsed, 0)
-
-        self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3)
-        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
-
-        child_can_start.set()
-        parent_can_continue.wait()
-
-        self.assertEqual(queue_empty(queue), True)
-        self.assertEqual(queue_full(queue, MAXSIZE), False)
-
-        proc.join()
-
-    def _test_get(self, queue, child_can_start, parent_can_continue):
-        child_can_start.wait()
-        queue.put(1)
-        queue.put(2)
-        queue.put(3)
-        queue.put(4)
-        queue.put(5)
-        parent_can_continue.set()
-        
-    def test_get(self):
-        queue = self.Queue()
-        child_can_start = self.Event()
-        parent_can_continue = self.Event()
-        
-        proc = self.Process(
-            target=self._test_get,
-            args=(queue, child_can_start, parent_can_continue)
-            )
-        proc.set_daemon(True)
-        proc.start()
-        
-        self.assertEqual(queue_empty(queue), True)
-        
-        child_can_start.set()
-        parent_can_continue.wait()
-
-        time.sleep(DELTA)
-        self.assertEqual(queue_empty(queue), False)
-
-        self.assertEqual(queue.get(), 1)
-        self.assertEqual(queue.get(True, None), 2)
-        self.assertEqual(queue.get(True), 3)
-        self.assertEqual(queue.get(timeout=1), 4)
-        self.assertEqual(queue.get_nowait(), 5)
-        
-        self.assertEqual(queue_empty(queue), True)
-
-        get = TimingWrapper(queue.get)
-        get_nowait = TimingWrapper(queue.get_nowait)
-        
-        self.assertRaises(Queue.Empty, get, False)
-        self.assertTimingAlmostEqual(get.elapsed, 0)
-
-        self.assertRaises(Queue.Empty, get, False, None)
-        self.assertTimingAlmostEqual(get.elapsed, 0)
-
-        self.assertRaises(Queue.Empty, get_nowait)
-        self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
-
-        self.assertRaises(Queue.Empty, get, True, TIMEOUT1)
-        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
-
-        self.assertRaises(Queue.Empty, get, False, TIMEOUT2)
-        self.assertTimingAlmostEqual(get.elapsed, 0)
-
-        self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3)
-        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
-
-        proc.join()
-        
-    def _test_fork(self, queue):
-        for i in range(10, 20):
-            queue.put(i)
-        # note that at this point the items may only be buffered, so the
-        # process cannot shutdown until the feeder thread has finished
-        # pushing items onto the pipe.
-
-    def test_fork(self):
-        # Old versions of Queue would fail to create a new feeder
-        # thread for a forked process if the original process had its
-        # own feeder thread.  This test checks that this no longer
-        # happens.
-
-        queue = self.Queue()
-
-        # put items on queue so that main process starts a feeder thread
-        for i in range(10):
-            queue.put(i)
-
-        # wait to make sure thread starts before we fork a new process
-        time.sleep(DELTA)
-
-        # fork process
-        p = self.Process(target=self._test_fork, args=(queue,))
-        p.start()
-
-        # check that all expected items are in the queue
-        for i in range(20):
-            self.assertEqual(queue.get(), i)
-        self.assertRaises(Queue.Empty, queue.get, False)
-
-        p.join()
-
-    def test_qsize(self):
-        q = self.Queue()
-        try:
-            self.assertEqual(q.qsize(), 0)
-        except NotImplementedError:
-            return
-        q.put(1)
-        self.assertEqual(q.qsize(), 1)
-        q.put(5)
-        self.assertEqual(q.qsize(), 2)
-        q.get()
-        self.assertEqual(q.qsize(), 1)
-        q.get()
-        self.assertEqual(q.qsize(), 0)
-
-    def _test_task_done(self, q):
-        for obj in iter(q.get, None):
-            time.sleep(DELTA)
-            q.task_done()
-
-    def test_task_done(self):
-        queue = self.JoinableQueue()
-
-        if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'):
-            return
-
-        workers = [self.Process(target=self._test_task_done, args=(queue,))
-                   for i in xrange(4)]
-        
-        for p in workers:
-            p.start()
-
-        for i in xrange(10):
-            queue.put(i)
-
-        queue.join()
-
-        for p in workers:
-            queue.put(None)
-        
-        for p in workers:
-            p.join()
-
-#
-#
-#
-
-class _TestLock(BaseTestCase):
-
-    def test_lock(self):
-        lock = self.Lock()
-        self.assertEqual(lock.acquire(), True)
-        self.assertEqual(lock.acquire(False), False)
-        self.assertEqual(lock.release(), None)
-        self.assertRaises((ValueError, threading.ThreadError), lock.release)
-
-    def test_rlock(self):
-        lock = self.RLock()
-        self.assertEqual(lock.acquire(), True)
-        self.assertEqual(lock.acquire(), True)
-        self.assertEqual(lock.acquire(), True)
-        self.assertEqual(lock.release(), None)
-        self.assertEqual(lock.release(), None)
-        self.assertEqual(lock.release(), None)
-        self.assertRaises((AssertionError, RuntimeError), lock.release)
-        
-        
-class _TestSemaphore(BaseTestCase):
-
-    def _test_semaphore(self, sem):
-        self.assertReturnsIfImplemented(2, get_value, sem)
-        self.assertEqual(sem.acquire(), True)
-        self.assertReturnsIfImplemented(1, get_value, sem)
-        self.assertEqual(sem.acquire(), True)
-        self.assertReturnsIfImplemented(0, get_value, sem)
-        self.assertEqual(sem.acquire(False), False)
-        self.assertReturnsIfImplemented(0, get_value, sem)
-        self.assertEqual(sem.release(), None)
-        self.assertReturnsIfImplemented(1, get_value, sem)
-        self.assertEqual(sem.release(), None)
-        self.assertReturnsIfImplemented(2, get_value, sem)
-        
-    def test_semaphore(self):
-        sem = self.Semaphore(2)
-        self._test_semaphore(sem)
-        self.assertEqual(sem.release(), None)
-        self.assertReturnsIfImplemented(3, get_value, sem)
-        self.assertEqual(sem.release(), None)
-        self.assertReturnsIfImplemented(4, get_value, sem)
-
-    def test_bounded_semaphore(self):
-        sem = self.BoundedSemaphore(2)
-        self._test_semaphore(sem)
-        # Currently fails on OS/X
-        #if HAVE_GETVALUE:
-        #    self.assertRaises(ValueError, sem.release)
-        #    self.assertReturnsIfImplemented(2, get_value, sem)
-
-    def test_timeout(self):
-        if self.TYPE != 'processes':
-            return
-
-        sem = self.Semaphore(0)
-        acquire = TimingWrapper(sem.acquire)
-
-        self.assertEqual(acquire(False), False)
-        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
-
-        self.assertEqual(acquire(False, None), False)
-        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
-
-        self.assertEqual(acquire(False, TIMEOUT1), False)
-        self.assertTimingAlmostEqual(acquire.elapsed, 0)
-
-        self.assertEqual(acquire(True, TIMEOUT2), False)
-        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
-
-        self.assertEqual(acquire(timeout=TIMEOUT3), False)
-        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
-
-
-class _TestCondition(BaseTestCase):
-    
-    def f(self, cond, sleeping, woken, timeout=None):
-        cond.acquire()
-        sleeping.release()
-        cond.wait(timeout)
-        woken.release()
-        cond.release()
-    
-    def check_invariant(self, cond):
-        # this is only supposed to succeed when there are no sleepers
-        if self.TYPE == 'processes':
-            try:
-                sleepers = (cond._sleeping_count.get_value() -
-                            cond._woken_count.get_value())
-                self.assertEqual(sleepers, 0)
-                self.assertEqual(cond._wait_semaphore.get_value(), 0)
-            except NotImplementedError:
-                pass
-            
-    def test_notify(self):
-        cond = self.Condition()
-        sleeping = self.Semaphore(0)
-        woken = self.Semaphore(0)
-        
-        p = self.Process(target=self.f, args=(cond, sleeping, woken))
-        p.set_daemon(True)
-        p.start()
-
-        p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
-        p.setDaemon(True)
-        p.start()
-        
-        # wait for both children to start sleeping
-        sleeping.acquire()
-        sleeping.acquire()
-        
-        # check no process/thread has woken up
-        time.sleep(DELTA)
-        self.assertReturnsIfImplemented(0, get_value, woken)
-
-        # wake up one process/thread
-        cond.acquire()
-        cond.notify()
-        cond.release()
-        
-        # check one process/thread has woken up
-        time.sleep(DELTA)
-        self.assertReturnsIfImplemented(1, get_value, woken)
-
-        # wake up another
-        cond.acquire()
-        cond.notify()
-        cond.release()
-        
-        # check other has woken up
-        time.sleep(DELTA)
-        self.assertReturnsIfImplemented(2, get_value, woken)
-        
-        # check state is not mucked up
-        self.check_invariant(cond)
-        p.join()
-        
-    def test_notify_all(self):
-        cond = self.Condition()
-        sleeping = self.Semaphore(0)
-        woken = self.Semaphore(0)
-
-        # start some threads/processes which will timeout
-        for i in range(3):
-            p = self.Process(target=self.f,
-                             args=(cond, sleeping, woken, TIMEOUT1))
-            p.set_daemon(True)
-            p.start()
-
-            t = threading.Thread(target=self.f,
-                                 args=(cond, sleeping, woken, TIMEOUT1))
-            t.setDaemon(True)
-            t.start()
-
-        # wait for them all to sleep
-        for i in xrange(6):
-            sleeping.acquire()
-
-        # check they have all timed out
-        for i in xrange(6):
-            woken.acquire()
-        self.assertReturnsIfImplemented(0, get_value, woken)
-
-        # check state is not mucked up
-        self.check_invariant(cond)
-
-        # start some more threads/processes
-        for i in range(3):
-            p = self.Process(target=self.f, args=(cond, sleeping, woken))
-            p.set_daemon(True)
-            p.start()
-            
-            t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
-            t.setDaemon(True)
-            t.start()
-            
-        # wait for them to all sleep
-        for i in xrange(6):
-            sleeping.acquire()
-            
-        # check no process/thread has woken up
-        time.sleep(DELTA)
-        self.assertReturnsIfImplemented(0, get_value, woken)
-
-        # wake them all up
-        cond.acquire()
-        cond.notify_all()
-        cond.release()
-
-        # check they have all woken
-        time.sleep(DELTA)
-        self.assertReturnsIfImplemented(6, get_value, woken)
-
-        # check state is not mucked up
-        self.check_invariant(cond)
-
-    def test_timeout(self):
-        cond = self.Condition()
-        wait = TimingWrapper(cond.wait)
-        cond.acquire()
-        res = wait(TIMEOUT1)
-        cond.release()
-        self.assertEqual(res, None)
-        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
-
-        
-class _TestEvent(BaseTestCase):
-
-    def _test_event(self, event):
-        time.sleep(TIMEOUT2)
-        event.set()
-
-    def test_event(self):
-        event = self.Event()
-        wait = TimingWrapper(event.wait)
-        
-        # Removed temporarily: due to API shear this does not
-        # work with threading._Event objects (is_set == isSet).
-        #self.assertEqual(event.is_set(), False)
-        
-        self.assertEqual(wait(0.0), None)
-        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
-        self.assertEqual(wait(TIMEOUT1), None)
-        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
-
-        event.set()
-
-        # See note above on the API differences
-        # self.assertEqual(event.is_set(), True)
-        self.assertEqual(wait(), None)
-        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
-        self.assertEqual(wait(TIMEOUT1), None)
-        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
-        # self.assertEqual(event.is_set(), True)
-
-        event.clear()
-
-        #self.assertEqual(event.is_set(), False)
-
-        self.Process(target=self._test_event, args=(event,)).start()
-        self.assertEqual(wait(), None)
-
-#
-#
-#
-
-class _TestValue(BaseTestCase):
-
-    codes_values = [
-        ('i', 4343, 24234),
-        ('d', 3.625, -4.25),
-        ('h', -232, 234),
-        ('c', latin('x'), latin('y'))
-        ]
-
-    def _test(self, values):
-        for sv, cv in zip(values, self.codes_values):
-            sv.value = cv[2]
-            
-        
-    def test_value(self, raw=False):
-        if self.TYPE != 'processes':
-            return
-
-        if raw:
-            values = [self.RawValue(code, value)
-                      for code, value, _ in self.codes_values]
-        else:
-            values = [self.Value(code, value)
-                      for code, value, _ in self.codes_values]
-            
-        for sv, cv in zip(values, self.codes_values):
-            self.assertEqual(sv.value, cv[1])
-        
-        proc = self.Process(target=self._test, args=(values,))
-        proc.start()
-        proc.join()
-
-        for sv, cv in zip(values, self.codes_values):
-            self.assertEqual(sv.value, cv[2])
-
-    def test_rawvalue(self):
-        self.test_value(raw=True)
-
-    def test_getobj_getlock(self):
-        if self.TYPE != 'processes':
-            return
-
-        val1 = self.Value('i', 5)
-        lock1 = val1.get_lock()
-        obj1 = val1.get_obj()
-
-        val2 = self.Value('i', 5, lock=None)
-        lock2 = val2.get_lock()
-        obj2 = val2.get_obj()
-
-        lock = self.Lock()
-        val3 = self.Value('i', 5, lock=lock)
-        lock3 = val3.get_lock()
-        obj3 = val3.get_obj()
-        self.assertEqual(lock, lock3)
-        
-        arr4 = self.RawValue('i', 5)
-        self.assertFalse(hasattr(arr4, 'get_lock'))
-        self.assertFalse(hasattr(arr4, 'get_obj'))
-
-
-class _TestArray(BaseTestCase):
-
-    def f(self, seq):
-        for i in range(1, len(seq)):
-            seq[i] += seq[i-1]
-
-    def test_array(self, raw=False):
-        if self.TYPE != 'processes':
-            return
-
-        seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
-        if raw:
-            arr = self.RawArray('i', seq)
-        else:
-            arr = self.Array('i', seq)
-        
-        self.assertEqual(len(arr), len(seq))
-        self.assertEqual(arr[3], seq[3])
-        self.assertEqual(list(arr[2:7]), list(seq[2:7]))
-        
-        arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
-        
-        self.assertEqual(list(arr[:]), seq)
-        
-        self.f(seq)
-        
-        p = self.Process(target=self.f, args=(arr,))
-        p.start()
-        p.join()
-        
-        self.assertEqual(list(arr[:]), seq)
-        
-    def test_rawarray(self):
-        self.test_array(raw=True)
-        
-    def test_getobj_getlock_obj(self):
-        if self.TYPE != 'processes':
-            return
-
-        arr1 = self.Array('i', range(10))
-        lock1 = arr1.get_lock()
-        obj1 = arr1.get_obj()
-
-        arr2 = self.Array('i', range(10), lock=None)
-        lock2 = arr2.get_lock()
-        obj2 = arr2.get_obj()
-
-        lock = self.Lock()
-        arr3 = self.Array('i', range(10), lock=lock)
-        lock3 = arr3.get_lock()
-        obj3 = arr3.get_obj()
-        self.assertEqual(lock, lock3)
-        
-        arr4 = self.RawArray('i', range(10))
-        self.assertFalse(hasattr(arr4, 'get_lock'))
-        self.assertFalse(hasattr(arr4, 'get_obj'))
-
-#
-#
-#
-
-class _TestContainers(BaseTestCase):
-
-    ALLOWED_TYPES = ('manager',)
-
-    def test_list(self):
-        a = self.list(range(10))
-        self.assertEqual(a[:], range(10))
-        
-        b = self.list()
-        self.assertEqual(b[:], [])
-        
-        b.extend(range(5))
-        self.assertEqual(b[:], range(5))
-        
-        self.assertEqual(b[2], 2)
-        self.assertEqual(b[2:10], [2,3,4])
-
-        b *= 2
-        self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
-
-        self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
-
-        self.assertEqual(a[:], range(10))
-
-        d = [a, b]
-        e = self.list(d)
-        self.assertEqual(
-            e[:],
-            [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
-            )
-        
-        f = self.list([a])
-        a.append('hello')
-        self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
-
-    def test_dict(self):
-        d = self.dict()
-        indices = range(65, 70)
-        for i in indices:
-            d[i] = chr(i)
-        self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
-        self.assertEqual(sorted(d.keys()), indices)
-        self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
-        self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
-        
-    def test_namespace(self):
-        n = self.Namespace()
-        n.name = 'Bob'
-        n.job = 'Builder'
-        n._hidden = 'hidden'
-        self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
-        del n.job
-        self.assertEqual(str(n), "Namespace(name='Bob')")
-        self.assertTrue(hasattr(n, 'name'))
-        self.assertTrue(not hasattr(n, 'job'))
-
-#
-#
-#
-
-def sqr(x, wait=0.0):
-    time.sleep(wait)
-    return x*x
-
-class _TestPool(BaseTestCase):
-
-    def test_apply(self):
-        papply = self.pool.apply
-        self.assertEqual(papply(sqr, (5,)), sqr(5))
-        self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
-
-    def test_map(self):
-        pmap = self.pool.map
-        self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10)))
-        self.assertEqual(pmap(sqr, range(100), chunksize=20),
-                         map(sqr, range(100)))
-        
-    def test_async(self):
-        res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
-        get = TimingWrapper(res.get)
-        self.assertEqual(get(), 49)
-        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
-
-    def test_async_timeout(self):
-        res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))
-        get = TimingWrapper(res.get)
-        self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
-        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
-
-    def test_imap(self):
-        it = self.pool.imap(sqr, range(10))
-        self.assertEqual(list(it), map(sqr, range(10)))
-
-        it = self.pool.imap(sqr, range(10))
-        for i in range(10):
-            self.assertEqual(it.next(), i*i)
-        self.assertRaises(StopIteration, it.next)
-
-        it = self.pool.imap(sqr, range(1000), chunksize=100)
-        for i in range(1000):
-            self.assertEqual(it.next(), i*i)
-        self.assertRaises(StopIteration, it.next)
-
-    def test_imap_unordered(self):
-        it = self.pool.imap_unordered(sqr, range(1000))
-        self.assertEqual(sorted(it), map(sqr, range(1000)))
-
-        it = self.pool.imap_unordered(sqr, range(1000), chunksize=53)
-        self.assertEqual(sorted(it), map(sqr, range(1000)))
-
-    def test_make_pool(self):
-        p = multiprocessing.Pool(3)
-        self.assertEqual(3, len(p._pool))
-        p.close()
-        p.join()
-
-    def test_terminate(self):
-        if self.TYPE == 'manager':
-            # On Unix a forked process increfs each shared object to
-            # which its parent process held a reference.  If the
-            # forked process gets terminated then there is likely to
-            # be a reference leak.  So to prevent
-            # _TestZZZNumberOfObjects from failing we skip this test
-            # when using a manager.
-            return
-
-        result = self.pool.map_async(
-            time.sleep, [0.1 for i in range(10000)], chunksize=1
-            )
-        self.pool.terminate()
-        join = TimingWrapper(self.pool.join)
-        join()
-        self.assertTrue(join.elapsed < 0.2)
-
-#
-# Test that manager has expected number of shared objects left
-#
-
-class _TestZZZNumberOfObjects(BaseTestCase):
-    # Because test cases are sorted alphabetically, this one will get
-    # run after all the other tests for the manager.  It tests that
-    # there have been no "reference leaks" for the manager's shared
-    # objects.  Note the comment in _TestPool.test_terminate().
-    ALLOWED_TYPES = ('manager',)
-
-    def test_number_of_objects(self):
-        EXPECTED_NUMBER = 1                # the pool object is still alive
-        multiprocessing.active_children()  # discard dead process objs
-        gc.collect()                       # do garbage collection
-        refs = self.manager._number_of_objects()
-        if refs != EXPECTED_NUMBER:
-            print self.manager._debugInfo()
-
-        self.assertEqual(refs, EXPECTED_NUMBER)
-
-#
-# Test of creating a customized manager class
-#
-
-from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
-    
-class FooBar(object):
-    def f(self):
-        return 'f()'
-    def g(self):
-        raise ValueError
-    def _h(self):
-        return '_h()'
-    
-def baz():
-    for i in xrange(10):
-        yield i*i
-
-class IteratorProxy(BaseProxy):
-    _exposed_ = ('next', '__next__')
-    def __iter__(self):
-        return self
-    def next(self):
-        return self._callmethod('next')
-    def __next__(self):
-        return self._callmethod('__next__')
-
-class MyManager(BaseManager):
-    pass
-
-MyManager.register('Foo', callable=FooBar)
-MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
-MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
-
-
-class _TestMyManager(BaseTestCase):
-    
-    ALLOWED_TYPES = ('manager',)
-
-    def test_mymanager(self):
-        manager = MyManager()
-        manager.start()
-        
-        foo = manager.Foo()
-        bar = manager.Bar()
-        baz = manager.baz()
-        
-        foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
-        bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
-        
-        self.assertEqual(foo_methods, ['f', 'g'])
-        self.assertEqual(bar_methods, ['f', '_h'])
-        
-        self.assertEqual(foo.f(), 'f()')
-        self.assertRaises(ValueError, foo.g)
-        self.assertEqual(foo._callmethod('f'), 'f()')
-        self.assertRaises(RemoteError, foo._callmethod, '_h')
-        
-        self.assertEqual(bar.f(), 'f()')
-        self.assertEqual(bar._h(), '_h()')
-        self.assertEqual(bar._callmethod('f'), 'f()')
-        self.assertEqual(bar._callmethod('_h'), '_h()')
-        
-        self.assertEqual(list(baz), [i*i for i in range(10)])
-        
-        manager.shutdown()
-        
-#
-# Test of connecting to a remote server and using xmlrpclib for serialization
-#
-
-_queue = Queue.Queue()
-def get_queue():
-    return _queue
-
-class QueueManager(BaseManager):
-    '''manager class used by server process'''
-QueueManager.register('get_queue', callable=get_queue)
-
-class QueueManager2(BaseManager):
-    '''manager class which specifies the same interface as QueueManager'''
-QueueManager2.register('get_queue')
-
-
-SERIALIZER = 'xmlrpclib'
-
-class _TestRemoteManager(BaseTestCase):
-
-    ALLOWED_TYPES = ('manager',)
-    
-    def _putter(self, address, authkey):
-        manager = QueueManager2(
-            address=address, authkey=authkey, serializer=SERIALIZER
-            )
-        manager.connect()
-        queue = manager.get_queue()
-        queue.put(('hello world', None, True, 2.25))
-
-    def test_remote(self):
-        authkey = os.urandom(32)
-
-        manager = QueueManager(
-            address=('localhost', 0), authkey=authkey, serializer=SERIALIZER
-            )
-        manager.start()
-
-        p = self.Process(target=self._putter, args=(manager.address, authkey))
-        p.start()
-        
-        manager2 = QueueManager2(
-            address=manager.address, authkey=authkey, serializer=SERIALIZER
-            )
-        manager2.connect()
-        queue = manager2.get_queue()
-        
-        # Note that xmlrpclib will deserialize the object as a list, not a tuple
-        self.assertEqual(queue.get(), ['hello world', None, True, 2.25])
-
-        # Because we are using xmlrpclib for serialization instead of
-        # pickle this will cause a serialization error.
-        self.assertRaises(Exception, queue.put, time.sleep)
-
-        # Make queue finalizer run before the server is stopped
-        del queue
-        manager.shutdown()
-
-#
-#
-#
-
-SENTINEL = latin('')
-
-class _TestConnection(BaseTestCase):
-
-    ALLOWED_TYPES = ('processes', 'threads')
-
-    def _echo(self, conn):
-        for msg in iter(conn.recv_bytes, SENTINEL):
-            conn.send_bytes(msg)
-        conn.close()
-
-    def test_connection(self):
-        conn, child_conn = self.Pipe()
-        
-        p = self.Process(target=self._echo, args=(child_conn,))
-        p.set_daemon(True)
-        p.start()
-
-        seq = [1, 2.25, None]
-        msg = latin('hello world')
-        longmsg = msg * 10
-        arr = array.array('i', range(4))
-
-        if self.TYPE == 'processes':
-            self.assertEqual(type(conn.fileno()), int)
-
-        self.assertEqual(conn.send(seq), None)
-        self.assertEqual(conn.recv(), seq)
-
-        self.assertEqual(conn.send_bytes(msg), None)
-        self.assertEqual(conn.recv_bytes(), msg)
-
-        if self.TYPE == 'processes':
-            buffer = array.array('i', [0]*10)
-            expected = list(arr) + [0] * (10 - len(arr))
-            self.assertEqual(conn.send_bytes(arr), None)
-            self.assertEqual(conn.recv_bytes_into(buffer),
-                             len(arr) * buffer.itemsize)
-            self.assertEqual(list(buffer), expected)
-
-            buffer = array.array('i', [0]*10)
-            expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
-            self.assertEqual(conn.send_bytes(arr), None)
-            self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
-                             len(arr) * buffer.itemsize)
-            self.assertEqual(list(buffer), expected)
-
-            buffer = bytearray(latin(' ' * 40))
-            self.assertEqual(conn.send_bytes(longmsg), None)
-            try:
-                res = conn.recv_bytes_into(buffer)
-            except multiprocessing.BufferTooShort, e:
-                self.assertEqual(e.args, (longmsg,))
-            else:
-                self.fail('expected BufferTooShort, got %s' % res)
-
-        poll = TimingWrapper(conn.poll)
-
-        self.assertEqual(poll(), False)
-        self.assertTimingAlmostEqual(poll.elapsed, 0)
-
-        self.assertEqual(poll(TIMEOUT1), False)
-        self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
-
-        conn.send(None)
-
-        self.assertEqual(poll(TIMEOUT1), True)
-        self.assertTimingAlmostEqual(poll.elapsed, 0)
-        
-        self.assertEqual(conn.recv(), None)
-
-        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16 MB
-        conn.send_bytes(really_big_msg)
-        self.assertEqual(conn.recv_bytes(), really_big_msg)
-        
-        conn.send_bytes(SENTINEL)                          # tell child to quit
-        child_conn.close()
-
-        if self.TYPE == 'processes':
-            self.assertEqual(conn.readable, True)
-            self.assertEqual(conn.writable, True)
-            self.assertRaises(EOFError, conn.recv)
-            self.assertRaises(EOFError, conn.recv_bytes)
-
-        p.join()
-        
-    def test_duplex_false(self):
-        reader, writer = self.Pipe(duplex=False)
-        self.assertEqual(writer.send(1), None)
-        self.assertEqual(reader.recv(), 1)
-        if self.TYPE == 'processes':
-            self.assertEqual(reader.readable, True)
-            self.assertEqual(reader.writable, False)
-            self.assertEqual(writer.readable, False)
-            self.assertEqual(writer.writable, True)
-            self.assertRaises(IOError, reader.send, 2)
-            self.assertRaises(IOError, writer.recv)
-            self.assertRaises(IOError, writer.poll)
-
-    def test_spawn_close(self):
-        # We test that a pipe connection can be closed by the parent
-        # process immediately after the child is spawned.  On Windows
-        # this sometimes failed with old versions because child_conn
-        # would be closed before the child got a chance to duplicate
-        # it.
-        conn, child_conn = self.Pipe()
-        
-        p = self.Process(target=self._echo, args=(child_conn,))
-        p.start()
-        child_conn.close()    # this might complete before child initializes
-
-        msg = latin('hello')
-        conn.send_bytes(msg)
-        self.assertEqual(conn.recv_bytes(), msg)
-
-        conn.send_bytes(SENTINEL)
-        conn.close()
-        p.join()
-
-    def test_sendbytes(self):
-        if self.TYPE != 'processes':
-            return
-
-        msg = latin('abcdefghijklmnopqrstuvwxyz')
-        a, b = self.Pipe()
-        
-        a.send_bytes(msg)
-        self.assertEqual(b.recv_bytes(), msg)
-
-        a.send_bytes(msg, 5)
-        self.assertEqual(b.recv_bytes(), msg[5:])
-
-        a.send_bytes(msg, 7, 8)
-        self.assertEqual(b.recv_bytes(), msg[7:7+8])
-
-        a.send_bytes(msg, 26)
-        self.assertEqual(b.recv_bytes(), latin(''))
-
-        a.send_bytes(msg, 26, 0)
-        self.assertEqual(b.recv_bytes(), latin(''))
-
-        self.assertRaises(ValueError, a.send_bytes, msg, 27)
-        
-        self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
-        
-        self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
-
-        self.assertRaises(ValueError, a.send_bytes, msg, -1)
-
-        self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
-        
-
-class _TestListenerClient(BaseTestCase):
-
-    ALLOWED_TYPES = ('processes', 'threads')
-
-    def _test(self, address):
-        conn = self.connection.Client(address)
-        conn.send('hello')
-        conn.close()
-
-    def test_listener_client(self):        
-        for family in self.connection.families:
-            l = self.connection.Listener(family=family)
-            p = self.Process(target=self._test, args=(l.address,))
-            p.set_daemon(True)
-            p.start()
-            conn = l.accept()
-            self.assertEqual(conn.recv(), 'hello')
-            p.join()
-            l.close()
-
-#
-# Test of sending connection and socket objects between processes
-#
-
-class _TestPicklingConnections(BaseTestCase):
-
-    ALLOWED_TYPES = ('processes',)
-
-    def _listener(self, conn, families):
-        for fam in families:
-            l = self.connection.Listener(family=fam)
-            conn.send(l.address)
-            new_conn = l.accept()
-            conn.send(new_conn)
-
-        if self.TYPE == 'processes':
-            l = socket.socket()
-            l.bind(('localhost', 0))
-            conn.send(l.getsockname())
-            l.listen(1)
-            new_conn, addr = l.accept()
-            conn.send(new_conn)
-        
-        conn.recv()
-
-    def _remote(self, conn):
-        for (address, msg) in iter(conn.recv, None):
-            client = self.connection.Client(address)
-            client.send(msg.upper())
-            client.close()
-
-        if self.TYPE == 'processes':
-            address, msg = conn.recv()
-            client = socket.socket()
-            client.connect(address)
-            client.sendall(msg.upper())
-            client.close()
-
-        conn.close()
-
-    def test_pickling(self):
-        try:
-            multiprocessing.allow_connection_pickling()
-        except ImportError:
-            return
-        
-        families = self.connection.families
-
-        lconn, lconn0 = self.Pipe()
-        lp = self.Process(target=self._listener, args=(lconn0, families))
-        lp.start()
-        lconn0.close()
-
-        rconn, rconn0 = self.Pipe()
-        rp = self.Process(target=self._remote, args=(rconn0,))
-        rp.start()
-        rconn0.close()
-
-        for fam in families:
-            msg = ('This connection uses family %s' % fam).encode('ascii')
-            address = lconn.recv()
-            rconn.send((address, msg))
-            new_conn = lconn.recv()
-            self.assertEqual(new_conn.recv(), msg.upper())
-            
-        rconn.send(None)
-
-        if self.TYPE == 'processes':
-            msg = latin('This connection uses a normal socket')
-            address = lconn.recv()
-            rconn.send((address, msg))
-            if hasattr(socket, 'fromfd'):
-                new_conn = lconn.recv()
-                self.assertEqual(new_conn.recv(100), msg.upper())
-            else:
-                # XXX On Windows with Py2.6 need to backport fromfd()
-                discard = lconn.recv_bytes()
-                
-        lconn.send(None)
-        
-        rconn.close()
-        lconn.close()
-        
-        lp.join()
-        rp.join()
-
-#
-#
-#
-
-class _TestHeap(BaseTestCase):
-
-    ALLOWED_TYPES = ('processes',)
-
-    def test_heap(self):
-        iterations = 5000
-        maxblocks = 50
-        blocks = []
-
-        # create and destroy lots of blocks of different sizes
-        for i in xrange(iterations):
-            size = int(random.lognormvariate(0, 1) * 1000)
-            b = multiprocessing.heap.BufferWrapper(size)
-            blocks.append(b)
-            if len(blocks) > maxblocks:
-                i = random.randrange(maxblocks)
-                del blocks[i]
-
-        # get the heap object
-        heap = multiprocessing.heap.BufferWrapper._heap
-
-        # verify the state of the heap
-        all = []
-        occupied = 0
-        for L in heap._len_to_seq.values():
-            for arena, start, stop in L:
-                all.append((heap._arenas.index(arena), start, stop,
-                            stop-start, 'free'))
-        for arena, start, stop in heap._allocated_blocks:
-            all.append((heap._arenas.index(arena), start, stop,
-                        stop-start, 'occupied'))
-            occupied += (stop-start)
-
-        all.sort()
-
-        for i in range(len(all)-1):
-            (arena, start, stop) = all[i][:3]
-            (narena, nstart, nstop) = all[i+1][:3]
-            self.assertTrue((arena != narena and nstart == 0) or
-                            (stop == nstart))
-            
-#
-#
-#
-
-try:
-    from ctypes import Structure, Value, copy, c_int, c_double
-except ImportError:
-    Structure = object
-    c_int = c_double = None
-
-class _Foo(Structure):
-    _fields_ = [
-        ('x', c_int),
-        ('y', c_double)
-        ]
-
-class _TestSharedCTypes(BaseTestCase):
-
-    ALLOWED_TYPES = ('processes',)
-
-    def _double(self, x, y, foo, arr, string):
-        x.value *= 2
-        y.value *= 2
-        foo.x *= 2
-        foo.y *= 2
-        string.value *= 2
-        for i in range(len(arr)):
-            arr[i] *= 2
-
-    def test_sharedctypes(self, lock=False):
-        if c_int is None:
-            return
-        
-        x = Value('i', 7, lock=lock)
-        y = Value(ctypes.c_double, 1.0/3.0, lock=lock)
-        foo = Value(_Foo, 3, 2, lock=lock)
-        arr = Array('d', range(10), lock=lock)
-        string = Array('c', 20, lock=lock)
-        string.value = 'hello'
-
-        p = self.Process(target=self._double, args=(x, y, foo, arr, string))
-        p.start()
-        p.join()
-
-        self.assertEqual(x.value, 14)
-        self.assertAlmostEqual(y.value, 2.0/3.0)
-        self.assertEqual(foo.x, 6)
-        self.assertAlmostEqual(foo.y, 4.0)
-        for i in range(10):
-            self.assertAlmostEqual(arr[i], i*2)
-        self.assertEqual(string.value, latin('hellohello'))
-
-    def test_synchronize(self):
-        self.test_sharedctypes(lock=True)
-
-    def test_copy(self):
-        if c_int is None:
-            return
-
-        foo = _Foo(2, 5.0)
-        bar = copy(foo)
-        foo.x = 0
-        foo.y = 0
-        self.assertEqual(bar.x, 2)
-        self.assertAlmostEqual(bar.y, 5.0)
-
-#
-#
-#
-
-class _TestFinalize(BaseTestCase):
-
-    ALLOWED_TYPES = ('processes',)
-
-    def _test_finalize(self, conn):
-        class Foo(object):
-            pass
-
-        a = Foo()
-        util.Finalize(a, conn.send, args=('a',))
-        del a           # triggers callback for a
-
-        b = Foo()
-        close_b = util.Finalize(b, conn.send, args=('b',))    
-        close_b()       # triggers callback for b
-        close_b()       # does nothing because callback has already been called
-        del b           # does nothing because callback has already been called
-
-        c = Foo()
-        util.Finalize(c, conn.send, args=('c',))
-
-        d10 = Foo()
-        util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
-
-        d01 = Foo()
-        util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
-        d02 = Foo()
-        util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
-        d03 = Foo()
-        util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
-
-        util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
-
-        util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
-
-        # call multiprocessing's cleanup function, then exit the process
-        # without garbage collecting locals
-        util._exit_function()
-        conn.close()
-        os._exit(0)
-
-    def test_finalize(self):
-        conn, child_conn = self.Pipe()
-        
-        p = self.Process(target=self._test_finalize, args=(child_conn,))
-        p.start()
-        p.join()
-
-        result = [obj for obj in iter(conn.recv, 'STOP')]
-        self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
-
-#
-# Test that from ... import * works for each module
-#
-
-class _TestImportStar(BaseTestCase):
-
-    ALLOWED_TYPES = ('processes',)
-
-    def test_import(self):
-        modules = (
-            'multiprocessing', 'multiprocessing.connection',
-            'multiprocessing.heap', 'multiprocessing.managers',
-            'multiprocessing.pool', 'multiprocessing.process',
-            'multiprocessing.reduction', 'multiprocessing.sharedctypes',
-            'multiprocessing.synchronize', 'multiprocessing.util'
-            )
-        
-        for name in modules:
-            __import__(name)
-            mod = sys.modules[name]
-            
-            for attr in getattr(mod, '__all__', ()):
-                self.assertTrue(
-                    hasattr(mod, attr),
-                    '%r does not have attribute %r' % (mod, attr)
-                    )
-
-#
-# Quick test that logging works -- does not test logging output
-#
-
-class _TestLogging(BaseTestCase):
-
-    ALLOWED_TYPES = ('processes',)
-
-    def test_enable_logging(self):
-        logger = multiprocessing.get_logger()
-        logger.setLevel(util.SUBWARNING)
-        self.assertTrue(logger is not None)
-        logger.debug('this will not be printed')
-        logger.info('nor will this')
-        logger.setLevel(LOG_LEVEL)
-
-    def _test_level(self, conn):
-        logger = multiprocessing.get_logger()
-        conn.send(logger.getEffectiveLevel())
-
-    def test_level(self):
-        LEVEL1 = 32
-        LEVEL2 = 37
-        
-        logger = multiprocessing.get_logger()
-        root_logger = logging.getLogger()
-        root_level = root_logger.level
-
-        reader, writer = multiprocessing.Pipe(duplex=False)
-
-        logger.setLevel(LEVEL1)
-        self.Process(target=self._test_level, args=(writer,)).start()
-        self.assertEqual(LEVEL1, reader.recv())
-
-        logger.setLevel(logging.NOTSET)
-        root_logger.setLevel(LEVEL2)
-        self.Process(target=self._test_level, args=(writer,)).start()
-        self.assertEqual(LEVEL2, reader.recv())
-
-        root_logger.setLevel(root_level)
-        logger.setLevel(level=LOG_LEVEL)
-
-#
-# Functions used to create test cases from the base ones in this module
-#
-
-def get_attributes(Source, names):
-    d = {}
-    for name in names:
-        obj = getattr(Source, name)
-        if type(obj) == type(get_attributes):
-            obj = staticmethod(obj)
-        d[name] = obj
-    return d
-
-def create_test_cases(Mixin, type):
-    result = {}
-    glob = globals()
-    Type = type[0].upper() + type[1:]
-
-    for name in glob.keys():
-        if name.startswith('_Test'):
-            base = glob[name]
-            if type in base.ALLOWED_TYPES:
-                newname = 'With' + Type + name[1:]
-                class Temp(base, unittest.TestCase, Mixin):
-                    pass
-                result[newname] = Temp
-                Temp.__name__ = newname
-                Temp.__module__ = Mixin.__module__
-    return result
-
-#
-# Create test cases
-#
-
-class ProcessesMixin(object):
-    TYPE = 'processes'
-    Process = multiprocessing.Process
-    locals().update(get_attributes(multiprocessing, (
-        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
-        'Condition', 'Event', 'Value', 'Array', 'RawValue',
-        'RawArray', 'current_process', 'active_children', 'Pipe',
-        'connection', 'JoinableQueue'
-        )))
-
-testcases_processes = create_test_cases(ProcessesMixin, type='processes')
-globals().update(testcases_processes)
-
-
-class ManagerMixin(object):
-    TYPE = 'manager'
-    Process = multiprocessing.Process
-    manager = object.__new__(multiprocessing.managers.SyncManager)
-    locals().update(get_attributes(manager, (
-        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 
-       'Condition', 'Event', 'Value', 'Array', 'list', 'dict',
-        'Namespace', 'JoinableQueue'
-        )))
-
-testcases_manager = create_test_cases(ManagerMixin, type='manager')
-globals().update(testcases_manager)
-
-
-class ThreadsMixin(object):
-    TYPE = 'threads'
-    Process = multiprocessing.dummy.Process
-    locals().update(get_attributes(multiprocessing.dummy, (
-        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
-        'Condition', 'Event', 'Value', 'Array', 'current_process',
-        'active_children', 'Pipe', 'connection', 'dict', 'list',
-        'Namespace', 'JoinableQueue'
-        )))
-
-testcases_threads = create_test_cases(ThreadsMixin, type='threads')
-globals().update(testcases_threads)
-
-#
-#
-#
-
-def test_main(run=None):
-    if run is None:
-        from test.test_support import run_unittest as run
-
-    util.get_temp_dir()     # creates temp directory for use by all processes
-    
-    multiprocessing.get_logger().setLevel(LOG_LEVEL)
-
-    ProcessesMixin.pool = multiprocessing.Pool(4)
-    ThreadsMixin.pool = multiprocessing.dummy.Pool(4)
-    ManagerMixin.manager.__init__()
-    ManagerMixin.manager.start()
-    ManagerMixin.pool = ManagerMixin.manager.Pool(4)
-
-    testcases = (
-        sorted(testcases_processes.values(), key=lambda tc:tc.__name__) +
-        sorted(testcases_threads.values(), key=lambda tc:tc.__name__) +
-        sorted(testcases_manager.values(), key=lambda tc:tc.__name__)
-        )
-
-    loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
-    suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)
-    run(suite)
-
-    ThreadsMixin.pool.terminate()
-    ProcessesMixin.pool.terminate()
-    ManagerMixin.pool.terminate()
-    ManagerMixin.manager.shutdown()
-    
-    del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool
-
-def main():
-    test_main(unittest.TextTestRunner(verbosity=2).run)
-
-if __name__ == '__main__':
-    main()
+#
+# Unit tests for the multiprocessing package
+#
+
+import unittest
+import threading
+import queue as pyqueue
+import time
+import sys
+import os
+import gc
+import signal
+import array
+import copy
+import socket
+import random
+import logging
+
+import multiprocessing.dummy
+import multiprocessing.connection
+import multiprocessing.managers
+import multiprocessing.heap
+import multiprocessing.pool
+import _multiprocessing
+
+from multiprocessing import util
+
+#
+#
+#
+
+if sys.version_info >= (3, 0):
+    def latin(s):
+        return s.encode('latin')
+else:
+    latin = str
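+# (Illustrative note: on 3.x latin('abc') yields b'abc', while on 2.x
+# latin is simply str, so the tests can build byte strings portably
+# across both versions.)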
+
+try:
+    bytes
+except NameError:
+    bytes = str
+    def bytearray(seq):
+        return array.array('c', seq)
+
+#
+# Constants
+#
+
+LOG_LEVEL = util.SUBWARNING
+#LOG_LEVEL = logging.WARNING
+
+DELTA = 0.1
+CHECK_TIMINGS = False     # setting this True makes the tests take a lot
+                          # longer and can sometimes cause non-serious
+                          # failures, because some calls block a bit
+                          # longer than expected
+if CHECK_TIMINGS:
+    TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
+else:
+    TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
+
+HAVE_GETVALUE = not getattr(_multiprocessing,
+                            'HAVE_BROKEN_SEM_GETVALUE', False)
+
+#
+# Creates a wrapper for a function which records the time it takes to finish
+#
+
+class TimingWrapper(object):
+
+    def __init__(self, func):
+        self.func = func
+        self.elapsed = None
+
+    def __call__(self, *args, **kwds):
+        t = time.time()
+        try:
+            return self.func(*args, **kwds)
+        finally:
+            self.elapsed = time.time() - t
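+
+# A minimal usage sketch (illustrative only; the names below are not
+# part of the test suite):
+#
+#     sleep = TimingWrapper(time.sleep)
+#     sleep(0.2)
+#     print(sleep.elapsed)   # roughly 0.2, recorded even on an exception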
+
+#
+# Base class for test cases
+#
+
+class BaseTestCase(object):
+
+    ALLOWED_TYPES = ('processes', 'manager', 'threads')
+
+    def assertTimingAlmostEqual(self, a, b):
+        if CHECK_TIMINGS:
+            self.assertAlmostEqual(a, b, 1)
+
+    def assertReturnsIfImplemented(self, value, func, *args):
+        try:
+            res = func(*args)
+        except NotImplementedError:
+            pass
+        else:
+            return self.assertEqual(value, res)
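+
+    # Illustrative use, as in the semaphore tests below: the assertion
+    # is only made when func(*args) is actually implemented for the
+    # object under test, e.g.
+    #     self.assertReturnsIfImplemented(2, get_value, sem)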
+
+#
+# Return the value of a semaphore
+#
+
+def get_value(self):
+    try:
+        return self.get_value()
+    except AttributeError:
+        try:
+            return self._Semaphore__value
+        except AttributeError:
+            try:
+                return self._value
+            except AttributeError:
+                raise NotImplementedError
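+
+# (Why the fallback chain: multiprocessing semaphores provide
+# get_value(); threading.Semaphore keeps its counter in a private
+# attribute, name-mangled to _Semaphore__value on 2.x and plain _value
+# on 3.x; anything exposing none of these -- a manager proxy, say --
+# raises NotImplementedError so assertReturnsIfImplemented() skips it.)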
+
+#
+# Testcases
+#
+
+class _TestProcess(BaseTestCase):
+
+    ALLOWED_TYPES = ('processes', 'threads')
+
+    def test_current(self):
+        if self.TYPE == 'threads':
+            return
+
+        current = self.current_process()
+        authkey = current.get_authkey()
+
+        self.assertTrue(current.is_alive())
+        self.assertTrue(not current.is_daemon())
+        self.assertTrue(isinstance(authkey, bytes))
+        self.assertTrue(len(authkey) > 0)
+        self.assertEqual(current.get_ident(), os.getpid())
+        self.assertEqual(current.get_exitcode(), None)
+
+    def _test(self, q, *args, **kwds):
+        current = self.current_process()
+        q.put(args)
+        q.put(kwds)
+        q.put(current.get_name())
+        if self.TYPE != 'threads':
+            q.put(bytes(current.get_authkey()))
+            q.put(current.pid)
+
+    def test_process(self):
+        q = self.Queue(1)
+        e = self.Event()
+        args = (q, 1, 2)
+        kwargs = {'hello':23, 'bye':2.54}
+        name = 'SomeProcess'
+        p = self.Process(
+            target=self._test, args=args, kwargs=kwargs, name=name
+            )
+        p.set_daemon(True)
+        current = self.current_process()
+
+        if self.TYPE != 'threads':
+            self.assertEquals(p.get_authkey(), current.get_authkey())
+        self.assertEquals(p.is_alive(), False)
+        self.assertEquals(p.is_daemon(), True)
+        self.assertTrue(p not in self.active_children())
+        self.assertTrue(type(self.active_children()) is list)
+        self.assertEqual(p.get_exitcode(), None)
+
+        p.start()
+
+        self.assertEquals(p.get_exitcode(), None)
+        self.assertEquals(p.is_alive(), True)
+        self.assertTrue(p in self.active_children())
+
+        self.assertEquals(q.get(), args[1:])
+        self.assertEquals(q.get(), kwargs)
+        self.assertEquals(q.get(), p.get_name())
+        if self.TYPE != 'threads':
+            self.assertEquals(q.get(), current.get_authkey())
+            self.assertEquals(q.get(), p.pid)
+
+        p.join()
+
+        self.assertEquals(p.get_exitcode(), 0)
+        self.assertEquals(p.is_alive(), False)
+        self.assertTrue(p not in self.active_children())
+
+    def _test_terminate(self):
+        time.sleep(1000)
+
+    def test_terminate(self):
+        if self.TYPE == 'threads':
+            return
+
+        p = self.Process(target=self._test_terminate)
+        p.set_daemon(True)
+        p.start()
+
+        self.assertEqual(p.is_alive(), True)
+        self.assertTrue(p in self.active_children())
+        self.assertEqual(p.get_exitcode(), None)
+
+        p.terminate()
+
+        join = TimingWrapper(p.join)
+        self.assertEqual(join(), None)
+        self.assertTimingAlmostEqual(join.elapsed, 0.0)
+
+        self.assertEqual(p.is_alive(), False)
+        self.assertTrue(p not in self.active_children())
+
+        p.join()
+
+        # XXX sometimes get p.get_exitcode() == 0 on Windows ...
+        #self.assertEqual(p.get_exitcode(), -signal.SIGTERM)
+
+    def test_cpu_count(self):
+        try:
+            cpus = multiprocessing.cpu_count()
+        except NotImplementedError:
+            cpus = 1
+        self.assertTrue(type(cpus) is int)
+        self.assertTrue(cpus >= 1)
+
+    def test_active_children(self):
+        self.assertEqual(type(self.active_children()), list)
+
+        p = self.Process(target=time.sleep, args=(DELTA,))
+        self.assertTrue(p not in self.active_children())
+
+        p.start()
+        self.assertTrue(p in self.active_children())
+
+        p.join()
+        self.assertTrue(p not in self.active_children())
+
+    def _test_recursion(self, wconn, id):
+        from multiprocessing import forking
+        wconn.send(id)
+        if len(id) < 2:
+            for i in range(2):
+                p = self.Process(
+                    target=self._test_recursion, args=(wconn, id+[i])
+                    )
+                p.start()
+                p.join()
+
+    def test_recursion(self):
+        rconn, wconn = self.Pipe(duplex=False)
+        self._test_recursion(wconn, [])
+
+        time.sleep(DELTA)
+        result = []
+        while rconn.poll():
+            result.append(rconn.recv())
+
+        expected = [
+            [],
+              [0],
+                [0, 0],
+                [0, 1],
+              [1],
+                [1, 0],
+                [1, 1]
+            ]
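+        # The ids arrive in depth-first (preorder) order because each
+        # process sends its own id before spawning and joining its two
+        # children in turn.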
+        self.assertEqual(result, expected)
+
+#
+#
+#
+
+class _UpperCaser(multiprocessing.Process):
+
+    def __init__(self):
+        multiprocessing.Process.__init__(self)
+        self.child_conn, self.parent_conn = multiprocessing.Pipe()
+
+    def run(self):
+        self.parent_conn.close()
+        for s in iter(self.child_conn.recv, None):
+            self.child_conn.send(s.upper())
+        self.child_conn.close()
+
+    def submit(self, s):
+        assert type(s) is str
+        self.parent_conn.send(s)
+        return self.parent_conn.recv()
+
+    def stop(self):
+        self.parent_conn.send(None)
+        self.parent_conn.close()
+        self.child_conn.close()
+
+class _TestSubclassingProcess(BaseTestCase):
+
+    ALLOWED_TYPES = ('processes',)
+
+    def test_subclassing(self):
+        uppercaser = _UpperCaser()
+        uppercaser.start()
+        self.assertEqual(uppercaser.submit('hello'), 'HELLO')
+        self.assertEqual(uppercaser.submit('world'), 'WORLD')
+        uppercaser.stop()
+        uppercaser.join()
+
+#
+#
+#
+
+def queue_empty(q):
+    if hasattr(q, 'empty'):
+        return q.empty()
+    else:
+        return q.qsize() == 0
+
+def queue_full(q, maxsize):
+    if hasattr(q, 'full'):
+        return q.full()
+    else:
+        return q.qsize() == maxsize
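+
+# (Defensive fallbacks: qsize() is consulted for queue flavours that
+# might not provide empty()/full(); the available methods depend on the
+# queue type under test.)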
+
+
+class _TestQueue(BaseTestCase):
+
+    def _test_put(self, queue, child_can_start, parent_can_continue):
+        child_can_start.wait()
+        for i in range(6):
+            queue.get()
+        parent_can_continue.set()
+
+    def test_put(self):
+        MAXSIZE = 6
+        queue = self.Queue(maxsize=MAXSIZE)
+        child_can_start = self.Event()
+        parent_can_continue = self.Event()
+
+        proc = self.Process(
+            target=self._test_put,
+            args=(queue, child_can_start, parent_can_continue)
+            )
+        proc.set_daemon(True)
+        proc.start()
+
+        self.assertEqual(queue_empty(queue), True)
+        self.assertEqual(queue_full(queue, MAXSIZE), False)
+
+        queue.put(1)
+        queue.put(2, True)
+        queue.put(3, True, None)
+        queue.put(4, False)
+        queue.put(5, False, None)
+        queue.put_nowait(6)
+
+        # the values may be in the buffer but not yet in the pipe, so sleep a bit
+        time.sleep(DELTA)
+
+        self.assertEqual(queue_empty(queue), False)
+        self.assertEqual(queue_full(queue, MAXSIZE), True)
+
+        put = TimingWrapper(queue.put)
+        put_nowait = TimingWrapper(queue.put_nowait)
+
+        self.assertRaises(pyqueue.Full, put, 7, False)
+        self.assertTimingAlmostEqual(put.elapsed, 0)
+
+        self.assertRaises(pyqueue.Full, put, 7, False, None)
+        self.assertTimingAlmostEqual(put.elapsed, 0)
+
+        self.assertRaises(pyqueue.Full, put_nowait, 7)
+        self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
+
+        self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
+        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
+
+        self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
+        self.assertTimingAlmostEqual(put.elapsed, 0)
+
+        self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
+        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
+
+        child_can_start.set()
+        parent_can_continue.wait()
+
+        self.assertEqual(queue_empty(queue), True)
+        self.assertEqual(queue_full(queue, MAXSIZE), False)
+
+        proc.join()
+
+    def _test_get(self, queue, child_can_start, parent_can_continue):
+        child_can_start.wait()
+        queue.put(1)
+        queue.put(2)
+        queue.put(3)
+        queue.put(4)
+        queue.put(5)
+        parent_can_continue.set()
+
+    def test_get(self):
+        queue = self.Queue()
+        child_can_start = self.Event()
+        parent_can_continue = self.Event()
+
+        proc = self.Process(
+            target=self._test_get,
+            args=(queue, child_can_start, parent_can_continue)
+            )
+        proc.set_daemon(True)
+        proc.start()
+
+        self.assertEqual(queue_empty(queue), True)
+
+        child_can_start.set()
+        parent_can_continue.wait()
+
+        time.sleep(DELTA)
+        self.assertEqual(queue_empty(queue), False)
+
+        self.assertEqual(queue.get(), 1)
+        self.assertEqual(queue.get(True, None), 2)
+        self.assertEqual(queue.get(True), 3)
+        self.assertEqual(queue.get(timeout=1), 4)
+        self.assertEqual(queue.get_nowait(), 5)
+
+        self.assertEqual(queue_empty(queue), True)
+
+        get = TimingWrapper(queue.get)
+        get_nowait = TimingWrapper(queue.get_nowait)
+
+        self.assertRaises(pyqueue.Empty, get, False)
+        self.assertTimingAlmostEqual(get.elapsed, 0)
+
+        self.assertRaises(pyqueue.Empty, get, False, None)
+        self.assertTimingAlmostEqual(get.elapsed, 0)
+
+        self.assertRaises(pyqueue.Empty, get_nowait)
+        self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
+
+        self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
+        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
+
+        self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
+        self.assertTimingAlmostEqual(get.elapsed, 0)
+
+        self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
+        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
+
+        proc.join()
+
+    def _test_fork(self, queue):
+        for i in range(10, 20):
+            queue.put(i)
+        # note that at this point the items may only be buffered, so the
+        # process cannot shut down until the feeder thread has finished
+        # pushing items onto the pipe.
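+        # (Mechanism, as the tests assume it: put() appends to an
+        # in-process buffer and a background "feeder" thread later
+        # writes the buffered items into the underlying pipe, which is
+        # why _test_fork() can return before its items are readable.)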
+
+    def test_fork(self):
+        # Old versions of Queue would fail to create a new feeder
+        # thread for a forked process if the original process had its
+        # own feeder thread.  This test checks that this no longer
+        # happens.
+
+        queue = self.Queue()
+
+        # put items on queue so that main process starts a feeder thread
+        for i in range(10):
+            queue.put(i)
+
+        # wait to make sure thread starts before we fork a new process
+        time.sleep(DELTA)
+
+        # fork process
+        p = self.Process(target=self._test_fork, args=(queue,))
+        p.start()
+
+        # check that all expected items are in the queue
+        for i in range(20):
+            self.assertEqual(queue.get(), i)
+        self.assertRaises(pyqueue.Empty, queue.get, False)
+
+        p.join()
+
+    def test_qsize(self):
+        q = self.Queue()
+        try:
+            self.assertEqual(q.qsize(), 0)
+        except NotImplementedError:
+            return
+        q.put(1)
+        self.assertEqual(q.qsize(), 1)
+        q.put(5)
+        self.assertEqual(q.qsize(), 2)
+        q.get()
+        self.assertEqual(q.qsize(), 1)
+        q.get()
+        self.assertEqual(q.qsize(), 0)
+
+    def _test_task_done(self, q):
+        for obj in iter(q.get, None):
+            time.sleep(DELTA)
+            q.task_done()
+
+    def test_task_done(self):
+        queue = self.JoinableQueue()
+
+        if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'):
+            return
+
+        workers = [self.Process(target=self._test_task_done, args=(queue,))
+                   for i in range(4)]
+
+        for p in workers:
+            p.start()
+
+        for i in range(10):
+            queue.put(i)
+
+        queue.join()
+
+        for p in workers:
+            queue.put(None)
+
+        for p in workers:
+            p.join()
+
+#
+#
+#
+
+class _TestLock(BaseTestCase):
+
+    def test_lock(self):
+        lock = self.Lock()
+        self.assertEqual(lock.acquire(), True)
+        self.assertEqual(lock.acquire(False), False)
+        self.assertEqual(lock.release(), None)
+        self.assertRaises((ValueError, threading.ThreadError), lock.release)
+
+    def test_rlock(self):
+        lock = self.RLock()
+        self.assertEqual(lock.acquire(), True)
+        self.assertEqual(lock.acquire(), True)
+        self.assertEqual(lock.acquire(), True)
+        self.assertEqual(lock.release(), None)
+        self.assertEqual(lock.release(), None)
+        self.assertEqual(lock.release(), None)
+        self.assertRaises((AssertionError, RuntimeError), lock.release)
+
+
+class _TestSemaphore(BaseTestCase):
+
+    def _test_semaphore(self, sem):
+        self.assertReturnsIfImplemented(2, get_value, sem)
+        self.assertEqual(sem.acquire(), True)
+        self.assertReturnsIfImplemented(1, get_value, sem)
+        self.assertEqual(sem.acquire(), True)
+        self.assertReturnsIfImplemented(0, get_value, sem)
+        self.assertEqual(sem.acquire(False), False)
+        self.assertReturnsIfImplemented(0, get_value, sem)
+        self.assertEqual(sem.release(), None)
+        self.assertReturnsIfImplemented(1, get_value, sem)
+        self.assertEqual(sem.release(), None)
+        self.assertReturnsIfImplemented(2, get_value, sem)
+
+    def test_semaphore(self):
+        sem = self.Semaphore(2)
+        self._test_semaphore(sem)
+        self.assertEqual(sem.release(), None)
+        self.assertReturnsIfImplemented(3, get_value, sem)
+        self.assertEqual(sem.release(), None)
+        self.assertReturnsIfImplemented(4, get_value, sem)
+
+    def test_bounded_semaphore(self):
+        sem = self.BoundedSemaphore(2)
+        self._test_semaphore(sem)
+        # Currently fails on OS X
+        #if HAVE_GETVALUE:
+        #    self.assertRaises(ValueError, sem.release)
+        #    self.assertReturnsIfImplemented(2, get_value, sem)
+
+    def test_timeout(self):
+        if self.TYPE != 'processes':
+            return
+
+        sem = self.Semaphore(0)
+        acquire = TimingWrapper(sem.acquire)
+
+        self.assertEqual(acquire(False), False)
+        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
+
+        self.assertEqual(acquire(False, None), False)
+        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
+
+        self.assertEqual(acquire(False, TIMEOUT1), False)
+        self.assertTimingAlmostEqual(acquire.elapsed, 0)
+
+        self.assertEqual(acquire(True, TIMEOUT2), False)
+        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
+
+        self.assertEqual(acquire(timeout=TIMEOUT3), False)
+        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
+
+
+class _TestCondition(BaseTestCase):
+
+    def f(self, cond, sleeping, woken, timeout=None):
+        cond.acquire()
+        sleeping.release()
+        cond.wait(timeout)
+        woken.release()
+        cond.release()
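+
+    # f() follows a simple handshake: "sleeping" is released just
+    # before the worker blocks in cond.wait(), and "woken" just after
+    # the wait returns, so the tests can count sleepers and wakers
+    # through the two semaphores.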
+
+    def check_invariant(self, cond):
+        # this is only supposed to succeed when there are no sleepers
+        if self.TYPE == 'processes':
+            try:
+                sleepers = (cond._sleeping_count.get_value() -
+                            cond._woken_count.get_value())
+                self.assertEqual(sleepers, 0)
+                self.assertEqual(cond._wait_semaphore.get_value(), 0)
+            except NotImplementedError:
+                pass
+
+    def test_notify(self):
+        cond = self.Condition()
+        sleeping = self.Semaphore(0)
+        woken = self.Semaphore(0)
+
+        p = self.Process(target=self.f, args=(cond, sleeping, woken))
+        p.set_daemon(True)
+        p.start()
+
+        p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
+        p.setDaemon(True)
+        p.start()
+
+        # wait for both children to start sleeping
+        sleeping.acquire()
+        sleeping.acquire()
+
+        # check no process/thread has woken up
+        time.sleep(DELTA)
+        self.assertReturnsIfImplemented(0, get_value, woken)
+
+        # wake up one process/thread
+        cond.acquire()
+        cond.notify()
+        cond.release()
+
+        # check one process/thread has woken up
+        time.sleep(DELTA)
+        self.assertReturnsIfImplemented(1, get_value, woken)
+
+        # wake up another
+        cond.acquire()
+        cond.notify()
+        cond.release()
+
+        # check other has woken up
+        time.sleep(DELTA)
+        self.assertReturnsIfImplemented(2, get_value, woken)
+
+        # check state is not mucked up
+        self.check_invariant(cond)
+        p.join()
+
+    def test_notify_all(self):
+        cond = self.Condition()
+        sleeping = self.Semaphore(0)
+        woken = self.Semaphore(0)
+
+        # start some threads/processes which will time out
+        for i in range(3):
+            p = self.Process(target=self.f,
+                             args=(cond, sleeping, woken, TIMEOUT1))
+            p.set_daemon(True)
+            p.start()
+
+            t = threading.Thread(target=self.f,
+                                 args=(cond, sleeping, woken, TIMEOUT1))
+            t.setDaemon(True)
+            t.start()
+
+        # wait for them all to sleep
+        for i in range(6):
+            sleeping.acquire()
+
+        # check they have all timed out
+        for i in range(6):
+            woken.acquire()
+        self.assertReturnsIfImplemented(0, get_value, woken)
+
+        # check state is not mucked up
+        self.check_invariant(cond)
+
+        # start some more threads/processes
+        for i in range(3):
+            p = self.Process(target=self.f, args=(cond, sleeping, woken))
+            p.set_daemon(True)
+            p.start()
+
+            t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
+            t.setDaemon(True)
+            t.start()
+
+        # wait for them all to sleep
+        for i in range(6):
+            sleeping.acquire()
+
+        # check no process/thread has woken up
+        time.sleep(DELTA)
+        self.assertReturnsIfImplemented(0, get_value, woken)
+
+        # wake them all up
+        cond.acquire()
+        cond.notify_all()
+        cond.release()
+
+        # check they have all woken
+        time.sleep(DELTA)
+        self.assertReturnsIfImplemented(6, get_value, woken)
+
+        # check state is not mucked up
+        self.check_invariant(cond)
+
+    def test_timeout(self):
+        cond = self.Condition()
+        wait = TimingWrapper(cond.wait)
+        cond.acquire()
+        res = wait(TIMEOUT1)
+        cond.release()
+        self.assertEqual(res, None)
+        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
+
+
+class _TestEvent(BaseTestCase):
+
+    def _test_event(self, event):
+        time.sleep(TIMEOUT2)
+        event.set()
+
+    def test_event(self):
+        event = self.Event()
+        wait = TimingWrapper(event.wait)
+
+        # Removed temporarily: due to API shear this does not
+        # work with threading._Event objects (is_set == isSet).
+        #self.assertEqual(event.is_set(), False)
+
+        self.assertEqual(wait(0.0), None)
+        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
+        self.assertEqual(wait(TIMEOUT1), None)
+        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
+
+        event.set()
+
+        # See note above on the API differences
+        # self.assertEqual(event.is_set(), True)
+        self.assertEqual(wait(), None)
+        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
+        self.assertEqual(wait(TIMEOUT1), None)
+        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
+        # self.assertEqual(event.is_set(), True)
+
+        event.clear()
+
+        #self.assertEqual(event.is_set(), False)
+
+        self.Process(target=self._test_event, args=(event,)).start()
+        self.assertEqual(wait(), None)
+
+#
+#
+#
+
+class _TestValue(BaseTestCase):
+
+    codes_values = [
+        ('i', 4343, 24234),
+        ('d', 3.625, -4.25),
+        ('h', -232, 234),
+        ('c', latin('x'), latin('y'))
+        ]
+
+    def _test(self, values):
+        for sv, cv in zip(values, self.codes_values):
+            sv.value = cv[2]
+
+    def test_value(self, raw=False):
+        if self.TYPE != 'processes':
+            return
+
+        if raw:
+            values = [self.RawValue(code, value)
+                      for code, value, _ in self.codes_values]
+        else:
+            values = [self.Value(code, value)
+                      for code, value, _ in self.codes_values]
+
+        for sv, cv in zip(values, self.codes_values):
+            self.assertEqual(sv.value, cv[1])
+
+        proc = self.Process(target=self._test, args=(values,))
+        proc.start()
+        proc.join()
+
+        for sv, cv in zip(values, self.codes_values):
+            self.assertEqual(sv.value, cv[2])
+
+    def test_rawvalue(self):
+        self.test_value(raw=True)
+
+    def test_getobj_getlock(self):
+        if self.TYPE != 'processes':
+            return
+
+        val1 = self.Value('i', 5)
+        lock1 = val1.get_lock()
+        obj1 = val1.get_obj()
+
+        val2 = self.Value('i', 5, lock=None)
+        lock2 = val2.get_lock()
+        obj2 = val2.get_obj()
+
+        lock = self.Lock()
+        val3 = self.Value('i', 5, lock=lock)
+        lock3 = val3.get_lock()
+        obj3 = val3.get_obj()
+        self.assertEqual(lock, lock3)
+
+        arr4 = self.RawValue('i', 5)
+        self.assertFalse(hasattr(arr4, 'get_lock'))
+        self.assertFalse(hasattr(arr4, 'get_obj'))
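+        # RawValue (like RawArray in the next test class) returns a
+        # plain ctypes object with no synchronizing wrapper, which is
+        # why it lacks the get_lock()/get_obj() accessors.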
+
+
+class _TestArray(BaseTestCase):
+
+    def f(self, seq):
+        for i in range(1, len(seq)):
+            seq[i] += seq[i-1]
+
+    def test_array(self, raw=False):
+        if self.TYPE != 'processes':
+            return
+
+        seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
+        if raw:
+            arr = self.RawArray('i', seq)
+        else:
+            arr = self.Array('i', seq)
+
+        self.assertEqual(len(arr), len(seq))
+        self.assertEqual(arr[3], seq[3])
+        self.assertEqual(list(arr[2:7]), list(seq[2:7]))
+
+        arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
+
+        self.assertEqual(list(arr[:]), seq)
+
+        self.f(seq)
+
+        p = self.Process(target=self.f, args=(arr,))
+        p.start()
+        p.join()
+
+        self.assertEqual(list(arr[:]), seq)
+
+    def test_rawarray(self):
+        self.test_array(raw=True)
+
+    def test_getobj_getlock_obj(self):
+        if self.TYPE != 'processes':
+            return
+
+        arr1 = self.Array('i', list(range(10)))
+        lock1 = arr1.get_lock()
+        obj1 = arr1.get_obj()
+
+        arr2 = self.Array('i', list(range(10)), lock=None)
+        lock2 = arr2.get_lock()
+        obj2 = arr2.get_obj()
+
+        lock = self.Lock()
+        arr3 = self.Array('i', list(range(10)), lock=lock)
+        lock3 = arr3.get_lock()
+        obj3 = arr3.get_obj()
+        self.assertEqual(lock, lock3)
+
+        arr4 = self.RawArray('i', list(range(10)))
+        self.assertFalse(hasattr(arr4, 'get_lock'))
+        self.assertFalse(hasattr(arr4, 'get_obj'))
+
+#
+#
+#
+
+class _TestContainers(BaseTestCase):
+
+    ALLOWED_TYPES = ('manager',)
+
+    def test_list(self):
+        a = self.list(list(range(10)))
+        self.assertEqual(a[:], list(range(10)))
+
+        b = self.list()
+        self.assertEqual(b[:], [])
+
+        b.extend(list(range(5)))
+        self.assertEqual(b[:], list(range(5)))
+
+        self.assertEqual(b[2], 2)
+        self.assertEqual(b[2:10], [2,3,4])
+
+        b *= 2
+        self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
+
+        self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
+
+        self.assertEqual(a[:], list(range(10)))
+
+        d = [a, b]
+        e = self.list(d)
+        self.assertEqual(
+            e[:],
+            [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
+            )
+
+        f = self.list([a])
+        a.append('hello')
+        self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
+
+    def test_dict(self):
+        d = self.dict()
+        indices = list(range(65, 70))
+        for i in indices:
+            d[i] = chr(i)
+        self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
+        self.assertEqual(sorted(d.keys()), indices)
+        self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
+        self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
+
+    def test_namespace(self):
+        n = self.Namespace()
+        n.name = 'Bob'
+        n.job = 'Builder'
+        n._hidden = 'hidden'
+        self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
+        del n.job
+        self.assertEqual(str(n), "Namespace(name='Bob')")
+        self.assertTrue(hasattr(n, 'name'))
+        self.assertTrue(not hasattr(n, 'job'))
+
+#
+#
+#
+
+def sqr(x, wait=0.0):
+    time.sleep(wait)
+    return x*x
+
+class _TestPool(BaseTestCase):
+
+    def test_apply(self):
+        papply = self.pool.apply
+        self.assertEqual(papply(sqr, (5,)), sqr(5))
+        self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
+
+    def test_map(self):
+        pmap = self.pool.map
+        self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
+        self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
+                         list(map(sqr, list(range(100)))))
+
+    def test_async(self):
+        res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
+        get = TimingWrapper(res.get)
+        self.assertEqual(get(), 49)
+        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
+
+    def test_async_timeout(self):
+        res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))
+        get = TimingWrapper(res.get)
+        self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
+        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
+
+    def test_imap(self):
+        it = self.pool.imap(sqr, list(range(10)))
+        self.assertEqual(list(it), list(map(sqr, list(range(10)))))
+
+        it = self.pool.imap(sqr, list(range(10)))
+        for i in range(10):
+            self.assertEqual(next(it), i*i)
+        self.assertRaises(StopIteration, it.__next__)
+
+        it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
+        for i in range(1000):
+            self.assertEqual(next(it), i*i)
+        self.assertRaises(StopIteration, it.__next__)
+
+    def test_imap_unordered(self):
+        it = self.pool.imap_unordered(sqr, list(range(1000)))
+        self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
+
+        it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=53)
+        self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
+
+    def test_make_pool(self):
+        p = multiprocessing.Pool(3)
+        self.assertEqual(3, len(p._pool))
+        p.close()
+        p.join()
+
+    def test_terminate(self):
+        if self.TYPE == 'manager':
+            # On Unix a forked process increfs each shared object to
+            # which its parent process held a reference.  If the
+            # forked process gets terminated then there is likely to
+            # be a reference leak.  So to prevent
+            # _TestZZZNumberOfObjects from failing we skip this test
+            # when using a manager.
+            return
+
+        result = self.pool.map_async(
+            time.sleep, [0.1 for i in range(10000)], chunksize=1
+            )
+        self.pool.terminate()
+        join = TimingWrapper(self.pool.join)
+        join()
+        self.assertTrue(join.elapsed < 0.2)
+
+#
+# Test that manager has expected number of shared objects left
+#
+
+class _TestZZZNumberOfObjects(BaseTestCase):
+    # Because test cases are sorted alphabetically, this one will get
+    # run after all the other tests for the manager.  It tests that
+    # there have been no "reference leaks" for the manager's shared
+    # objects.  Note the comment in _TestPool.test_terminate().
+    ALLOWED_TYPES = ('manager',)
+
+    def test_number_of_objects(self):
+        EXPECTED_NUMBER = 1                # the pool object is still alive
+        multiprocessing.active_children()  # discard dead process objs
+        gc.collect()                       # do garbage collection
+        refs = self.manager._number_of_objects()
+        if refs != EXPECTED_NUMBER:
+            print(self.manager._debugInfo())
+
+        self.assertEqual(refs, EXPECTED_NUMBER)
+
+#
+# Test of creating a customized manager class
+#
+
+from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
+
+class FooBar(object):
+    def f(self):
+        return 'f()'
+    def g(self):
+        raise ValueError
+    def _h(self):
+        return '_h()'
+
+def baz():
+    for i in range(10):
+        yield i*i
+
+class IteratorProxy(BaseProxy):
+    _exposed_ = ('__next__',)
+    def __iter__(self):
+        return self
+    def __next__(self):
+        # py3k generators expose __next__(), not next()
+        return self._callmethod('__next__')
+
+class MyManager(BaseManager):
+    pass
+
+MyManager.register('Foo', callable=FooBar)
+MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
+MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
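+
+# A note on the registrations above: passing exposed restricts which
+# methods the proxy forwards (so Bar proxies expose f() and _h() but
+# not g()), while omitting it exposes every public method, so Foo
+# proxies expose f() and g() but not the underscored _h().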
+
+
+class _TestMyManager(BaseTestCase):
+
+    ALLOWED_TYPES = ('manager',)
+
+    def test_mymanager(self):
+        manager = MyManager()
+        manager.start()
+
+        foo = manager.Foo()
+        bar = manager.Bar()
+        baz = manager.baz()
+
+        foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
+        bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
+
+        self.assertEqual(foo_methods, ['f', 'g'])
+        self.assertEqual(bar_methods, ['f', '_h'])
+
+        self.assertEqual(foo.f(), 'f()')
+        self.assertRaises(ValueError, foo.g)
+        self.assertEqual(foo._callmethod('f'), 'f()')
+        self.assertRaises(RemoteError, foo._callmethod, '_h')
+
+        self.assertEqual(bar.f(), 'f()')
+        self.assertEqual(bar._h(), '_h()')
+        self.assertEqual(bar._callmethod('f'), 'f()')
+        self.assertEqual(bar._callmethod('_h'), '_h()')
+
+        self.assertEqual(list(baz), [i*i for i in range(10)])
+
+        manager.shutdown()
+
+#
+# Test of connecting to a remote server and using xmlrpclib for serialization
+#
+
+_queue = pyqueue.Queue()
+def get_queue():
+    return _queue
+
+class QueueManager(BaseManager):
+    '''manager class used by server process'''
+QueueManager.register('get_queue', callable=get_queue)
+
+class QueueManager2(BaseManager):
+    '''manager class which specifies the same interface as QueueManager'''
+QueueManager2.register('get_queue')
+
+
+SERIALIZER = 'xmlrpclib'
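+
+# 'xmlrpclib' selects the alternative serializer that the managers
+# module registers alongside pickle; it only round-trips simple
+# builtin types, which the assertions in test_remote below rely on.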
+
+class _TestRemoteManager(BaseTestCase):
+
+    ALLOWED_TYPES = ('manager',)
+
+    def _putter(self, address, authkey):
+        manager = QueueManager2(
+            address=address, authkey=authkey, serializer=SERIALIZER
+            )
+        manager.connect()
+        queue = manager.get_queue()
+        queue.put(('hello world', None, True, 2.25))
+
+    def test_remote(self):
+        authkey = os.urandom(32)
+
+        manager = QueueManager(
+            address=('localhost', 0), authkey=authkey, serializer=SERIALIZER
+            )
+        manager.start()
+
+        p = self.Process(target=self._putter, args=(manager.address, authkey))
+        p.start()
+
+        manager2 = QueueManager2(
+            address=manager.address, authkey=authkey, serializer=SERIALIZER
+            )
+        manager2.connect()
+        queue = manager2.get_queue()
+
+        # Note that xmlrpclib will deserialize the tuple as a list,
+        # not a tuple
+        self.assertEqual(queue.get(), ['hello world', None, True, 2.25])
+
+        # Because we are using xmlrpclib for serialization instead of
+        # pickle, this will raise a serialization error.
+        self.assertRaises(Exception, queue.put, time.sleep)
+
+        # Make queue finalizer run before the server is stopped
+        del queue
+        manager.shutdown()
+
+#
+#
+#
+
+SENTINEL = latin('')
+
+class _TestConnection(BaseTestCase):
+
+    ALLOWED_TYPES = ('processes', 'threads')
+
+    def _echo(self, conn):
+        for msg in iter(conn.recv_bytes, SENTINEL):
+            conn.send_bytes(msg)
+        conn.close()
+
+    def test_connection(self):
+        conn, child_conn = self.Pipe()
+
+        p = self.Process(target=self._echo, args=(child_conn,))
+        p.set_daemon(True)
+        p.start()
+
+        seq = [1, 2.25, None]
+        msg = latin('hello world')
+        longmsg = msg * 10
+        arr = array.array('i', list(range(4)))
+
+        if self.TYPE == 'processes':
+            self.assertEqual(type(conn.fileno()), int)
+
+        self.assertEqual(conn.send(seq), None)
+        self.assertEqual(conn.recv(), seq)
+
+        self.assertEqual(conn.send_bytes(msg), None)
+        self.assertEqual(conn.recv_bytes(), msg)
+
+        if self.TYPE == 'processes':
+            buffer = array.array('i', [0]*10)
+            expected = list(arr) + [0] * (10 - len(arr))
+            self.assertEqual(conn.send_bytes(arr), None)
+            self.assertEqual(conn.recv_bytes_into(buffer),
+                             len(arr) * buffer.itemsize)
+            self.assertEqual(list(buffer), expected)
+
+            buffer = array.array('i', [0]*10)
+            expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
+            self.assertEqual(conn.send_bytes(arr), None)
+            self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
+                             len(arr) * buffer.itemsize)
+            self.assertEqual(list(buffer), expected)
+
+            buffer = bytearray(latin(' ' * 40))
+            self.assertEqual(conn.send_bytes(longmsg), None)
+            try:
+                res = conn.recv_bytes_into(buffer)
+            except multiprocessing.BufferTooShort as e:
+                self.assertEqual(e.args, (longmsg,))
+            else:
+                self.fail('expected BufferTooShort, got %s' % res)
+
+        poll = TimingWrapper(conn.poll)
+
+        self.assertEqual(poll(), False)
+        self.assertTimingAlmostEqual(poll.elapsed, 0)
+
+        self.assertEqual(poll(TIMEOUT1), False)
+        self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
+
+        conn.send(None)
+
+        self.assertEqual(poll(TIMEOUT1), True)
+        self.assertTimingAlmostEqual(poll.elapsed, 0)
+
+        self.assertEqual(conn.recv(), None)
+
+        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16 MiB
+        conn.send_bytes(really_big_msg)
+        self.assertEqual(conn.recv_bytes(), really_big_msg)
+
+        conn.send_bytes(SENTINEL)                          # tell child to quit
+        child_conn.close()
+
+        if self.TYPE == 'processes':
+            self.assertEqual(conn.readable, True)
+            self.assertEqual(conn.writable, True)
+            self.assertRaises(EOFError, conn.recv)
+            self.assertRaises(EOFError, conn.recv_bytes)
+
+        p.join()
+
+    def test_duplex_false(self):
+        reader, writer = self.Pipe(duplex=False)
+        self.assertEqual(writer.send(1), None)
+        self.assertEqual(reader.recv(), 1)
+        if self.TYPE == 'processes':
+            self.assertEqual(reader.readable, True)
+            self.assertEqual(reader.writable, False)
+            self.assertEqual(writer.readable, False)
+            self.assertEqual(writer.writable, True)
+            self.assertRaises(IOError, reader.send, 2)
+            self.assertRaises(IOError, writer.recv)
+            self.assertRaises(IOError, writer.poll)
+
+    def test_spawn_close(self):
+        # We test that a pipe connection can be closed by the parent
+        # process immediately after the child is spawned.  On Windows
+        # this sometimes failed in older versions because child_conn
+        # could be closed before the child had a chance to duplicate
+        # it.
+        conn, child_conn = self.Pipe()
+
+        p = self.Process(target=self._echo, args=(child_conn,))
+        p.start()
+        child_conn.close()    # this might complete before child initializes
+
+        msg = latin('hello')
+        conn.send_bytes(msg)
+        self.assertEqual(conn.recv_bytes(), msg)
+
+        conn.send_bytes(SENTINEL)
+        conn.close()
+        p.join()
+
+    def test_sendbytes(self):
+        if self.TYPE != 'processes':
+            return
+
+        msg = latin('abcdefghijklmnopqrstuvwxyz')
+        a, b = self.Pipe()
+
+        a.send_bytes(msg)
+        self.assertEqual(b.recv_bytes(), msg)
+
+        a.send_bytes(msg, 5)
+        self.assertEqual(b.recv_bytes(), msg[5:])
+
+        a.send_bytes(msg, 7, 8)
+        self.assertEqual(b.recv_bytes(), msg[7:7+8])
+
+        a.send_bytes(msg, 26)
+        self.assertEqual(b.recv_bytes(), latin(''))
+
+        a.send_bytes(msg, 26, 0)
+        self.assertEqual(b.recv_bytes(), latin(''))
+
+        self.assertRaises(ValueError, a.send_bytes, msg, 27)
+
+        self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
+
+        self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
+
+        self.assertRaises(ValueError, a.send_bytes, msg, -1)
+
+        self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
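+        # send_bytes(buf, offset, size) transmits buf[offset:offset+size];
+        # the ValueError cases above cover offsets or sizes that fall
+        # outside the buffer, and negative values for either.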
+
+
+class _TestListenerClient(BaseTestCase):
+
+    ALLOWED_TYPES = ('processes', 'threads')
+
+    def _test(self, address):
+        conn = self.connection.Client(address)
+        conn.send('hello')
+        conn.close()
+
+    def test_listener_client(self):
+        for family in self.connection.families:
+            l = self.connection.Listener(family=family)
+            p = self.Process(target=self._test, args=(l.address,))
+            p.set_daemon(True)
+            p.start()
+            conn = l.accept()
+            self.assertEqual(conn.recv(), 'hello')
+            p.join()
+            l.close()
+
+#
+# Test of sending connection and socket objects between processes
+#
+
+class _TestPicklingConnections(BaseTestCase):
+
+    ALLOWED_TYPES = ('processes',)
+
+    def _listener(self, conn, families):
+        for fam in families:
+            l = self.connection.Listener(family=fam)
+            conn.send(l.address)
+            new_conn = l.accept()
+            conn.send(new_conn)
+
+        if self.TYPE == 'processes':
+            l = socket.socket()
+            l.bind(('localhost', 0))
+            conn.send(l.getsockname())
+            l.listen(1)
+            new_conn, addr = l.accept()
+            conn.send(new_conn)
+
+        conn.recv()
+
+    def _remote(self, conn):
+        for (address, msg) in iter(conn.recv, None):
+            client = self.connection.Client(address)
+            client.send(msg.upper())
+            client.close()
+
+        if self.TYPE == 'processes':
+            address, msg = conn.recv()
+            client = socket.socket()
+            client.connect(address)
+            client.sendall(msg.upper())
+            client.close()
+
+        conn.close()
+
+    def test_pickling(self):
+        try:
+            multiprocessing.allow_connection_pickling()
+        except ImportError:
+            return
+
+        families = self.connection.families
+
+        lconn, lconn0 = self.Pipe()
+        lp = self.Process(target=self._listener, args=(lconn0, families))
+        lp.start()
+        lconn0.close()
+
+        rconn, rconn0 = self.Pipe()
+        rp = self.Process(target=self._remote, args=(rconn0,))
+        rp.start()
+        rconn0.close()
+
+        for fam in families:
+            msg = ('This connection uses family %s' % fam).encode('ascii')
+            address = lconn.recv()
+            rconn.send((address, msg))
+            new_conn = lconn.recv()
+            self.assertEqual(new_conn.recv(), msg.upper())
+
+        rconn.send(None)
+
+        if self.TYPE == 'processes':
+            msg = latin('This connection uses a normal socket')
+            address = lconn.recv()
+            rconn.send((address, msg))
+            if hasattr(socket, 'fromfd'):
+                new_conn = lconn.recv()
+                self.assertEqual(new_conn.recv(100), msg.upper())
+            else:
+                # XXX On Windows with Py2.6 need to backport fromfd()
+                discard = lconn.recv_bytes()
+
+        lconn.send(None)
+
+        rconn.close()
+        lconn.close()
+
+        lp.join()
+        rp.join()
+
+#
+#
+#
+
+class _TestHeap(BaseTestCase):
+
+    ALLOWED_TYPES = ('processes',)
+
+    def test_heap(self):
+        iterations = 5000
+        maxblocks = 50
+        blocks = []
+
+        # create and destroy lots of blocks of different sizes
+        for i in range(iterations):
+            size = int(random.lognormvariate(0, 1) * 1000)
+            b = multiprocessing.heap.BufferWrapper(size)
+            blocks.append(b)
+            if len(blocks) > maxblocks:
+                i = random.randrange(maxblocks)
+                del blocks[i]
+
+        # get the heap object
+        heap = multiprocessing.heap.BufferWrapper._heap
+
+        # verify the state of the heap
+        all = []
+        occupied = 0
+        for L in list(heap._len_to_seq.values()):
+            for arena, start, stop in L:
+                all.append((heap._arenas.index(arena), start, stop,
+                            stop-start, 'free'))
+        for arena, start, stop in heap._allocated_blocks:
+            all.append((heap._arenas.index(arena), start, stop,
+                        stop-start, 'occupied'))
+            occupied += (stop-start)
+
+        all.sort()
+
+        for i in range(len(all)-1):
+            (arena, start, stop) = all[i][:3]
+            (narena, nstart, nstop) = all[i+1][:3]
+            self.assertTrue((arena != narena and nstart == 0) or
+                            (stop == nstart))
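+
+        # The loop above checks the allocator's invariant: after
+        # sorting, consecutive blocks either live in different arenas
+        # (with the later one starting at offset 0) or are exactly
+        # adjacent (stop == nstart), so there are no overlaps or
+        # unaccounted gaps.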
+
+#
+#
+#
+
+try:
+    from ctypes import Structure, Value, copy, c_int, c_double
+except ImportError:
+    Structure = object
+    c_int = c_double = None
+
+class _Foo(Structure):
+    _fields_ = [
+        ('x', c_int),
+        ('y', c_double)
+        ]
+
+class _TestSharedCTypes(BaseTestCase):
+
+    ALLOWED_TYPES = ('processes',)
+
+    def _double(self, x, y, foo, arr, string):
+        x.value *= 2
+        y.value *= 2
+        foo.x *= 2
+        foo.y *= 2
+        string.value *= 2
+        for i in range(len(arr)):
+            arr[i] *= 2
+
+    def test_sharedctypes(self, lock=False):
+        if c_int is None:
+            return
+
+        x = Value('i', 7, lock=lock)
+        y = Value(ctypes.c_double, 1.0/3.0, lock=lock)
+        foo = Value(_Foo, 3, 2, lock=lock)
+        arr = Array('d', list(range(10)), lock=lock)
+        string = Array('c', 20, lock=lock)
+        string.value = 'hello'
+
+        p = self.Process(target=self._double, args=(x, y, foo, arr, string))
+        p.start()
+        p.join()
+
+        self.assertEqual(x.value, 14)
+        self.assertAlmostEqual(y.value, 2.0/3.0)
+        self.assertEqual(foo.x, 6)
+        self.assertAlmostEqual(foo.y, 4.0)
+        for i in range(10):
+            self.assertAlmostEqual(arr[i], i*2)
+        self.assertEqual(string.value, latin('hellohello'))
+
+    def test_synchronize(self):
+        self.test_sharedctypes(lock=True)
+
+    def test_copy(self):
+        if c_int is None:
+            return
+
+        foo = _Foo(2, 5.0)
+        bar = copy(foo)
+        foo.x = 0
+        foo.y = 0
+        self.assertEqual(bar.x, 2)
+        self.assertAlmostEqual(bar.y, 5.0)
+
+#
+#
+#
+
+class _TestFinalize(BaseTestCase):
+
+    ALLOWED_TYPES = ('processes',)
+
+    def _test_finalize(self, conn):
+        class Foo(object):
+            pass
+
+        a = Foo()
+        util.Finalize(a, conn.send, args=('a',))
+        del a           # triggers callback for a
+
+        b = Foo()
+        close_b = util.Finalize(b, conn.send, args=('b',))
+        close_b()       # triggers callback for b
+        close_b()       # does nothing because callback has already been called
+        del b           # does nothing because callback has already been called
+
+        c = Foo()
+        util.Finalize(c, conn.send, args=('c',))
+
+        d10 = Foo()
+        util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
+
+        d01 = Foo()
+        util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
+        d02 = Foo()
+        util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
+        d03 = Foo()
+        util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
+
+        util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
+
+        util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
+
+        # call multiprocessing's cleanup function, then exit the
+        # process without garbage collecting locals
+        util._exit_function()
+        conn.close()
+        os._exit(0)
+
+    def test_finalize(self):
+        conn, child_conn = self.Pipe()
+
+        p = self.Process(target=self._test_finalize, args=(child_conn,))
+        p.start()
+        p.join()
+
+        result = [obj for obj in iter(conn.recv, 'STOP')]
+        self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
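+
+        # The expected order follows util.Finalize semantics: 'a' and
+        # 'b' fire before exit (del and the explicit call), exit-time
+        # finalizers run highest exitpriority first with ties in
+        # reverse creation order (d10, then d03/d02/d01, then e), and
+        # 'c' never appears because finalizers registered without an
+        # exitpriority are left to garbage collection rather than run
+        # by _exit_function.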
+
+#
+# Test that from ... import * works for each module
+#
+
+class _TestImportStar(BaseTestCase):
+
+    ALLOWED_TYPES = ('processes',)
+
+    def test_import(self):
+        modules = (
+            'multiprocessing', 'multiprocessing.connection',
+            'multiprocessing.heap', 'multiprocessing.managers',
+            'multiprocessing.pool', 'multiprocessing.process',
+            'multiprocessing.reduction', 'multiprocessing.sharedctypes',
+            'multiprocessing.synchronize', 'multiprocessing.util'
+            )
+
+        for name in modules:
+            __import__(name)
+            mod = sys.modules[name]
+
+            for attr in getattr(mod, '__all__', ()):
+                self.assertTrue(
+                    hasattr(mod, attr),
+                    '%r does not have attribute %r' % (mod, attr)
+                    )
+
+#
+# Quick test that logging works -- does not test logging output
+#
+
+class _TestLogging(BaseTestCase):
+
+    ALLOWED_TYPES = ('processes',)
+
+    def test_enable_logging(self):
+        logger = multiprocessing.get_logger()
+        logger.setLevel(util.SUBWARNING)
+        self.assertTrue(logger is not None)
+        logger.debug('this will not be printed')
+        logger.info('nor will this')
+        logger.setLevel(LOG_LEVEL)
+
+    def _test_level(self, conn):
+        logger = multiprocessing.get_logger()
+        conn.send(logger.getEffectiveLevel())
+
+    def test_level(self):
+        LEVEL1 = 32
+        LEVEL2 = 37
+
+        logger = multiprocessing.get_logger()
+        root_logger = logging.getLogger()
+        root_level = root_logger.level
+
+        reader, writer = multiprocessing.Pipe(duplex=False)
+
+        logger.setLevel(LEVEL1)
+        self.Process(target=self._test_level, args=(writer,)).start()
+        self.assertEqual(LEVEL1, reader.recv())
+
+        logger.setLevel(logging.NOTSET)
+        root_logger.setLevel(LEVEL2)
+        self.Process(target=self._test_level, args=(writer,)).start()
+        self.assertEqual(LEVEL2, reader.recv())
+
+        root_logger.setLevel(root_level)
+        logger.setLevel(level=LOG_LEVEL)
+
+#
+# Functions used to create test cases from the base ones in this module
+#
+
+def get_attributes(Source, names):
+    d = {}
+    for name in names:
+        obj = getattr(Source, name)
+        if type(obj) == type(get_attributes):
+            obj = staticmethod(obj)
+        d[name] = obj
+    return d
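+
+# Plain functions such as multiprocessing.Pipe are wrapped in
+# staticmethod above so that merging the dictionary into a mixin
+# class via locals().update() does not turn them into methods that
+# would receive the test instance as a spurious first argument.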
+
+def create_test_cases(Mixin, type):
+    result = {}
+    glob = globals()
+    Type = type[0].upper() + type[1:]
+
+    for name in list(glob.keys()):
+        if name.startswith('_Test'):
+            base = glob[name]
+            if type in base.ALLOWED_TYPES:
+                newname = 'With' + Type + name[1:]
+                class Temp(base, unittest.TestCase, Mixin):
+                    pass
+                result[newname] = Temp
+                Temp.__name__ = newname
+                Temp.__module__ = Mixin.__module__
+    return result
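+
+# For example, create_test_cases(ProcessesMixin, 'processes') maps
+# the name 'WithProcessesTestPool' to a generated class equivalent to
+#
+#     class WithProcessesTestPool(_TestPool, unittest.TestCase,
+#                                 ProcessesMixin):
+#         pass
+#
+# which the test runner then picks up from the module globals.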
+
+#
+# Create test cases
+#
+
+class ProcessesMixin(object):
+    TYPE = 'processes'
+    Process = multiprocessing.Process
+    locals().update(get_attributes(multiprocessing, (
+        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
+        'Condition', 'Event', 'Value', 'Array', 'RawValue',
+        'RawArray', 'current_process', 'active_children', 'Pipe',
+        'connection', 'JoinableQueue'
+        )))
+
+testcases_processes = create_test_cases(ProcessesMixin, type='processes')
+globals().update(testcases_processes)
+
+
+class ManagerMixin(object):
+    TYPE = 'manager'
+    Process = multiprocessing.Process
+    manager = object.__new__(multiprocessing.managers.SyncManager)
+    locals().update(get_attributes(manager, (
+        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
+        'Condition', 'Event', 'Value', 'Array', 'list', 'dict',
+        'Namespace', 'JoinableQueue'
+        )))
+
+testcases_manager = create_test_cases(ManagerMixin, type='manager')
+globals().update(testcases_manager)
+
+
+class ThreadsMixin(object):
+    TYPE = 'threads'
+    Process = multiprocessing.dummy.Process
+    locals().update(get_attributes(multiprocessing.dummy, (
+        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
+        'Condition', 'Event', 'Value', 'Array', 'current_process',
+        'active_children', 'Pipe', 'connection', 'dict', 'list',
+        'Namespace', 'JoinableQueue'
+        )))
+
+testcases_threads = create_test_cases(ThreadsMixin, type='threads')
+globals().update(testcases_threads)
+
+#
+#
+#
+
+def test_main(run=None):
+    if run is None:
+        from test.support import run_unittest as run
+
+    util.get_temp_dir()     # creates temp directory for use by all processes
+
+    multiprocessing.get_logger().setLevel(LOG_LEVEL)
+
+    ProcessesMixin.pool = multiprocessing.Pool(4)
+    ThreadsMixin.pool = multiprocessing.dummy.Pool(4)
+    ManagerMixin.manager.__init__()
+    ManagerMixin.manager.start()
+    ManagerMixin.pool = ManagerMixin.manager.Pool(4)
+
+    testcases = (
+        sorted(list(testcases_processes.values()), key=lambda tc: tc.__name__) +
+        sorted(list(testcases_threads.values()), key=lambda tc: tc.__name__) +
+        sorted(list(testcases_manager.values()), key=lambda tc: tc.__name__)
+        )
+
+    loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
+    suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)
+    run(suite)
+
+    ThreadsMixin.pool.terminate()
+    ProcessesMixin.pool.terminate()
+    ManagerMixin.pool.terminate()
+    ManagerMixin.manager.shutdown()
+
+    del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool
+
+def main():
+    test_main(unittest.TextTestRunner(verbosity=2).run)
+
+if __name__ == '__main__':
+    main()

Modified: python/branches/py3k/Modules/_multiprocessing/connection.h
==============================================================================
--- /python/trunk/Modules/_multiprocessing/connection.h	(original)
+++ python/branches/py3k/Modules/_multiprocessing/connection.h	Wed Jun 11 18:44:04 2008
@@ -175,9 +175,9 @@
 		mp_SetError(PyExc_IOError, res);
 	} else {    
 		if (freeme == NULL) {
-			result = PyString_FromStringAndSize(self->buffer, res);
+			result = PyBytes_FromStringAndSize(self->buffer, res);
 		} else {
-			result = PyString_FromStringAndSize(freeme, res);
+			result = PyBytes_FromStringAndSize(freeme, res);
 			PyMem_Free(freeme);
 		}
 	}
@@ -263,7 +263,7 @@
 	if (!pickled_string)
 		goto failure;
 
-	if (PyString_AsStringAndSize(pickled_string, &buffer, &length) < 0)
+	if (PyBytes_AsStringAndSize(pickled_string, &buffer, &length) < 0)
 		goto failure;
 
 	Py_BEGIN_ALLOW_THREADS
@@ -311,9 +311,9 @@
 		mp_SetError(PyExc_IOError, res);
 	} else {    
 		if (freeme == NULL) {
-			temp = PyString_FromStringAndSize(self->buffer, res);
+			temp = PyBytes_FromStringAndSize(self->buffer, res);
 		} else {
-			temp = PyString_FromStringAndSize(freeme, res);
+			temp = PyBytes_FromStringAndSize(freeme, res);
 			PyMem_Free(freeme);
 		}
 	}

Modified: python/branches/py3k/Modules/_multiprocessing/multiprocessing.c
==============================================================================
--- /python/trunk/Modules/_multiprocessing/multiprocessing.c	(original)
+++ python/branches/py3k/Modules/_multiprocessing/multiprocessing.c	Wed Jun 11 18:44:04 2008
@@ -212,20 +212,33 @@
  * Initialize
  */
 
-PyMODINIT_FUNC
-init_multiprocessing(void)
+static struct PyModuleDef multiprocessing_module = {
+	PyModuleDef_HEAD_INIT,
+	"_multiprocessing",
+	NULL,
+	-1,
+	module_methods,
+	NULL,
+	NULL,
+	NULL,
+	NULL
+};
+
+
+PyMODINIT_FUNC
+PyInit__multiprocessing(void)
 {
 	PyObject *module, *temp;
 
 	/* Initialize module */
-	module = Py_InitModule("_multiprocessing", module_methods);
+	module = PyModule_Create(&multiprocessing_module);
 	if (!module)
-		return;
+		return NULL;
 
 	/* Get copy of objects from pickle */
 	temp = PyImport_ImportModule(PICKLE_MODULE);
 	if (!temp)
-		return;
+		return NULL;
 	pickle_dumps = PyObject_GetAttrString(temp, "dumps");
 	pickle_loads = PyObject_GetAttrString(temp, "loads");
 	pickle_protocol = PyObject_GetAttrString(temp, "HIGHEST_PROTOCOL");
@@ -234,20 +247,20 @@
 	/* Get copy of BufferTooShort */
 	temp = PyImport_ImportModule("multiprocessing");
 	if (!temp)
-		return;
+		return NULL;
 	BufferTooShort = PyObject_GetAttrString(temp, "BufferTooShort");
 	Py_XDECREF(temp);
 
 	/* Add connection type to module */
 	if (PyType_Ready(&ConnectionType) < 0)
-		return;
+		return NULL;
 	Py_INCREF(&ConnectionType);	
 	PyModule_AddObject(module, "Connection", (PyObject*)&ConnectionType);
 
 #if defined(MS_WINDOWS) || HAVE_SEM_OPEN
 	/* Add SemLock type to module */
 	if (PyType_Ready(&SemLockType) < 0)
-		return;
+		return NULL;
 	Py_INCREF(&SemLockType);
 	PyDict_SetItemString(SemLockType.tp_dict, "SEM_VALUE_MAX", 
 			     Py_BuildValue("i", SEM_VALUE_MAX));
@@ -257,7 +270,7 @@
 #ifdef MS_WINDOWS
 	/* Add PipeConnection to module */
 	if (PyType_Ready(&PipeConnectionType) < 0)
-		return;
+		return NULL;
 	Py_INCREF(&PipeConnectionType);
 	PyModule_AddObject(module, "PipeConnection",
 			   (PyObject*)&PipeConnectionType);
@@ -265,30 +278,30 @@
 	/* Initialize win32 class and add to multiprocessing */
 	temp = create_win32_namespace();
 	if (!temp)
-		return;
+		return NULL;
 	PyModule_AddObject(module, "win32", temp);
 
 	/* Initialize the event handle used to signal Ctrl-C */
 	sigint_event = CreateEvent(NULL, TRUE, FALSE, NULL);
 	if (!sigint_event) {
 		PyErr_SetFromWindowsErr(0);
-		return;
+		return NULL;
 	}
 	if (!SetConsoleCtrlHandler(ProcessingCtrlHandler, TRUE)) {
 		PyErr_SetFromWindowsErr(0);
-		return;
+		return NULL;
 	}
 #endif
 
 	/* Add configuration macros */
 	temp = PyDict_New();
 	if (!temp)
-		return;
+		return NULL;
 	if (PyModule_AddObject(module, "flags", temp) < 0)
-		return;
+		return NULL;
 
 #define ADD_FLAG(name) \
-    if (PyDict_SetItemString(temp, #name, Py_BuildValue("i", name)) < 0) return
+       if (PyDict_SetItemString(temp, #name, Py_BuildValue("i", name)) < 0) return NULL
 	
 #ifdef HAVE_SEM_OPEN
 	ADD_FLAG(HAVE_SEM_OPEN);
@@ -305,4 +318,5 @@
 #ifdef HAVE_BROKEN_SEM_UNLINK
 	ADD_FLAG(HAVE_BROKEN_SEM_UNLINK);
 #endif
+        return module;
 }
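
The module-initialization changes above follow PEP 3121: a py3k
extension module defines a PyModuleDef structure and exports a
PyInit_<name>() function that returns the new module object (or NULL
on error), replacing the old void init<name>() entry point.  That is
also why every bare "return;" in the error paths becomes "return
NULL;".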

Modified: python/branches/py3k/setup.py
==============================================================================
--- python/branches/py3k/setup.py	(original)
+++ python/branches/py3k/setup.py	Wed Jun 11 18:44:04 2008
@@ -1110,6 +1110,56 @@
 
         # _fileio -- supposedly cross platform
         exts.append(Extension('_fileio', ['_fileio.c']))
+        # Richard Oudkerk's multiprocessing module
+        if platform == 'win32':             # Windows
+            macros = dict()
+            libraries = ['ws2_32']
+
+        elif platform == 'darwin':          # Mac OS X
+            macros = dict(
+                HAVE_SEM_OPEN=1,
+                HAVE_SEM_TIMEDWAIT=0,
+                HAVE_FD_TRANSFER=1,
+                HAVE_BROKEN_SEM_GETVALUE=1
+                )
+            libraries = []
+
+        elif platform == 'cygwin':          # Cygwin
+            macros = dict(
+                HAVE_SEM_OPEN=1,
+                HAVE_SEM_TIMEDWAIT=1,
+                HAVE_FD_TRANSFER=0,
+                HAVE_BROKEN_SEM_UNLINK=1
+                )
+            libraries = []
+        else:                                   # Linux and other unices
+            macros = dict(
+                HAVE_SEM_OPEN=1,
+                HAVE_SEM_TIMEDWAIT=1,
+                HAVE_FD_TRANSFER=1
+                )
+            libraries = ['rt']
+
+        if platform == 'win32':
+            multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
+                                     '_multiprocessing/semaphore.c',
+                                     '_multiprocessing/pipe_connection.c',
+                                     '_multiprocessing/socket_connection.c',
+                                     '_multiprocessing/win32_functions.c'
+                                   ]
+
+        else:
+            multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
+                                     '_multiprocessing/socket_connection.c'
+                                   ]
+
+            if macros.get('HAVE_SEM_OPEN', False):
+                multiprocessing_srcs.append('_multiprocessing/semaphore.c')
+
+        exts.append ( Extension('_multiprocessing', multiprocessing_srcs,
+                                 define_macros=list(macros.items()),
+                                 include_dirs=["Modules/_multiprocessing"]))
+        # End multiprocessing
 
         # Platform-specific libraries
         if platform in ('linux2', 'freebsd4', 'freebsd5', 'freebsd6',
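
The define_macros dict built for each platform above reaches the C
compiler as -D options (distutils turns each (NAME, value) pair into
-DNAME=value), so on Linux, for instance, the extension is compiled
with roughly -DHAVE_SEM_OPEN=1 -DHAVE_SEM_TIMEDWAIT=1
-DHAVE_FD_TRANSFER=1, and librt is linked in for the POSIX semaphore
functions that HAVE_SEM_OPEN implies.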


More information about the Python-3000-checkins mailing list