Python threading (was: Re: global interpreter lock not working as it should)
Armin Steinhoff
a-steinhoff at web.de
Tue Aug 6 16:20:06 EDT 2002
Hi All,
after modifying ceval.c and the lock support for POSIX systems ... here some
results.
The number of thread switches is 10 times higher with the new lock routines.
After including a conditional sched_yield(), the number of thread switches increases
by a factor of 23 :)
There are still problems: the interpreter_lock is allocated at every thread
start ... WHY??? (-> the recursive use of the interpreter_lock looks weird ...)
Results:
sched_yield version (ceval.c modified):
/* Excerpt from the byte-code interpreter loop in ceval.c (modified):
   periodically drop the GIL so other threads can run, and additionally
   sched_yield() when some other thread is actually blocked waiting on
   the GIL mutex.  NOTE(review): peeking at thelock->locked outside the
   mutex is a benign race at worst -- it only influences whether we
   yield -- but confirm pthread_lock's layout matches this cast. */
#ifdef WITH_THREAD
if (interpreter_lock) {
/* Give another thread a chance */
pthread_lock *thelock = (pthread_lock *)interpreter_lock;
/* Current thread must still own the thread state it is releasing. */
if (PyThreadState_Swap(NULL) != tstate)
Py_FatalError("ceval: tstate mix-up");
PyThread_release_lock(interpreter_lock);
/* Other threads may run now */
if(thelock->locked) // threads are waiting for the GIL mutex
sched_yield();
PyThread_acquire_lock(interpreter_lock, 1);
/* No other thread state may be installed while we were out. */
if (PyThreadState_Swap(tstate) != NULL)
Py_FatalError("ceval: orphan tstate");
}
#endif
Counts:
[202038, 312444, 322712, 206103, 216143, 323075, 361574, 279071, 335451, 358612]
Total = 2917223
Switches:
[1855, 1890, 1892, 1858, 1852, 1889, 1899, 1877, 1888, 1888]
Total = 18788
ceval.c unmodified:
Counts:
[286616, 328785, 305904, 284464, 313173, 281241, 308752, 295980, 317234, 280630]
Total = 3002779
Switches:
[927, 937, 923, 915, 932, 914, 925, 930, 914, 904]
Total = 9221
------------------------------------------------------------------------------
The lock support of thread_pthread.h has been re-written in the following way:
/*
* Lock support.
*/
/*
 * Allocate and initialize one lock record.
 *
 * Returns an opaque PyThread_type_lock handle, or NULL if either the
 * malloc() or the pthread_mutex_init() fails.  The caller owns the
 * returned lock and must release it with PyThread_free_lock().
 */
PyThread_type_lock
PyThread_allocate_lock(void)
{
    pthread_lock *lock;
    int status, error = 0;

    dprintf(("PyThread_allocate_lock called\n"));
    if (!initialized)
        PyThread_init_thread();

    lock = (pthread_lock *) malloc(sizeof(pthread_lock));
    if (lock) {
        /* BUGFIX: the original memset() the pointer BEFORE the NULL
           check, dereferencing NULL when malloc() fails.  Zero the
           record only after the allocation is known to have worked. */
        memset((void *)lock, '\0', sizeof(pthread_lock));
        lock->locked = 0;   /* no waiters yet */

        status = pthread_mutex_init(&lock->mut, pthread_mutexattr_default);
        CHECK_STATUS("pthread_mutex_init");
        if (error) {
            /* Mutex could not be set up; hand back NULL, not a
               half-initialized record. */
            free((void *)lock);
            lock = 0;
        }
    }

    dprintf(("PyThread_allocate_lock() -> %p\n", lock));
    return (PyThread_type_lock) lock;
}
/*
 * Dispose of a lock previously created by PyThread_allocate_lock():
 * destroy the underlying mutex, then free the record itself.
 * The lock must not be held or waited on when this is called.
 */
void
PyThread_free_lock(PyThread_type_lock lock)
{
    pthread_lock *self = (pthread_lock *) lock;
    int status, error = 0;

    dprintf(("PyThread_free_lock(%p) called\n", lock));

    status = pthread_mutex_destroy(&self->mut);
    CHECK_STATUS("pthread_mutex_destroy");

    free((void *) self);
}
/*
 * Acquire a lock.
 *
 * waitflag != 0: block until the mutex is ours; the `locked` field is
 * kept as a count of threads blocked (or about to block) on the mutex,
 * which the ceval.c sched_yield() heuristic inspects.
 * waitflag == 0: try once and return immediately.
 *
 * Returns 1 on success, 0 on failure.
 */
int
PyThread_acquire_lock(PyThread_type_lock lock, int waitflag)
{
    int success = 0;
    pthread_lock *thelock = (pthread_lock *) lock;
    int status, error = 0;

    dprintf(("PyThread_acquire_lock(%p, %d) called\n", lock, waitflag));

    if (waitflag) {
        /* Advertise ourselves as a waiter before blocking.  Several
           threads can arrive here concurrently, hence the atomic op. */
        atomic_add(&thelock->locked, 1);   /* QNX specific ... */
        status = pthread_mutex_lock( &thelock->mut );
        /* BUGFIX: the original decremented with a plain
           "thelock->locked--", which races against the atomic_add()
           performed by other threads entering above and can corrupt
           the waiter count.  Use the matching atomic primitive. */
        atomic_sub(&thelock->locked, 1);   /* QNX specific ... */
        success = 1;
        CHECK_STATUS("pthread_mutex_lock[3]");
        if (error)
            success = 0;
    }
    else {
        status = pthread_mutex_trylock( &thelock->mut );
        if (status == EOK)   /* EOK is QNX's name for 0 */
            success = 1;
    }

    dprintf(("PyThread_acquire_lock(%p, %d) -> %d\n", lock, waitflag, success));
    return success;
}
/*
 * Release a lock held by the calling thread by unlocking the
 * underlying mutex.  Undefined if the caller does not hold the lock.
 */
void
PyThread_release_lock(PyThread_type_lock lock)
{
    pthread_lock *self = (pthread_lock *) lock;
    int status, error = 0;

    dprintf(("PyThread_release_lock(%p) called\n", lock));

    status = pthread_mutex_unlock(&self->mut);
    CHECK_STATUS("pthread_mutex_unlock[3]");
}
More information about the Python-list
mailing list