Diffstat (limited to 'gl/glthread')
-rw-r--r--   gl/glthread/lock.c        1057
-rw-r--r--   gl/glthread/lock.h         927
-rw-r--r--   gl/glthread/threadlib.c     73
3 files changed, 2057 insertions, 0 deletions
diff --git a/gl/glthread/lock.c b/gl/glthread/lock.c
new file mode 100644
index 00000000..f62aa301
--- /dev/null
+++ b/gl/glthread/lock.c
@@ -0,0 +1,1057 @@
| 1 | /* Locking in multithreaded situations. | ||
| 2 | Copyright (C) 2005-2013 Free Software Foundation, Inc. | ||
| 3 | |||
| 4 | This program is free software; you can redistribute it and/or modify | ||
| 5 | it under the terms of the GNU General Public License as published by | ||
| 6 | the Free Software Foundation; either version 3, or (at your option) | ||
| 7 | any later version. | ||
| 8 | |||
| 9 | This program is distributed in the hope that it will be useful, | ||
| 10 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | GNU General Public License for more details. | ||
| 13 | |||
| 14 | You should have received a copy of the GNU General Public License | ||
| 15 | along with this program; if not, see <http://www.gnu.org/licenses/>. */ | ||
| 16 | |||
| 17 | /* Written by Bruno Haible <bruno@clisp.org>, 2005. | ||
| 18 | Based on GCC's gthr-posix.h, gthr-posix95.h, gthr-solaris.h, | ||
| 19 | gthr-win32.h. */ | ||
| 20 | |||
| 21 | #include <config.h> | ||
| 22 | |||
| 23 | #include "glthread/lock.h" | ||
| 24 | |||
| 25 | /* ========================================================================= */ | ||
| 26 | |||
| 27 | #if USE_POSIX_THREADS | ||
| 28 | |||
| 29 | /* -------------------------- gl_lock_t datatype -------------------------- */ | ||
| 30 | |||
| 31 | /* ------------------------- gl_rwlock_t datatype ------------------------- */ | ||
| 32 | |||
| 33 | # if HAVE_PTHREAD_RWLOCK | ||
| 34 | |||
| 35 | # if !defined PTHREAD_RWLOCK_INITIALIZER | ||
| 36 | |||
| 37 | int | ||
| 38 | glthread_rwlock_init_multithreaded (gl_rwlock_t *lock) | ||
| 39 | { | ||
| 40 | int err; | ||
| 41 | |||
| 42 | err = pthread_rwlock_init (&lock->rwlock, NULL); | ||
| 43 | if (err != 0) | ||
| 44 | return err; | ||
| 45 | lock->initialized = 1; | ||
| 46 | return 0; | ||
| 47 | } | ||
| 48 | |||
| 49 | int | ||
| 50 | glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock) | ||
| 51 | { | ||
| 52 | if (!lock->initialized) | ||
| 53 | { | ||
| 54 | int err; | ||
| 55 | |||
| 56 | err = pthread_mutex_lock (&lock->guard); | ||
| 57 | if (err != 0) | ||
| 58 | return err; | ||
| 59 | if (!lock->initialized) | ||
| 60 | { | ||
| 61 | err = glthread_rwlock_init_multithreaded (lock); | ||
| 62 | if (err != 0) | ||
| 63 | { | ||
| 64 | pthread_mutex_unlock (&lock->guard); | ||
| 65 | return err; | ||
| 66 | } | ||
| 67 | } | ||
| 68 | err = pthread_mutex_unlock (&lock->guard); | ||
| 69 | if (err != 0) | ||
| 70 | return err; | ||
| 71 | } | ||
| 72 | return pthread_rwlock_rdlock (&lock->rwlock); | ||
| 73 | } | ||
| 74 | |||
| 75 | int | ||
| 76 | glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock) | ||
| 77 | { | ||
| 78 | if (!lock->initialized) | ||
| 79 | { | ||
| 80 | int err; | ||
| 81 | |||
| 82 | err = pthread_mutex_lock (&lock->guard); | ||
| 83 | if (err != 0) | ||
| 84 | return err; | ||
| 85 | if (!lock->initialized) | ||
| 86 | { | ||
| 87 | err = glthread_rwlock_init_multithreaded (lock); | ||
| 88 | if (err != 0) | ||
| 89 | { | ||
| 90 | pthread_mutex_unlock (&lock->guard); | ||
| 91 | return err; | ||
| 92 | } | ||
| 93 | } | ||
| 94 | err = pthread_mutex_unlock (&lock->guard); | ||
| 95 | if (err != 0) | ||
| 96 | return err; | ||
| 97 | } | ||
| 98 | return pthread_rwlock_wrlock (&lock->rwlock); | ||
| 99 | } | ||
| 100 | |||
| 101 | int | ||
| 102 | glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock) | ||
| 103 | { | ||
| 104 | if (!lock->initialized) | ||
| 105 | return EINVAL; | ||
| 106 | return pthread_rwlock_unlock (&lock->rwlock); | ||
| 107 | } | ||
| 108 | |||
| 109 | int | ||
| 110 | glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock) | ||
| 111 | { | ||
| 112 | int err; | ||
| 113 | |||
| 114 | if (!lock->initialized) | ||
| 115 | return EINVAL; | ||
| 116 | err = pthread_rwlock_destroy (&lock->rwlock); | ||
| 117 | if (err != 0) | ||
| 118 | return err; | ||
| 119 | lock->initialized = 0; | ||
| 120 | return 0; | ||
| 121 | } | ||
| 122 | |||
| 123 | # endif | ||
| 124 | |||
| 125 | # else | ||
| 126 | |||
| 127 | int | ||
| 128 | glthread_rwlock_init_multithreaded (gl_rwlock_t *lock) | ||
| 129 | { | ||
| 130 | int err; | ||
| 131 | |||
| 132 | err = pthread_mutex_init (&lock->lock, NULL); | ||
| 133 | if (err != 0) | ||
| 134 | return err; | ||
| 135 | err = pthread_cond_init (&lock->waiting_readers, NULL); | ||
| 136 | if (err != 0) | ||
| 137 | return err; | ||
| 138 | err = pthread_cond_init (&lock->waiting_writers, NULL); | ||
| 139 | if (err != 0) | ||
| 140 | return err; | ||
| 141 | lock->waiting_writers_count = 0; | ||
| 142 | lock->runcount = 0; | ||
| 143 | return 0; | ||
| 144 | } | ||
| 145 | |||
| 146 | int | ||
| 147 | glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock) | ||
| 148 | { | ||
| 149 | int err; | ||
| 150 | |||
| 151 | err = pthread_mutex_lock (&lock->lock); | ||
| 152 | if (err != 0) | ||
| 153 | return err; | ||
| 154 | /* Test whether only readers are currently running, and whether the runcount | ||
| 155 | field will not overflow. */ | ||
| 156 | /* POSIX says: "It is implementation-defined whether the calling thread | ||
| 157 | acquires the lock when a writer does not hold the lock and there are | ||
| 158 | writers blocked on the lock." Let's say, no: give the writers a higher | ||
| 159 | priority. */ | ||
| 160 | while (!(lock->runcount + 1 > 0 && lock->waiting_writers_count == 0)) | ||
| 161 | { | ||
| 162 | /* This thread has to wait for a while. Enqueue it among the | ||
| 163 | waiting_readers. */ | ||
| 164 | err = pthread_cond_wait (&lock->waiting_readers, &lock->lock); | ||
| 165 | if (err != 0) | ||
| 166 | { | ||
| 167 | pthread_mutex_unlock (&lock->lock); | ||
| 168 | return err; | ||
| 169 | } | ||
| 170 | } | ||
| 171 | lock->runcount++; | ||
| 172 | return pthread_mutex_unlock (&lock->lock); | ||
| 173 | } | ||
| 174 | |||
| 175 | int | ||
| 176 | glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock) | ||
| 177 | { | ||
| 178 | int err; | ||
| 179 | |||
| 180 | err = pthread_mutex_lock (&lock->lock); | ||
| 181 | if (err != 0) | ||
| 182 | return err; | ||
| 183 | /* Test whether no readers or writers are currently running. */ | ||
| 184 | while (!(lock->runcount == 0)) | ||
| 185 | { | ||
| 186 | /* This thread has to wait for a while. Enqueue it among the | ||
| 187 | waiting_writers. */ | ||
| 188 | lock->waiting_writers_count++; | ||
| 189 | err = pthread_cond_wait (&lock->waiting_writers, &lock->lock); | ||
| 190 | if (err != 0) | ||
| 191 | { | ||
| 192 | lock->waiting_writers_count--; | ||
| 193 | pthread_mutex_unlock (&lock->lock); | ||
| 194 | return err; | ||
| 195 | } | ||
| 196 | lock->waiting_writers_count--; | ||
| 197 | } | ||
| 198 | lock->runcount--; /* runcount becomes -1 */ | ||
| 199 | return pthread_mutex_unlock (&lock->lock); | ||
| 200 | } | ||
| 201 | |||
| 202 | int | ||
| 203 | glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock) | ||
| 204 | { | ||
| 205 | int err; | ||
| 206 | |||
| 207 | err = pthread_mutex_lock (&lock->lock); | ||
| 208 | if (err != 0) | ||
| 209 | return err; | ||
| 210 | if (lock->runcount < 0) | ||
| 211 | { | ||
| 212 | /* Drop a writer lock. */ | ||
| 213 | if (!(lock->runcount == -1)) | ||
| 214 | { | ||
| 215 | pthread_mutex_unlock (&lock->lock); | ||
| 216 | return EINVAL; | ||
| 217 | } | ||
| 218 | lock->runcount = 0; | ||
| 219 | } | ||
| 220 | else | ||
| 221 | { | ||
| 222 | /* Drop a reader lock. */ | ||
| 223 | if (!(lock->runcount > 0)) | ||
| 224 | { | ||
| 225 | pthread_mutex_unlock (&lock->lock); | ||
| 226 | return EINVAL; | ||
| 227 | } | ||
| 228 | lock->runcount--; | ||
| 229 | } | ||
| 230 | if (lock->runcount == 0) | ||
| 231 | { | ||
| 232 | /* POSIX recommends that "write locks shall take precedence over read | ||
| 233 | locks", to avoid "writer starvation". */ | ||
| 234 | if (lock->waiting_writers_count > 0) | ||
| 235 | { | ||
| 236 | /* Wake up one of the waiting writers. */ | ||
| 237 | err = pthread_cond_signal (&lock->waiting_writers); | ||
| 238 | if (err != 0) | ||
| 239 | { | ||
| 240 | pthread_mutex_unlock (&lock->lock); | ||
| 241 | return err; | ||
| 242 | } | ||
| 243 | } | ||
| 244 | else | ||
| 245 | { | ||
| 246 | /* Wake up all waiting readers. */ | ||
| 247 | err = pthread_cond_broadcast (&lock->waiting_readers); | ||
| 248 | if (err != 0) | ||
| 249 | { | ||
| 250 | pthread_mutex_unlock (&lock->lock); | ||
| 251 | return err; | ||
| 252 | } | ||
| 253 | } | ||
| 254 | } | ||
| 255 | return pthread_mutex_unlock (&lock->lock); | ||
| 256 | } | ||
| 257 | |||
| 258 | int | ||
| 259 | glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock) | ||
| 260 | { | ||
| 261 | int err; | ||
| 262 | |||
| 263 | err = pthread_mutex_destroy (&lock->lock); | ||
| 264 | if (err != 0) | ||
| 265 | return err; | ||
| 266 | err = pthread_cond_destroy (&lock->waiting_readers); | ||
| 267 | if (err != 0) | ||
| 268 | return err; | ||
| 269 | err = pthread_cond_destroy (&lock->waiting_writers); | ||
| 270 | if (err != 0) | ||
| 271 | return err; | ||
| 272 | return 0; | ||
| 273 | } | ||
| 274 | |||
| 275 | # endif | ||
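
The fallback above encodes the whole read-write lock state in runcount (the number of active readers, or -1 while a writer holds the lock) plus waiting_writers_count, and deliberately prefers writers: a reader only proceeds when no writer is running and none is waiting. A minimal standalone sketch of the same technique (illustrative only, not part of this patch; names invented, error propagation and the overflow guard omitted):

    /* mini_rwlock: writer-preference rwlock built from a mutex and two
       condition variables, mirroring the fallback implementation above. */
    #include <pthread.h>

    struct mini_rwlock
    {
      pthread_mutex_t lock;
      pthread_cond_t waiting_readers;
      pthread_cond_t waiting_writers;
      unsigned int waiting_writers_count;
      int runcount;                 /* > 0: readers, 0: free, -1: one writer */
    };

    static void
    mini_rdlock (struct mini_rwlock *rw)
    {
      pthread_mutex_lock (&rw->lock);
      /* Writer preference: also wait while writers are merely queued.  */
      while (rw->runcount < 0 || rw->waiting_writers_count > 0)
        pthread_cond_wait (&rw->waiting_readers, &rw->lock);
      rw->runcount++;
      pthread_mutex_unlock (&rw->lock);
    }

    static void
    mini_wrlock (struct mini_rwlock *rw)
    {
      pthread_mutex_lock (&rw->lock);
      while (rw->runcount != 0)
        {
          rw->waiting_writers_count++;
          pthread_cond_wait (&rw->waiting_writers, &rw->lock);
          rw->waiting_writers_count--;
        }
      rw->runcount = -1;            /* one writer running */
      pthread_mutex_unlock (&rw->lock);
    }

    static void
    mini_unlock (struct mini_rwlock *rw)
    {
      pthread_mutex_lock (&rw->lock);
      if (rw->runcount < 0)
        rw->runcount = 0;           /* drop the writer lock */
      else
        rw->runcount--;             /* drop one reader lock */
      if (rw->runcount == 0)
        {
          if (rw->waiting_writers_count > 0)
            pthread_cond_signal (&rw->waiting_writers);    /* one writer */
          else
            pthread_cond_broadcast (&rw->waiting_readers); /* all readers */
        }
      pthread_mutex_unlock (&rw->lock);
    }
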
| 276 | |||
| 277 | /* --------------------- gl_recursive_lock_t datatype --------------------- */ | ||
| 278 | |||
| 279 | # if HAVE_PTHREAD_MUTEX_RECURSIVE | ||
| 280 | |||
| 281 | # if defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER || defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP | ||
| 282 | |||
| 283 | int | ||
| 284 | glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock) | ||
| 285 | { | ||
| 286 | pthread_mutexattr_t attributes; | ||
| 287 | int err; | ||
| 288 | |||
| 289 | err = pthread_mutexattr_init (&attributes); | ||
| 290 | if (err != 0) | ||
| 291 | return err; | ||
| 292 | err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE); | ||
| 293 | if (err != 0) | ||
| 294 | { | ||
| 295 | pthread_mutexattr_destroy (&attributes); | ||
| 296 | return err; | ||
| 297 | } | ||
| 298 | err = pthread_mutex_init (lock, &attributes); | ||
| 299 | if (err != 0) | ||
| 300 | { | ||
| 301 | pthread_mutexattr_destroy (&attributes); | ||
| 302 | return err; | ||
| 303 | } | ||
| 304 | err = pthread_mutexattr_destroy (&attributes); | ||
| 305 | if (err != 0) | ||
| 306 | return err; | ||
| 307 | return 0; | ||
| 308 | } | ||
| 309 | |||
| 310 | # else | ||
| 311 | |||
| 312 | int | ||
| 313 | glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock) | ||
| 314 | { | ||
| 315 | pthread_mutexattr_t attributes; | ||
| 316 | int err; | ||
| 317 | |||
| 318 | err = pthread_mutexattr_init (&attributes); | ||
| 319 | if (err != 0) | ||
| 320 | return err; | ||
| 321 | err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE); | ||
| 322 | if (err != 0) | ||
| 323 | { | ||
| 324 | pthread_mutexattr_destroy (&attributes); | ||
| 325 | return err; | ||
| 326 | } | ||
| 327 | err = pthread_mutex_init (&lock->recmutex, &attributes); | ||
| 328 | if (err != 0) | ||
| 329 | { | ||
| 330 | pthread_mutexattr_destroy (&attributes); | ||
| 331 | return err; | ||
| 332 | } | ||
| 333 | err = pthread_mutexattr_destroy (&attributes); | ||
| 334 | if (err != 0) | ||
| 335 | return err; | ||
| 336 | lock->initialized = 1; | ||
| 337 | return 0; | ||
| 338 | } | ||
| 339 | |||
| 340 | int | ||
| 341 | glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock) | ||
| 342 | { | ||
| 343 | if (!lock->initialized) | ||
| 344 | { | ||
| 345 | int err; | ||
| 346 | |||
| 347 | err = pthread_mutex_lock (&lock->guard); | ||
| 348 | if (err != 0) | ||
| 349 | return err; | ||
| 350 | if (!lock->initialized) | ||
| 351 | { | ||
| 352 | err = glthread_recursive_lock_init_multithreaded (lock); | ||
| 353 | if (err != 0) | ||
| 354 | { | ||
| 355 | pthread_mutex_unlock (&lock->guard); | ||
| 356 | return err; | ||
| 357 | } | ||
| 358 | } | ||
| 359 | err = pthread_mutex_unlock (&lock->guard); | ||
| 360 | if (err != 0) | ||
| 361 | return err; | ||
| 362 | } | ||
| 363 | return pthread_mutex_lock (&lock->recmutex); | ||
| 364 | } | ||
| 365 | |||
| 366 | int | ||
| 367 | glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock) | ||
| 368 | { | ||
| 369 | if (!lock->initialized) | ||
| 370 | return EINVAL; | ||
| 371 | return pthread_mutex_unlock (&lock->recmutex); | ||
| 372 | } | ||
| 373 | |||
| 374 | int | ||
| 375 | glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock) | ||
| 376 | { | ||
| 377 | int err; | ||
| 378 | |||
| 379 | if (!lock->initialized) | ||
| 380 | return EINVAL; | ||
| 381 | err = pthread_mutex_destroy (&lock->recmutex); | ||
| 382 | if (err != 0) | ||
| 383 | return err; | ||
| 384 | lock->initialized = 0; | ||
| 385 | return 0; | ||
| 386 | } | ||
| 387 | |||
| 388 | # endif | ||
| 389 | |||
| 390 | # else | ||
| 391 | |||
| 392 | int | ||
| 393 | glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock) | ||
| 394 | { | ||
| 395 | int err; | ||
| 396 | |||
| 397 | err = pthread_mutex_init (&lock->mutex, NULL); | ||
| 398 | if (err != 0) | ||
| 399 | return err; | ||
| 400 | lock->owner = (pthread_t) 0; | ||
| 401 | lock->depth = 0; | ||
| 402 | return 0; | ||
| 403 | } | ||
| 404 | |||
| 405 | int | ||
| 406 | glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock) | ||
| 407 | { | ||
| 408 | pthread_t self = pthread_self (); | ||
| 409 | if (lock->owner != self) | ||
| 410 | { | ||
| 411 | int err; | ||
| 412 | |||
| 413 | err = pthread_mutex_lock (&lock->mutex); | ||
| 414 | if (err != 0) | ||
| 415 | return err; | ||
| 416 | lock->owner = self; | ||
| 417 | } | ||
| 418 | if (++(lock->depth) == 0) /* wraparound? */ | ||
| 419 | { | ||
| 420 | lock->depth--; | ||
| 421 | return EAGAIN; | ||
| 422 | } | ||
| 423 | return 0; | ||
| 424 | } | ||
| 425 | |||
| 426 | int | ||
| 427 | glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock) | ||
| 428 | { | ||
| 429 | if (lock->owner != pthread_self ()) | ||
| 430 | return EPERM; | ||
| 431 | if (lock->depth == 0) | ||
| 432 | return EINVAL; | ||
| 433 | if (--(lock->depth) == 0) | ||
| 434 | { | ||
| 435 | lock->owner = (pthread_t) 0; | ||
| 436 | return pthread_mutex_unlock (&lock->mutex); | ||
| 437 | } | ||
| 438 | else | ||
| 439 | return 0; | ||
| 440 | } | ||
| 441 | |||
| 442 | int | ||
| 443 | glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock) | ||
| 444 | { | ||
| 445 | if (lock->owner != (pthread_t) 0) | ||
| 446 | return EBUSY; | ||
| 447 | return pthread_mutex_destroy (&lock->mutex); | ||
| 448 | } | ||
| 449 | |||
| 450 | # endif | ||
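
The owner/depth emulation above exists so that a thread already holding the lock can take it again without deadlocking: only the first acquisition by a thread touches the underlying mutex, nested acquisitions just increment depth, and the mutex is released when depth drops back to zero. A hypothetical usage sketch with the gl_recursive_lock_* macros this patch provides (function and lock names invented; the depth comments describe this fallback implementation):

    gl_recursive_lock_define_initialized (static, cache_lock)

    static void
    flush_cache (void)
    {
      gl_recursive_lock_lock (cache_lock);    /* owner == self: depth 1 -> 2 */
      /* ... write the cache out ... */
      gl_recursive_lock_unlock (cache_lock);  /* depth 2 -> 1, mutex kept */
    }

    static void
    update_cache (void)
    {
      gl_recursive_lock_lock (cache_lock);    /* first acquisition: depth 0 -> 1 */
      /* ... modify the cache, then flush while still holding the lock;
         with a plain gl_lock_t this nested lock would self-deadlock ... */
      flush_cache ();
      gl_recursive_lock_unlock (cache_lock);  /* depth 1 -> 0, mutex released */
    }
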
| 451 | |||
| 452 | /* -------------------------- gl_once_t datatype -------------------------- */ | ||
| 453 | |||
| 454 | static const pthread_once_t fresh_once = PTHREAD_ONCE_INIT; | ||
| 455 | |||
| 456 | int | ||
| 457 | glthread_once_singlethreaded (pthread_once_t *once_control) | ||
| 458 | { | ||
| 459 | /* We don't know whether pthread_once_t is an integer type, a floating-point | ||
| 460 | type, a pointer type, or a structure type. */ | ||
| 461 | char *firstbyte = (char *)once_control; | ||
| 462 | if (*firstbyte == *(const char *)&fresh_once) | ||
| 463 | { | ||
| 464 | /* First time use of once_control. Invert the first byte. */ | ||
| 465 | *firstbyte = ~ *(const char *)&fresh_once; | ||
| 466 | return 1; | ||
| 467 | } | ||
| 468 | else | ||
| 469 | return 0; | ||
| 470 | } | ||
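
Since the layout of pthread_once_t is unspecified, the single-threaded fallback above only looks at the first byte of the control variable: it compares it against a pristine PTHREAD_ONCE_INIT object and flips it to record that the initializer has run. A standalone illustration of the same trick (not part of the patch):

    #include <pthread.h>
    #include <stdio.h>

    static const pthread_once_t fresh = PTHREAD_ONCE_INIT;

    /* Returns 1 exactly once per control variable; no thread support needed. */
    static int
    once_singlethreaded (pthread_once_t *ctrl)
    {
      char *firstbyte = (char *) ctrl;
      if (*firstbyte == *(const char *) &fresh)
        {
          *firstbyte = ~ *(const char *) &fresh;  /* mark as already used */
          return 1;
        }
      return 0;
    }

    int
    main (void)
    {
      static pthread_once_t ctrl = PTHREAD_ONCE_INIT;
      printf ("%d\n", once_singlethreaded (&ctrl));  /* prints 1 */
      printf ("%d\n", once_singlethreaded (&ctrl));  /* prints 0 */
      return 0;
    }
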
| 471 | |||
| 472 | #endif | ||
| 473 | |||
| 474 | /* ========================================================================= */ | ||
| 475 | |||
| 476 | #if USE_PTH_THREADS | ||
| 477 | |||
| 478 | /* Use the GNU Pth threads library. */ | ||
| 479 | |||
| 480 | /* -------------------------- gl_lock_t datatype -------------------------- */ | ||
| 481 | |||
| 482 | /* ------------------------- gl_rwlock_t datatype ------------------------- */ | ||
| 483 | |||
| 484 | /* --------------------- gl_recursive_lock_t datatype --------------------- */ | ||
| 485 | |||
| 486 | /* -------------------------- gl_once_t datatype -------------------------- */ | ||
| 487 | |||
| 488 | static void | ||
| 489 | glthread_once_call (void *arg) | ||
| 490 | { | ||
| 491 | void (**gl_once_temp_addr) (void) = (void (**) (void)) arg; | ||
| 492 | void (*initfunction) (void) = *gl_once_temp_addr; | ||
| 493 | initfunction (); | ||
| 494 | } | ||
| 495 | |||
| 496 | int | ||
| 497 | glthread_once_multithreaded (pth_once_t *once_control, void (*initfunction) (void)) | ||
| 498 | { | ||
| 499 | void (*temp) (void) = initfunction; | ||
| 500 | return (!pth_once (once_control, glthread_once_call, &temp) ? errno : 0); | ||
| 501 | } | ||
| 502 | |||
| 503 | int | ||
| 504 | glthread_once_singlethreaded (pth_once_t *once_control) | ||
| 505 | { | ||
| 506 | /* We know that pth_once_t is an integer type. */ | ||
| 507 | if (*once_control == PTH_ONCE_INIT) | ||
| 508 | { | ||
| 509 | /* First time use of once_control. Invert the marker. */ | ||
| 510 | *once_control = ~ PTH_ONCE_INIT; | ||
| 511 | return 1; | ||
| 512 | } | ||
| 513 | else | ||
| 514 | return 0; | ||
| 515 | } | ||
| 516 | |||
| 517 | #endif | ||
| 518 | |||
| 519 | /* ========================================================================= */ | ||
| 520 | |||
| 521 | #if USE_SOLARIS_THREADS | ||
| 522 | |||
| 523 | /* Use the old Solaris threads library. */ | ||
| 524 | |||
| 525 | /* -------------------------- gl_lock_t datatype -------------------------- */ | ||
| 526 | |||
| 527 | /* ------------------------- gl_rwlock_t datatype ------------------------- */ | ||
| 528 | |||
| 529 | /* --------------------- gl_recursive_lock_t datatype --------------------- */ | ||
| 530 | |||
| 531 | int | ||
| 532 | glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock) | ||
| 533 | { | ||
| 534 | int err; | ||
| 535 | |||
| 536 | err = mutex_init (&lock->mutex, USYNC_THREAD, NULL); | ||
| 537 | if (err != 0) | ||
| 538 | return err; | ||
| 539 | lock->owner = (thread_t) 0; | ||
| 540 | lock->depth = 0; | ||
| 541 | return 0; | ||
| 542 | } | ||
| 543 | |||
| 544 | int | ||
| 545 | glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock) | ||
| 546 | { | ||
| 547 | thread_t self = thr_self (); | ||
| 548 | if (lock->owner != self) | ||
| 549 | { | ||
| 550 | int err; | ||
| 551 | |||
| 552 | err = mutex_lock (&lock->mutex); | ||
| 553 | if (err != 0) | ||
| 554 | return err; | ||
| 555 | lock->owner = self; | ||
| 556 | } | ||
| 557 | if (++(lock->depth) == 0) /* wraparound? */ | ||
| 558 | { | ||
| 559 | lock->depth--; | ||
| 560 | return EAGAIN; | ||
| 561 | } | ||
| 562 | return 0; | ||
| 563 | } | ||
| 564 | |||
| 565 | int | ||
| 566 | glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock) | ||
| 567 | { | ||
| 568 | if (lock->owner != thr_self ()) | ||
| 569 | return EPERM; | ||
| 570 | if (lock->depth == 0) | ||
| 571 | return EINVAL; | ||
| 572 | if (--(lock->depth) == 0) | ||
| 573 | { | ||
| 574 | lock->owner = (thread_t) 0; | ||
| 575 | return mutex_unlock (&lock->mutex); | ||
| 576 | } | ||
| 577 | else | ||
| 578 | return 0; | ||
| 579 | } | ||
| 580 | |||
| 581 | int | ||
| 582 | glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock) | ||
| 583 | { | ||
| 584 | if (lock->owner != (thread_t) 0) | ||
| 585 | return EBUSY; | ||
| 586 | return mutex_destroy (&lock->mutex); | ||
| 587 | } | ||
| 588 | |||
| 589 | /* -------------------------- gl_once_t datatype -------------------------- */ | ||
| 590 | |||
| 591 | int | ||
| 592 | glthread_once_multithreaded (gl_once_t *once_control, void (*initfunction) (void)) | ||
| 593 | { | ||
| 594 | if (!once_control->inited) | ||
| 595 | { | ||
| 596 | int err; | ||
| 597 | |||
| 598 | /* Use the mutex to guarantee that if another thread is already calling | ||
| 599 | the initfunction, this thread waits until it's finished. */ | ||
| 600 | err = mutex_lock (&once_control->mutex); | ||
| 601 | if (err != 0) | ||
| 602 | return err; | ||
| 603 | if (!once_control->inited) | ||
| 604 | { | ||
| 605 | once_control->inited = 1; | ||
| 606 | initfunction (); | ||
| 607 | } | ||
| 608 | return mutex_unlock (&once_control->mutex); | ||
| 609 | } | ||
| 610 | else | ||
| 611 | return 0; | ||
| 612 | } | ||
| 613 | |||
| 614 | int | ||
| 615 | glthread_once_singlethreaded (gl_once_t *once_control) | ||
| 616 | { | ||
| 617 | /* We know that gl_once_t contains an integer type. */ | ||
| 618 | if (!once_control->inited) | ||
| 619 | { | ||
| 620 | /* First time use of once_control. Invert the marker. */ | ||
| 621 | once_control->inited = ~ 0; | ||
| 622 | return 1; | ||
| 623 | } | ||
| 624 | else | ||
| 625 | return 0; | ||
| 626 | } | ||
| 627 | |||
| 628 | #endif | ||
| 629 | |||
| 630 | /* ========================================================================= */ | ||
| 631 | |||
| 632 | #if USE_WINDOWS_THREADS | ||
| 633 | |||
| 634 | /* -------------------------- gl_lock_t datatype -------------------------- */ | ||
| 635 | |||
| 636 | void | ||
| 637 | glthread_lock_init_func (gl_lock_t *lock) | ||
| 638 | { | ||
| 639 | InitializeCriticalSection (&lock->lock); | ||
| 640 | lock->guard.done = 1; | ||
| 641 | } | ||
| 642 | |||
| 643 | int | ||
| 644 | glthread_lock_lock_func (gl_lock_t *lock) | ||
| 645 | { | ||
| 646 | if (!lock->guard.done) | ||
| 647 | { | ||
| 648 | if (InterlockedIncrement (&lock->guard.started) == 0) | ||
| 649 | /* This thread is the first one to need this lock. Initialize it. */ | ||
| 650 | glthread_lock_init (lock); | ||
| 651 | else | ||
| 652 | /* Yield the CPU while waiting for another thread to finish | ||
| 653 | initializing this lock. */ | ||
| 654 | while (!lock->guard.done) | ||
| 655 | Sleep (0); | ||
| 656 | } | ||
| 657 | EnterCriticalSection (&lock->lock); | ||
| 658 | return 0; | ||
| 659 | } | ||
| 660 | |||
| 661 | int | ||
| 662 | glthread_lock_unlock_func (gl_lock_t *lock) | ||
| 663 | { | ||
| 664 | if (!lock->guard.done) | ||
| 665 | return EINVAL; | ||
| 666 | LeaveCriticalSection (&lock->lock); | ||
| 667 | return 0; | ||
| 668 | } | ||
| 669 | |||
| 670 | int | ||
| 671 | glthread_lock_destroy_func (gl_lock_t *lock) | ||
| 672 | { | ||
| 673 | if (!lock->guard.done) | ||
| 674 | return EINVAL; | ||
| 675 | DeleteCriticalSection (&lock->lock); | ||
| 676 | lock->guard.done = 0; | ||
| 677 | return 0; | ||
| 678 | } | ||
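
glthread_lock_lock_func above shows the lazy-initialization guard used throughout this Windows code: the first thread whose InterlockedIncrement of guard.started yields 0 (the counter presumably starts at -1 in the initializer defined in lock.h, which is not part of this hunk) performs the initialization, while later arrivals spin with Sleep (0) until guard.done is set. The same pattern, re-expressed with C11 atomics purely for illustration:

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct
    {
      atomic_int started;   /* -1: nobody started yet, >= 0: somebody did */
      atomic_bool done;     /* true once initialization has finished */
      int resource;         /* whatever is being lazily initialized */
    } lazy_guard;

    #define LAZY_GUARD_INIT { -1, false, 0 }

    static void
    lazy_init (lazy_guard *g)
    {
      if (!atomic_load (&g->done))
        {
          /* atomic_fetch_add returns the old value, so adding 1 matches the
             InterlockedIncrement result tested above. */
          if (atomic_fetch_add (&g->started, 1) + 1 == 0)
            {
              g->resource = 42;           /* first caller initializes */
              atomic_store (&g->done, true);
            }
          else
            {
              while (!atomic_load (&g->done))
                ;                         /* others busy-wait (cf. Sleep (0)) */
            }
        }
    }
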
| 679 | |||
| 680 | /* ------------------------- gl_rwlock_t datatype ------------------------- */ | ||
| 681 | |||
| 682 | /* In this file, the waitqueues are implemented as circular arrays. */ | ||
| 683 | #define gl_waitqueue_t gl_carray_waitqueue_t | ||
| 684 | |||
| 685 | static void | ||
| 686 | gl_waitqueue_init (gl_waitqueue_t *wq) | ||
| 687 | { | ||
| 688 | wq->array = NULL; | ||
| 689 | wq->count = 0; | ||
| 690 | wq->alloc = 0; | ||
| 691 | wq->offset = 0; | ||
| 692 | } | ||
| 693 | |||
| 694 | /* Enqueues the current thread, represented by an event, in a wait queue. | ||
| 695 | Returns INVALID_HANDLE_VALUE if an allocation failure occurs. */ | ||
| 696 | static HANDLE | ||
| 697 | gl_waitqueue_add (gl_waitqueue_t *wq) | ||
| 698 | { | ||
| 699 | HANDLE event; | ||
| 700 | unsigned int index; | ||
| 701 | |||
| 702 | if (wq->count == wq->alloc) | ||
| 703 | { | ||
| 704 | unsigned int new_alloc = 2 * wq->alloc + 1; | ||
| 705 | HANDLE *new_array = | ||
| 706 | (HANDLE *) realloc (wq->array, new_alloc * sizeof (HANDLE)); | ||
| 707 | if (new_array == NULL) | ||
| 708 | /* No more memory. */ | ||
| 709 | return INVALID_HANDLE_VALUE; | ||
| 710 | /* Now is a good opportunity to rotate the array so that its contents | ||
| 711 | starts at offset 0. */ | ||
| 712 | if (wq->offset > 0) | ||
| 713 | { | ||
| 714 | unsigned int old_count = wq->count; | ||
| 715 | unsigned int old_alloc = wq->alloc; | ||
| 716 | unsigned int old_offset = wq->offset; | ||
| 717 | unsigned int i; | ||
| 718 | if (old_offset + old_count > old_alloc) | ||
| 719 | { | ||
| 720 | unsigned int limit = old_offset + old_count - old_alloc; | ||
| 721 | for (i = 0; i < limit; i++) | ||
| 722 | new_array[old_alloc + i] = new_array[i]; | ||
| 723 | } | ||
| 724 | for (i = 0; i < old_count; i++) | ||
| 725 | new_array[i] = new_array[old_offset + i]; | ||
| 726 | wq->offset = 0; | ||
| 727 | } | ||
| 728 | wq->array = new_array; | ||
| 729 | wq->alloc = new_alloc; | ||
| 730 | } | ||
| 731 | /* Whether the created event is a manual-reset one or an auto-reset one, | ||
| 732 | does not matter, since we will wait on it only once. */ | ||
| 733 | event = CreateEvent (NULL, TRUE, FALSE, NULL); | ||
| 734 | if (event == INVALID_HANDLE_VALUE) | ||
| 735 | /* No way to allocate an event. */ | ||
| 736 | return INVALID_HANDLE_VALUE; | ||
| 737 | index = wq->offset + wq->count; | ||
| 738 | if (index >= wq->alloc) | ||
| 739 | index -= wq->alloc; | ||
| 740 | wq->array[index] = event; | ||
| 741 | wq->count++; | ||
| 742 | return event; | ||
| 743 | } | ||
| 744 | |||
| 745 | /* Notifies the first thread from a wait queue and dequeues it. */ | ||
| 746 | static void | ||
| 747 | gl_waitqueue_notify_first (gl_waitqueue_t *wq) | ||
| 748 | { | ||
| 749 | SetEvent (wq->array[wq->offset + 0]); | ||
| 750 | wq->offset++; | ||
| 751 | wq->count--; | ||
| 752 | if (wq->count == 0 || wq->offset == wq->alloc) | ||
| 753 | wq->offset = 0; | ||
| 754 | } | ||
| 755 | |||
| 756 | /* Notifies all threads from a wait queue and dequeues them all. */ | ||
| 757 | static void | ||
| 758 | gl_waitqueue_notify_all (gl_waitqueue_t *wq) | ||
| 759 | { | ||
| 760 | unsigned int i; | ||
| 761 | |||
| 762 | for (i = 0; i < wq->count; i++) | ||
| 763 | { | ||
| 764 | unsigned int index = wq->offset + i; | ||
| 765 | if (index >= wq->alloc) | ||
| 766 | index -= wq->alloc; | ||
| 767 | SetEvent (wq->array[index]); | ||
| 768 | } | ||
| 769 | wq->count = 0; | ||
| 770 | wq->offset = 0; | ||
| 771 | } | ||
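
The wait queue is a circular buffer: live entries occupy count consecutive slots starting at offset, wrapping past the end of the alloc-sized array. For example, with alloc = 4, offset = 3 and count = 2, the occupied slots are 3 and 0, and gl_waitqueue_add places the next event at (3 + 2) - 4 = 1; the array is only rotated back to offset 0 when it has to grow anyway. A tiny standalone check of that index arithmetic (illustrative only):

    #include <assert.h>

    int
    main (void)
    {
      unsigned int alloc = 4, offset = 3, count = 2;
      unsigned int index = offset + count;   /* 5 */
      if (index >= alloc)
        index -= alloc;                      /* wraps around to 1 */
      assert (index == 1);
      return 0;
    }
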
| 772 | |||
| 773 | void | ||
| 774 | glthread_rwlock_init_func (gl_rwlock_t *lock) | ||
| 775 | { | ||
| 776 | InitializeCriticalSection (&lock->lock); | ||
| 777 | gl_waitqueue_init (&lock->waiting_readers); | ||
| 778 | gl_waitqueue_init (&lock->waiting_writers); | ||
| 779 | lock->runcount = 0; | ||
| 780 | lock->guard.done = 1; | ||
| 781 | } | ||
| 782 | |||
| 783 | int | ||
| 784 | glthread_rwlock_rdlock_func (gl_rwlock_t *lock) | ||
| 785 | { | ||
| 786 | if (!lock->guard.done) | ||
| 787 | { | ||
| 788 | if (InterlockedIncrement (&lock->guard.started) == 0) | ||
| 789 | /* This thread is the first one to need this lock. Initialize it. */ | ||
| 790 | glthread_rwlock_init (lock); | ||
| 791 | else | ||
| 792 | /* Yield the CPU while waiting for another thread to finish | ||
| 793 | initializing this lock. */ | ||
| 794 | while (!lock->guard.done) | ||
| 795 | Sleep (0); | ||
| 796 | } | ||
| 797 | EnterCriticalSection (&lock->lock); | ||
| 798 | /* Test whether only readers are currently running, and whether the runcount | ||
| 799 | field will not overflow. */ | ||
| 800 | if (!(lock->runcount + 1 > 0)) | ||
| 801 | { | ||
| 802 | /* This thread has to wait for a while. Enqueue it among the | ||
| 803 | waiting_readers. */ | ||
| 804 | HANDLE event = gl_waitqueue_add (&lock->waiting_readers); | ||
| 805 | if (event != INVALID_HANDLE_VALUE) | ||
| 806 | { | ||
| 807 | DWORD result; | ||
| 808 | LeaveCriticalSection (&lock->lock); | ||
| 809 | /* Wait until another thread signals this event. */ | ||
| 810 | result = WaitForSingleObject (event, INFINITE); | ||
| 811 | if (result == WAIT_FAILED || result == WAIT_TIMEOUT) | ||
| 812 | abort (); | ||
| 813 | CloseHandle (event); | ||
| 814 | /* The thread which signalled the event already did the bookkeeping: | ||
| 815 | removed us from the waiting_readers, incremented lock->runcount. */ | ||
| 816 | if (!(lock->runcount > 0)) | ||
| 817 | abort (); | ||
| 818 | return 0; | ||
| 819 | } | ||
| 820 | else | ||
| 821 | { | ||
| 822 | /* Allocation failure. Weird. */ | ||
| 823 | do | ||
| 824 | { | ||
| 825 | LeaveCriticalSection (&lock->lock); | ||
| 826 | Sleep (1); | ||
| 827 | EnterCriticalSection (&lock->lock); | ||
| 828 | } | ||
| 829 | while (!(lock->runcount + 1 > 0)); | ||
| 830 | } | ||
| 831 | } | ||
| 832 | lock->runcount++; | ||
| 833 | LeaveCriticalSection (&lock->lock); | ||
| 834 | return 0; | ||
| 835 | } | ||
| 836 | |||
| 837 | int | ||
| 838 | glthread_rwlock_wrlock_func (gl_rwlock_t *lock) | ||
| 839 | { | ||
| 840 | if (!lock->guard.done) | ||
| 841 | { | ||
| 842 | if (InterlockedIncrement (&lock->guard.started) == 0) | ||
| 843 | /* This thread is the first one to need this lock. Initialize it. */ | ||
| 844 | glthread_rwlock_init (lock); | ||
| 845 | else | ||
| 846 | /* Yield the CPU while waiting for another thread to finish | ||
| 847 | initializing this lock. */ | ||
| 848 | while (!lock->guard.done) | ||
| 849 | Sleep (0); | ||
| 850 | } | ||
| 851 | EnterCriticalSection (&lock->lock); | ||
| 852 | /* Test whether no readers or writers are currently running. */ | ||
| 853 | if (!(lock->runcount == 0)) | ||
| 854 | { | ||
| 855 | /* This thread has to wait for a while. Enqueue it among the | ||
| 856 | waiting_writers. */ | ||
| 857 | HANDLE event = gl_waitqueue_add (&lock->waiting_writers); | ||
| 858 | if (event != INVALID_HANDLE_VALUE) | ||
| 859 | { | ||
| 860 | DWORD result; | ||
| 861 | LeaveCriticalSection (&lock->lock); | ||
| 862 | /* Wait until another thread signals this event. */ | ||
| 863 | result = WaitForSingleObject (event, INFINITE); | ||
| 864 | if (result == WAIT_FAILED || result == WAIT_TIMEOUT) | ||
| 865 | abort (); | ||
| 866 | CloseHandle (event); | ||
| 867 | /* The thread which signalled the event already did the bookkeeping: | ||
| 868 | removed us from the waiting_writers, set lock->runcount = -1. */ | ||
| 869 | if (!(lock->runcount == -1)) | ||
| 870 | abort (); | ||
| 871 | return 0; | ||
| 872 | } | ||
| 873 | else | ||
| 874 | { | ||
| 875 | /* Allocation failure. Weird. */ | ||
| 876 | do | ||
| 877 | { | ||
| 878 | LeaveCriticalSection (&lock->lock); | ||
| 879 | Sleep (1); | ||
| 880 | EnterCriticalSection (&lock->lock); | ||
| 881 | } | ||
| 882 | while (!(lock->runcount == 0)); | ||
| 883 | } | ||
| 884 | } | ||
| 885 | lock->runcount--; /* runcount becomes -1 */ | ||
| 886 | LeaveCriticalSection (&lock->lock); | ||
| 887 | return 0; | ||
| 888 | } | ||
| 889 | |||
| 890 | int | ||
| 891 | glthread_rwlock_unlock_func (gl_rwlock_t *lock) | ||
| 892 | { | ||
| 893 | if (!lock->guard.done) | ||
| 894 | return EINVAL; | ||
| 895 | EnterCriticalSection (&lock->lock); | ||
| 896 | if (lock->runcount < 0) | ||
| 897 | { | ||
| 898 | /* Drop a writer lock. */ | ||
| 899 | if (!(lock->runcount == -1)) | ||
| 900 | abort (); | ||
| 901 | lock->runcount = 0; | ||
| 902 | } | ||
| 903 | else | ||
| 904 | { | ||
| 905 | /* Drop a reader lock. */ | ||
| 906 | if (!(lock->runcount > 0)) | ||
| 907 | { | ||
| 908 | LeaveCriticalSection (&lock->lock); | ||
| 909 | return EPERM; | ||
| 910 | } | ||
| 911 | lock->runcount--; | ||
| 912 | } | ||
| 913 | if (lock->runcount == 0) | ||
| 914 | { | ||
| 915 | /* POSIX recommends that "write locks shall take precedence over read | ||
| 916 | locks", to avoid "writer starvation". */ | ||
| 917 | if (lock->waiting_writers.count > 0) | ||
| 918 | { | ||
| 919 | /* Wake up one of the waiting writers. */ | ||
| 920 | lock->runcount--; | ||
| 921 | gl_waitqueue_notify_first (&lock->waiting_writers); | ||
| 922 | } | ||
| 923 | else | ||
| 924 | { | ||
| 925 | /* Wake up all waiting readers. */ | ||
| 926 | lock->runcount += lock->waiting_readers.count; | ||
| 927 | gl_waitqueue_notify_all (&lock->waiting_readers); | ||
| 928 | } | ||
| 929 | } | ||
| 930 | LeaveCriticalSection (&lock->lock); | ||
| 931 | return 0; | ||
| 932 | } | ||
| 933 | |||
| 934 | int | ||
| 935 | glthread_rwlock_destroy_func (gl_rwlock_t *lock) | ||
| 936 | { | ||
| 937 | if (!lock->guard.done) | ||
| 938 | return EINVAL; | ||
| 939 | if (lock->runcount != 0) | ||
| 940 | return EBUSY; | ||
| 941 | DeleteCriticalSection (&lock->lock); | ||
| 942 | if (lock->waiting_readers.array != NULL) | ||
| 943 | free (lock->waiting_readers.array); | ||
| 944 | if (lock->waiting_writers.array != NULL) | ||
| 945 | free (lock->waiting_writers.array); | ||
| 946 | lock->guard.done = 0; | ||
| 947 | return 0; | ||
| 948 | } | ||
| 949 | |||
| 950 | /* --------------------- gl_recursive_lock_t datatype --------------------- */ | ||
| 951 | |||
| 952 | void | ||
| 953 | glthread_recursive_lock_init_func (gl_recursive_lock_t *lock) | ||
| 954 | { | ||
| 955 | lock->owner = 0; | ||
| 956 | lock->depth = 0; | ||
| 957 | InitializeCriticalSection (&lock->lock); | ||
| 958 | lock->guard.done = 1; | ||
| 959 | } | ||
| 960 | |||
| 961 | int | ||
| 962 | glthread_recursive_lock_lock_func (gl_recursive_lock_t *lock) | ||
| 963 | { | ||
| 964 | if (!lock->guard.done) | ||
| 965 | { | ||
| 966 | if (InterlockedIncrement (&lock->guard.started) == 0) | ||
| 967 | /* This thread is the first one to need this lock. Initialize it. */ | ||
| 968 | glthread_recursive_lock_init (lock); | ||
| 969 | else | ||
| 970 | /* Yield the CPU while waiting for another thread to finish | ||
| 971 | initializing this lock. */ | ||
| 972 | while (!lock->guard.done) | ||
| 973 | Sleep (0); | ||
| 974 | } | ||
| 975 | { | ||
| 976 | DWORD self = GetCurrentThreadId (); | ||
| 977 | if (lock->owner != self) | ||
| 978 | { | ||
| 979 | EnterCriticalSection (&lock->lock); | ||
| 980 | lock->owner = self; | ||
| 981 | } | ||
| 982 | if (++(lock->depth) == 0) /* wraparound? */ | ||
| 983 | { | ||
| 984 | lock->depth--; | ||
| 985 | return EAGAIN; | ||
| 986 | } | ||
| 987 | } | ||
| 988 | return 0; | ||
| 989 | } | ||
| 990 | |||
| 991 | int | ||
| 992 | glthread_recursive_lock_unlock_func (gl_recursive_lock_t *lock) | ||
| 993 | { | ||
| 994 | if (lock->owner != GetCurrentThreadId ()) | ||
| 995 | return EPERM; | ||
| 996 | if (lock->depth == 0) | ||
| 997 | return EINVAL; | ||
| 998 | if (--(lock->depth) == 0) | ||
| 999 | { | ||
| 1000 | lock->owner = 0; | ||
| 1001 | LeaveCriticalSection (&lock->lock); | ||
| 1002 | } | ||
| 1003 | return 0; | ||
| 1004 | } | ||
| 1005 | |||
| 1006 | int | ||
| 1007 | glthread_recursive_lock_destroy_func (gl_recursive_lock_t *lock) | ||
| 1008 | { | ||
| 1009 | if (lock->owner != 0) | ||
| 1010 | return EBUSY; | ||
| 1011 | DeleteCriticalSection (&lock->lock); | ||
| 1012 | lock->guard.done = 0; | ||
| 1013 | return 0; | ||
| 1014 | } | ||
| 1015 | |||
| 1016 | /* -------------------------- gl_once_t datatype -------------------------- */ | ||
| 1017 | |||
| 1018 | void | ||
| 1019 | glthread_once_func (gl_once_t *once_control, void (*initfunction) (void)) | ||
| 1020 | { | ||
| 1021 | if (once_control->inited <= 0) | ||
| 1022 | { | ||
| 1023 | if (InterlockedIncrement (&once_control->started) == 0) | ||
| 1024 | { | ||
| 1025 | /* This thread is the first one to come to this once_control. */ | ||
| 1026 | InitializeCriticalSection (&once_control->lock); | ||
| 1027 | EnterCriticalSection (&once_control->lock); | ||
| 1028 | once_control->inited = 0; | ||
| 1029 | initfunction (); | ||
| 1030 | once_control->inited = 1; | ||
| 1031 | LeaveCriticalSection (&once_control->lock); | ||
| 1032 | } | ||
| 1033 | else | ||
| 1034 | { | ||
| 1035 | /* Undo last operation. */ | ||
| 1036 | InterlockedDecrement (&once_control->started); | ||
| 1037 | /* Some other thread has already started the initialization. | ||
| 1038 | Yield the CPU while waiting for the other thread to finish | ||
| 1039 | initializing and taking the lock. */ | ||
| 1040 | while (once_control->inited < 0) | ||
| 1041 | Sleep (0); | ||
| 1042 | if (once_control->inited <= 0) | ||
| 1043 | { | ||
| 1044 | /* Take the lock. This blocks until the other thread has | ||
| 1045 | finished calling the initfunction. */ | ||
| 1046 | EnterCriticalSection (&once_control->lock); | ||
| 1047 | LeaveCriticalSection (&once_control->lock); | ||
| 1048 | if (!(once_control->inited > 0)) | ||
| 1049 | abort (); | ||
| 1050 | } | ||
| 1051 | } | ||
| 1052 | } | ||
| 1053 | } | ||
| 1054 | |||
| 1055 | #endif | ||
| 1056 | |||
| 1057 | /* ========================================================================= */ | ||
diff --git a/gl/glthread/lock.h b/gl/glthread/lock.h
new file mode 100644
index 00000000..d20bbdef
--- /dev/null
+++ b/gl/glthread/lock.h
@@ -0,0 +1,927 @@
| 1 | /* Locking in multithreaded situations. | ||
| 2 | Copyright (C) 2005-2013 Free Software Foundation, Inc. | ||
| 3 | |||
| 4 | This program is free software; you can redistribute it and/or modify | ||
| 5 | it under the terms of the GNU General Public License as published by | ||
| 6 | the Free Software Foundation; either version 3, or (at your option) | ||
| 7 | any later version. | ||
| 8 | |||
| 9 | This program is distributed in the hope that it will be useful, | ||
| 10 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | GNU General Public License for more details. | ||
| 13 | |||
| 14 | You should have received a copy of the GNU General Public License | ||
| 15 | along with this program; if not, see <http://www.gnu.org/licenses/>. */ | ||
| 16 | |||
| 17 | /* Written by Bruno Haible <bruno@clisp.org>, 2005. | ||
| 18 | Based on GCC's gthr-posix.h, gthr-posix95.h, gthr-solaris.h, | ||
| 19 | gthr-win32.h. */ | ||
| 20 | |||
| 21 | /* This file contains locking primitives for use with a given thread library. | ||
| 22 | It does not contain primitives for creating threads or for other | ||
| 23 | synchronization primitives. | ||
| 24 | |||
| 25 | Normal (non-recursive) locks: | ||
| 26 | Type: gl_lock_t | ||
| 27 | Declaration: gl_lock_define(extern, name) | ||
| 28 | Initializer: gl_lock_define_initialized(, name) | ||
| 29 | Initialization: gl_lock_init (name); | ||
| 30 | Taking the lock: gl_lock_lock (name); | ||
| 31 | Releasing the lock: gl_lock_unlock (name); | ||
| 32 | De-initialization: gl_lock_destroy (name); | ||
| 33 | Equivalent functions with control of error handling: | ||
| 34 | Initialization: err = glthread_lock_init (&name); | ||
| 35 | Taking the lock: err = glthread_lock_lock (&name); | ||
| 36 | Releasing the lock: err = glthread_lock_unlock (&name); | ||
| 37 | De-initialization: err = glthread_lock_destroy (&name); | ||
| 38 | |||
| 39 | Read-Write (non-recursive) locks: | ||
| 40 | Type: gl_rwlock_t | ||
| 41 | Declaration: gl_rwlock_define(extern, name) | ||
| 42 | Initializer: gl_rwlock_define_initialized(, name) | ||
| 43 | Initialization: gl_rwlock_init (name); | ||
| 44 | Taking the lock: gl_rwlock_rdlock (name); | ||
| 45 | gl_rwlock_wrlock (name); | ||
| 46 | Releasing the lock: gl_rwlock_unlock (name); | ||
| 47 | De-initialization: gl_rwlock_destroy (name); | ||
| 48 | Equivalent functions with control of error handling: | ||
| 49 | Initialization: err = glthread_rwlock_init (&name); | ||
| 50 | Taking the lock: err = glthread_rwlock_rdlock (&name); | ||
| 51 | err = glthread_rwlock_wrlock (&name); | ||
| 52 | Releasing the lock: err = glthread_rwlock_unlock (&name); | ||
| 53 | De-initialization: err = glthread_rwlock_destroy (&name); | ||
| 54 | |||
| 55 | Recursive locks: | ||
| 56 | Type: gl_recursive_lock_t | ||
| 57 | Declaration: gl_recursive_lock_define(extern, name) | ||
| 58 | Initializer: gl_recursive_lock_define_initialized(, name) | ||
| 59 | Initialization: gl_recursive_lock_init (name); | ||
| 60 | Taking the lock: gl_recursive_lock_lock (name); | ||
| 61 | Releasing the lock: gl_recursive_lock_unlock (name); | ||
| 62 | De-initialization: gl_recursive_lock_destroy (name); | ||
| 63 | Equivalent functions with control of error handling: | ||
| 64 | Initialization: err = glthread_recursive_lock_init (&name); | ||
| 65 | Taking the lock: err = glthread_recursive_lock_lock (&name); | ||
| 66 | Releasing the lock: err = glthread_recursive_lock_unlock (&name); | ||
| 67 | De-initialization: err = glthread_recursive_lock_destroy (&name); | ||
| 68 | |||
| 69 | Once-only execution: | ||
| 70 | Type: gl_once_t | ||
| 71 | Initializer: gl_once_define(extern, name) | ||
| 72 | Execution: gl_once (name, initfunction); | ||
| 73 | Equivalent functions with control of error handling: | ||
| 74 | Execution: err = glthread_once (&name, initfunction); | ||
| 75 | */ | ||
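
A hypothetical usage sketch of the API documented above (not part of this header), protecting a counter with a normal lock and running a one-time initializer:

    gl_lock_define_initialized (static, counter_lock)
    gl_once_define (static, counter_once)

    static int counter;

    static void
    init_counter (void)
    {
      counter = 0;
    }

    static void
    bump_counter (void)
    {
      gl_once (counter_once, init_counter);  /* runs init_counter at most once */
      gl_lock_lock (counter_lock);
      counter++;
      gl_lock_unlock (counter_lock);
    }
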
| 76 | |||
| 77 | |||
| 78 | #ifndef _LOCK_H | ||
| 79 | #define _LOCK_H | ||
| 80 | |||
| 81 | #include <errno.h> | ||
| 82 | #include <stdlib.h> | ||
| 83 | |||
| 84 | /* ========================================================================= */ | ||
| 85 | |||
| 86 | #if USE_POSIX_THREADS | ||
| 87 | |||
| 88 | /* Use the POSIX threads library. */ | ||
| 89 | |||
| 90 | # include <pthread.h> | ||
| 91 | |||
| 92 | # ifdef __cplusplus | ||
| 93 | extern "C" { | ||
| 94 | # endif | ||
| 95 | |||
| 96 | # if PTHREAD_IN_USE_DETECTION_HARD | ||
| 97 | |||
| 98 | /* The pthread_in_use() detection needs to be done at runtime. */ | ||
| 99 | # define pthread_in_use() \ | ||
| 100 | glthread_in_use () | ||
| 101 | extern int glthread_in_use (void); | ||
| 102 | |||
| 103 | # endif | ||
| 104 | |||
| 105 | # if USE_POSIX_THREADS_WEAK | ||
| 106 | |||
| 107 | /* Use weak references to the POSIX threads library. */ | ||
| 108 | |||
| 109 | /* Weak references avoid dragging in external libraries if the other parts | ||
| 110 | of the program don't use them. Here we use them, because we don't want | ||
| 111 | every program that uses libintl to depend on libpthread. This assumes | ||
| 112 | that libpthread would not be loaded after libintl; i.e. if libintl is | ||
| 113 | loaded first, by an executable that does not depend on libpthread, and | ||
| 114 | then a module is dynamically loaded that depends on libpthread, libintl | ||
| 115 | will not be multithread-safe. */ | ||
| 116 | |||
| 117 | /* The way to test at runtime whether libpthread is present is to test | ||
| 118 | whether a function pointer's value, such as &pthread_mutex_init, is | ||
| 119 | non-NULL. However, some versions of GCC have a bug through which, in | ||
| 120 | PIC mode, &foo != NULL always evaluates to true if there is a direct | ||
| 121 | call to foo(...) in the same function. To avoid this, we test the | ||
| 122 | address of a function in libpthread that we don't use. */ | ||
| 123 | |||
| 124 | # pragma weak pthread_mutex_init | ||
| 125 | # pragma weak pthread_mutex_lock | ||
| 126 | # pragma weak pthread_mutex_unlock | ||
| 127 | # pragma weak pthread_mutex_destroy | ||
| 128 | # pragma weak pthread_rwlock_init | ||
| 129 | # pragma weak pthread_rwlock_rdlock | ||
| 130 | # pragma weak pthread_rwlock_wrlock | ||
| 131 | # pragma weak pthread_rwlock_unlock | ||
| 132 | # pragma weak pthread_rwlock_destroy | ||
| 133 | # pragma weak pthread_once | ||
| 134 | # pragma weak pthread_cond_init | ||
| 135 | # pragma weak pthread_cond_wait | ||
| 136 | # pragma weak pthread_cond_signal | ||
| 137 | # pragma weak pthread_cond_broadcast | ||
| 138 | # pragma weak pthread_cond_destroy | ||
| 139 | # pragma weak pthread_mutexattr_init | ||
| 140 | # pragma weak pthread_mutexattr_settype | ||
| 141 | # pragma weak pthread_mutexattr_destroy | ||
| 142 | # ifndef pthread_self | ||
| 143 | # pragma weak pthread_self | ||
| 144 | # endif | ||
| 145 | |||
| 146 | # if !PTHREAD_IN_USE_DETECTION_HARD | ||
| 147 | # pragma weak pthread_cancel | ||
| 148 | # define pthread_in_use() (pthread_cancel != NULL) | ||
| 149 | # endif | ||
| 150 | |||
| 151 | # else | ||
| 152 | |||
| 153 | # if !PTHREAD_IN_USE_DETECTION_HARD | ||
| 154 | # define pthread_in_use() 1 | ||
| 155 | # endif | ||
| 156 | |||
| 157 | # endif | ||
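
The weak-reference scheme above boils down to one run-time test: if libpthread was never linked in, a weak reference to one of its functions resolves to NULL. A standalone illustration (GCC/ELF specific, illustrative only, not part of this header):

    #include <pthread.h>
    #include <stdio.h>

    #pragma weak pthread_cancel

    int
    main (void)
    {
      /* NULL unless this program (or a loaded library) pulls in libpthread. */
      printf ("libpthread present: %s\n",
              pthread_cancel != NULL ? "yes" : "no");
      return 0;
    }
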
| 158 | |||
| 159 | /* -------------------------- gl_lock_t datatype -------------------------- */ | ||
| 160 | |||
| 161 | typedef pthread_mutex_t gl_lock_t; | ||
| 162 | # define gl_lock_define(STORAGECLASS, NAME) \ | ||
| 163 | STORAGECLASS pthread_mutex_t NAME; | ||
| 164 | # define gl_lock_define_initialized(STORAGECLASS, NAME) \ | ||
| 165 | STORAGECLASS pthread_mutex_t NAME = gl_lock_initializer; | ||
| 166 | # define gl_lock_initializer \ | ||
| 167 | PTHREAD_MUTEX_INITIALIZER | ||
| 168 | # define glthread_lock_init(LOCK) \ | ||
| 169 | (pthread_in_use () ? pthread_mutex_init (LOCK, NULL) : 0) | ||
| 170 | # define glthread_lock_lock(LOCK) \ | ||
| 171 | (pthread_in_use () ? pthread_mutex_lock (LOCK) : 0) | ||
| 172 | # define glthread_lock_unlock(LOCK) \ | ||
| 173 | (pthread_in_use () ? pthread_mutex_unlock (LOCK) : 0) | ||
| 174 | # define glthread_lock_destroy(LOCK) \ | ||
| 175 | (pthread_in_use () ? pthread_mutex_destroy (LOCK) : 0) | ||
| 176 | |||
| 177 | /* ------------------------- gl_rwlock_t datatype ------------------------- */ | ||
| 178 | |||
| 179 | # if HAVE_PTHREAD_RWLOCK | ||
| 180 | |||
| 181 | # ifdef PTHREAD_RWLOCK_INITIALIZER | ||
| 182 | |||
| 183 | typedef pthread_rwlock_t gl_rwlock_t; | ||
| 184 | # define gl_rwlock_define(STORAGECLASS, NAME) \ | ||
| 185 | STORAGECLASS pthread_rwlock_t NAME; | ||
| 186 | # define gl_rwlock_define_initialized(STORAGECLASS, NAME) \ | ||
| 187 | STORAGECLASS pthread_rwlock_t NAME = gl_rwlock_initializer; | ||
| 188 | # define gl_rwlock_initializer \ | ||
| 189 | PTHREAD_RWLOCK_INITIALIZER | ||
| 190 | # define glthread_rwlock_init(LOCK) \ | ||
| 191 | (pthread_in_use () ? pthread_rwlock_init (LOCK, NULL) : 0) | ||
| 192 | # define glthread_rwlock_rdlock(LOCK) \ | ||
| 193 | (pthread_in_use () ? pthread_rwlock_rdlock (LOCK) : 0) | ||
| 194 | # define glthread_rwlock_wrlock(LOCK) \ | ||
| 195 | (pthread_in_use () ? pthread_rwlock_wrlock (LOCK) : 0) | ||
| 196 | # define glthread_rwlock_unlock(LOCK) \ | ||
| 197 | (pthread_in_use () ? pthread_rwlock_unlock (LOCK) : 0) | ||
| 198 | # define glthread_rwlock_destroy(LOCK) \ | ||
| 199 | (pthread_in_use () ? pthread_rwlock_destroy (LOCK) : 0) | ||
| 200 | |||
| 201 | # else | ||
| 202 | |||
| 203 | typedef struct | ||
| 204 | { | ||
| 205 | int initialized; | ||
| 206 | pthread_mutex_t guard; /* protects the initialization */ | ||
| 207 | pthread_rwlock_t rwlock; /* read-write lock */ | ||
| 208 | } | ||
| 209 | gl_rwlock_t; | ||
| 210 | # define gl_rwlock_define(STORAGECLASS, NAME) \ | ||
| 211 | STORAGECLASS gl_rwlock_t NAME; | ||
| 212 | # define gl_rwlock_define_initialized(STORAGECLASS, NAME) \ | ||
| 213 | STORAGECLASS gl_rwlock_t NAME = gl_rwlock_initializer; | ||
| 214 | # define gl_rwlock_initializer \ | ||
| 215 | { 0, PTHREAD_MUTEX_INITIALIZER } | ||
| 216 | # define glthread_rwlock_init(LOCK) \ | ||
| 217 | (pthread_in_use () ? glthread_rwlock_init_multithreaded (LOCK) : 0) | ||
| 218 | # define glthread_rwlock_rdlock(LOCK) \ | ||
| 219 | (pthread_in_use () ? glthread_rwlock_rdlock_multithreaded (LOCK) : 0) | ||
| 220 | # define glthread_rwlock_wrlock(LOCK) \ | ||
| 221 | (pthread_in_use () ? glthread_rwlock_wrlock_multithreaded (LOCK) : 0) | ||
| 222 | # define glthread_rwlock_unlock(LOCK) \ | ||
| 223 | (pthread_in_use () ? glthread_rwlock_unlock_multithreaded (LOCK) : 0) | ||
| 224 | # define glthread_rwlock_destroy(LOCK) \ | ||
| 225 | (pthread_in_use () ? glthread_rwlock_destroy_multithreaded (LOCK) : 0) | ||
| 226 | extern int glthread_rwlock_init_multithreaded (gl_rwlock_t *lock); | ||
| 227 | extern int glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock); | ||
| 228 | extern int glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock); | ||
| 229 | extern int glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock); | ||
| 230 | extern int glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock); | ||
| 231 | |||
| 232 | # endif | ||
| 233 | |||
| 234 | # else | ||
| 235 | |||
| 236 | typedef struct | ||
| 237 | { | ||
| 238 | pthread_mutex_t lock; /* protects the remaining fields */ | ||
| 239 | pthread_cond_t waiting_readers; /* waiting readers */ | ||
| 240 | pthread_cond_t waiting_writers; /* waiting writers */ | ||
| 241 | unsigned int waiting_writers_count; /* number of waiting writers */ | ||
| 242 | int runcount; /* number of readers running, or -1 when a writer runs */ | ||
| 243 | } | ||
| 244 | gl_rwlock_t; | ||
| 245 | # define gl_rwlock_define(STORAGECLASS, NAME) \ | ||
| 246 | STORAGECLASS gl_rwlock_t NAME; | ||
| 247 | # define gl_rwlock_define_initialized(STORAGECLASS, NAME) \ | ||
| 248 | STORAGECLASS gl_rwlock_t NAME = gl_rwlock_initializer; | ||
| 249 | # define gl_rwlock_initializer \ | ||
| 250 | { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0 } | ||
| 251 | # define glthread_rwlock_init(LOCK) \ | ||
| 252 | (pthread_in_use () ? glthread_rwlock_init_multithreaded (LOCK) : 0) | ||
| 253 | # define glthread_rwlock_rdlock(LOCK) \ | ||
| 254 | (pthread_in_use () ? glthread_rwlock_rdlock_multithreaded (LOCK) : 0) | ||
| 255 | # define glthread_rwlock_wrlock(LOCK) \ | ||
| 256 | (pthread_in_use () ? glthread_rwlock_wrlock_multithreaded (LOCK) : 0) | ||
| 257 | # define glthread_rwlock_unlock(LOCK) \ | ||
| 258 | (pthread_in_use () ? glthread_rwlock_unlock_multithreaded (LOCK) : 0) | ||
| 259 | # define glthread_rwlock_destroy(LOCK) \ | ||
| 260 | (pthread_in_use () ? glthread_rwlock_destroy_multithreaded (LOCK) : 0) | ||
| 261 | extern int glthread_rwlock_init_multithreaded (gl_rwlock_t *lock); | ||
| 262 | extern int glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock); | ||
| 263 | extern int glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock); | ||
| 264 | extern int glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock); | ||
| 265 | extern int glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock); | ||
| 266 | |||
| 267 | # endif | ||
| 268 | |||
| 269 | /* --------------------- gl_recursive_lock_t datatype --------------------- */ | ||
| 270 | |||
| 271 | # if HAVE_PTHREAD_MUTEX_RECURSIVE | ||
| 272 | |||
| 273 | # if defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER || defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP | ||
| 274 | |||
| 275 | typedef pthread_mutex_t gl_recursive_lock_t; | ||
| 276 | # define gl_recursive_lock_define(STORAGECLASS, NAME) \ | ||
| 277 | STORAGECLASS pthread_mutex_t NAME; | ||
| 278 | # define gl_recursive_lock_define_initialized(STORAGECLASS, NAME) \ | ||
| 279 | STORAGECLASS pthread_mutex_t NAME = gl_recursive_lock_initializer; | ||
| 280 | # ifdef PTHREAD_RECURSIVE_MUTEX_INITIALIZER | ||
| 281 | # define gl_recursive_lock_initializer \ | ||
| 282 | PTHREAD_RECURSIVE_MUTEX_INITIALIZER | ||
| 283 | # else | ||
| 284 | # define gl_recursive_lock_initializer \ | ||
| 285 | PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP | ||
| 286 | # endif | ||
| 287 | # define glthread_recursive_lock_init(LOCK) \ | ||
| 288 | (pthread_in_use () ? glthread_recursive_lock_init_multithreaded (LOCK) : 0) | ||
| 289 | # define glthread_recursive_lock_lock(LOCK) \ | ||
| 290 | (pthread_in_use () ? pthread_mutex_lock (LOCK) : 0) | ||
| 291 | # define glthread_recursive_lock_unlock(LOCK) \ | ||
| 292 | (pthread_in_use () ? pthread_mutex_unlock (LOCK) : 0) | ||
| 293 | # define glthread_recursive_lock_destroy(LOCK) \ | ||
| 294 | (pthread_in_use () ? pthread_mutex_destroy (LOCK) : 0) | ||
| 295 | extern int glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock); | ||
| 296 | |||
| 297 | # else | ||
| 298 | |||
| 299 | typedef struct | ||
| 300 | { | ||
| 301 | pthread_mutex_t recmutex; /* recursive mutex */ | ||
| 302 | pthread_mutex_t guard; /* protects the initialization */ | ||
| 303 | int initialized; | ||
| 304 | } | ||
| 305 | gl_recursive_lock_t; | ||
| 306 | # define gl_recursive_lock_define(STORAGECLASS, NAME) \ | ||
| 307 | STORAGECLASS gl_recursive_lock_t NAME; | ||
| 308 | # define gl_recursive_lock_define_initialized(STORAGECLASS, NAME) \ | ||
| 309 | STORAGECLASS gl_recursive_lock_t NAME = gl_recursive_lock_initializer; | ||
| 310 | # define gl_recursive_lock_initializer \ | ||
| 311 | { PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, 0 } | ||
| 312 | # define glthread_recursive_lock_init(LOCK) \ | ||
| 313 | (pthread_in_use () ? glthread_recursive_lock_init_multithreaded (LOCK) : 0) | ||
| 314 | # define glthread_recursive_lock_lock(LOCK) \ | ||
| 315 | (pthread_in_use () ? glthread_recursive_lock_lock_multithreaded (LOCK) : 0) | ||
| 316 | # define glthread_recursive_lock_unlock(LOCK) \ | ||
| 317 | (pthread_in_use () ? glthread_recursive_lock_unlock_multithreaded (LOCK) : 0) | ||
| 318 | # define glthread_recursive_lock_destroy(LOCK) \ | ||
| 319 | (pthread_in_use () ? glthread_recursive_lock_destroy_multithreaded (LOCK) : 0) | ||
| 320 | extern int glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock); | ||
| 321 | extern int glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock); | ||
| 322 | extern int glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock); | ||
| 323 | extern int glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock); | ||
| 324 | |||
| 325 | # endif | ||
| 326 | |||
| 327 | # else | ||
| 328 | |||
| 329 | /* Old versions of POSIX threads on Solaris did not have recursive locks. | ||
| 330 | We have to implement them ourselves. */ | ||
| 331 | |||
| 332 | typedef struct | ||
| 333 | { | ||
| 334 | pthread_mutex_t mutex; | ||
| 335 | pthread_t owner; | ||
| 336 | unsigned long depth; | ||
| 337 | } | ||
| 338 | gl_recursive_lock_t; | ||
| 339 | # define gl_recursive_lock_define(STORAGECLASS, NAME) \ | ||
| 340 | STORAGECLASS gl_recursive_lock_t NAME; | ||
| 341 | # define gl_recursive_lock_define_initialized(STORAGECLASS, NAME) \ | ||
| 342 | STORAGECLASS gl_recursive_lock_t NAME = gl_recursive_lock_initializer; | ||
| 343 | # define gl_recursive_lock_initializer \ | ||
| 344 | { PTHREAD_MUTEX_INITIALIZER, (pthread_t) 0, 0 } | ||
| 345 | # define glthread_recursive_lock_init(LOCK) \ | ||
| 346 | (pthread_in_use () ? glthread_recursive_lock_init_multithreaded (LOCK) : 0) | ||
| 347 | # define glthread_recursive_lock_lock(LOCK) \ | ||
| 348 | (pthread_in_use () ? glthread_recursive_lock_lock_multithreaded (LOCK) : 0) | ||
| 349 | # define glthread_recursive_lock_unlock(LOCK) \ | ||
| 350 | (pthread_in_use () ? glthread_recursive_lock_unlock_multithreaded (LOCK) : 0) | ||
| 351 | # define glthread_recursive_lock_destroy(LOCK) \ | ||
| 352 | (pthread_in_use () ? glthread_recursive_lock_destroy_multithreaded (LOCK) : 0) | ||
| 353 | extern int glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock); | ||
| 354 | extern int glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock); | ||
| 355 | extern int glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock); | ||
| 356 | extern int glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock); | ||
| 357 | |||
| 358 | # endif | ||
| 359 | |||
| 360 | /* -------------------------- gl_once_t datatype -------------------------- */ | ||
| 361 | |||
| 362 | typedef pthread_once_t gl_once_t; | ||
| 363 | # define gl_once_define(STORAGECLASS, NAME) \ | ||
| 364 | STORAGECLASS pthread_once_t NAME = PTHREAD_ONCE_INIT; | ||
| 365 | # define glthread_once(ONCE_CONTROL, INITFUNCTION) \ | ||
| 366 | (pthread_in_use () \ | ||
| 367 | ? pthread_once (ONCE_CONTROL, INITFUNCTION) \ | ||
| 368 | : (glthread_once_singlethreaded (ONCE_CONTROL) ? (INITFUNCTION (), 0) : 0)) | ||
| 369 | extern int glthread_once_singlethreaded (pthread_once_t *once_control); | ||
| 370 | |||
| 371 | # ifdef __cplusplus | ||
| 372 | } | ||
| 373 | # endif | ||
| 374 | |||
| 375 | #endif | ||
| 376 | |||
| 377 | /* ========================================================================= */ | ||
| 378 | |||
| 379 | #if USE_PTH_THREADS | ||
| 380 | |||
| 381 | /* Use the GNU Pth threads library. */ | ||
| 382 | |||
| 383 | # include <pth.h> | ||
| 384 | |||
| 385 | # ifdef __cplusplus | ||
| 386 | extern "C" { | ||
| 387 | # endif | ||
| 388 | |||
| 389 | # if USE_PTH_THREADS_WEAK | ||
| 390 | |||
| 391 | /* Use weak references to the GNU Pth threads library. */ | ||
| 392 | |||
| 393 | # pragma weak pth_mutex_init | ||
| 394 | # pragma weak pth_mutex_acquire | ||
| 395 | # pragma weak pth_mutex_release | ||
| 396 | # pragma weak pth_rwlock_init | ||
| 397 | # pragma weak pth_rwlock_acquire | ||
| 398 | # pragma weak pth_rwlock_release | ||
| 399 | # pragma weak pth_once | ||
| 400 | |||
| 401 | # pragma weak pth_cancel | ||
| 402 | # define pth_in_use() (pth_cancel != NULL) | ||
| 403 | |||
| 404 | # else | ||
| 405 | |||
| 406 | # define pth_in_use() 1 | ||
| 407 | |||
| 408 | # endif | ||
| 409 | |||
| 410 | /* -------------------------- gl_lock_t datatype -------------------------- */ | ||
| 411 | |||
| 412 | typedef pth_mutex_t gl_lock_t; | ||
| 413 | # define gl_lock_define(STORAGECLASS, NAME) \ | ||
| 414 | STORAGECLASS pth_mutex_t NAME; | ||
| 415 | # define gl_lock_define_initialized(STORAGECLASS, NAME) \ | ||
| 416 | STORAGECLASS pth_mutex_t NAME = gl_lock_initializer; | ||
| 417 | # define gl_lock_initializer \ | ||
| 418 | PTH_MUTEX_INIT | ||
| 419 | # define glthread_lock_init(LOCK) \ | ||
| 420 | (pth_in_use () && !pth_mutex_init (LOCK) ? errno : 0) | ||
| 421 | # define glthread_lock_lock(LOCK) \ | ||
| 422 | (pth_in_use () && !pth_mutex_acquire (LOCK, 0, NULL) ? errno : 0) | ||
| 423 | # define glthread_lock_unlock(LOCK) \ | ||
| 424 | (pth_in_use () && !pth_mutex_release (LOCK) ? errno : 0) | ||
| 425 | # define glthread_lock_destroy(LOCK) \ | ||
| 426 | ((void)(LOCK), 0) | ||
| 427 | |||
| 428 | /* ------------------------- gl_rwlock_t datatype ------------------------- */ | ||
| 429 | |||
| 430 | typedef pth_rwlock_t gl_rwlock_t; | ||
| 431 | # define gl_rwlock_define(STORAGECLASS, NAME) \ | ||
| 432 | STORAGECLASS pth_rwlock_t NAME; | ||
| 433 | # define gl_rwlock_define_initialized(STORAGECLASS, NAME) \ | ||
| 434 | STORAGECLASS pth_rwlock_t NAME = gl_rwlock_initializer; | ||
| 435 | # define gl_rwlock_initializer \ | ||
| 436 | PTH_RWLOCK_INIT | ||
| 437 | # define glthread_rwlock_init(LOCK) \ | ||
| 438 | (pth_in_use () && !pth_rwlock_init (LOCK) ? errno : 0) | ||
| 439 | # define glthread_rwlock_rdlock(LOCK) \ | ||
| 440 | (pth_in_use () && !pth_rwlock_acquire (LOCK, PTH_RWLOCK_RD, 0, NULL) ? errno : 0) | ||
| 441 | # define glthread_rwlock_wrlock(LOCK) \ | ||
| 442 | (pth_in_use () && !pth_rwlock_acquire (LOCK, PTH_RWLOCK_RW, 0, NULL) ? errno : 0) | ||
| 443 | # define glthread_rwlock_unlock(LOCK) \ | ||
| 444 | (pth_in_use () && !pth_rwlock_release (LOCK) ? errno : 0) | ||
| 445 | # define glthread_rwlock_destroy(LOCK) \ | ||
| 446 | ((void)(LOCK), 0) | ||
| 447 | |||
| 448 | /* --------------------- gl_recursive_lock_t datatype --------------------- */ | ||
| 449 | |||
| 450 | /* In Pth, mutexes are recursive by default. */ | ||
| 451 | typedef pth_mutex_t gl_recursive_lock_t; | ||
| 452 | # define gl_recursive_lock_define(STORAGECLASS, NAME) \ | ||
| 453 | STORAGECLASS pth_mutex_t NAME; | ||
| 454 | # define gl_recursive_lock_define_initialized(STORAGECLASS, NAME) \ | ||
| 455 | STORAGECLASS pth_mutex_t NAME = gl_recursive_lock_initializer; | ||
| 456 | # define gl_recursive_lock_initializer \ | ||
| 457 | PTH_MUTEX_INIT | ||
| 458 | # define glthread_recursive_lock_init(LOCK) \ | ||
| 459 | (pth_in_use () && !pth_mutex_init (LOCK) ? errno : 0) | ||
| 460 | # define glthread_recursive_lock_lock(LOCK) \ | ||
| 461 | (pth_in_use () && !pth_mutex_acquire (LOCK, 0, NULL) ? errno : 0) | ||
| 462 | # define glthread_recursive_lock_unlock(LOCK) \ | ||
| 463 | (pth_in_use () && !pth_mutex_release (LOCK) ? errno : 0) | ||
| 464 | # define glthread_recursive_lock_destroy(LOCK) \ | ||
| 465 | ((void)(LOCK), 0) | ||
| 466 | |||
| 467 | /* -------------------------- gl_once_t datatype -------------------------- */ | ||
| 468 | |||
| 469 | typedef pth_once_t gl_once_t; | ||
| 470 | # define gl_once_define(STORAGECLASS, NAME) \ | ||
| 471 | STORAGECLASS pth_once_t NAME = PTH_ONCE_INIT; | ||
| 472 | # define glthread_once(ONCE_CONTROL, INITFUNCTION) \ | ||
| 473 | (pth_in_use () \ | ||
| 474 | ? glthread_once_multithreaded (ONCE_CONTROL, INITFUNCTION) \ | ||
| 475 | : (glthread_once_singlethreaded (ONCE_CONTROL) ? (INITFUNCTION (), 0) : 0)) | ||
| 476 | extern int glthread_once_multithreaded (pth_once_t *once_control, void (*initfunction) (void)); | ||
| 477 | extern int glthread_once_singlethreaded (pth_once_t *once_control); | ||
| 478 | |||
| 479 | # ifdef __cplusplus | ||
| 480 | } | ||
| 481 | # endif | ||
| 482 | |||
| 483 | #endif | ||
| 484 | |||
| 485 | /* ========================================================================= */ | ||
| 486 | |||
| 487 | #if USE_SOLARIS_THREADS | ||
| 488 | |||
| 489 | /* Use the old Solaris threads library. */ | ||
| 490 | |||
| 491 | # include <thread.h> | ||
| 492 | # include <synch.h> | ||
| 493 | |||
| 494 | # ifdef __cplusplus | ||
| 495 | extern "C" { | ||
| 496 | # endif | ||
| 497 | |||
| 498 | # if USE_SOLARIS_THREADS_WEAK | ||
| 499 | |||
| 500 | /* Use weak references to the old Solaris threads library. */ | ||
| 501 | |||
| 502 | # pragma weak mutex_init | ||
| 503 | # pragma weak mutex_lock | ||
| 504 | # pragma weak mutex_unlock | ||
| 505 | # pragma weak mutex_destroy | ||
| 506 | # pragma weak rwlock_init | ||
| 507 | # pragma weak rw_rdlock | ||
| 508 | # pragma weak rw_wrlock | ||
| 509 | # pragma weak rw_unlock | ||
| 510 | # pragma weak rwlock_destroy | ||
| 511 | # pragma weak thr_self | ||
| 512 | |||
| 513 | # pragma weak thr_suspend | ||
| 514 | # define thread_in_use() (thr_suspend != NULL) | ||
| 515 | |||
| 516 | # else | ||
| 517 | |||
| 518 | # define thread_in_use() 1 | ||
| 519 | |||
| 520 | # endif | ||
| 521 | |||
| 522 | /* -------------------------- gl_lock_t datatype -------------------------- */ | ||
| 523 | |||
| 524 | typedef mutex_t gl_lock_t; | ||
| 525 | # define gl_lock_define(STORAGECLASS, NAME) \ | ||
| 526 | STORAGECLASS mutex_t NAME; | ||
| 527 | # define gl_lock_define_initialized(STORAGECLASS, NAME) \ | ||
| 528 | STORAGECLASS mutex_t NAME = gl_lock_initializer; | ||
| 529 | # define gl_lock_initializer \ | ||
| 530 | DEFAULTMUTEX | ||
| 531 | # define glthread_lock_init(LOCK) \ | ||
| 532 | (thread_in_use () ? mutex_init (LOCK, USYNC_THREAD, NULL) : 0) | ||
| 533 | # define glthread_lock_lock(LOCK) \ | ||
| 534 | (thread_in_use () ? mutex_lock (LOCK) : 0) | ||
| 535 | # define glthread_lock_unlock(LOCK) \ | ||
| 536 | (thread_in_use () ? mutex_unlock (LOCK) : 0) | ||
| 537 | # define glthread_lock_destroy(LOCK) \ | ||
| 538 | (thread_in_use () ? mutex_destroy (LOCK) : 0) | ||
| 539 | |||
| 540 | /* ------------------------- gl_rwlock_t datatype ------------------------- */ | ||
| 541 | |||
| 542 | typedef rwlock_t gl_rwlock_t; | ||
| 543 | # define gl_rwlock_define(STORAGECLASS, NAME) \ | ||
| 544 | STORAGECLASS rwlock_t NAME; | ||
| 545 | # define gl_rwlock_define_initialized(STORAGECLASS, NAME) \ | ||
| 546 | STORAGECLASS rwlock_t NAME = gl_rwlock_initializer; | ||
| 547 | # define gl_rwlock_initializer \ | ||
| 548 | DEFAULTRWLOCK | ||
| 549 | # define glthread_rwlock_init(LOCK) \ | ||
| 550 | (thread_in_use () ? rwlock_init (LOCK, USYNC_THREAD, NULL) : 0) | ||
| 551 | # define glthread_rwlock_rdlock(LOCK) \ | ||
| 552 | (thread_in_use () ? rw_rdlock (LOCK) : 0) | ||
| 553 | # define glthread_rwlock_wrlock(LOCK) \ | ||
| 554 | (thread_in_use () ? rw_wrlock (LOCK) : 0) | ||
| 555 | # define glthread_rwlock_unlock(LOCK) \ | ||
| 556 | (thread_in_use () ? rw_unlock (LOCK) : 0) | ||
| 557 | # define glthread_rwlock_destroy(LOCK) \ | ||
| 558 | (thread_in_use () ? rwlock_destroy (LOCK) : 0) | ||
| 559 | |||
| 560 | /* --------------------- gl_recursive_lock_t datatype --------------------- */ | ||
| 561 | |||
| 562 | /* Old Solaris threads did not have recursive locks. | ||
| 563 | We have to implement them ourselves. */ | ||
| 564 | |||
| 565 | typedef struct | ||
| 566 | { | ||
| 567 | mutex_t mutex; | ||
| 568 | thread_t owner; | ||
| 569 | unsigned long depth; | ||
| 570 | } | ||
| 571 | gl_recursive_lock_t; | ||
| 572 | # define gl_recursive_lock_define(STORAGECLASS, NAME) \ | ||
| 573 | STORAGECLASS gl_recursive_lock_t NAME; | ||
| 574 | # define gl_recursive_lock_define_initialized(STORAGECLASS, NAME) \ | ||
| 575 | STORAGECLASS gl_recursive_lock_t NAME = gl_recursive_lock_initializer; | ||
| 576 | # define gl_recursive_lock_initializer \ | ||
| 577 | { DEFAULTMUTEX, (thread_t) 0, 0 } | ||
| 578 | # define glthread_recursive_lock_init(LOCK) \ | ||
| 579 | (thread_in_use () ? glthread_recursive_lock_init_multithreaded (LOCK) : 0) | ||
| 580 | # define glthread_recursive_lock_lock(LOCK) \ | ||
| 581 | (thread_in_use () ? glthread_recursive_lock_lock_multithreaded (LOCK) : 0) | ||
| 582 | # define glthread_recursive_lock_unlock(LOCK) \ | ||
| 583 | (thread_in_use () ? glthread_recursive_lock_unlock_multithreaded (LOCK) : 0) | ||
| 584 | # define glthread_recursive_lock_destroy(LOCK) \ | ||
| 585 | (thread_in_use () ? glthread_recursive_lock_destroy_multithreaded (LOCK) : 0) | ||
| 586 | extern int glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock); | ||
| 587 | extern int glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock); | ||
| 588 | extern int glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock); | ||
| 589 | extern int glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock); | ||
| 590 | |||
| 591 | /* -------------------------- gl_once_t datatype -------------------------- */ | ||
| 592 | |||
| 593 | typedef struct | ||
| 594 | { | ||
| 595 | volatile int inited; | ||
| 596 | mutex_t mutex; | ||
| 597 | } | ||
| 598 | gl_once_t; | ||
| 599 | # define gl_once_define(STORAGECLASS, NAME) \ | ||
| 600 | STORAGECLASS gl_once_t NAME = { 0, DEFAULTMUTEX }; | ||
| 601 | # define glthread_once(ONCE_CONTROL, INITFUNCTION) \ | ||
| 602 | (thread_in_use () \ | ||
| 603 | ? glthread_once_multithreaded (ONCE_CONTROL, INITFUNCTION) \ | ||
| 604 | : (glthread_once_singlethreaded (ONCE_CONTROL) ? (INITFUNCTION (), 0) : 0)) | ||
| 605 | extern int glthread_once_multithreaded (gl_once_t *once_control, void (*initfunction) (void)); | ||
| 606 | extern int glthread_once_singlethreaded (gl_once_t *once_control); | ||
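For the Solaris gl_once_t above, the multithreaded path in lock.c boils down to a double-checked flag under the mutex, roughly like this sketch (illustrative, not a verbatim copy):

    int
    glthread_once_multithreaded (gl_once_t *once_control, void (*initfunction) (void))
    {
      if (!once_control->inited)
        {
          /* Take the mutex so that a thread arriving while initfunction is
             still running waits until it has finished.  */
          int err = mutex_lock (&once_control->mutex);
          if (err != 0)
            return err;
          if (!once_control->inited)
            {
              once_control->inited = 1;
              initfunction ();
            }
          return mutex_unlock (&once_control->mutex);
        }
      return 0;
    }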
| 607 | |||
| 608 | # ifdef __cplusplus | ||
| 609 | } | ||
| 610 | # endif | ||
| 611 | |||
| 612 | #endif | ||
| 613 | |||
| 614 | /* ========================================================================= */ | ||
| 615 | |||
| 616 | #if USE_WINDOWS_THREADS | ||
| 617 | |||
| 618 | # define WIN32_LEAN_AND_MEAN /* avoid including junk */ | ||
| 619 | # include <windows.h> | ||
| 620 | |||
| 621 | # ifdef __cplusplus | ||
| 622 | extern "C" { | ||
| 623 | # endif | ||
| 624 | |||
| 625 | /* We can use CRITICAL_SECTION directly, rather than the native Windows Event, | ||
| 626 | Mutex, Semaphore types, because | ||
| 627 | - we need only to synchronize inside a single process (address space), | ||
| 628 | not inter-process locking, | ||
| 629 | - we don't need to support trylock operations. (TryEnterCriticalSection | ||
| 630 | does not work on Windows 95/98/ME. Packages that need trylock usually | ||
| 631 | define their own mutex type.) */ | ||
| 632 | |||
| 633 | /* There is no way to statically initialize a CRITICAL_SECTION. It needs | ||
| 634 | to be done lazily, once only. For this we need spinlocks. */ | ||
| 635 | |||
| 636 | typedef struct { volatile int done; volatile long started; } gl_spinlock_t; | ||
| 637 | |||
| 638 | /* -------------------------- gl_lock_t datatype -------------------------- */ | ||
| 639 | |||
| 640 | typedef struct | ||
| 641 | { | ||
| 642 | gl_spinlock_t guard; /* protects the initialization */ | ||
| 643 | CRITICAL_SECTION lock; | ||
| 644 | } | ||
| 645 | gl_lock_t; | ||
| 646 | # define gl_lock_define(STORAGECLASS, NAME) \ | ||
| 647 | STORAGECLASS gl_lock_t NAME; | ||
| 648 | # define gl_lock_define_initialized(STORAGECLASS, NAME) \ | ||
| 649 | STORAGECLASS gl_lock_t NAME = gl_lock_initializer; | ||
| 650 | # define gl_lock_initializer \ | ||
| 651 | { { 0, -1 } } | ||
| 652 | # define glthread_lock_init(LOCK) \ | ||
| 653 | (glthread_lock_init_func (LOCK), 0) | ||
| 654 | # define glthread_lock_lock(LOCK) \ | ||
| 655 | glthread_lock_lock_func (LOCK) | ||
| 656 | # define glthread_lock_unlock(LOCK) \ | ||
| 657 | glthread_lock_unlock_func (LOCK) | ||
| 658 | # define glthread_lock_destroy(LOCK) \ | ||
| 659 | glthread_lock_destroy_func (LOCK) | ||
| 660 | extern void glthread_lock_init_func (gl_lock_t *lock); | ||
| 661 | extern int glthread_lock_lock_func (gl_lock_t *lock); | ||
| 662 | extern int glthread_lock_unlock_func (gl_lock_t *lock); | ||
| 663 | extern int glthread_lock_destroy_func (gl_lock_t *lock); | ||
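The lazy, once-only initialization that the spinlock guard makes possible looks roughly like the following in the lock function of lock.c (a sketch under the assumption that glthread_lock_init_func sets guard.done after InitializeCriticalSection; InterlockedIncrement, EnterCriticalSection and Sleep are the ordinary Win32 calls):

    int
    glthread_lock_lock_func (gl_lock_t *lock)
    {
      if (!lock->guard.done)
        {
          if (InterlockedIncrement (&lock->guard.started) == 0)
            /* This thread is the first one to need this lock; initialize it.  */
            glthread_lock_init (lock);
          else
            /* Yield the CPU while another thread finishes initializing
               this lock.  */
            while (!lock->guard.done)
              Sleep (0);
        }
      EnterCriticalSection (&lock->lock);
      return 0;
    }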
| 664 | |||
| 665 | /* ------------------------- gl_rwlock_t datatype ------------------------- */ | ||
| 666 | |||
| 667 | /* It is impossible to implement read-write locks using plain locks, without | ||
| 668 | introducing an extra thread dedicated to managing read-write locks. | ||
| 669 | Therefore here we need to use the low-level Event type. */ | ||
| 670 | |||
| 671 | typedef struct | ||
| 672 | { | ||
| 673 | HANDLE *array; /* array of waiting threads, each represented by an event */ | ||
| 674 | unsigned int count; /* number of waiting threads */ | ||
| 675 | unsigned int alloc; /* length of allocated array */ | ||
| 676 | unsigned int offset; /* index of first waiting thread in array */ | ||
| 677 | } | ||
| 678 | gl_carray_waitqueue_t; | ||
| 679 | typedef struct | ||
| 680 | { | ||
| 681 | gl_spinlock_t guard; /* protects the initialization */ | ||
| 682 | CRITICAL_SECTION lock; /* protects the remaining fields */ | ||
| 683 | gl_carray_waitqueue_t waiting_readers; /* waiting readers */ | ||
| 684 | gl_carray_waitqueue_t waiting_writers; /* waiting writers */ | ||
| 685 | int runcount; /* number of readers running, or -1 when a writer runs */ | ||
| 686 | } | ||
| 687 | gl_rwlock_t; | ||
| 688 | # define gl_rwlock_define(STORAGECLASS, NAME) \ | ||
| 689 | STORAGECLASS gl_rwlock_t NAME; | ||
| 690 | # define gl_rwlock_define_initialized(STORAGECLASS, NAME) \ | ||
| 691 | STORAGECLASS gl_rwlock_t NAME = gl_rwlock_initializer; | ||
| 692 | # define gl_rwlock_initializer \ | ||
| 693 | { { 0, -1 } } | ||
| 694 | # define glthread_rwlock_init(LOCK) \ | ||
| 695 | (glthread_rwlock_init_func (LOCK), 0) | ||
| 696 | # define glthread_rwlock_rdlock(LOCK) \ | ||
| 697 | glthread_rwlock_rdlock_func (LOCK) | ||
| 698 | # define glthread_rwlock_wrlock(LOCK) \ | ||
| 699 | glthread_rwlock_wrlock_func (LOCK) | ||
| 700 | # define glthread_rwlock_unlock(LOCK) \ | ||
| 701 | glthread_rwlock_unlock_func (LOCK) | ||
| 702 | # define glthread_rwlock_destroy(LOCK) \ | ||
| 703 | glthread_rwlock_destroy_func (LOCK) | ||
| 704 | extern void glthread_rwlock_init_func (gl_rwlock_t *lock); | ||
| 705 | extern int glthread_rwlock_rdlock_func (gl_rwlock_t *lock); | ||
| 706 | extern int glthread_rwlock_wrlock_func (gl_rwlock_t *lock); | ||
| 707 | extern int glthread_rwlock_unlock_func (gl_rwlock_t *lock); | ||
| 708 | extern int glthread_rwlock_destroy_func (gl_rwlock_t *lock); | ||
| 709 | |||
| 710 | /* --------------------- gl_recursive_lock_t datatype --------------------- */ | ||
| 711 | |||
| 712 | /* The native Windows documentation says that CRITICAL_SECTION already | ||
| 713 | implements a recursive lock. But we need not rely on it: It's easy to | ||
| 714 | implement a recursive lock without this assumption. */ | ||
| 715 | |||
| 716 | typedef struct | ||
| 717 | { | ||
| 718 | gl_spinlock_t guard; /* protects the initialization */ | ||
| 719 | DWORD owner; | ||
| 720 | unsigned long depth; | ||
| 721 | CRITICAL_SECTION lock; | ||
| 722 | } | ||
| 723 | gl_recursive_lock_t; | ||
| 724 | # define gl_recursive_lock_define(STORAGECLASS, NAME) \ | ||
| 725 | STORAGECLASS gl_recursive_lock_t NAME; | ||
| 726 | # define gl_recursive_lock_define_initialized(STORAGECLASS, NAME) \ | ||
| 727 | STORAGECLASS gl_recursive_lock_t NAME = gl_recursive_lock_initializer; | ||
| 728 | # define gl_recursive_lock_initializer \ | ||
| 729 | { { 0, -1 }, 0, 0 } | ||
| 730 | # define glthread_recursive_lock_init(LOCK) \ | ||
| 731 | (glthread_recursive_lock_init_func (LOCK), 0) | ||
| 732 | # define glthread_recursive_lock_lock(LOCK) \ | ||
| 733 | glthread_recursive_lock_lock_func (LOCK) | ||
| 734 | # define glthread_recursive_lock_unlock(LOCK) \ | ||
| 735 | glthread_recursive_lock_unlock_func (LOCK) | ||
| 736 | # define glthread_recursive_lock_destroy(LOCK) \ | ||
| 737 | glthread_recursive_lock_destroy_func (LOCK) | ||
| 738 | extern void glthread_recursive_lock_init_func (gl_recursive_lock_t *lock); | ||
| 739 | extern int glthread_recursive_lock_lock_func (gl_recursive_lock_t *lock); | ||
| 740 | extern int glthread_recursive_lock_unlock_func (gl_recursive_lock_t *lock); | ||
| 741 | extern int glthread_recursive_lock_destroy_func (gl_recursive_lock_t *lock); | ||
| 742 | |||
| 743 | /* -------------------------- gl_once_t datatype -------------------------- */ | ||
| 744 | |||
| 745 | typedef struct | ||
| 746 | { | ||
| 747 | volatile int inited; | ||
| 748 | volatile long started; | ||
| 749 | CRITICAL_SECTION lock; | ||
| 750 | } | ||
| 751 | gl_once_t; | ||
| 752 | # define gl_once_define(STORAGECLASS, NAME) \ | ||
| 753 | STORAGECLASS gl_once_t NAME = { -1, -1 }; | ||
| 754 | # define glthread_once(ONCE_CONTROL, INITFUNCTION) \ | ||
| 755 | (glthread_once_func (ONCE_CONTROL, INITFUNCTION), 0) | ||
| 756 | extern void glthread_once_func (gl_once_t *once_control, void (*initfunction) (void)); | ||
| 757 | |||
| 758 | # ifdef __cplusplus | ||
| 759 | } | ||
| 760 | # endif | ||
| 761 | |||
| 762 | #endif | ||
| 763 | |||
| 764 | /* ========================================================================= */ | ||
| 765 | |||
| 766 | #if !(USE_POSIX_THREADS || USE_PTH_THREADS || USE_SOLARIS_THREADS || USE_WINDOWS_THREADS) | ||
| 767 | |||
| 768 | /* Provide a dummy implementation if threads are not supported. */ | ||
| 769 | |||
| 770 | /* -------------------------- gl_lock_t datatype -------------------------- */ | ||
| 771 | |||
| 772 | typedef int gl_lock_t; | ||
| 773 | # define gl_lock_define(STORAGECLASS, NAME) | ||
| 774 | # define gl_lock_define_initialized(STORAGECLASS, NAME) | ||
| 775 | # define glthread_lock_init(NAME) 0 | ||
| 776 | # define glthread_lock_lock(NAME) 0 | ||
| 777 | # define glthread_lock_unlock(NAME) 0 | ||
| 778 | # define glthread_lock_destroy(NAME) 0 | ||
| 779 | |||
| 780 | /* ------------------------- gl_rwlock_t datatype ------------------------- */ | ||
| 781 | |||
| 782 | typedef int gl_rwlock_t; | ||
| 783 | # define gl_rwlock_define(STORAGECLASS, NAME) | ||
| 784 | # define gl_rwlock_define_initialized(STORAGECLASS, NAME) | ||
| 785 | # define glthread_rwlock_init(NAME) 0 | ||
| 786 | # define glthread_rwlock_rdlock(NAME) 0 | ||
| 787 | # define glthread_rwlock_wrlock(NAME) 0 | ||
| 788 | # define glthread_rwlock_unlock(NAME) 0 | ||
| 789 | # define glthread_rwlock_destroy(NAME) 0 | ||
| 790 | |||
| 791 | /* --------------------- gl_recursive_lock_t datatype --------------------- */ | ||
| 792 | |||
| 793 | typedef int gl_recursive_lock_t; | ||
| 794 | # define gl_recursive_lock_define(STORAGECLASS, NAME) | ||
| 795 | # define gl_recursive_lock_define_initialized(STORAGECLASS, NAME) | ||
| 796 | # define glthread_recursive_lock_init(NAME) 0 | ||
| 797 | # define glthread_recursive_lock_lock(NAME) 0 | ||
| 798 | # define glthread_recursive_lock_unlock(NAME) 0 | ||
| 799 | # define glthread_recursive_lock_destroy(NAME) 0 | ||
| 800 | |||
| 801 | /* -------------------------- gl_once_t datatype -------------------------- */ | ||
| 802 | |||
| 803 | typedef int gl_once_t; | ||
| 804 | # define gl_once_define(STORAGECLASS, NAME) \ | ||
| 805 | STORAGECLASS gl_once_t NAME = 0; | ||
| 806 | # define glthread_once(ONCE_CONTROL, INITFUNCTION) \ | ||
| 807 | (*(ONCE_CONTROL) == 0 ? (*(ONCE_CONTROL) = ~ 0, INITFUNCTION (), 0) : 0) | ||
| 808 | |||
| 809 | #endif | ||
| 810 | |||
| 811 | /* ========================================================================= */ | ||
| 812 | |||
| 813 | /* Macros with built-in error handling. */ | ||
| 814 | |||
| 815 | /* -------------------------- gl_lock_t datatype -------------------------- */ | ||
| 816 | |||
| 817 | #define gl_lock_init(NAME) \ | ||
| 818 | do \ | ||
| 819 | { \ | ||
| 820 | if (glthread_lock_init (&NAME)) \ | ||
| 821 | abort (); \ | ||
| 822 | } \ | ||
| 823 | while (0) | ||
| 824 | #define gl_lock_lock(NAME) \ | ||
| 825 | do \ | ||
| 826 | { \ | ||
| 827 | if (glthread_lock_lock (&NAME)) \ | ||
| 828 | abort (); \ | ||
| 829 | } \ | ||
| 830 | while (0) | ||
| 831 | #define gl_lock_unlock(NAME) \ | ||
| 832 | do \ | ||
| 833 | { \ | ||
| 834 | if (glthread_lock_unlock (&NAME)) \ | ||
| 835 | abort (); \ | ||
| 836 | } \ | ||
| 837 | while (0) | ||
| 838 | #define gl_lock_destroy(NAME) \ | ||
| 839 | do \ | ||
| 840 | { \ | ||
| 841 | if (glthread_lock_destroy (&NAME)) \ | ||
| 842 | abort (); \ | ||
| 843 | } \ | ||
| 844 | while (0) | ||
| 845 | |||
| 846 | /* ------------------------- gl_rwlock_t datatype ------------------------- */ | ||
| 847 | |||
| 848 | #define gl_rwlock_init(NAME) \ | ||
| 849 | do \ | ||
| 850 | { \ | ||
| 851 | if (glthread_rwlock_init (&NAME)) \ | ||
| 852 | abort (); \ | ||
| 853 | } \ | ||
| 854 | while (0) | ||
| 855 | #define gl_rwlock_rdlock(NAME) \ | ||
| 856 | do \ | ||
| 857 | { \ | ||
| 858 | if (glthread_rwlock_rdlock (&NAME)) \ | ||
| 859 | abort (); \ | ||
| 860 | } \ | ||
| 861 | while (0) | ||
| 862 | #define gl_rwlock_wrlock(NAME) \ | ||
| 863 | do \ | ||
| 864 | { \ | ||
| 865 | if (glthread_rwlock_wrlock (&NAME)) \ | ||
| 866 | abort (); \ | ||
| 867 | } \ | ||
| 868 | while (0) | ||
| 869 | #define gl_rwlock_unlock(NAME) \ | ||
| 870 | do \ | ||
| 871 | { \ | ||
| 872 | if (glthread_rwlock_unlock (&NAME)) \ | ||
| 873 | abort (); \ | ||
| 874 | } \ | ||
| 875 | while (0) | ||
| 876 | #define gl_rwlock_destroy(NAME) \ | ||
| 877 | do \ | ||
| 878 | { \ | ||
| 879 | if (glthread_rwlock_destroy (&NAME)) \ | ||
| 880 | abort (); \ | ||
| 881 | } \ | ||
| 882 | while (0) | ||
| 883 | |||
| 884 | /* --------------------- gl_recursive_lock_t datatype --------------------- */ | ||
| 885 | |||
| 886 | #define gl_recursive_lock_init(NAME) \ | ||
| 887 | do \ | ||
| 888 | { \ | ||
| 889 | if (glthread_recursive_lock_init (&NAME)) \ | ||
| 890 | abort (); \ | ||
| 891 | } \ | ||
| 892 | while (0) | ||
| 893 | #define gl_recursive_lock_lock(NAME) \ | ||
| 894 | do \ | ||
| 895 | { \ | ||
| 896 | if (glthread_recursive_lock_lock (&NAME)) \ | ||
| 897 | abort (); \ | ||
| 898 | } \ | ||
| 899 | while (0) | ||
| 900 | #define gl_recursive_lock_unlock(NAME) \ | ||
| 901 | do \ | ||
| 902 | { \ | ||
| 903 | if (glthread_recursive_lock_unlock (&NAME)) \ | ||
| 904 | abort (); \ | ||
| 905 | } \ | ||
| 906 | while (0) | ||
| 907 | #define gl_recursive_lock_destroy(NAME) \ | ||
| 908 | do \ | ||
| 909 | { \ | ||
| 910 | if (glthread_recursive_lock_destroy (&NAME)) \ | ||
| 911 | abort (); \ | ||
| 912 | } \ | ||
| 913 | while (0) | ||
| 914 | |||
| 915 | /* -------------------------- gl_once_t datatype -------------------------- */ | ||
| 916 | |||
| 917 | #define gl_once(NAME, INITFUNCTION) \ | ||
| 918 | do \ | ||
| 919 | { \ | ||
| 920 | if (glthread_once (&NAME, INITFUNCTION)) \ | ||
| 921 | abort (); \ | ||
| 922 | } \ | ||
| 923 | while (0) | ||
| 924 | |||
| 925 | /* ========================================================================= */ | ||
| 926 | |||
| 927 | #endif /* _LOCK_H */ | ||
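Putting the header together, consumer code typically uses the abort-on-error macros above along these lines (a hypothetical example, not part of the patch; the names my_counter, my_counter_lock, table_once and init_table are made up):

    #include "glthread/lock.h"

    /* One mutex and one once-control, both statically initialized.  */
    gl_lock_define_initialized (static, my_counter_lock)
    gl_once_define (static, table_once)

    static int my_counter;

    static void
    init_table (void)
    {
      /* one-time setup goes here */
    }

    int
    increment_counter (void)
    {
      int value;

      gl_once (table_once, init_table);   /* runs init_table exactly once */
      gl_lock_lock (my_counter_lock);     /* aborts if locking fails */
      value = ++my_counter;
      gl_lock_unlock (my_counter_lock);
      return value;
    }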
diff --git a/gl/glthread/threadlib.c b/gl/glthread/threadlib.c new file mode 100644 index 00000000..b4476573 --- /dev/null +++ b/gl/glthread/threadlib.c | |||
| @@ -0,0 +1,73 @@ | |||
| 1 | /* Multithreading primitives. | ||
| 2 | Copyright (C) 2005-2013 Free Software Foundation, Inc. | ||
| 3 | |||
| 4 | This program is free software; you can redistribute it and/or modify | ||
| 5 | it under the terms of the GNU General Public License as published by | ||
| 6 | the Free Software Foundation; either version 3, or (at your option) | ||
| 7 | any later version. | ||
| 8 | |||
| 9 | This program is distributed in the hope that it will be useful, | ||
| 10 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | GNU General Public License for more details. | ||
| 13 | |||
| 14 | You should have received a copy of the GNU General Public License | ||
| 15 | along with this program; if not, see <http://www.gnu.org/licenses/>. */ | ||
| 16 | |||
| 17 | /* Written by Bruno Haible <bruno@clisp.org>, 2005. */ | ||
| 18 | |||
| 19 | #include <config.h> | ||
| 20 | |||
| 21 | /* ========================================================================= */ | ||
| 22 | |||
| 23 | #if USE_POSIX_THREADS | ||
| 24 | |||
| 25 | /* Use the POSIX threads library. */ | ||
| 26 | |||
| 27 | # include <pthread.h> | ||
| 28 | # include <stdlib.h> | ||
| 29 | |||
| 30 | # if PTHREAD_IN_USE_DETECTION_HARD | ||
| 31 | |||
| 32 | /* The function to be executed by a dummy thread. */ | ||
| 33 | static void * | ||
| 34 | dummy_thread_func (void *arg) | ||
| 35 | { | ||
| 36 | return arg; | ||
| 37 | } | ||
| 38 | |||
| 39 | int | ||
| 40 | glthread_in_use (void) | ||
| 41 | { | ||
| 42 | static int tested; | ||
| 43 | static int result; /* 1: linked with -lpthread, 0: only with libc */ | ||
| 44 | |||
| 45 | if (!tested) | ||
| 46 | { | ||
| 47 | pthread_t thread; | ||
| 48 | |||
| 49 | if (pthread_create (&thread, NULL, dummy_thread_func, NULL) != 0) | ||
| 50 | /* Thread creation failed. */ | ||
| 51 | result = 0; | ||
| 52 | else | ||
| 53 | { | ||
| 54 | /* Thread creation works. */ | ||
| 55 | void *retval; | ||
| 56 | if (pthread_join (thread, &retval) != 0) | ||
| 57 | abort (); | ||
| 58 | result = 1; | ||
| 59 | } | ||
| 60 | tested = 1; | ||
| 61 | } | ||
| 62 | return result; | ||
| 63 | } | ||
| 64 | |||
| 65 | # endif | ||
| 66 | |||
| 67 | #endif | ||
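glthread_in_use above is the run-time fallback for platforms where testing a weak symbol is not a reliable way to detect whether libpthread is linked in. In lock.h (in the portion of this patch not shown here), the POSIX branch presumably routes its detection macro to this function, roughly as follows (sketch based on how the weak-reference variants in lock.h are set up):

    # if PTHREAD_IN_USE_DETECTION_HARD
       /* The detection must be done at run time, via the dummy thread
          created in glthread_in_use ().  */
      extern int glthread_in_use (void);
    #  define pthread_in_use() glthread_in_use ()
    # endif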
| 68 | |||
| 69 | /* ========================================================================= */ | ||
| 70 | |||
| 71 | /* This declaration is solely to ensure that after preprocessing | ||
| 72 | this file is never empty. */ | ||
| 73 | typedef int dummy; | ||
