Giving up use of function pthread_cancel()

Thomas Schmitt 2017-01-28 21:02:17 +01:00
parent 6f6bf688d9
commit 724d518dbc
5 changed files with 95 additions and 32 deletions
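
In short: the commit replaces asynchronous cancellation of the fifo shoveller thread with a cooperative handshake. burn_fifo_abort() now sets a volatile do_abort flag and waits via pthread_join(), while the shoveller polls the flag at safe points and ends on its own. A minimal standalone sketch of that pattern, with purely illustrative names (worker_main, request_stop) that are not part of the libburn API:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative stand-in for burn_source_fifo.do_abort */
static volatile int do_abort = 0;

static void *worker_main(void *arg)
{
        (void) arg;
        while (!do_abort)               /* poll the flag at safe points ... */
                usleep(10000);          /* ... instead of being cancelled mid-work */
        /* release resources here, then end the thread normally */
        return NULL;
}

static int request_stop(pthread_t worker)
{
        do_abort = 1;                      /* ask the worker to finish */
        return pthread_join(worker, NULL); /* wait, rather than pthread_cancel() */
}

int main(void)
{
        pthread_t worker;

        if (pthread_create(&worker, NULL, worker_main, NULL))
                return 1;
        usleep(100000);                 /* let the worker run briefly */
        if (request_stop(worker) == 0)
                printf("worker ended cooperatively\n");
        return 0;
}

The price, as the new comment in the fifo struct below notes, is that an aborted shoveller may leak resources it has not yet freed.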

@@ -1 +1 @@
#define Cdrskin_timestamP "2016.11.18.132335"
#define Cdrskin_timestamP "2017.01.28.200155"

@@ -1,7 +1,7 @@
/* -*- indent-tabs-mode: t; tab-width: 8; c-basic-offset: 8; -*- */
/* Copyright (c) 2004 - 2006 Derek Foreman, Ben Jansens
Copyright (c) 2006 - 2014 Thomas Schmitt <scdbackup@gmx.net>
Copyright (c) 2006 - 2017 Thomas Schmitt <scdbackup@gmx.net>
Provided under GPL version 2 or later.
*/
@@ -123,6 +123,42 @@ struct w_list
static struct w_list *workers = NULL;
int burn_async_manage_lock(int mode)
{
int ret;
static pthread_mutex_t access_lock;
static int mutex_initialized = 0;
static int mutex_locked = 0;
if (mode == BURN_ASYNC_LOCK_INIT) {
if (mutex_initialized)
return 2;
ret = pthread_mutex_init(&access_lock, NULL);
if (ret != 0)
return 0;
mutex_initialized = 1;
return 1;
}
if (!mutex_initialized)
return 0;
if (mode == BURN_ASYNC_LOCK_OBTAIN) {
ret = pthread_mutex_lock(&access_lock);
if (ret != 0)
return 0;
mutex_locked = 1;
} else if (mode == BURN_ASYNC_LOCK_RELEASE) {
if (!mutex_locked)
return 2;
ret = pthread_mutex_unlock(&access_lock);
if (ret != 0)
return 0;
mutex_locked = 0;
}
return 1;
}
static struct w_list *find_worker(struct burn_drive *d)
{
struct w_list *a;
@@ -149,9 +185,8 @@ static void add_worker(int w_type, struct burn_drive *d,
a->drive = d;
a->u = *data;
/*
memcpy(&(a->u), data, sizeof(union w_list_data));
*/
burn_async_manage_lock(BURN_ASYNC_LOCK_INIT);
/* insert at front of the list */
a->next = workers;
@@ -162,6 +197,7 @@ static void add_worker(int w_type, struct burn_drive *d,
d->busy = BURN_DRIVE_SPAWNING;
#ifdef Libburn_create_detached_threadS
/* ts A71019 :
Trying to start the threads detached to get rid of the zombies
which do neither react on pthread_join() nor on pthread_detach().
@@ -169,12 +205,12 @@ static void add_worker(int w_type, struct burn_drive *d,
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
attr_pt= &attr;
/*
libdax_msgs_submit(libdax_messenger, -1, 0x00020158,
LIBDAX_MSGS_SEV_DEBUG, LIBDAX_MSGS_PRIO_LOW,
"add_worker(): Creating detached thread.", 0, 0);
*/
#endif
#endif /* Libburn_create_detached_threadS */
/* Worker specific locks are to be released early by the worker */
if (f == (WorkerFunc) burn_fifo_source_shoveller)
burn_async_manage_lock(BURN_ASYNC_LOCK_OBTAIN);
if (pthread_create(&a->thread, attr_pt, f, a)) {
free(a);
@@ -704,7 +740,6 @@ ex:;
static void *fifo_worker_func(struct w_list *w)
{
int old;
#define Libburn_protect_fifo_threaD 1
@@ -718,10 +753,6 @@ static void *fifo_worker_func(struct w_list *w)
pthread_sigmask(SIG_SETMASK, &sigset, &oldset);
#endif /* Libburn_protect_fifo_threaD */
pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old);
pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
/* Note: Only burn_fifo_abort() shall cancel the fifo thread */
burn_fifo_source_shoveller(w->u.fifo.source, w->u.fifo.flag);
remove_worker(pthread_self());
@@ -764,18 +795,19 @@ int burn_fifo_abort(struct burn_source_fifo *fs, int flag)
int ret;
pthread_t pt;
if (fs->thread_is_valid <= 0 || fs->thread_handle == NULL)
return(2);
burn_async_manage_lock(BURN_ASYNC_LOCK_OBTAIN);
#ifdef NIX
libdax_msgs_submit(libdax_messenger, -1, 0x00000002,
LIBDAX_MSGS_SEV_DEBUG, LIBDAX_MSGS_PRIO_HIGH,
"Aborting running burn_source_fifo thread", 0, 0);
#endif /* NIX */
if (fs->thread_is_valid <= 0 || fs->thread_handle == NULL) {
burn_async_manage_lock(BURN_ASYNC_LOCK_RELEASE);
return 2;
}
pt = *((pthread_t *) fs->thread_handle);
burn_async_manage_lock(BURN_ASYNC_LOCK_RELEASE);
fs->do_abort = 1;
ret = pthread_join(pt, NULL);
pt= *((pthread_t *) fs->thread_handle);
remove_worker(pt);
ret = pthread_cancel(pt);
return (ret == 0);
}
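
For orientation, the calling convention of the new burn_async_manage_lock(), as exercised by add_worker(), the shoveller and burn_fifo_abort() above: call BURN_ASYNC_LOCK_INIT once (it returns 2 if the mutex already exists, 0 on failure), then bracket any access to fs->thread_handle and fs->thread_is_valid with OBTAIN/RELEASE. A condensed sketch, assuming a libburn source file that includes the headers touched by this commit; the wrapper name peek_fifo_thread() is illustrative only:

/* Copy the fifo's thread handle under the new mutex.
   Mirrors the locking pattern of burn_fifo_abort() above. */
static int peek_fifo_thread(struct burn_source_fifo *fs, pthread_t *pt)
{
        if (burn_async_manage_lock(BURN_ASYNC_LOCK_INIT) == 0)
                return 0;               /* pthread_mutex_init() failed */
        if (burn_async_manage_lock(BURN_ASYNC_LOCK_OBTAIN) == 0)
                return 0;               /* pthread_mutex_lock() failed */
        if (fs->thread_is_valid <= 0 || fs->thread_handle == NULL) {
                burn_async_manage_lock(BURN_ASYNC_LOCK_RELEASE);
                return 2;               /* no running fifo thread */
        }
        *pt = *((pthread_t *) fs->thread_handle);
        burn_async_manage_lock(BURN_ASYNC_LOCK_RELEASE);
        return 1;
}

This is essentially the shape burn_fifo_abort() uses above before setting fs->do_abort and joining the thread.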

@@ -1,5 +1,10 @@
/* -*- indent-tabs-mode: t; tab-width: 8; c-basic-offset: 8; -*- */
/* Copyright (c) 2004 - 2006 Derek Foreman, Ben Jansens
Copyright (c) 2006 - 2017 Thomas Schmitt <scdbackup@gmx.net>
Provided under GPL version 2 or later.
*/
#ifndef BURN__ASYNC_H
#define BURN__ASYNC_H
@@ -14,5 +19,10 @@ int burn_fifo_start(struct burn_source *source, int flag);
/* To abort a running fifo thread before the fifo object gets deleted */
int burn_fifo_abort(struct burn_source_fifo *fs, int flag);
/* ts B70126 */
#define BURN_ASYNC_LOCK_RELEASE 0
#define BURN_ASYNC_LOCK_OBTAIN 1
#define BURN_ASYNC_LOCK_INIT 2
int burn_async_manage_lock(int mode);
#endif /* BURN__ASYNC_H */

@@ -1,7 +1,7 @@
/* -*- indent-tabs-mode: t; tab-width: 8; c-basic-offset: 8; -*- */
/* Copyright (c) 2004 - 2006 Derek Foreman, Ben Jansens
Copyright (c) 2006 - 2014 Thomas Schmitt <scdbackup@gmx.net>
Copyright (c) 2006 - 2017 Thomas Schmitt <scdbackup@gmx.net>
Provided under GPL version 2 or later.
*/
@@ -356,13 +356,20 @@ int burn_fifo_source_shoveller(struct burn_source *source, int flag)
fs->thread_pid = getpid();
fs->thread_is_valid = 1;
/* Lock was obtained by async.c:add_worker() */
burn_async_manage_lock(BURN_ASYNC_LOCK_RELEASE);
bufsize = fs->chunksize * fs->chunks;
while (!fs->end_of_consumption) {
if (fs->do_abort)
goto emergency_exit;
/* wait for enough buffer space available */
wpos = fs->buf_writepos;
counted = 0;
while (1) {
if (fs->do_abort)
goto emergency_exit;
rpos = fs->buf_readpos;
diff = rpos - wpos;
trans_end = 0;
@@ -405,6 +412,8 @@ int burn_fifo_source_shoveller(struct burn_source *source, int flag)
}
/* Obtain next chunk */
if (fs->do_abort)
goto emergency_exit;
if (fs->inp->read != NULL)
ret = fs->inp->read(fs->inp,
(unsigned char *) bufpt, fs->inp_read_size);
@@ -430,6 +439,8 @@ int burn_fifo_source_shoveller(struct burn_source *source, int flag)
fs->put_counter++;
/* activate read chunk */
if (fs->do_abort)
goto emergency_exit;
if (ret > fs->inp_read_size)
/* beware of ill custom burn_source */
ret = fs->inp_read_size;
@@ -463,8 +474,11 @@ int burn_fifo_source_shoveller(struct burn_source *source, int flag)
fs->end_of_input = 1;
/* wait for end of reading by consumer */;
while (fs->buf_readpos != fs->buf_writepos && !fs->end_of_consumption)
fifo_sleep(0);
while (fs->buf_readpos != fs->buf_writepos && !fs->end_of_consumption) {
if (fs->do_abort)
goto emergency_exit;
fifo_sleep(0);
}
/* destroy ring buffer */;
if (!fs->end_of_consumption)
@@ -481,8 +495,11 @@ int burn_fifo_source_shoveller(struct burn_source *source, int flag)
((size_t) fs->chunksize) * (size_t) fs->chunks, 0);
fs->buf = NULL;
emergency_exit:;
burn_async_manage_lock(BURN_ASYNC_LOCK_OBTAIN);
fs->thread_handle= NULL;
fs->thread_is_valid = 0;
burn_async_manage_lock(BURN_ASYNC_LOCK_RELEASE);
return (fs->input_error == 0);
}
@@ -524,6 +541,7 @@ struct burn_source *burn_fifo_source_new(struct burn_source *inp,
fs->thread_handle = NULL;
fs->thread_pid = 0;
fs->thread_is_valid = 0;
fs->do_abort = 0;
fs->inp = NULL; /* set later */
if (flag & 1)
fs->inp_read_size = 32 * 1024;

@@ -1,7 +1,7 @@
/* -*- indent-tabs-mode: t; tab-width: 8; c-basic-offset: 8; -*- */
/* Copyright (c) 2004 - 2006 Derek Foreman, Ben Jansens
Copyright (c) 2006 - 2010 Thomas Schmitt <scdbackup@gmx.net>
Copyright (c) 2006 - 2017 Thomas Schmitt <scdbackup@gmx.net>
Provided under GPL version 2 or later.
*/
@@ -27,8 +27,8 @@ struct burn_source_fifo {
/* The fifo stays inactive and unequipped with eventual resources
until its read() method is called for the first time.
Only then burn_fifo_start() gets called, allocates the complete
resources, starts a thread with burn_fifo_source_shuffler()
which shuffles data and finally destroys the resources.
resources, starts a thread with burn_fifo_source_shoveller()
which shovels data and finally destroys the resources.
This late start is to stay modest in case of multiple tracks
in one disc.
*/
@@ -38,6 +38,9 @@ struct burn_source_fifo {
int thread_pid;
int thread_is_valid;
/* The shoveller aborts if this is 1. Resource leaks are possible. */
volatile int do_abort;
/* the burn_source for which this fifo is acting as proxy */
struct burn_source *inp;
int inp_read_size;