New API call burn_nominal_slowdown()

Thomas Schmitt 2020-08-26 16:04:06 +02:00
Parent 0e1f5dc3da
Commit 8b9a8cfb4b
4 changed files with 116 additions and 27 deletions

View File

@@ -1 +1 @@
#define Cdrskin_timestamP "2020.08.24.132739"
#define Cdrskin_timestamP "2020.08.26.140343"

View File

@@ -29,6 +29,9 @@ processing tracks of more than 2 GB size.
*/
#include <sys/types.h>
/* For struct timeval */
#include <sys/time.h>
#ifndef DOXYGEN
#if defined(__cplusplus)
@@ -3013,6 +3016,9 @@ void burn_drive_set_speed(struct burn_drive *d, int read, int write);
MMC specifies that with the Exact bit the desired speed settings shall
either be obeyed by the drive exactly, or that the drive shall indicate
failure and not accept the settings.
But many drives report no error and nevertheless adjust their read speed
only coarsely, or ignore the setting after a few MB of fast read attempts.
The call parameters have the same meaning as with burn_drive_set_speed().
@param d The drive to set speed for. It must be a role 1 drive.
@param read Read speed in k/s (0 is max, -1 is min).
@@ -3023,6 +3029,44 @@ void burn_drive_set_speed(struct burn_drive *d, int read, int write);
int burn_drive_set_speed_exact(struct burn_drive *d, int read, int write);
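
A minimal usage sketch (not part of this commit), assuming the usual
<libburn/libburn.h> install path and that a return value of 0 means the
drive rejected the setting; the helper name request_exact_read_speed() is
made up for illustration:

#include <stdio.h>
#include <libburn/libburn.h>

/* Ask an already grabbed role 1 drive for an exact read speed in k/s.
   Write speed is left at maximum (0). */
static void request_exact_read_speed(struct burn_drive *d, int read_kbs)
{
        /* Assumption: 0 = failure, as with most libburn setters. */
        if (burn_drive_set_speed_exact(d, read_kbs, 0) <= 0)
                fprintf(stderr, "Drive rejected exact read speed %d k/s\n",
                        read_kbs);
        /* Even without an error reply, the text above warns that the drive
           may silently ignore the setting, so software pacing via
           burn_nominal_slowdown() (below) remains advisable for a reliably
           low read speed. */
}
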
/* ts C00822 */
/** Waits until enough time has elapsed since the given previous time to
transmit the given byte count at the given speed in KB/second (KB = 1000
bytes).
This call may be used between random access read operations like
burn_read_data() in order to enforce a slower speed than the drive would
use if it got read requests as fast as it delivers data.
The parameter us_corr carries the time deviation in microseconds from one
call to the next. Such deviations may happen because of small
inaccuracies of the sleeper function and because of temporary delays
in the data supply which would have required sleeping for a negative
time span. The next call reduces or enlarges its own sleeping period
by this value.
@param kb_per_second the desired speed in 1000 bytes per second.
Supplied by the caller.
@param max_corr the maximum backlog in microseconds which shall
be compensated by the next call. Supplied by the
caller. Not more than 1 billion = 1000 seconds.
@param prev_time time keeper updated by burn_nominal_slowdown().
The caller provides the memory and otherwise should
carry it unchanged from call to call.
@param us_corr updated by burn_nominal_slowdown(). See above.
The caller provides the memory and otherwise should
carry it unchanged from call to call.
@param b_since_prev byte count since the previous call. This number
has to be counted and supplied by the caller.
@param flag Bitfield for control purposes:
bit0= initialize *prev_time and *us_corr,
ignore other parameters, do not wait
@return 2=no wait because kb_per_second is not usable , 1=success , 0=failure
@since 1.5.4
*/
int burn_nominal_slowdown(int kb_per_second, int max_corr,
struct timeval *prev_time,
int *us_corr, off_t b_since_prev, int flag);
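
A hedged usage sketch (not part of this commit): it pairs
burn_nominal_slowdown() with burn_read_data() as suggested above. The helper
name paced_read(), the 300 KB/s target and the 64 KB buffer are illustrative
choices; the 250000 microsecond backlog limit mirrors the max_corr value used
in burn_stdio_write_track() further down. Start addresses are assumed to meet
whatever alignment burn_read_data() requires.

#include <sys/types.h>
#include <sys/time.h>
#include <libburn/libburn.h>

/* Read [start, start + size) from an already grabbed drive at a nominal
   300 KB/s (KB = 1000 bytes). Returns 1 on success, 0 on read failure. */
static int paced_read(struct burn_drive *drive, off_t start, off_t size)
{
        char buf[64 * 1024];
        struct timeval prev_time;
        int us_corr, ret;
        off_t pos, todo, got;

        /* flag bit0: initialize *prev_time and *us_corr, do not wait */
        burn_nominal_slowdown(300, 250000, &prev_time, &us_corr, (off_t) 0, 1);

        for (pos = start; pos < start + size; pos += got) {
                todo = start + size - pos;
                if (todo > (off_t) sizeof(buf))
                        todo = (off_t) sizeof(buf);
                got = 0;
                ret = burn_read_data(drive, pos, buf, todo, &got, 0);
                if (ret <= 0 || got <= 0)
                        return 0;

                /* Sleep until the bytes obtained since the previous call
                   correspond to no more than 300 KB/s. */
                burn_nominal_slowdown(300, 250000, &prev_time, &us_corr,
                                      got, 0);
        }
        return 1;
}
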
/* ts A70711 */
/** Controls the behavior with writing when the drive buffer is suspected to
be full. To check and wait for enough free buffer space before writing
@@ -4067,10 +4111,13 @@ int burn_random_access_write(struct burn_drive *d, off_t byte_address,
/* ts A81215 */
/** Inquire the maximum amount of readable data.
It is supposed that all LBAs in the range from 0 to capacity - 1
can be read via burn_read_data() although some of them may never have been
recorded. If tracks are recognizable then it is better to only read
LBAs which are part of some track.
On DVD and BD it is supposed that all LBAs in the range from 0 to
capacity - 1 can be read via burn_read_data() although some of them may
never have been recorded. With multi-session CD, unreadable TAO Run-out
blocks have to be expected.
If tracks are recognizable then it is better to only read LBAs which
are part of some track and on CD to be cautious about the last two blocks
of each track which might be TAO Run-out blocks.
If the drive is actually a large file or block device, then the capacity
is curbed to a maximum of 0x7ffffff0 blocks = 4 TB - 32 KB.
@param d The drive from which to read
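
A small sketch of the cautious reading advice above (not from this commit).
It presumes the function documented here is burn_get_read_capacity() of
libburn.h, that the capacity is counted in blocks of 2048 bytes, and, for
brevity, it only clips a byte range against that capacity instead of also
inspecting track boundaries; the helper name clip_to_capacity() is made up.

#include <sys/types.h>
#include <libburn/libburn.h>

/* Reduce a requested byte range so that it ends before the first LBA
   which is reported as not readable. Returns the readable byte count. */
static off_t clip_to_capacity(struct burn_drive *d, off_t byte_address,
                              off_t wanted_bytes)
{
        off_t capacity, end;

        if (burn_get_read_capacity(d, &capacity, 0) <= 0)
                return 0;                /* capacity unknown: read nothing */
        end = capacity * (off_t) 2048;   /* first byte past the readable range */
        if (byte_address >= end)
                return 0;
        if (byte_address + wanted_bytes > end)
                wanted_bytes = end - byte_address;
        return wanted_bytes;
}
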

View File

@@ -108,6 +108,7 @@ burn_msf_to_sectors;
burn_msgs_obtain;
burn_msgs_set_severities;
burn_msgs_submit;
burn_nominal_slowdown;
burn_obtain_profile_name;
burn_offst_source_new;
burn_os_alloc_buffer;

View File

@@ -2765,30 +2765,57 @@ void burn_stdio_mmc_sync_cache(struct burn_drive *d)
}
/* ts A70912 */
/* Enforces eventual nominal write speed.
@param flag bit0= initialize *prev_time */
int burn_stdio_slowdown(struct burn_drive *d, struct timeval *prev_time,
int amount, int flag)
/* ts C00824 : API */
/* Enforces nominal write speed */
int burn_nominal_slowdown(int kb_per_second, int max_corr,
struct timeval *prev_time,
int *us_corr, off_t b_since_prev, int flag)
{
struct timeval tnow;
double to_wait;
double to_wait, goal, corr;
int abs_max_corr;
if (flag & 1) {
gettimeofday(prev_time, NULL);
*us_corr = 0;
return 1;
}
if(d->nominal_write_speed <= 0)
if (kb_per_second <= 0)
return 2;
if (max_corr < -1.0e9 || max_corr > 1.0e9)
abs_max_corr = 1000000000;
else
abs_max_corr = abs(max_corr);
gettimeofday(&tnow, NULL);
to_wait = ( ((double) amount) / (double) d->nominal_write_speed ) -
(double) ( tnow.tv_sec - prev_time->tv_sec ) -
(double) ( tnow.tv_usec - prev_time->tv_usec ) / 1.0e6
- 0.001; /* best would be 1 / kernel granularity HZ */
if (to_wait >= 0.0001) {
usleep((int) (to_wait * 1000000.0));
goal = ((double) b_since_prev) / 1000.0 / ((double) kb_per_second) +
((double) prev_time->tv_sec) +
((double) prev_time->tv_usec) / 1.0e6 +
((double) *us_corr) / 1.0e6 ;
to_wait = goal - ((double) tnow.tv_sec) -
((double) tnow.tv_usec) / 1.0e6;
/* usleep might be restricted to 999999 microseconds */
while (to_wait > 0.0) {
if (to_wait >= 0.5) {
usleep(500000);
to_wait -= 0.5;
} else if (to_wait >= 0.00001) {
usleep((int) (to_wait * 1000000.0));
to_wait = 0.0;
} else {
to_wait = 0.0;
}
}
gettimeofday(prev_time, NULL);
corr = (goal - ((double) prev_time->tv_sec) -
((double) prev_time->tv_usec) / 1.0e6) * 1.0e6;
if (corr > abs_max_corr)
*us_corr = abs_max_corr;
else if (corr < -abs_max_corr)
*us_corr = -abs_max_corr;
else
*us_corr = corr;
return 1;
}
@@ -2801,7 +2828,7 @@ int burn_stdio_write_track(struct burn_write_opts *o, struct burn_session *s,
struct burn_track *t = s->track[tnum];
struct burn_drive *d = o->drive;
char *buf = NULL;
int i, prev_sync_sector = 0;
int i, prev_sync_sector = 0, us_corr = 0, max_corr = 250000;
struct buffer *out = d->buffer;
struct timeval prev_time;
@@ -2821,7 +2848,10 @@ int burn_stdio_write_track(struct burn_write_opts *o, struct burn_session *s,
d->do_simulate = o->simulate;
d->sync_cache = burn_stdio_mmc_sync_cache;
burn_stdio_slowdown(d, &prev_time, 0, 1); /* initialize */
/* initialize */
burn_nominal_slowdown(d->nominal_write_speed, max_corr,
&prev_time, &us_corr, (off_t) 0, 1);
for (i = 0; open_ended || i < sectors; i++) {
/* transact a (CD sized) sector */
if (!sector_data(o, t, 0))
@@ -2834,14 +2864,25 @@ int burn_stdio_write_track(struct burn_write_opts *o, struct burn_session *s,
}
d->progress.sector++;
/* Flush to disk from time to time */
if (d->progress.sector - prev_sync_sector >=
o->stdio_fsync_size && o->stdio_fsync_size > 0) {
prev_sync_sector = d->progress.sector;
if (!o->simulate)
burn_stdio_sync_cache(d->stdio_fd, d, 1);
if (o->stdio_fsync_size > 0) {
if (d->progress.sector - prev_sync_sector >=
o->stdio_fsync_size) {
if (!o->simulate)
burn_stdio_sync_cache(d->stdio_fd, d,
1);
burn_nominal_slowdown(
d->nominal_write_speed, max_corr,
&prev_time, &us_corr,
(off_t) (d->progress.sector -
prev_sync_sector) *
(off_t) 2048,
0);
prev_sync_sector = d->progress.sector;
}
} else if ((d->progress.sector % 512) == 0) {
burn_nominal_slowdown(d->nominal_write_speed, max_corr,
&prev_time, &us_corr, (off_t) (512 * 2048), 0);
}
if ((d->progress.sector % 512) == 0)
burn_stdio_slowdown(d, &prev_time, 512 * 2, 0);
}
/* Pad up buffer to next full o->obs (usually 32 kB) */