*** /dev/null Sat Nov 23 00:57:17 2024
--- - Sat Nov 23 00:57:18 2024
***************
*** 0 ****
--- 1,1317 ----
+ /*
+ ** OSSP al -- Assembly Line
+ ** Copyright (c) 2002 The OSSP Project <http://www.ossp.org/>
+ ** Copyright (c) 2002 Cable & Wireless Deutschland <http://www.cw.com/de/>
+ ** Copyright (c) 2002 Ralf S. Engelschall <rse@engelschall.com>
+ ** Copyright (c) 2002 Michael van Elst <mlelstv@dev.de.cw.net>
+ **
+ ** This file is part of OSSP al, an abstract datatype of a data buffer
+ ** that can assemble, move and truncate data but avoids actual copying.
+ **
+ ** Permission to use, copy, modify, and distribute this software for
+ ** any purpose with or without fee is hereby granted, provided that
+ ** the above copyright notice and this permission notice appear in all
+ ** copies.
+ **
+ ** THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
+ ** WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ ** MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ ** IN NO EVENT SHALL THE AUTHORS AND COPYRIGHT HOLDERS AND THEIR
+ ** CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ ** LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ ** USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ ** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ ** OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ ** OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ ** SUCH DAMAGE.
+ **
+ ** al.c: assembly line library implementation
+ */
+
+ #include <stddef.h>
+ #include <stdlib.h>
+ #include <string.h>
+ #ifdef TEST
+ #include <stdio.h>
+ #endif
+
+ #include "list.h"
+
+ #include "al.h"
+
+ /* unique library identifier */
+ const char al_id[] = "OSSP al";
+
+ /* support for OSSP ex based exception throwing */
+ #ifdef WITH_EX
+ #include "ex.h"
+ #define AL_RC(rv) \
+ ( (rv) != AL_OK && (ex_catching && !ex_shielding) \
+ ? (ex_throw(al_id, NULL, (rv)), (rv)) : (rv) )
+ #else
+ #define AL_RC(rv) (rv)
+ #endif /* WITH_EX */
+
+ struct al_buffer_st;
+ typedef struct al_buffer_st al_buffer_t;
+
+ typedef struct {
+ void *(*malloc)(size_t); /* malloc(3) style function (for al_chunk_t) */
+ void (*free)(void *); /* free(3) style function (for al_chunk_t) */
+ void *(*balloc)(size_t); /* malloc(3) style function (for al_buffer_t) */
+ void (*bfree)(void *); /* free(3) style function (for al_buffer_t) */
+ size_t new_buffersize; /* default size for memory underlying al_buffer_t */
+ int max_freechunks; /* maximum number of cached al_chunk_t objects */
+ } al_memops_t;
+
+ struct al_st {
+ LIST(al_chunk_t) chunks; /* list header for al_chunk_t objects */
+ size_t bytes; /* total cached number of bytes in chunk */
+ al_memops_t m; /* assembly line memory operations (see above) */
+ };
+
+ struct al_chunk_st {
+ NODE(al_chunk_t) chunks; /* list node for al_chunk_t object chaining */
+ al_buffer_t *buf; /* (non-exclusively) referenced buffer object */
+ size_t begin; /* offset into buf->mem where data starts */
+ size_t end; /* offset into buf->mem where data ends */
+ al_label_t label; /* tag data with a label; chunks with distinct labels are not coalesced */
+ };
+
+ struct al_buffer_st {
+ char *mem; /* reference to underlying chunk of data */
+ size_t size; /* size of underlying chunk of data */
+ int usecount; /* reference count (from al_chunk_t) */
+ void (*freemem)(char *p, size_t n, void *u); /* callback function to reclaim memory when it is no longer referenced */
+ void *userdata; /* arbitrary pointer to pass context data to the callback function */
+ };
+
+ struct al_tx_st {
+ al_td_t dir; /* traversal direction */
+ al_chunk_t *cur; /* current chunk during traversal steps */
+ size_t skip; /* number of bytes to skip for traversal */
+ size_t togo; /* number of bytes left for traversal */
+ al_label_t label; /* label filter or NULL */
+ al_chunk_t view; /* synthetic chunk for returning during traversal steps */
+ };
+
+ /* number of bytes described by a chunk */
+ #define AL_CHUNK_LEN(alc) \
+ ((alc)->end - (alc)->begin)
+
+ /* memory pointer into a chunk, offset must be less than length */
+ #define AL_CHUNK_PTR(alc, off) \
+ ((alc)->buf->mem + (alc)->begin + (off))
+
+ /* number of bytes of a span that are stored in a chunk */
+ #define AL_CHUNK_SPAN(alc, off, n) \
+ ((n) > (AL_CHUNK_LEN(alc) - (off)) ? \
+ (AL_CHUNK_LEN(alc) - (off)) : (n))
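+
+ /*
+ * illustrative example (not part of the API): for a chunk holding 10
+ * bytes, AL_CHUNK_SPAN(alc, 4, 20) clips the requested 20 bytes to the
+ * 6 bytes remaining after the offset, while AL_CHUNK_SPAN(alc, 4, 3)
+ * returns 3 unchanged.
+ */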
+
+ /* return chunk label */
+ #define AL_CHUNK_LABEL(alc) \
+ ((alc)->label)
+
+ /* check whether labels match */
+ #define AL_SAME_LABEL(alc,label) \
+ ((label) == NULL || AL_CHUNK_LABEL(alc) == (label))
+
+ /*
+ * number of bytes by which a chunk can grow at its end
+ * we must not grow a chunk in a shared buffer since
+ * we do not track other chunks sharing the buffer
+ */
+ #define AL_CHUNK_RESERVE(alc,label) \
+ ( (alc) != NULL \
+ ? ( (alc)->buf->usecount > 1 || !AL_SAME_LABEL(alc,label) \
+ ? 0 \
+ : (alc)->buf->size - (alc)->end) \
+ : 0)
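+
+ /*
+ * illustrative example (not part of the API): for a 4096 byte buffer
+ * whose only chunk ends at offset 1000, AL_CHUNK_RESERVE yields 3096;
+ * as soon as a second chunk references the same buffer (usecount > 1)
+ * or the label differs, it yields 0 and a fresh buffer is allocated
+ * instead.
+ */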
+
+ /* grow chunk at its end */
+ #define AL_RESIZE(al, alc, n) \
+ do { (alc)->end += (n); (al)->bytes += (n); } while (0)
+
+ /*
+ * number of bytes by which a chunk can grow at its beginning
+ * we must not grow a chunk in a shared buffer since
+ * we do not track other chunks sharing the buffer
+ *
+ * empty chunks can be realigned to the end of the buffer
+ */
+ #define AL_CHUNK_PRESERVE(alc,label) \
+ ( (alc) != NULL \
+ ? ( (alc)->buf->usecount > 1 || !AL_SAME_LABEL(alc,label) \
+ ? 0 \
+ : (alc)->end <= (alc)->begin ? \
+ (alc)->buf->size \
+ : (alc)->begin) \
+ : 0)
+
+ /*
+ * grow chunk at its beginning
+ *
+ * empty chunks can be realigned to the end of the buffer
+ */
+ #define AL_PRESIZE(al, alc, n) \
+ do { \
+ if ((alc)->end <= (alc)->begin) \
+ (alc)->end = (alc)->begin = (alc)->buf->size; \
+ (alc)->begin -= (n); (al)->bytes += (n); \
+ } while (0)
+
+ /*
+ * callback to release buffer memory allocated by the library
+ */
+ static
+ void freemem(char *p, size_t n, void *u)
+ {
+ void (*f)(void *) = (void (*)(void *))u;
+ f(p);
+ }
+
+ /*
+ * allocate backing store and its control structure from heap
+ *
+ * can be freed when usecount drops to zero
+ */
+ static al_rc_t
+ new_buffer(al_t *al, al_buffer_t **bufp)
+ {
+ size_t n = al->m.new_buffersize;
+ al_buffer_t *buf;
+
+ if ((buf = (al_buffer_t *)(al->m.malloc)(sizeof(al_buffer_t))) == NULL)
+ return AL_ERR_MEM;
+
+ if ((buf->mem = (al->m.balloc)(n)) == NULL) {
+ (al->m.free)(buf);
+ return AL_ERR_MEM;
+ }
+
+ buf->freemem = freemem;
+ buf->userdata = (void *)al->m.bfree;
+ buf->size = n;
+ buf->usecount = 0;
+
+ *bufp = buf;
+ return AL_OK;
+ }
+
+ static al_rc_t
+ dispose_buffer(al_t *al, al_buffer_t *buf)
+ {
+ /* must not happen */
+ if (buf->usecount != 0)
+ return AL_ERR_INT;
+
+ if (buf->freemem)
+ (buf->freemem)(buf->mem, buf->size, buf->userdata);
+
+ (al->m.free)(buf);
+ return AL_OK;
+ }
+
+ /*
+ * allocate only the control structure for the backing store from the
+ * heap and attach it to existing memory
+ * only the control structure will be freed later
+ */
+ static
+ al_rc_t make_buffer(al_t *al, char *p, size_t n, void (*freemem_cb)(char *, size_t, void *), void *u, al_buffer_t **bufp)
+ {
+ al_buffer_t *buf;
+
+ if ((buf = (al_buffer_t *)(al->m.malloc)(sizeof(al_buffer_t))) == NULL)
+ return AL_ERR_MEM;
+
+ buf->mem = p;
+
+ buf->freemem = freemem_cb;
+ buf->userdata = u;
+ buf->size = n;
+ buf->usecount = 0;
+
+ *bufp = buf;
+ return AL_OK;
+ }
+
+ /*
+ * allocate chunks from heap and attach to backing store
+ * keep some freed chunks in a freelist to avoid expensive malloc()
+ * and excessive fragmentation
+ * maintain usage count of backing store
+ * free backing store when no longer in use, this also includes
+ * the case where new_chunk fails and the buffer has no other
+ * users
+ */
+ static struct {
+ LIST(al_chunk_t) chunks;
+ } alc_freelist;
+ static int alc_freecount = 0;
+
+ static al_rc_t
+ new_chunk(al_t *al, al_buffer_t *buf, al_label_t label, al_chunk_t **alcp)
+ {
+ al_chunk_t *alc;
+
+ if (ISEMPTY(&alc_freelist, chunks)) {
+ if ((alc = (al_chunk_t *)(al->m.malloc)(sizeof(al_chunk_t))) == NULL) {
+ if (buf->usecount == 0)
+ dispose_buffer(al,buf);
+ return AL_ERR_MEM;
+ }
+ } else {
+ /* thl: FIXME i would suggest using REMTAIL() here because dispose_chunk
+ * puts the most recently disposed chunks at the end of the list and
+ * those are the most likely to still be close to the CPU in terms of
+ * internal cache, external cache, RAM and VM
+ */
+ REMHEAD(&alc_freelist, chunks, alc);
+ --alc_freecount;
+ }
+
+ NODEINIT(alc, chunks);
+ alc->buf = buf;
+ alc->begin = 0;
+ alc->end = 0;
+ alc->label = label;
+
+ buf->usecount++;
+
+ *alcp = alc;
+ return AL_OK;
+ }
+
+ static al_rc_t
+ split_chunk(al_t *al, al_chunk_t *orig, size_t off, al_chunk_t **alcp)
+ {
+ al_rc_t rc;
+ al_chunk_t *alc;
+ size_t len = AL_CHUNK_LEN(orig);
+
+ if (off > len)
+ return AL_ERR_ARG;
+
+ rc = new_chunk(al, orig->buf, orig->label, &alc);
+ if (rc != AL_OK)
+ return rc;
+
+ alc->begin = orig->begin;
+ alc->end = orig->begin+off;
+ orig->begin = alc->end;
+
+ *alcp = alc;
+ return AL_OK;
+ }
+
+ static void
+ dispose_chunk(al_t *al, al_chunk_t *alc)
+ {
+ alc->buf->usecount--;
+ if (alc->buf->usecount == 0)
+ dispose_buffer(al,alc->buf);
+ alc->buf = NULL;
+ alc->label = NULL;
+
+ /* stop freelist from growing infinitely */
+ if (alc_freecount >= al->m.max_freechunks)
+ (al->m.free)(alc);
+ else {
+ ADDTAIL(&alc_freelist, chunks, alc);
+ alc_freecount++;
+ }
+ }
+
+ /*
+ * find chunk that represents a particular offset
+ * a reference to the chunk is stored in *alcp
+ * the relative offset into the chunk is stored in *skipp
+ * return AL_OK and *alcp == NULL if positioned exactly at the end of the list
+ * return AL_ERR_EOF when no such chunk can be found
+ */
+ static al_rc_t
+ al_seek(al_t *al, size_t off, al_chunk_t **alcp, size_t *skipp)
+ {
+ al_chunk_t *cur;
+ size_t pos, end;
+ size_t chunksize;
+
+ if ((al->bytes / 2) >= off) { /* FIXME poor man's heuristic */
+ /* forward search */
+ pos = 0;
+ FOREACH(al,chunks,cur) {
+ chunksize = AL_CHUNK_LEN(cur);
+ end = pos+chunksize;
+ if (pos <= off && off < end) {
+ *alcp = cur;
+ *skipp = off - pos;
+ return AL_OK;
+ }
+ if (end > off)
+ break;
+ pos = end;
+ }
+ /* seek to EOF position is ok */
+ if (pos == off) {
+ *alcp = NULL;
+ *skipp = 0;
+ return AL_OK;
+ }
+ } else {
+ /* reverse search */
+ pos = al->bytes;
+ /* seek to EOF position is ok */
+ if (pos == off) {
+ *alcp = NULL;
+ *skipp = 0;
+ return AL_OK;
+ }
+ FOREACHR(al,chunks,cur) {
+ chunksize = AL_CHUNK_LEN(cur);
+ end = pos;
+ pos -= chunksize;
+ if (pos <= off && off < end) {
+ *alcp = cur;
+ *skipp = off - pos;
+ return AL_OK;
+ }
+ if (pos < off)
+ break;
+ }
+ }
+
+ return AL_ERR_EOF;
+ }
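+
+ /*
+ * illustrative example (not part of the API): with al->bytes == 100,
+ * al_seek(al, 30, ...) satisfies (100 / 2) >= 30 and walks the chunk
+ * list from the head, whereas al_seek(al, 80, ...) walks it from the
+ * tail; in both cases *alcp receives the chunk covering the offset and
+ * *skipp the remaining offset into that chunk.
+ */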
+
+ #ifdef TEST
+ static void
+ dump(al_t *al)
+ {
+ al_chunk_t *cur;
+ size_t total;
+
+ printf("AL: %p\n", al);
+ total = 0;
+ FOREACH(al,chunks,cur) {
+ printf(" C: %p [%p] (%lu @ %p + %lu < %lu (use=%d))\n",
+ (void *)cur,
+ (void *)cur->label,
+ (unsigned long)cur->buf->size, (void *)cur->buf->mem,
+ (unsigned long)cur->begin, (unsigned long)cur->end,
+ cur->buf->usecount);
+ total += AL_CHUNK_LEN(cur);
+ }
+ printf("size = %lu == %lu\n", (unsigned long)al->bytes, (unsigned long)total);
+ printf("--\n\n");
+ }
+ #endif
+
+ /****************************************************************************/
+
+ /*
+ * allocate an empty assembly line (al_create) and dispose of all its
+ * chunks and allocated backing store again (al_destroy)
+ */
+ al_rc_t
+ al_create(al_t **alp)
+ {
+ al_t *al;
+
+ /* argument sanity check(s) */
+ if (alp == NULL)
+ return AL_RC(AL_ERR_ARG);
+
+ /* allocate and initialize new assembly line object */
+ /* XXX - what malloc ? */
+ if ((al = (al_t *)malloc(sizeof(al_t))) == NULL)
+ return AL_RC(AL_ERR_MEM);
+
+ LISTINIT(al,chunks);
+ al->bytes = 0;
+
+ /* memory policy */
+ al->m.malloc = malloc; /* structure allocation */
+ al->m.free = free;
+ al->m.balloc = malloc; /* buffer allocation */
+ al->m.bfree = free;
+ #ifdef TEST
+ al->m.new_buffersize = 42;
+ #else
+ al->m.new_buffersize = 4096;
+ #endif
+ al->m.max_freechunks = 500;
+
+ /* pass object to caller */
+ *alp = al;
+
+ return AL_OK;
+ }
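+
+ /*
+ * usage sketch (illustrative only, assuming the declarations from al.h
+ * and a NULL label, i.e. unlabeled data):
+ *
+ *     al_t *al;
+ *     al_rc_t rc;
+ *
+ *     if ((rc = al_create(&al)) != AL_OK)
+ *         fprintf(stderr, "al: %s\n", al_error(rc));
+ *     else {
+ *         al_append_bytes(al, "hello, world", 12, NULL);
+ *         ...
+ *         al_destroy(al);
+ *     }
+ */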
+
+ al_rc_t
+ al_destroy(al_t *al)
+ {
+ al_chunk_t *cur, *pred;
+
+ /* argument sanity check(s) */
+ if (al == NULL)
+ return AL_RC(AL_ERR_ARG);
+
+ /* free chunks and backing store */
+ FOREACHD(al,chunks,cur,pred) {
+ REMOVE(al,chunks,cur);
+ dispose_chunk(al,cur);
+ }
+
+ /* free object itself */
+ /* XXX - which free() ? */
+ free(al);
+
+ return AL_OK;
+ }
+
+ /*
+ * copy bytes into buffer, FIFO
+ *
+ * stops copy operation when a new buffer cannot be created
+ * but leaves data structure consistent
+ */
+ al_rc_t
+ al_append_bytes(al_t *al, const char *src, size_t n, al_label_t label)
+ {
+ al_rc_t rc;
+ al_chunk_t *cur;
+ al_buffer_t *buf;
+ size_t res, step;
+ char *dst;
+
+ /* argument sanity check(s) */
+ if (al == NULL || src == NULL)
+ return AL_RC(AL_ERR_ARG);
+
+ cur = TAIL(al,chunks);
+ res = AL_CHUNK_RESERVE(cur,label);
+
+ while (n > 0) {
+ if (res == 0) {
+ rc = new_buffer(al, &buf);
+ if (rc != AL_OK)
+ return AL_RC(rc);
+ rc = new_chunk(al,buf,label,&cur);
+ if (rc != AL_OK)
+ return AL_RC(rc);
+ res = AL_CHUNK_RESERVE(cur,label);
+ ADDTAIL(al, chunks, cur);
+ }
+ step = n;
+ if (step > res)
+ step = res;
+
+ dst = AL_CHUNK_PTR(cur, AL_CHUNK_LEN(cur));
+ memcpy(dst, src, step);
+
+ src += step;
+ AL_RESIZE(al, cur, step);
+ n -= step;
+ res = AL_CHUNK_RESERVE(cur,label);
+ }
+
+ return AL_OK;
+ }
+
+ /*
+ * copy bytes into buffer, LIFO
+ *
+ * stops copy operation when a new buffer cannot be created
+ * but leaves data structure consistent
+ */
+ al_rc_t
+ al_prepend_bytes(al_t *al, const char *src, size_t n, al_label_t label)
+ {
+ al_rc_t rc;
+ al_chunk_t *cur;
+ al_buffer_t *buf;
+ size_t res, step;
+ char *dst;
+
+ /* argument sanity check(s) */
+ if (al == NULL || src == NULL)
+ return AL_RC(AL_ERR_ARG);
+
+ cur = HEAD(al,chunks);
+ res = AL_CHUNK_PRESERVE(cur,label);
+
+ src += n;
+
+ while (n > 0) {
+ if (res == 0) {
+ rc = new_buffer(al, &buf);
+ if (rc != AL_OK)
+ return AL_RC(rc);
+ rc = new_chunk(al,buf,label,&cur);
+ if (rc != AL_OK)
+ return AL_RC(rc);
+ res = AL_CHUNK_PRESERVE(cur,label);
+ ADDHEAD(al, chunks, cur);
+ }
+ step = n;
+ if (step > res)
+ step = res;
+
+ src -= step;
+ AL_PRESIZE(al, cur, step);
+ n -= step;
+ res = AL_CHUNK_PRESERVE(cur,label);
+
+ dst = AL_CHUNK_PTR(cur, 0);
+ memcpy(dst, src, step);
+ }
+
+ return AL_OK;
+ }
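+
+ /*
+ * usage sketch (illustrative only): prepend a 4 byte header in front
+ * of data that was already appended; the prepended bytes end up before
+ * the existing content.
+ *
+ *     al_append_bytes(al, "payload", 7, NULL);
+ *     al_prepend_bytes(al, "HDR ", 4, NULL);
+ */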
+
+ /*
+ * append a caller supplied buffer
+ *
+ * buffer must exist until list is destroyed
+ * XXX - buffer can have multiple refs caused by splice operations
+ * XXX - some list operations modify the buffer
+ *
+ */
+ al_rc_t al_attach_buffer(al_t *al, char *p, size_t n, al_label_t label,
+ void (*freemem_cb)(char *, size_t, void *), void *u)
+ {
+ al_rc_t rc;
+ al_buffer_t *buf;
+ al_chunk_t *cur;
+
+ /* argument sanity check(s) */
+ if (al == NULL || p == NULL || n <= 0)
+ return AL_RC(AL_ERR_ARG);
+
+ rc = make_buffer(al, p, n, freemem_cb, u, &buf);
+ if (rc != AL_OK)
+ return AL_RC(rc);
+ rc = new_chunk(al,buf,label,&cur);
+ if (rc != AL_OK)
+ return AL_RC(rc);
+ ADDTAIL(al, chunks, cur);
+
+ /* validate data in buffer */
+ AL_RESIZE(al, cur, n);
+
+ return AL_OK;
+ }
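+
+ /*
+ * usage sketch (illustrative only): attach a heap buffer that the
+ * assembly line releases through the callback once the last chunk
+ * referencing it is disposed; release_heap and the 4096 byte size are
+ * assumptions of this example, not part of the library.
+ *
+ *     static void release_heap(char *p, size_t n, void *u)
+ *     {
+ *         free(p);
+ *     }
+ *
+ *     char *p = malloc(4096);
+ *     ... fill p with 4096 bytes of data ...
+ *     al_attach_buffer(al, p, 4096, NULL, release_heap, NULL);
+ */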
+
+ /*
+ * this is the central function for manipulating the assembly line
+ *
+ * cut arbitrary spans from the list into a destination assembly line (or drop them)
+ * insert (destructively move) content from another list into the cut
+ *
+ * this is analogous to Perl's splice function, except that
+ * -> the output is not stored in the target assembly line but is appended to it
+ * -> the replacement data is moved, not copied
+ *
+ */
+ al_rc_t
+ al_splice(al_t *al, size_t off, size_t n, al_t *nal, al_t *tal)
+ {
+ al_rc_t rc;
+ al_chunk_t *cur, *start, *end, *next, *ins, *splitbuf;
+ size_t pos, skip, len, step;
+ int doinsert;
+
+ /* argument sanity check(s) */
+ if (al == NULL)
+ return AL_RC(AL_ERR_ARG);
+
+ /* optimization: treat an empty list as no insertion at all */
+ doinsert = (nal != NULL) && !ISEMPTY(nal,chunks);
+
+ /*
+ * seek to beginning, return EOF when seek position does not exist
+ * EOD must be a valid seek position so that we can append data
+ */
+ rc = al_seek(al, off, &cur, &skip);
+ if (rc != AL_OK)
+ return AL_RC(rc);
+
+ /*
+ * remember insertion point
+ *
+ * caveat: if the first chunk is removed from input the insertion
+ * point shifts to the next chunk (or EOD -> NULL pointer)
+ */
+ ins = cur;
+
+ /*
+ * the insertion point is either behind the list or
+ * within the current chunk
+ *
+ * if it is behind the list:
+ * -> insert operation switches to append mode
+ *
+ * if the insertion point is at the beginning of the current chunk:
+ * -> insertion point moves later if the chunk is removed
+ *
+ * if the insertion point is in the middle of the current chunk:
+ * -> chunk is split into two so that the insertion
+ * point is at the beginning of the second part
+ *
+ * insertion point cannot be at EOD of the chunk
+ *
+ * splitting at this point preserves all data in case the
+ * allocation of the split buffer fails
+ */
+ if (doinsert) {
+ if (ins != NULL && skip > 0) {
+ rc = split_chunk(al, ins, skip, &splitbuf);
+ if (rc != AL_OK)
+ return AL_RC(rc);
+ INSERT(al,chunks,ins,splitbuf);
+ skip = 0;
+ }
+ }
+
+ /*
+ * as long as there is data to move
+ */
+ pos = off;
+ while (n > 0 && cur != NULL) {
+ next = NEXT(cur, chunks);
+ len = AL_CHUNK_LEN(cur);
+
+ /*
+ * check if partial chunk was selected
+ * if skip > 0 we have to preserve bytes at the beginning
+ * if skip == 0 && n < len-skip we have to preserve bytes at the end
+ */
+ if (skip > 0 || n < len - skip) {
+
+ /* compute number of bytes to process */
+ step = AL_CHUNK_SPAN(cur, skip, n);
+
+ /* copy data to target */
+ if (tal != NULL) {
+ /*
+ * XXX - this operation can fail
+ */
+ size_t before = tal->bytes;
+
+ rc = al_append_bytes(tal, AL_CHUNK_PTR(cur, skip),
+ step, AL_CHUNK_LABEL(cur));
+ if (rc != AL_OK)
+ /* correct step size to actual size */
+ step = tal->bytes - before;
+
+ } else
+ rc = AL_OK;
+
+ /*
+ * cut span from src chunk
+ * if skip == 0, simply shrink the chunk from the beginning
+ * if skip > 0, compute number of bytes to preserve,
+ * align buffer and shrink chunk from the end
+ */
+ if (skip == 0)
+ AL_PRESIZE(al, cur, -step);
+ else {
+ /* align trailing bytes */
+ if (len > (skip+step)) {
+ memmove(
+ AL_CHUNK_PTR(cur, skip),
+ AL_CHUNK_PTR(cur, skip+step),
+ len - (skip+step)
+ );
+ }
+ AL_RESIZE(al, cur, -step);
+ }
+
+ /*
+ * bail out from failed al_append_bytes operation above
+ *
+ */
+ if (rc != AL_OK)
+ return AL_RC(rc);
+ }
+ else {
+ /*
+ * the whole chunk has to be moved,
+ *
+ * scan ahead for more such chunks to unlink
+ * and relink a whole chain in a single operation
+ *
+ * move the chunks to the target chain
+ * manual accounting for total size
+ */
+
+ /*
+ * when the insertion chunk is removed, we have to adjust
+ * the insertion point
+ *
+ * if the insertion chunk was the last chunk we get a NULL
+ * next pointer and the insertion method switches to append
+ * mode
+ */
+
+ step = len;
+ start = cur;
+ end = cur;
+
+ if (cur == ins)
+ ins = next;
+
+ while (next && step + (len = AL_CHUNK_LEN(next)) <= n) {
+ step += len;
+ end = next;
+ next = NEXT(next, chunks);
+ if (end == ins)
+ ins = next;
+ }
+ REMOVE2(al, chunks, start, end);
+
+ al->bytes -= step;
+ if (tal == NULL) {
+ do {
+ cur = start;
+ start = NEXT(cur, chunks);
+ dispose_chunk(al, cur);
+ } while (cur != end);
+ } else {
+ ADDTAIL2(tal, chunks, start, end);
+ tal->bytes += step;
+ }
+ }
+ n -= step;
+ pos += step;
+ cur = next;
+ skip = 0;
+ }
+
+ /*
+ * now splice in replacement chunks
+ */
+ if (doinsert) {
+ if (ins != NULL) {
+ /*
+ * complex case where list is inserted
+ *
+ * the original list has already been modified so
+ * that we can simply insert between two chunks
+ */
+ INSERTLIST(al,chunks,ins,nal);
+ } else {
+ /*
+ * simple case where list end is 'replaced'
+ */
+ APPENDLIST(al,chunks,nal);
+ }
+
+ al->bytes += nal->bytes;
+ nal->bytes = 0;
+
+ }
+
+ return AL_OK;
+ }
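+
+ /*
+ * usage sketch (illustrative only): drop the first 4 bytes, cut the
+ * next 10 bytes into 'cut', and replace them with the contents of
+ * 'repl' (which is emptied by the move); all three assembly lines are
+ * assumed to have been set up by the caller.
+ *
+ *     al_splice(al, 0, 4, NULL, NULL);      (drop, no target)
+ *     al_splice(al, 0, 10, repl, cut);      (cut and insert in one step)
+ */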
+
+ /*
+ * relabel a byte range: split chunks at the range boundaries where
+ * necessary and set the label of every chunk within [off, off+n) that
+ * carries oldlabel (a NULL oldlabel matches any label) and does not
+ * already carry newlabel
+ */
+ al_rc_t
+ al_setlabel(al_t *al, size_t off, size_t n,
+ al_label_t oldlabel, al_label_t newlabel)
+ {
+ al_rc_t rc;
+ al_chunk_t *cur, *splitbuf;
+ size_t skip, len;
+
+ /* argument sanity check(s) */
+ if (al == NULL)
+ return AL_RC(AL_ERR_ARG);
+
+ /*
+ * seek to beginning, return EOF when seek position does not exist
+ * EOD must be a valid seek position so that we can append data
+ */
+ rc = al_seek(al, off, &cur, &skip);
+ if (rc != AL_OK)
+ return AL_RC(rc);
+
+ /*
+ * seek to EOD, nothing to label
+ */
+ if (cur == NULL)
+ return AL_OK;
+
+ /*
+ * if first chunk doesn't need relabeling
+ * then skip it and adjust the remaining byte count
+ *
+ * else if offset is not at chunk start
+ * then split chunk at offset, continue
+ * with second half
+ */
+ if (!AL_SAME_LABEL(cur, oldlabel) ||
+ AL_SAME_LABEL(cur, newlabel)) {
+ len = AL_CHUNK_LEN(cur) - skip;
+ n = n < len ? 0 : n - len;
+ cur = NEXT(cur, chunks);
+ } else if (skip > 0) {
+ rc = split_chunk(al, cur, skip, &splitbuf);
+ if (rc != AL_OK)
+ return AL_RC(rc);
+ INSERT(al,chunks,cur,splitbuf);
+ }
+
+ /*
+ * for all remaining chunks and bytes
+ *
+ * if chunk doesn't need relabeling
+ * then skip it, adjust size
+ *
+ * else if chunk isn't covered in total
+ * then split chunk at end offset and
+ * process first half
+ */
+ while (n > 0 && cur != NULL) {
+ len = AL_CHUNK_LEN(cur);
+ if (!AL_SAME_LABEL(cur, oldlabel) ||
+ AL_SAME_LABEL(cur, newlabel)) {
+ n = n < len ? 0 : n - len;
+ } else {
+ if (n < len) {
+ /*
+ * split last chunk at end offset
+ */
+ rc = split_chunk(al, cur, n, &splitbuf);
+ if (rc != AL_OK)
+ return AL_RC(rc);
+ INSERT(al,chunks,cur,splitbuf);
+ cur = splitbuf;
+ len = AL_CHUNK_LEN(cur);
+ }
+ AL_CHUNK_LABEL(cur) = newlabel;
+ n -= len;
+ }
+ cur = NEXT(cur, chunks);
+ }
+
+ return AL_OK;
+ }
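+
+ /*
+ * usage sketch (illustrative only, assuming al_label_t is a pointer-style
+ * tag as the NULL comparisons above suggest): a convenient idiom is to
+ * use the address of a static marker object as a label; hdr_marker and
+ * HDR are assumptions of this example, not part of the library.
+ *
+ *     static char hdr_marker;
+ *     #define HDR ((al_label_t)&hdr_marker)
+ *
+ *     al_setlabel(al, 0, 16, NULL, HDR);    (tag the first 16 bytes)
+ */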
+
+ /*
+ * assembly line traversal requires a context. It needs to be
+ * malloced to keep its inner structure private
+ */
+ al_rc_t
+ al_txalloc(al_t *al, al_tx_t **txp)
+ {
+ al_tx_t *tx;
+
+ tx = (al_tx_t*)(al->m.malloc)(sizeof(al_tx_t));
+ if (tx == NULL)
+ return AL_RC(AL_ERR_MEM);
+
+ *txp = tx;
+ return AL_OK;
+ }
+
+ /*
+ * free traversal context using proper policy function
+ */
+ al_rc_t
+ al_txfree(al_t *al, al_tx_t *tx)
+ {
+ (al->m.free)(tx);
+ return AL_OK;
+ }
+
+ /*
+ * initiate assembly line traversal
+ *
+ * - do initial seek, fail if position does not exist
+ * - save traversal parameters
+ */
+ al_rc_t
+ al_traverse(al_t *al, size_t off, size_t n, al_td_t dir, al_label_t label, al_tx_t *tx)
+ {
+ al_rc_t rc;
+
+ tx->cur = NULL;
+
+ rc = al_seek(al, off, &tx->cur, &tx->skip);
+ if (rc != AL_OK)
+ return AL_RC(rc);
+
+ tx->dir = dir;
+ tx->togo = n;
+ tx->label = label;
+
+ return AL_OK;
+ }
+
+ /*
+ * assembly line traversal step
+ *
+ * return EOF if at end of assembly line
+ * clip view chunk to traversal bounds
+ * advance chunk cursor according to traversal direction
+ */
+ al_rc_t
+ al_traverse_next(al_t *al, al_tx_t *tx, al_chunk_t **alcp)
+ {
+ size_t step;
+
+ do {
+ if (tx->togo <= 0) /* XXX - togo can be negative from bad input */
+ return AL_ERR_EOF;
+
+ if (tx->cur == NULL) /* premature EOF */
+ return AL_ERR_EOF;
+
+ /* compute number of bytes to process */
+ step = AL_CHUNK_SPAN(tx->cur, tx->skip, tx->togo);
+
+ /*
+ * synthetic chunk which is NOT maintained in usecount
+ * MUST NOT BE USED for modifications to chunk list
+ * MUST NOT BE USED for modifications to chunk size
+ * ALLOWED is read access to chunk content
+ * ALLOWED is modification in place of chunk content
+ */
+ tx->view = *(tx->cur);
+ tx->view.begin += tx->skip;
+ tx->view.end = tx->view.begin + step;
+
+ switch (tx->dir) {
+ case AL_FORWARD:
+ case AL_BACKWARD:
+ break;
+ case AL_FORWARD_SPAN:
+ case AL_BACKWARD_SPAN:
+ if (!AL_SAME_LABEL(&tx->view, tx->label)) {
+ tx->togo = 0;
+ return AL_ERR_EOF;
+ }
+ break;
+ }
+
+ switch (tx->dir) {
+ case AL_FORWARD:
+ case AL_FORWARD_SPAN:
+ tx->cur = NEXT(tx->cur,chunks);
+ tx->togo -= step;
+ tx->skip = 0;
+ break;
+ case AL_BACKWARD:
+ case AL_BACKWARD_SPAN:
+ tx->cur = PREV(tx->cur,chunks);
+ tx->togo -= step;
+ tx->skip = 0;
+ break;
+ }
+ } while (!AL_SAME_LABEL(&tx->view, tx->label));
+
+ *alcp = &tx->view;
+ return AL_OK;
+ }
+
+ /*
+ * assembly line traversal end
+ *
+ * to free resources allocated/locked during traversal
+ * currently a NOOP
+ */
+ al_rc_t al_traverse_end(al_t *al, al_tx_t *tx, int final)
+ {
+ return AL_OK;
+ }
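+
+ /*
+ * usage sketch (illustrative only): walk the first 100 bytes forward,
+ * chunk view by chunk view; the processing in the loop body is left to
+ * the caller.
+ *
+ *     al_tx_t *tx;
+ *     al_chunk_t *view;
+ *
+ *     if (al_txalloc(al, &tx) == AL_OK) {
+ *         if (al_traverse(al, 0, 100, AL_FORWARD, NULL, tx) == AL_OK) {
+ *             while (al_traverse_next(al, tx, &view) == AL_OK)
+ *                 ... use al_chunk_ptr(view, 0) and al_chunk_len(view) ...
+ *             al_traverse_end(al, tx, 1);
+ *         }
+ *         al_txfree(al, tx);
+ *     }
+ */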
+
+ /*
+ * full assembly line traversal with callback
+ *
+ * traversal is aborted when the callback returns != AL_OK
+ * reaching EOF (and also aborting with AL_ERR_EOF) is not an error
+ *
+ * traversal context is kept on stack (XXX ?)
+ */
+ al_rc_t
+ al_traverse_cb(al_t *al, size_t off, size_t n, al_td_t dir, al_label_t label,
+ al_rc_t (*cb)(al_chunk_t *, void *), void *u)
+ {
+ al_rc_t rc;
+ al_tx_t tx; /* XXX - private tx structure on stack */
+ al_chunk_t *view;
+
+ rc = al_traverse(al, off, n, dir, label, &tx);
+ if (rc != AL_OK)
+ return AL_RC(rc);
+
+ while ((rc = al_traverse_next(al, &tx, &view)) == AL_OK) {
+ rc = cb(view, u);
+ if (rc != AL_OK)
+ break;
+ }
+
+ al_traverse_end(al, &tx, 1);
+
+ if (rc != AL_ERR_EOF)
+ return AL_RC(rc);
+
+ return AL_OK;
+ }
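+
+ /*
+ * usage sketch (illustrative only): count_cb and its counter argument
+ * are assumptions of this example; the callback aborts the traversal
+ * as soon as it returns something other than AL_OK.
+ *
+ *     static al_rc_t count_cb(al_chunk_t *alc, void *u)
+ *     {
+ *         *(size_t *)u += al_chunk_len(alc);
+ *         return AL_OK;
+ *     }
+ *
+ *     size_t seen = 0;
+ *     al_traverse_cb(al, 0, al_bytes(al), AL_FORWARD, NULL, count_cb, &seen);
+ */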
+
+ /*
+ * full assembly line traversal with data copy to linear buffer
+ *
+ * returns actual number of bytes copied
+ *
+ * Do not copy if destination pointer is NULL, but still count.
+ * This can be used to precalculate the size of the needed linear
+ * buffer with n set to some arbitrary huge value.
+ *
+ * traversal context is kept on stack (XXX ?)
+ */
+ al_rc_t
+ al_flatten(al_t *al, size_t off, size_t n, al_td_t dir, al_label_t label,
+ char *dst, size_t *lenp)
+ {
+ al_rc_t rc;
+ al_tx_t tx; /* XXX - private tx structure on stack */
+ al_chunk_t *view;
+ size_t step, total;
+
+ *lenp = 0; /* keep caller on safe side */
+
+ rc = al_traverse(al, off, n, dir, label, &tx);
+ if (rc != AL_OK)
+ return AL_RC(rc);
+
+ if (dst != NULL) {
+ /* backward copies fill the destination from its end;
+    skip the adjustment in the count-only (dst == NULL) case
+    to avoid pointer arithmetic on a null pointer */
+ switch (dir) {
+ case AL_FORWARD:
+ case AL_FORWARD_SPAN:
+ break;
+ case AL_BACKWARD:
+ case AL_BACKWARD_SPAN:
+ dst += n;
+ break;
+ }
+ }
+
+ total = 0;
+
+ if (dst == NULL) {
+ while ((rc = al_traverse_next(al, &tx, &view)) == AL_OK)
+ total += AL_CHUNK_LEN(view);
+ } else {
+ while ((rc = al_traverse_next(al, &tx, &view)) == AL_OK) {
+ step = AL_CHUNK_LEN(view);
+ switch (dir) {
+ case AL_FORWARD:
+ case AL_FORWARD_SPAN:
+ memmove(dst, AL_CHUNK_PTR(view, 0), step);
+ dst += step;
+ break;
+ case AL_BACKWARD:
+ case AL_BACKWARD_SPAN:
+ dst -= step;
+ memmove(dst, AL_CHUNK_PTR(view, 0), step);
+ break;
+ }
+ total += step;
+ }
+ }
+ *lenp = total;
+
+ al_traverse_end(al, &tx, 1);
+
+ if (rc != AL_ERR_EOF)
+ return AL_RC(rc);
+
+ return AL_OK;
+ }
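+
+ /*
+ * usage sketch (illustrative only): first pass with a NULL destination
+ * to size the linear buffer, second pass to copy; the (size_t)-1 span
+ * simply means "up to end of data" here.
+ *
+ *     size_t need, got;
+ *     char *flat;
+ *
+ *     al_flatten(al, 0, (size_t)-1, AL_FORWARD, NULL, NULL, &need);
+ *     if ((flat = malloc(need)) != NULL)
+ *         al_flatten(al, 0, need, AL_FORWARD, NULL, flat, &got);
+ */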
+
+ /*
+ * full assembly line traversal with data copy to target assembly line
+ *
+ * traversal context is kept on stack (XXX ?)
+ */
+ al_rc_t
+ al_copy(al_t *al, size_t off, size_t n, al_label_t label, al_t *tal)
+ {
+ al_rc_t rc;
+ al_tx_t tx; /* XXX - private tx structure on stack */
+ al_chunk_t *view;
+ size_t step;
+
+ rc = al_traverse(al, off, n, AL_FORWARD, label, &tx);
+ if (rc != AL_OK)
+ return AL_RC(rc);
+
+ while ((rc = al_traverse_next(al, &tx, &view)) == AL_OK) {
+ step = AL_CHUNK_LEN(view);
+ al_append_bytes(tal, AL_CHUNK_PTR(view, 0), step, AL_CHUNK_LABEL(view));
+ }
+
+ al_traverse_end(al, &tx, 1);
+
+ if (rc != AL_ERR_EOF)
+ return AL_RC(rc);
+
+ return AL_OK;
+ }
+
+ /*
+ * traverse assembly line and retrieve first chunk
+ *
+ * traversal context is kept on stack (XXX ?)
+ */
+ al_rc_t
+ al_firstlabel(al_t *al, size_t off, size_t n, al_td_t dir, al_label_t label,
+ al_label_t *labelp)
+ {
+ al_rc_t rc;
+ al_tx_t tx; /* XXX - private tx structure on stack */
+ al_chunk_t *view;
+
+ rc = al_traverse(al, off, n, dir, label, &tx);
+ if (rc != AL_OK)
+ return AL_RC(rc);
+
+ if ((rc = al_traverse_next(al, &tx, &view)) == AL_OK)
+ *labelp = AL_CHUNK_LABEL(view);
+
+ al_traverse_end(al, &tx, 1);
+
+ return rc;
+ }
+
+ /*
+ * traverse the assembly line forward, search for the first chunk
+ * that matches the label and continue until the end of the span
+ * of identically labeled data
+ *
+ * return the offset of the first byte in *offp
+ * return the length of the span in *spanp
+ *
+ * traversal context is kept on stack (XXX ?)
+ */
+ al_rc_t
+ al_spanlabel(al_t *al, size_t off, size_t n, al_label_t label,
+ size_t *offp, size_t *spanp)
+ {
+ al_rc_t rc;
+ al_tx_t tx; /* XXX - private tx structure on stack */
+ al_chunk_t *view;
+ size_t len, total, start;
+ int have_first;
+
+ /*
+ * we need to track absolute traversal position,
+ * so we have to see all chunks.. no filtering
+ * allowed
+ */
+ rc = al_traverse(al, off, n, AL_FORWARD, NULL, &tx);
+ if (rc != AL_OK)
+ return AL_RC(rc);
+
+ have_first = 0;
+ start = 0;
+ total = 0;
+ while ((rc = al_traverse_next(al, &tx, &view)) == AL_OK) {
+ len = AL_CHUNK_LEN(view);
+ if (AL_SAME_LABEL(view, label)) {
+ if (!have_first) {
+ start = total;
+ have_first = 1;
+ }
+ } else if (have_first)
+ break;
+ total += len;
+ }
+
+ al_traverse_end(al, &tx, 1);
+
+ if (rc != AL_OK && rc != AL_ERR_EOF)
+ return AL_RC(rc);
+
+ if (!have_first)
+ return AL_RC(AL_ERR_EOF);
+
+ *offp = off + start;
+ *spanp = total - start;
+
+ return AL_OK;
+ }
+
+ /*
+ * relay macros to caller
+ *
+ * al_bytes - total number of bytes in assembly line
+ * al_chunk_len - number of bytes in chunk
+ * al_chunk_span - clip interval [off, off+n) against chunk length
+ * al_chunk_label - return chunk label
+ * al_same_label - check if label matches with chunk label
+ * al_chunk_ptr - return memory pointer to byte within chunk
+ *
+ * off must be a valid offset within [0, len)
+ */
+
+ size_t
+ al_bytes(const al_t *al)
+ {
+ return al->bytes;
+ }
+
+ size_t
+ al_chunk_len(al_chunk_t *alc)
+ {
+ return AL_CHUNK_LEN(alc);
+ }
+
+ size_t
+ al_chunk_span(al_chunk_t *alc, size_t off, size_t n)
+ {
+ return AL_CHUNK_SPAN(alc, off, n);
+ }
+
+ al_label_t
+ al_chunk_label(al_chunk_t *alc)
+ {
+ return AL_CHUNK_LABEL(alc);
+ }
+
+ int
+ al_same_label(al_chunk_t *alc, al_label_t label)
+ {
+ return AL_SAME_LABEL(alc, label);
+ }
+
+ char *
+ al_chunk_ptr(al_chunk_t *alc, size_t off)
+ {
+ return AL_CHUNK_PTR(alc, off);
+ }
+
+ /*
+ * convert error code into human-readable form
+ */
+ const char *
+ al_error(al_rc_t rc)
+ {
+ const char *mess;
+
+ switch (rc) {
+ case AL_OK: mess = "Everything Ok"; break;
+ case AL_ERR_ARG: mess = "Invalid Argument"; break;
+ case AL_ERR_MEM: mess = "Not Enough Memory"; break;
+ case AL_ERR_EOF: mess = "End Of Data"; break;
+ case AL_ERR_INT: mess = "Internal Error"; break;
+ default: mess = "Invalid Result Code"; break;
+ }
+
+ return mess;
+ }
+