From 7af5ab48e0a663fc82a81ecee687cf3a171a1dfd Mon Sep 17 00:00:00 2001
From: uno20001
Date: Mon, 20 Apr 2020 12:49:01 +0200
Subject: [PATCH 1/3] add linked list implementation

---
 ll.c | 214 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 ll.h | 69 +++++++++++++++++++
 2 files changed, 283 insertions(+)
 create mode 100644 ll.c
 create mode 100644 ll.h

diff --git a/ll.c b/ll.c
new file mode 100644
index 0000000..f4c2b0f
--- /dev/null
+++ b/ll.c
@@ -0,0 +1,214 @@
+/*
+This file is part of mktorrent
+
+mktorrent is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+mktorrent is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+*/
+
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "export.h"
+#include "ll.h"
+
+
+EXPORT struct ll *ll_new(void)
+{
+	struct ll *list = calloc(1, sizeof(*list));
+
+	if (list)
+		LL_TAIL(list) = &list->head;
+
+	return list;
+}
+
+EXPORT void ll_free(struct ll *list, ll_node_data_destructor destructor)
+{
+	if (list) {
+
+		struct ll_node *head = LL_HEAD(list), *next;
+
+		while (head) {
+
+			if (destructor)
+				destructor(LL_DATA(head));
+
+			if (head->data_size)
+				free(LL_DATA(head));
+
+			next = LL_NEXT(head);
+
+			free(head);
+
+			head = next;
+		}
+
+		free(list);
+	}
+}
+
+static struct ll_node *ll_node_new(
+	void *data,
+	const size_t data_size,
+	struct ll_node *prev,
+	struct ll_node *next)
+{
+	struct ll_node *node = calloc(1, sizeof(*node));
+
+	if (!node)
+		return NULL;
+
+	if (data_size) {
+		LL_DATA(node) = calloc(1, data_size);
+
+		if (!LL_DATA(node))
+			goto oom_node_data;
+
+		memcpy(LL_DATA(node), data, data_size);
+	}
+	else
+		LL_DATA(node) = data;
+
+	LL_DATASIZE(node) = data_size;
+	LL_PREV(node) = prev;
+	LL_NEXT(node) = next;
+
+	return node;
+
+oom_node_data:
+	free(node);
+
+	return NULL;
+}
+
+/* appends a new node with 'data' to the end of 'list' */
+EXPORT struct ll_node *ll_append(struct ll *list, void *data, const size_t data_size)
+{
+	if (!list)
+		return NULL;
+
+	struct ll_node *node = ll_node_new(data, data_size, LL_TAIL(list), NULL);
+
+	if (node) {
+		LL_NEXT(LL_TAIL(list)) = node;
+		LL_TAIL(list) = node;
+	}
+
+	return node;
+}
+
+/* concatenates two lists while destroying the second one */
+EXPORT struct ll *ll_extend(struct ll *list, struct ll *other)
+{
+	if (!list)
+		return NULL;
+
+	if (!other)
+		return list;
+
+	LL_NEXT(LL_TAIL(list)) = LL_HEAD(other);
+
+	free(other);
+
+	return list;
+}
+
+/* sort the given range using recursive merge sort in a stable way;
+ * sets 'first' to the new head, 'last' to the new tail;
+ * the new head will have ->prev = NULL,
+ * and the new tail will have ->next = NULL;
+ */
+static void ll_sort_node_range(
+	struct ll_node **first,
+	struct ll_node **last,
+	ll_node_data_cmp cmp)
+{
+#define APPEND_AND_STEP(t, x) do { \
+	LL_NEXT(t) = (x); \
+	LL_PREV(x) = (t); \
+	\
+	LL_STEP(x); \
+	LL_STEP(t); \
+} while(0)
+
+	if (first == NULL || *first == NULL || last == NULL || *last == NULL)
+		return;
+
+	/* sorting a one element range is trivial */
+	if (*first == *last) {
+		LL_CLEAR_LINKS(*first);
+		return;
+	}
+
+	struct ll_node *middle = *first, *middle2 = *last;
+
+	while (middle != middle2 && LL_NEXT(middle) != middle2) {
+		middle = LL_NEXT(middle);
+		middle2 = LL_PREV(middle2);
+	}
+
+	/* middle is now the midpoint of the list */
+
+	/* 'tail' is the tail of the new, sorted list */
+	struct ll_node dummy, *tail = &dummy;
+
+	struct ll_node *a = *first;
+	struct ll_node *b = LL_NEXT(middle);
+
+	/* the values of middle and *last are not used anymore in this function,
+	 * so they can be safely overwritten by the recursive calls
+	 */
+	ll_sort_node_range(&a, &middle, cmp);
+	ll_sort_node_range(&b, last, cmp);
+
+	while (a && b) {
+		int r = cmp(LL_DATA(a), LL_DATA(b));
+
+		if (r <= 0) APPEND_AND_STEP(tail, a); /* if a.val <= b.val, append a */
+		else APPEND_AND_STEP(tail, b); /* otherwise, append b */
+	}
+
+	/* at this point only one of a or b might be non-NULL,
+	 * so only one of the next two loops will run
+	 */
+
+	/* append remaining nodes from the first half */
+	while (a) APPEND_AND_STEP(tail, a);
+
+	/* append remaining nodes from the second half */
+	while (b) APPEND_AND_STEP(tail, b);
+
+	/* the prev ptr of the first "real" node points to dummy, clear that */
+	LL_PREV(LL_NEXT(&dummy)) = NULL;
+
+	/* set the new head and tail */
+	*first = LL_NEXT(&dummy);
+	*last = tail;
+
+#undef APPEND_AND_STEP
+}
+
+EXPORT struct ll *ll_sort(struct ll *list, ll_node_data_cmp cmp)
+{
+	if (list == NULL || cmp == NULL)
+		return NULL;
+
+	ll_sort_node_range(&LL_HEAD(list), &LL_TAIL(list), cmp);
+
+	if (LL_HEAD(list))
+		LL_PREV(LL_HEAD(list)) = &list->head;
+
+	return list;
+}

diff --git a/ll.h b/ll.h
new file mode 100644
index 0000000..40d8801
--- /dev/null
+++ b/ll.h
@@ -0,0 +1,69 @@
+#ifndef MKTORRENT_LL_H
+#define MKTORRENT_LL_H
+
+struct ll_node {
+	struct ll_node *prev, *next;
+
+	size_t data_size;
+	void *data;
+};
+
+struct ll {
+	struct ll_node head, *tail;
+};
+
+typedef void (*ll_node_data_destructor)(void *);
+typedef int (*ll_node_data_cmp)(const void *, const void *);
+
+
+#define LL_DATA(node) ((node)->data)
+#define LL_DATA_AS(node, type) ((type) LL_DATA(node))
+#define LL_DATASIZE(node) ((node)->data_size)
+#define LL_PREV(node) ((node)->prev)
+#define LL_NEXT(node) ((node)->next)
+#define LL_STEP(node) ((node) = LL_NEXT(node))
+#define LL_STEP_PREV(node) ((node) = LL_PREV(node))
+#define LL_CLEAR_LINKS(node) do { LL_NEXT(node) = NULL; LL_PREV(node) = NULL; } while(0)
+
+#define LL_HEAD(list) ((list)->head.next)
+#define LL_TAIL(list) ((list)->tail)
+#define LL_IS_EMPTY(list) (LL_HEAD(list) == NULL)
+#define LL_IS_SINGLETON(list) (!LL_IS_EMPTY(list) && LL_NEXT(LL_HEAD(list)) == NULL)
+#define LL_FOR_FROM_TO_STEP(node, from, to, step) for (struct ll_node *(node) = from; node != to; step(node))
+#define LL_FOR(node, list) LL_FOR_FROM_TO_STEP(node, LL_HEAD(list), NULL, LL_STEP)
+#define LL_FOR_FROM(node, from) LL_FOR_FROM_TO_STEP(node, from, NULL, LL_STEP)
+
+
+/* creates a new linked list instance */
+EXPORT struct ll *ll_new(void);
+
+
+/* frees the given list, calls a "destructor" function
+ * on the data pointers if provided
+ */
+EXPORT void ll_free(struct ll *, ll_node_data_destructor);
+
+
+/* appends a new node with data to the end of the given list,
+ * if the provided size is zero, then the data pointer is set to
+ * the pointer provided in the arguments, otherwise size number of
+ * "bytes" is allocated, and that amount of bytes is copied into
+ * this newly allocated node from the given pointer
+ */
+EXPORT struct ll_node
*ll_append(struct ll *, void *, const size_t); + + +/* concatenates the second list to the first one + * while destroying the second one, + * returns the first one + */ +EXPORT struct ll *ll_extend(struct ll *, struct ll *); + + +/* sorts the given list using recursive merge sort based on + * the provieded comparator function, fails if either of the arguments is + * NULL, returns the given list + */ +EXPORT struct ll *ll_sort(struct ll *, ll_node_data_cmp); + +#endif /* MKTORRENT_LL_H */ From 765f27536eb58d48fac6ea2841625ea1de5773b1 Mon Sep 17 00:00:00 2001 From: uno20001 Date: Mon, 20 Apr 2020 12:51:04 +0200 Subject: [PATCH 2/3] make rest of code use linked list --- Makefile | 2 +- hash.c | 7 ++- hash_pthreads.c | 4 +- init.c | 162 +++++++++++++++++++++++++----------------------- init.h | 1 + main.c | 5 ++ mktorrent.h | 30 ++------- output.c | 75 ++++++++++++++-------- 8 files changed, 154 insertions(+), 132 deletions(-) diff --git a/Makefile b/Makefile index 4d6ec80..d1f5702 100644 --- a/Makefile +++ b/Makefile @@ -66,4 +66,4 @@ program = mktorrent version = 1.1 HEADERS = mktorrent.h -SRCS = ftw.c init.c sha1.c hash.c output.c main.c msg.c +SRCS = ftw.c init.c sha1.c hash.c output.c main.c msg.c ll.c diff --git a/hash.c b/hash.c index c85f0e2..4c622be 100644 --- a/hash.c +++ b/hash.c @@ -36,6 +36,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA #include "mktorrent.h" #include "hash.h" #include "msg.h" +#include "ll.h" #ifndef O_BINARY #define O_BINARY 0 @@ -56,7 +57,6 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ EXPORT unsigned char *make_hash(struct metafile *m) { - flist_t *f; /* pointer to a place in the file list */ unsigned char *hash_string; /* the hash string */ unsigned char *pos; /* position in the hash string */ unsigned char *read_buf; /* read buffer */ @@ -83,8 +83,9 @@ EXPORT unsigned char *make_hash(struct metafile *m) /* and initiate r to 0 since we haven't read anything yet */ r = 0; /* go through all the files in the file list */ - for (f = m->file_list; f; f = f->next) { - + LL_FOR(file_node, m->file_list) { + struct file_data *f = LL_DATA_AS(file_node, struct file_data*); + /* open the current file for reading */ FATAL_IF((fd = open(f->path, OPENFLAGS)) == -1, "cannot open '%s' for reading: %s\n", f->path, strerror(errno)); diff --git a/hash_pthreads.c b/hash_pthreads.c index fc672ea..8c28f18 100644 --- a/hash_pthreads.c +++ b/hash_pthreads.c @@ -203,7 +203,6 @@ static void *worker(void *data) static void read_files(struct metafile *m, struct queue *q, unsigned char *pos) { int fd; /* file descriptor */ - flist_t *f; /* pointer to a place in the file list */ size_t r = 0; /* number of bytes read from file(s) into the read buffer */ #ifndef NO_HASH_CHECK @@ -213,7 +212,8 @@ static void read_files(struct metafile *m, struct queue *q, unsigned char *pos) struct piece *p = get_free(q, m->piece_length); /* go through all the files in the file list */ - for (f = m->file_list; f; f = f->next) { + LL_FOR(file_node, m->file_list) { + struct file_data *f = LL_DATA_AS(file_node, struct file_data*); /* open the current file for reading */ FATAL_IF((fd = open(f->path, OPENFLAGS)) == -1, diff --git a/init.c b/init.c index 1a6b1e7..5ca6df5 100644 --- a/init.c +++ b/init.c @@ -121,13 +121,12 @@ static void set_absolute_file_path(struct metafile *m) * parse a comma separated list of strings [,]* and * return a string list containing the substrings */ -static slist_t *get_slist(char *s) +static struct ll 
*get_slist(char *s) { - slist_t *list, *last; char *e; - /* allocate memory for the first node in the list */ - list = last = malloc(sizeof(slist_t)); + /* allocate a new list */ + struct ll *list = ll_new(); FATAL_IF0(list == NULL, "out of memory\n"); /* add URLs to the list while there are commas in the string */ @@ -135,20 +134,15 @@ static slist_t *get_slist(char *s) /* set the commas to \0 so the URLs appear as * separate strings */ *e = '\0'; - last->s = s; + + FATAL_IF0(ll_append(list, s, 0) == NULL, "out of memory\n"); /* move s to point to the next URL */ s = e + 1; - - /* append another node to the list */ - last->next = malloc(sizeof(slist_t)); - last = last->next; - FATAL_IF0(last == NULL, "out of memory\n"); } /* set the last string in the list */ - last->s = s; - last->next = NULL; + FATAL_IF0(ll_append(list, s, 0) == NULL, "out of memory\n"); /* return the list */ return list; @@ -176,11 +170,15 @@ static int is_dir(struct metafile *m, char *target) /* since we know the torrent is just a single file and we've already stat'ed it, we might as well set the file list */ - m->file_list = malloc(sizeof(flist_t)); - FATAL_IF0(m->file_list == NULL, "out of memory\n"); - m->file_list->path = target; - m->file_list->size = s.st_size; - m->file_list->next = NULL; + struct file_data fd = { + strdup(target), + s.st_size + }; + + FATAL_IF0( + fd.path == NULL || ll_append(m->file_list, &fd, sizeof(fd)) == NULL, + "out of memory\n"); + /* ..and size variable */ m->size = s.st_size; @@ -195,8 +193,6 @@ static int is_dir(struct metafile *m, char *target) */ static int process_node(const char *path, const struct stat *sb, void *data) { - flist_t **p; /* pointer to a node in the file list */ - flist_t *new_node; /* place to store a newly created node */ struct metafile *m = data; /* skip non-regular files */ @@ -219,28 +215,17 @@ static int process_node(const char *path, const struct stat *sb, void *data) /* count the total size of the files */ m->size += sb->st_size; - /* find where to insert the new node so that the file list - remains ordered by the path */ - p = &m->file_list; - while (*p && strcmp(path, (*p)->path) > 0) - p = &((*p)->next); - /* create a new file list node for the file */ - new_node = malloc(sizeof(flist_t)); - if (new_node == NULL || - (new_node->path = strdup(path)) == NULL) { + struct file_data fd = { + strdup(path), + sb->st_size + }; + + if (fd.path == NULL || ll_append(m->file_list, &fd, sizeof(fd)) == NULL) { fprintf(stderr, "fatal error: out of memory\n"); return -1; } - new_node->size = sb->st_size; - /* now insert the node there */ - new_node->next = *p; - *p = new_node; - - /* insertion sort is a really stupid way of sorting a list, - but usually a torrent doesn't contain too many files, - so we'll probably be alright ;) */ return 0; } @@ -302,34 +287,41 @@ static void print_help() /* * print the full announce list */ -static void print_announce_list(llist_t *list) +static void print_announce_list(struct ll *list) { - unsigned int n; - - for (n = 1; list; list = list->next, n++) { - slist_t *l = list->l; - - printf(" %u : %s\n", n, l->s); - for (l = l->next; l; l = l->next) - printf(" %s\n", l->s); + unsigned int tier = 1; + + LL_FOR(node, list) { + + struct ll *inner_list = LL_DATA(node); + + printf(" %u : %s\n", + tier, LL_DATA_AS(LL_HEAD(inner_list), const char*)); + + LL_FOR_FROM(inner_node, LL_NEXT(LL_HEAD(inner_list))) { + printf(" %s\n", LL_DATA_AS(inner_node, const char*)); + } + + tier += 1; } } /* * print the list of web seed URLs */ -static void 
print_web_seed_list(slist_t *list) +static void print_web_seed_list(struct ll *list) { printf(" Web Seed URL: "); - if (list == NULL) { + if (LL_IS_EMPTY(list)) { printf("none\n"); return; } - - printf("%s\n", list->s); - for (list = list->next; list; list = list->next) - printf(" %s\n", list->s); + + printf("%s\n", LL_DATA_AS(LL_HEAD(list), const char*)); + LL_FOR_FROM(node, LL_NEXT(LL_HEAD(list))) { + printf(" %s\n", LL_DATA_AS(node, const char*)); + } } /* @@ -374,6 +366,24 @@ static void dump_options(struct metafile *m) printf("\"%s\"\n\n", m->comment); } +static int file_data_cmp_by_name(const void *a, const void *b) +{ + const struct file_data *x = a, *y = b; + return strcmp(x->path, y->path); +} + +static void file_data_clear(void *data) +{ + struct file_data *fd = data; + free(fd->path); +} + +static void free_inner_list(void *data) +{ + struct ll *list = data; + ll_free(list, NULL); +} + /* * parse and check the command line options given * and fill out the appropriate fields of the @@ -382,8 +392,6 @@ static void dump_options(struct metafile *m) EXPORT void init(struct metafile *m, int argc, char *argv[]) { int c; /* return value of getopt() */ - llist_t *announce_last = NULL; - slist_t *web_seed_last = NULL; #ifdef USE_LONG_OPTIONS /* the option structure to pass to getopt_long() */ static struct option long_options[] = { @@ -405,6 +413,15 @@ EXPORT void init(struct metafile *m, int argc, char *argv[]) }; #endif + m->announce_list = ll_new(); + FATAL_IF0(m->announce_list == NULL, "out of memory\n"); + + m->web_seed_list = ll_new(); + FATAL_IF0(m->web_seed_list == NULL, "out of memory\n"); + + m->file_list = ll_new(); + FATAL_IF0(m->file_list == NULL, "out of memory\n"); + /* now parse the command line options given */ #ifdef USE_PTHREADS #define OPT_STRING "a:c:dhl:n:o:ps:t:vw:" @@ -420,17 +437,9 @@ EXPORT void init(struct metafile *m, int argc, char *argv[]) #undef OPT_STRING switch (c) { case 'a': - if (announce_last == NULL) { - m->announce_list = announce_last = - malloc(sizeof(llist_t)); - } else { - announce_last->next = - malloc(sizeof(llist_t)); - announce_last = announce_last->next; - - } - FATAL_IF0(announce_last == NULL, "out of memory\n"); - announce_last->l = get_slist(optarg); + FATAL_IF0( + ll_append(m->announce_list, get_slist(optarg), 0) == NULL, + "out of memory\n"); break; case 'c': m->comment = optarg; @@ -465,16 +474,7 @@ EXPORT void init(struct metafile *m, int argc, char *argv[]) m->verbose = 1; break; case 'w': - if (web_seed_last == NULL) { - m->web_seed_list = web_seed_last = - get_slist(optarg); - } else { - web_seed_last->next = - get_slist(optarg); - web_seed_last = web_seed_last->next; - } - while (web_seed_last->next) - web_seed_last = web_seed_last->next; + ll_extend(m->web_seed_list, get_slist(optarg)); break; case '?': fatal("use -h for help.\n"); @@ -487,9 +487,6 @@ EXPORT void init(struct metafile *m, int argc, char *argv[]) "the piece length must be a number between 15 and 28.\n"); m->piece_length = 1 << m->piece_length; - if (announce_last != NULL) - announce_last->next = NULL; - /* ..and a file or directory from which to create the torrent */ FATAL_IF0(optind >= argc, "must specify the contents, use -h for help\n"); @@ -533,6 +530,8 @@ EXPORT void init(struct metafile *m, int argc, char *argv[]) if (file_tree_walk("." 
DIRSEP, MAX_OPENFD, process_node, m)) exit(EXIT_FAILURE); } + + ll_sort(m->file_list, file_data_cmp_by_name); /* calculate the number of pieces pieces = ceil( size / piece_length ) */ @@ -544,3 +543,12 @@ EXPORT void init(struct metafile *m, int argc, char *argv[]) "That's %u pieces of %u bytes each.\n\n", m->size, m->pieces, m->piece_length); } + +EXPORT void cleanup_metafile(struct metafile *m) +{ + ll_free(m->announce_list, free_inner_list); + + ll_free(m->file_list, file_data_clear); + + ll_free(m->web_seed_list, NULL); +} diff --git a/init.h b/init.h index 9cfa0a1..7cade00 100644 --- a/init.h +++ b/init.h @@ -5,5 +5,6 @@ #include "mktorrent.h" /* struct metafile */ EXPORT void init(struct metafile *m, int argc, char *argv[]); +EXPORT void cleanup_metafile(struct metafile *m); #endif /* MKTORRENT_INIT_H */ diff --git a/main.c b/main.c index 2933118..9ede290 100644 --- a/main.c +++ b/main.c @@ -31,6 +31,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA #include "hash.h" #include "output.h" #include "msg.h" +#include "ll.h" #ifdef ALLINONE /* include all .c files in alphabetical order */ @@ -44,6 +45,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA #endif #include "init.c" +#include "ll.c" #include "msg.c" #include "output.c" @@ -141,6 +143,9 @@ int main(int argc, char *argv[]) /* close the file stream */ close_file(file); + + /* free allocated memory */ + cleanup_metafile(&m); /* yeih! everything seemed to go as planned */ return EXIT_SUCCESS; diff --git a/mktorrent.h b/mktorrent.h index abcd2cf..5c9dea1 100644 --- a/mktorrent.h +++ b/mktorrent.h @@ -9,39 +9,21 @@ #define DIRSEP_CHAR '/' #endif -/* string list */ -struct slist_s; -typedef struct slist_s slist_t; -struct slist_s { - char *s; - slist_t *next; -}; - -/* list of string lists */ -struct llist_s; -typedef struct llist_s llist_t; -struct llist_s { - slist_t *l; - llist_t *next; -}; +#include "ll.h" -/* file list */ -struct flist_s; -typedef struct flist_s flist_t; -struct flist_s { +struct file_data { char *path; int64_t size; - flist_t *next; }; struct metafile { /* options */ unsigned int piece_length; /* piece length */ - llist_t *announce_list; /* announce URLs */ + struct ll *announce_list; /* announce URLs */ char *comment; /* optional comment */ const char *torrent_name; /* name of torrent (name of directory) */ char *metainfo_file_path; /* absolute path to the metainfo file */ - slist_t *web_seed_list; /* web seed URLs */ + struct ll *web_seed_list; /* web seed URLs */ int target_is_directory; /* target is a directory */ int no_creation_date; /* don't write the creation date */ int private; /* set the private flag */ @@ -52,8 +34,8 @@ struct metafile { #endif /* information calculated by read_dir() */ - int64_t size; /* combined size of all files */ - flist_t *file_list; /* list of files and their sizes */ + int64_t size; /* combined size of all files */ + struct ll *file_list; /* list of files and their sizes */ unsigned int pieces; /* number of pieces */ }; diff --git a/output.c b/output.c index 5f8407b..ed7745b 100644 --- a/output.c +++ b/output.c @@ -37,19 +37,25 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA /* * write announce list */ -static void write_announce_list(FILE *f, llist_t *list) +static void write_announce_list(FILE *f, struct ll *list) { /* the announce list is a list of lists of urls */ fprintf(f, "13:announce-listl"); /* go through them all.. 
*/ - for (; list; list = list->next) { - slist_t *l; + LL_FOR(tier_node, list) { /* .. and print the lists */ fprintf(f, "l"); - for (l = list->l; l; l = l->next) + + LL_FOR(announce_url_node, LL_DATA_AS(tier_node, struct ll*)) { + + const char *announce_url = + LL_DATA_AS(announce_url_node, const char*); + fprintf(f, "%lu:%s", - (unsigned long)strlen(l->s), l->s); + (unsigned long) strlen(announce_url), announce_url); + } + fprintf(f, "e"); } fprintf(f, "e"); @@ -58,29 +64,31 @@ static void write_announce_list(FILE *f, llist_t *list) /* * write file list */ -static void write_file_list(FILE *f, flist_t *list) +static void write_file_list(FILE *f, struct ll *list) { char *a, *b; fprintf(f, "5:filesl"); /* go through all the files */ - for (; list; list = list->next) { + LL_FOR(file_node, list) { + struct file_data *fd = LL_DATA_AS(file_node, struct file_data*); + /* the file list contains a dictionary for every file with entries for the length and path write the length first */ - fprintf(f, "d6:lengthi%" PRIoff "e4:pathl", list->size); + fprintf(f, "d6:lengthi%" PRIoff "e4:pathl", fd->size); /* the file path is written as a list of subdirectories and the last entry is the filename sorry this code is even uglier than the rest */ - a = list->path; + a = fd->path; /* while there are subdirectories before the filename.. */ while ((b = strchr(a, DIRSEP_CHAR)) != NULL) { /* set the next DIRSEP_CHAR to '\0' so fprintf will only write the first subdirectory name */ *b = '\0'; /* print it bencoded */ - fprintf(f, "%lu:%s", (unsigned long)strlen(a), a); + fprintf(f, "%lu:%s", b - a, a); /* undo our alteration to the string */ *b = DIRSEP_CHAR; /* and move a to the beginning of the next @@ -99,13 +107,16 @@ static void write_file_list(FILE *f, flist_t *list) /* * write web seed list */ -static void write_web_seed_list(FILE *f, slist_t *list) +static void write_web_seed_list(FILE *f, struct ll *list) { /* print the entry and start the list */ fprintf(f, "8:url-listl"); /* go through the list and write each URL */ - for (; list; list = list->next) - fprintf(f, "%lu:%s", (unsigned long)strlen(list->s), list->s); + LL_FOR(node, list) { + const char *web_seed_url = LL_DATA_AS(node, const char*); + fprintf(f, "%lu:%s", + (unsigned long) strlen(web_seed_url), web_seed_url); + } /* end the list */ fprintf(f, "e"); } @@ -123,14 +134,24 @@ EXPORT void write_metainfo(FILE *f, struct metafile *m, unsigned char *hash_stri /* every metainfo file is one big dictonary */ fprintf(f, "d"); - if (m->announce_list != NULL) { + if (!LL_IS_EMPTY(m->announce_list)) { + + struct ll *first_tier = + LL_DATA_AS(LL_HEAD(m->announce_list), struct ll*); + /* write the announce URL */ + const char *first_announce_url + = LL_DATA_AS(LL_HEAD(first_tier), const char*); + fprintf(f, "8:announce%lu:%s", - (unsigned long)strlen(m->announce_list->l->s), - m->announce_list->l->s); + (unsigned long) strlen(first_announce_url), first_announce_url); + /* write the announce-list entry if we have - more than one announce URL */ - if (m->announce_list->next || m->announce_list->l->next) + * more than one announce URL, namely + * a) there are at least two tiers, or (first part of OR) + * b) there are at least two URLs in tier 1 (second part of OR) + */ + if (LL_NEXT(LL_HEAD(m->announce_list)) || LL_NEXT(LL_HEAD(first_tier))) write_announce_list(f, m->announce_list); } @@ -152,7 +173,8 @@ EXPORT void write_metainfo(FILE *f, struct metafile *m, unsigned char *hash_stri /* first entry is either 'length', which specifies the length of a single file 
torrent, or a list of files and their respective sizes */ if (!m->target_is_directory) - fprintf(f, "6:lengthi%" PRIoff "e", m->file_list->size); + fprintf(f, "6:lengthi%" PRIoff "e", + LL_DATA_AS(LL_HEAD(m->file_list), struct file_data*)->size); else write_file_list(f, m->file_list); @@ -168,18 +190,21 @@ EXPORT void write_metainfo(FILE *f, struct metafile *m, unsigned char *hash_stri fprintf(f, "7:privatei1e"); if (m->source) - fprintf(f, "6:source%lu:%s", (unsigned long)strlen(m->source), m->source); + fprintf(f, "6:source%lu:%s", + (unsigned long) strlen(m->source), m->source); /* end the info section */ fprintf(f, "e"); /* add url-list if one is specified */ - if (m->web_seed_list != NULL) { - if (m->web_seed_list->next == NULL) + if (!LL_IS_EMPTY(m->web_seed_list)) { + if (LL_IS_SINGLETON(m->web_seed_list)) { + const char *first_web_seed = + LL_DATA_AS(LL_HEAD(m->web_seed_list), const char*); + fprintf(f, "8:url-list%lu:%s", - (unsigned long)strlen(m->web_seed_list->s), - m->web_seed_list->s); - else + (unsigned long) strlen(first_web_seed), first_web_seed); + } else write_web_seed_list(f, m->web_seed_list); } From 5c4c0d4f95aa514df8e85eabf507ea339c958aca Mon Sep 17 00:00:00 2001 From: uno20001 Date: Mon, 20 Apr 2020 13:26:24 +0200 Subject: [PATCH 3/3] extend cleanup_metafile() and free hash --- init.c | 2 ++ main.c | 8 ++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/init.c b/init.c index 5ca6df5..a379764 100644 --- a/init.c +++ b/init.c @@ -551,4 +551,6 @@ EXPORT void cleanup_metafile(struct metafile *m) ll_free(m->file_list, file_data_clear); ll_free(m->web_seed_list, NULL); + + free(m->metainfo_file_path); } diff --git a/main.c b/main.c index 9ede290..6f36681 100644 --- a/main.c +++ b/main.c @@ -138,14 +138,18 @@ int main(int argc, char *argv[]) _after_ we did all the hashing in case we fail */ file = open_file(m.metainfo_file_path); - /* calculate hash string and write the metainfo to file */ - write_metainfo(file, &m, make_hash(&m)); + /* calculate hash string... */ + unsigned char *hash = make_hash(&m); + /* and write the metainfo to file */ + write_metainfo(file, &m, hash); + /* close the file stream */ close_file(file); /* free allocated memory */ cleanup_metafile(&m); + free(hash); /* yeih! everything seemed to go as planned */ return EXIT_SUCCESS;
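
Reviewer note (not part of the patches themselves): below is a minimal usage sketch of the list API this series introduces, showing ll_new/ll_append/ll_sort/ll_free and the LL_FOR iteration macro. The file name demo.c is hypothetical, the tracker URLs are placeholders, and it assumes the file is built inside the mktorrent tree together with ll.c so that export.h and ll.h resolve; error handling is kept minimal.

/* demo.c - illustrative sketch only, not part of the patch series */
#include <stdio.h>
#include <string.h>

#include "export.h" /* defines EXPORT, used by ll.h */
#include "ll.h"

/* comparator for nodes appended with data_size == 0,
 * i.e. LL_DATA() is the stored string pointer itself */
static int cmp_str(const void *a, const void *b)
{
	return strcmp(a, b);
}

int main(void)
{
	struct ll *urls = ll_new();
	if (urls == NULL)
		return 1;

	/* data_size == 0: the list borrows the pointers as-is,
	 * so ll_free() will not free them (string literals here) */
	ll_append(urls, "udp://tracker.example.org/announce", 0);
	ll_append(urls, "http://tracker.example.com/announce", 0);

	/* stable merge sort using the comparator above */
	ll_sort(urls, cmp_str);

	/* iterate from head to tail */
	LL_FOR(node, urls) {
		printf("%s\n", LL_DATA_AS(node, const char *));
	}

	/* no destructor needed since nothing was copied */
	ll_free(urls, NULL);

	return 0;
}

This mirrors how the series itself uses the list: announce and web seed URLs are stored with data_size == 0 (borrowed pointers, freed by nobody but the caller), while file_list entries are appended with sizeof(struct file_data) so each node owns a copy and is cleaned up via the file_data_clear destructor.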