links

lynx-like text mode web browser
git clone anongit@rnpnr.xyz:links.git
Log | Files | Refs | Feed | README | LICENSE

cache.c (14179B)


      1 /* cache.c
      2  * (c) 2002 Mikulas Patocka
      3  * This file is a part of the Links program, released under GPL.
      4  */
      5 
      6 #include <errno.h>
      7 #include <limits.h>
      8 #include <search.h>
      9 #include <unistd.h>
     10 
     11 #include "links.h"
     12 
/* LRU list of all cache entries; find_in_cache() moves an entry to the
 * head on each hit, so eviction walks the list backwards. */
static struct list_head cache = { &cache, &cache };

/* Total bytes held in cache fragments; kept in sync by the sf() macro. */
static int cache_size;
/* Monotonic generation counter handed out to e->count / e->count2. */
static tcount cache_count = 1;
/* Root of the tsearch() binary tree indexing entries by URL. */
static void *cache_root;
     18 
     19 static int
     20 ce_compare(const void *p1, const void *p2)
     21 {
     22 	if (p1 == p2)
     23 		return 0;
     24 	return strcmp(p1, p2);
     25 }
     26 
     27 static void
     28 cache_add_to_tree(struct cache_entry *e)
     29 {
     30 	void **p;
     31 
     32 	if (!e->url[0])
     33 		return;
     34 	if (!(p = tsearch(e->url, &cache_root, ce_compare)))
     35 		die("tsearch: %s\n", strerror(errno));
     36 
     37 	if ((unsigned char *)*p != e->url)
     38 		internal("cache_add_to_tree: url '%s' is already present",
     39 		         e->url);
     40 }
     41 
     42 static void
     43 cache_delete_from_tree(struct cache_entry *e)
     44 {
     45 	if (!e->url[0])
     46 		return;
     47 
     48 	if (!tdelete(e->url, &cache_root, ce_compare))
     49 		internal("cache_delete_from_tree: url '%s' not found", e->url);
     50 }
     51 
     52 static struct cache_entry *
     53 cache_search_tree(unsigned char *url)
     54 {
     55 	void **p;
     56 
     57 	if (!(p = tfind(url, &cache_root, ce_compare)))
     58 		return NULL;
     59 
     60 	return get_struct(*p, struct cache_entry, url);
     61 }
     62 
     63 int
     64 cache_info(int type)
     65 {
     66 	int i = 0;
     67 	struct cache_entry *ce = NULL;
     68 	struct list_head *lce;
     69 	switch (type) {
     70 	case CI_BYTES:
     71 		return cache_size;
     72 	case CI_FILES:
     73 		return list_size(&cache);
     74 	case CI_LOCKED:
     75 		foreach (struct cache_entry, ce, lce, cache)
     76 			i += !!ce->refcount;
     77 		return i;
     78 	case CI_LOADING:
     79 		foreach (struct cache_entry, ce, lce, cache)
     80 			i += is_entry_used(ce);
     81 		return i;
     82 	default:
     83 		die("cache_info: bad request");
     84 	}
     85 	return 0;
     86 }
     87 
     88 int
     89 decompress_info(int type)
     90 {
     91 	int i = 0;
     92 	struct cache_entry *ce = NULL;
     93 	struct list_head *lce;
     94 	switch (type) {
     95 	case CI_BYTES:
     96 		return decompressed_cache_size;
     97 	case CI_FILES:
     98 		foreach (struct cache_entry, ce, lce, cache)
     99 			i += !!ce->decompressed;
    100 		return i;
    101 	case CI_LOCKED:
    102 		foreach (struct cache_entry, ce, lce, cache)
    103 			i += ce->decompressed && ce->refcount;
    104 		return i;
    105 	default:
    106 		internal("compress_info: bad request");
    107 	}
    108 	return 0;
    109 }
    110 
    111 int
    112 find_in_cache(unsigned char *url, struct cache_entry **f)
    113 {
    114 	struct cache_entry *e;
    115 	url = remove_proxy_prefix(url);
    116 	e = cache_search_tree(url);
    117 	if (e) {
    118 		e->refcount++;
    119 		del_from_list(e);
    120 		add_to_list(cache, e);
    121 		*f = e;
    122 		return 0;
    123 	}
    124 	return -1;
    125 }
    126 
    127 static int
    128 get_cache_entry(unsigned char *url, struct cache_entry **f)
    129 {
    130 	if (!find_in_cache(url, f))
    131 		return 0;
    132 	return new_cache_entry(url, f);
    133 }
    134 
/* Attach a cache entry for c->url to the connection (reusing an existing
 * entry or creating a new one) and record a human-readable list of the
 * resolved addresses in e->ip_address.  Returns 0 on success, -1 when no
 * cache entry could be obtained. */
int
get_connection_cache_entry(struct connection *c)
{
	struct cache_entry *e;
	if (get_cache_entry(c->url, &c->cache))
		return -1;
	e = c->cache;
	/* Drop any address string left over from a previous use of the entry. */
	free(e->ip_address);
	e->ip_address = NULL;
	/* Record addresses only for direct (non-proxy) connections that have
	 * completed a lookup. */
	if (!*c->socks_proxy && !is_proxy_url(c->url)
	    && c->last_lookup_state.addr.n) {
		unsigned char *a;
		unsigned char *s = NULL;
		size_t l = 0;
		/* The address actually used (addr_index) comes first... */
		a = print_address(&c->last_lookup_state.addr
		                       .a[c->last_lookup_state.addr_index]);
		if (a)
			l = add_to_str(&s, l, a);
		/* ...followed by the other candidates in parentheses,
		 * comma-separated. */
		if (c->last_lookup_state.addr.n > 1) {
			int i, d = 0; /* d: a previous candidate was printed */
			if (l)
				l = add_chr_to_str(&s, l, ' ');
			l = add_chr_to_str(&s, l, '(');
			for (i = 0; i < c->last_lookup_state.addr.n; i++) {
				if (i == c->last_lookup_state.addr_index)
					continue;
				a = print_address(
				    &c->last_lookup_state.addr.a[i]);
				if (a) {
					if (d)
						l = add_to_str(&s, l,
						               cast_uchar ", ");
					l = add_to_str(&s, l, a);
					d = 1;
				}
			}
			l = add_chr_to_str(&s, l, ')');
		}
		e->ip_address = s;
	}
	return 0;
}
    177 
    178 int
    179 new_cache_entry(unsigned char *url, struct cache_entry **f)
    180 {
    181 	struct cache_entry *e;
    182 	shrink_memory(SH_CHECK_QUOTA);
    183 	url = remove_proxy_prefix(url);
    184 	e = mem_calloc(sizeof(struct cache_entry) + strlen((char *)url));
    185 	strcpy((char *)e->url, (char *)url);
    186 	e->length = 0;
    187 	e->incomplete = 1;
    188 	e->data_size = 0;
    189 	e->http_code = -1;
    190 	init_list(e->frag);
    191 	e->count = cache_count++;
    192 	e->count2 = cache_count++;
    193 	e->refcount = 1;
    194 	e->decompressed = NULL;
    195 	e->decompressed_len = 0;
    196 	cache_add_to_tree(e);
    197 	add_to_list(cache, e);
    198 	*f = e;
    199 	return 0;
    200 }
    201 
/* Make entry 'e' unfindable: remove it from the URL tree and clear its
 * URL (an empty URL also keeps it out of future tree operations).  The
 * entry itself stays on the LRU list with its data intact. */
void
detach_cache_entry(struct cache_entry *e)
{
	cache_delete_from_tree(e);
	e->url[0] = 0;
}
    208 
/* Adjust both the entry's data_size and the global cache_size by x
 * (x may be negative); expects a 'struct cache_entry *e' in the caller's
 * scope. */
#define sf(x) e->data_size += (x), cache_size += (int)(x)

/* Round a fragment payload size up so that the whole allocation
 * (header + payload) ends one byte below a page_size multiple.  NOTE:
 * uses bitwise OR with (page_size - 1) — assumes page_size is a power
 * of two; TODO confirm against page_size's definition. */
#define C_ALIGN(x)                                                             \
	((((x) + sizeof(struct fragment)) | (page_size - 1))                   \
	 - sizeof(struct fragment))
    214 
/* Merge the byte range [offset, offset+length) of 'data' into entry e's
 * sorted fragment list, coalescing with overlapping neighbours.
 * Returns S_LARGE_FILE on off_t overflow, 1 when e->length grew past
 * e->max_length (max_length is updated), 0 otherwise.  When incoming
 * bytes contradict previously stored ones, everything past the new data
 * is discarded via truncate_entry(). */
int
add_fragment(struct cache_entry *e, off_t offset, const unsigned char *data,
             off_t length)
{
	struct fragment *f = NULL;
	struct list_head *lf;
	struct fragment *nf;
	int trunc = 0; /* set when new data disagrees with stored data */
	off_t ca;
	if (!length)
		return 0;
	/* Raw data is changing, so any decompressed copy is now stale. */
	free_decompressed_data(e);
	e->incomplete = 1;
	/* Reject ranges whose end (plain or page-aligned) overflows off_t. */
	if ((off_t)(0UL + offset + length) < 0
	    || (off_t)(0UL + offset + length) < offset)
		return S_LARGE_FILE;
	if ((off_t)(0UL + offset + (off_t)C_ALIGN(length)) < 0
	    || (off_t)(0UL + offset + (off_t)C_ALIGN(length)) < offset)
		return S_LARGE_FILE;
	if (e->length < offset + length)
		e->length = offset + length;
	e->count = cache_count++;
	/* Fast path: data appended exactly after the last fragment jumps
	 * straight to that fragment (goto have_f below). */
	if (list_empty(e->frag)) {
		e->count2 = cache_count++;
	} else {
		f = list_struct(e->frag.prev, struct fragment);
		if (f->offset + f->length != offset) {
			e->count2 = cache_count++;
		} else {
			lf = &f->list_entry;
			goto have_f;
		}
	}
	foreach (struct fragment, f, lf, e->frag) {
have_f:
		/* Fragments are sorted: once f starts past offset, the new
		 * data needs a fragment of its own. */
		if (f->offset > offset)
			break;
		/* New range begins inside f (or right at its end). */
		if (f->offset <= offset && f->offset + f->length >= offset) {
			if (offset + length > f->offset + f->length) {
				/* Overlapping prefix must match what is stored. */
				if (memcmp(f->data + offset - f->offset, data,
				           (size_t)(f->offset + f->length
				                    - offset)))
					trunc = 1;
				if (offset - f->offset + length
				    <= f->real_length) {
					/* Fits within f's allocation: grow f. */
					sf((offset + length)
					   - (f->offset + f->length));
					f->length = offset - f->offset + length;
				} else {
					/* Does not fit: cut f at 'offset' and
					 * store the whole range in a new
					 * fragment (allocated below). */
					sf(-(f->offset + f->length - offset));
					f->length = offset - f->offset;
					lf = f->list_entry.next;
					break;
				}
			} else if (memcmp(f->data + offset - f->offset, data,
			                  (size_t)length)) {
				trunc = 1;
			}
			memcpy(f->data + offset - f->offset, data,
			       (size_t)length);
			goto ch_o;
		}
	}
	/* Allocate a new page-aligned fragment for the data. */
	if (C_ALIGN(length) > INT_MAX - sizeof(struct fragment)
	    || C_ALIGN(length) < 0)
		overalloc();
	ca = C_ALIGN(length);
	if (ca > INT_MAX - (int)sizeof(struct fragment) || ca < 0)
		return S_LARGE_FILE;
	nf = xmalloc(sizeof(struct fragment) + (size_t)ca);
	sf(length);
	nf->offset = offset;
	nf->length = length;
	nf->real_length = C_ALIGN(length);
	memcpy(nf->data, data, (size_t)length);
	add_before_list_entry(lf, &nf->list_entry);
	f = nf;
ch_o:
	/* Coalesce f with following fragments it now overlaps. */
	while (
	    f->list_entry.next != &e->frag
	    && f->offset + f->length
		   > list_struct(f->list_entry.next, struct fragment)->offset) {
		struct fragment *next =
		    list_struct(f->list_entry.next, struct fragment);
		if (f->offset + f->length < next->offset + next->length) {
			/* Partial overlap: extend f to swallow next entirely. */
			f = xrealloc(f, sizeof(struct fragment)
			                    + (size_t)(next->offset - f->offset
			                               + next->length));
			fix_list_after_realloc(f);
			if (memcmp(
				f->data + next->offset - f->offset, next->data,
				(size_t)(f->offset + f->length - next->offset)))
				trunc = 1;
			memcpy(f->data + f->length,
			       next->data + f->offset + f->length
			           - next->offset,
			       (size_t)(next->offset + next->length - f->offset
			                - f->length));
			sf(next->offset + next->length - f->offset - f->length);
			f->length = f->real_length =
			    next->offset + next->length - f->offset;
		} else if (memcmp(f->data + next->offset - f->offset,
		                  next->data, (size_t)next->length))
			trunc = 1;
		del_from_list(next);
		sf(-next->length);
		free(next);
	}
	/* Stored bytes changed: data beyond the new range is suspect. */
	if (trunc)
		truncate_entry(e, offset + length, 0);
	if (e->length > e->max_length) {
		e->max_length = e->length;
		return 1;
	}
	return 0;
}
    331 
/* Merge the contiguous run of fragments starting at offset 0 into a
 * single fragment.  Returns 0 on success (or when there is nothing to
 * do), S_LARGE_FILE when the combined size would overflow, S_INTERNAL
 * when overlapping fragments are detected. */
int
defrag_entry(struct cache_entry *e)
{
	struct fragment *f, *n;
	struct list_head *g, *h;
	off_t l;
	if (list_empty(e->frag))
		return 0;
	f = list_struct(e->frag.next, struct fragment);
	/* Nothing to merge until we have data from the very beginning. */
	if (f->offset)
		return 0;
	/* Advance g to the first fragment NOT touching its predecessor.
	 * Fragments must abut exactly; overlap is a consistency error. */
	for (g = f->list_entry.next;
	     g != &e->frag
	     && list_struct(g, struct fragment)->offset
	            <= list_struct(g->prev, struct fragment)->offset
	                   + list_struct(g->prev, struct fragment)->length;
	     g = g->next)
		if (list_struct(g, struct fragment)->offset
		    < list_struct(g->prev, struct fragment)->offset
		          + list_struct(g->prev, struct fragment)->length) {
			internal("fragments overlay");
			return S_INTERNAL;
		}
	if (g == f->list_entry.next) {
		/* The run is a single fragment: just trim its allocation. */
		if (f->length != f->real_length) {
			f = xrealloc(f, sizeof(struct fragment)
			                    + (size_t)f->length);
			if (f) {
				f->real_length = f->length;
				fix_list_after_realloc(f);
			}
		}
		return 0;
	}
	/* Sum the run's total size, checking each addition for overflow. */
	for (l = 0, h = &f->list_entry; h != g; h = h->next) {
		if ((off_t)(0UL + l + list_struct(h, struct fragment)->length)
		        < 0
		    || (off_t)(0UL + l
		               + list_struct(h, struct fragment)->length)
		           < l)
			return S_LARGE_FILE;
		l += list_struct(h, struct fragment)->length;
	}
	if (l > INT_MAX - (int)sizeof(struct fragment))
		return S_LARGE_FILE;
	n = xmalloc(sizeof(struct fragment) + (size_t)l);
	n->offset = 0;
	n->length = l;
	n->real_length = l;
	/* Copy each fragment and free it; stepping back (h = h->prev)
	 * before deleting keeps the loop's h = h->next valid. */
	for (l = 0, h = &f->list_entry; h != g; h = h->next) {
		struct fragment *hf = list_struct(h, struct fragment);
		memcpy(n->data + l, hf->data, (size_t)hf->length);
		l += hf->length;
		h = h->prev;
		del_from_list(hf);
		free(hf);
	}
	add_to_list(e->frag, n);
	return 0;
}
    392 
/* Discard all cached data at or beyond offset 'off'.  When final != 0 a
 * fragment straddling the cut also has its allocation trimmed; final == 2
 * (used by delete_entry_content) forces the "modified" bookkeeping even
 * when no fragment was touched. */
void
truncate_entry(struct cache_entry *e, off_t off, int final)
{
	int modified = final == 2;
	struct fragment *f = NULL, *g;
	struct list_head *lf;
	if (e->length > off) {
		e->length = off;
		e->incomplete = 1;
	}
	foreach (struct fragment, f, lf, e->frag) {
		/* Fragment lies entirely past the cut: delete it. */
		if (f->offset >= off) {
			modified = 1;
			sf(-f->length);
			lf = lf->prev; /* keep the foreach iterator valid */
			del_from_list(f);
			free(f);
			continue;
		}
		/* Fragment straddles the cut: shorten it in place. */
		if (f->offset + f->length > off) {
			modified = 1;
			sf(-(f->offset + f->length - off));
			f->length = off - f->offset;
			if (final) {
				/* Realloc failure is tolerated: f keeps its
				 * larger buffer. */
				g = xrealloc(f, sizeof(struct fragment)
				                    + (size_t)f->length);
				if (g) {
					f = g;
					fix_list_after_realloc(f);
					f->real_length = f->length;
					lf = &f->list_entry;
				}
			}
		}
	}
	if (modified) {
		/* Data changed: decompressed copy is stale; bump generations. */
		free_decompressed_data(e);
		e->count = cache_count++;
		e->count2 = cache_count++;
	}
}
    434 
/* Release cached data before offset 'off' (already consumed by the
 * caller), trimming the fragment list from the front. */
void
free_entry_to(struct cache_entry *e, off_t off)
{
	struct fragment *f = NULL;
	struct list_head *lf;
	e->incomplete = 1;
	free_decompressed_data(e);
	foreach (struct fragment, f, lf, e->frag) {
		if (f->offset + f->length <= off) {
			/* Entirely before the cut: free the fragment. */
			sf(-f->length);
			lf = lf->prev; /* keep the foreach iterator valid */
			del_from_list(f);
			free(f);
		} else if (f->offset < off) {
			/* Straddles the cut: drop the leading part in place. */
			sf(f->offset - off);
			memmove(f->data, f->data + (off - f->offset),
			        (size_t)(f->length -= off - f->offset));
			f->offset = off;
		} else
			break; /* fragments are sorted; nothing more to free */
	}
}
    457 
/* Drop all of the entry's data and its Last-Modified stamp.  final == 2
 * forces truncate_entry's modified path (generation bump and release of
 * any decompressed copy) even when the fragment list was already empty. */
void
delete_entry_content(struct cache_entry *e)
{
	truncate_entry(e, 0, 2);
	free(e->last_modified);
	e->last_modified = NULL;
}
    465 
/* Shrink every fragment's allocation down to its used length.  A failed
 * realloc is tolerated: the fragment simply keeps its larger buffer. */
void
trim_cache_entry(struct cache_entry *e)
{
	struct fragment *f = NULL, *nf;
	struct list_head *lf;
	foreach (struct fragment, f, lf, e->frag) {
		if (f->length != f->real_length) {
			nf = xrealloc(f, sizeof(struct fragment)
			                     + (size_t)f->length);
			if (nf) {
				f = nf;
				fix_list_after_realloc(f);
				f->real_length = f->length;
				lf = &f->list_entry; /* re-sync iterator after move */
			}
		}
	}
}
    484 
/* Destroy a cache entry completely: unlink it from the URL tree and the
 * LRU list, free its fragments and every owned string, then the entry
 * itself.  Deleting a locked entry (refcount != 0) is an internal
 * error. */
void
delete_cache_entry(struct cache_entry *e)
{
	if (e->refcount)
		internal("deleting locked cache entry");
	cache_delete_from_tree(e);
	delete_entry_content(e);
	del_from_list(e);
	free(e->head);
	free(e->redirect);
	free(e->ip_address);
	free(e->ssl_info);
	free(e->ssl_authority);
	free(e);
}
    500 
/* Memory-manager upcall that frees cache data.  u == SH_CHECK_QUOTA
 * frees only while over memory_cache_size; u == SH_FREE_SOMETHING frees
 * at least one thing and returns.  Result is a bitmask: 1 when anything
 * was freed, ORed with ST_CACHE_EMPTY when the cache ends up empty. */
static int
shrink_file_cache(int u)
{
	int r = 0;
	struct cache_entry *e = NULL, *f = NULL;
	struct list_head *le, *lf;
	int ncs = cache_size;	/* cache size not pinned by locked/used entries */
	int ccs = 0, ccs2 = 0;	/* recomputed totals for the consistency check */

	if (u == SH_CHECK_QUOTA
	    && cache_size + decompressed_cache_size <= memory_cache_size)
		goto ret;
	/* First pass, oldest entries first: discount locked/used entries
	 * from ncs, drop decompressed copies while over quota, and for
	 * SH_FREE_SOMETHING free the first evictable thing and stop. */
	foreachback (struct cache_entry, e, le, cache) {
		if (e->refcount || is_entry_used(e)) {
			if (ncs < e->data_size)
				internal("cache_size underflow: %d, %d", ncs,
				         e->data_size);
			ncs -= e->data_size;
		} else if (u == SH_FREE_SOMETHING) {
			if (e->decompressed_len)
				free_decompressed_data(e);
			else
				delete_cache_entry(e);
			r = 1;
			goto ret;
		}
		if (!e->refcount && e->decompressed_len
		    && cache_size + decompressed_cache_size
		           > memory_cache_size) {
			free_decompressed_data(e);
			r = 1;
		}
		ccs += e->data_size;
		ccs2 += e->decompressed_len;
	}
	/* Self-check: running totals must match the recomputed sums. */
	if (ccs != cache_size) {
		internal("cache size badly computed: %d != %d", cache_size,
		         ccs);
		cache_size = ccs;
	}
	if (ccs2 != decompressed_cache_size) {
		internal("decompressed cache size badly computed: %d != %d",
		         decompressed_cache_size, ccs2);
		decompressed_cache_size = ccs2;
	}
	if (u == SH_CHECK_QUOTA && ncs <= memory_cache_size)
		goto ret;
	/* Second pass: tentatively mark evictable entries (tgc = 1),
	 * oldest first, until the projected size drops below the GC
	 * watermark. */
	foreachback (struct cache_entry, e, le, cache) {
		if (u == SH_CHECK_QUOTA
		    && ncs <= memory_cache_size * MEMORY_CACHE_GC_PERCENT)
			goto g;
		if (e->refcount || is_entry_used(e)) {
			e->tgc = 0;
			continue;
		}
		e->tgc = 1;
		if (ncs < (int)e->data_size) {
			internal("cache_size underflow: %d, %d", ncs,
			         e->data_size);
		}
		ncs -= e->data_size;
	}
	if (ncs)
		internal(
		    "cache_size(%d) is larger than size of all objects(%d)",
		    cache_size, (cache_size - ncs));
g:
	if (le->next == &cache)
		goto ret;
	le = le->next;
	/* Un-mark marked entries that still fit under the watermark... */
	if (u == SH_CHECK_QUOTA) {
		foreachfrom (struct cache_entry, f, lf, cache, le) {
			if (f->data_size
			    && ncs + f->data_size
			           <= memory_cache_size
			                  * MEMORY_CACHE_GC_PERCENT) {
				ncs += f->data_size;
				f->tgc = 0;
			}
		}
	}
	/* ...then delete everything that stayed marked. */
	foreachfrom (struct cache_entry, f, lf, cache, le) {
		if (f->tgc) {
			lf = lf->prev; /* keep the iterator valid */
			delete_cache_entry(f);
			r = 1;
		}
	}
ret:
	return r | (list_empty(cache) ? ST_CACHE_EMPTY : 0);
}
    592 
/* Register the cache shrinker with the global memory manager under the
 * name "file". */
void
init_cache(void)
{
	register_cache_upcall(shrink_file_cache, 0, cast_uchar "file");
}