Diffstat (limited to 'external/unbound/services/cache')
-rw-r--r--  external/unbound/services/cache/dns.c    | 816
-rw-r--r--  external/unbound/services/cache/dns.h    | 194
-rw-r--r--  external/unbound/services/cache/infra.c  | 569
-rw-r--r--  external/unbound/services/cache/infra.h  | 309
-rw-r--r--  external/unbound/services/cache/rrset.c  | 417
-rw-r--r--  external/unbound/services/cache/rrset.h  | 231
6 files changed, 2536 insertions, 0 deletions
diff --git a/external/unbound/services/cache/dns.c b/external/unbound/services/cache/dns.c new file mode 100644 index 000000000..c663b8e8b --- /dev/null +++ b/external/unbound/services/cache/dns.c @@ -0,0 +1,816 @@ +/* + * services/cache/dns.c - Cache services for DNS using msg and rrset caches. + * + * Copyright (c) 2007, NLnet Labs. All rights reserved. + * + * This software is open source. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of the NLNET LABS nor the names of its contributors may + * be used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * \file + * + * This file contains the DNS cache. + */ +#include "config.h" +#include "iterator/iter_delegpt.h" +#include "validator/val_nsec.h" +#include "services/cache/dns.h" +#include "services/cache/rrset.h" +#include "util/data/msgreply.h" +#include "util/data/packed_rrset.h" +#include "util/data/dname.h" +#include "util/module.h" +#include "util/net_help.h" +#include "util/regional.h" +#include "util/config_file.h" +#include "ldns/sbuffer.h" + +/** store rrsets in the rrset cache. + * @param env: module environment with caches. + * @param rep: contains list of rrsets to store. + * @param now: current time. + * @param leeway: during prefetch how much leeway to update TTLs. + * This makes rrsets (other than type NS) timeout sooner so they get + * updated with a new full TTL. + * Type NS does not get this, because it must not be refreshed from the + * child domain, but keep counting down properly. + * @param pside: if from parentside discovered NS, so that its NS is okay + * in a prefetch situation to be updated (without becoming sticky). + * @param qrep: update rrsets here if cache is better + * @param region: for qrep allocs. + */ +static void +store_rrsets(struct module_env* env, struct reply_info* rep, time_t now, + time_t leeway, int pside, struct reply_info* qrep, + struct regional* region) +{ + size_t i; + /* see if rrset already exists in cache, if not insert it. 
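+	 * rrset_cache_update() reports what happened: 0 means our item
+	 * was inserted, 1 means the cache reference was updated and our
+	 * item inserted, and 2 means the cache held a superior entry, in
+	 * which case qrep is patched to point at a copy of that entry.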
*/ + for(i=0; i<rep->rrset_count; i++) { + rep->ref[i].key = rep->rrsets[i]; + rep->ref[i].id = rep->rrsets[i]->id; + /* update ref if it was in the cache */ + switch(rrset_cache_update(env->rrset_cache, &rep->ref[i], + env->alloc, now + ((ntohs(rep->ref[i].key->rk.type)== + LDNS_RR_TYPE_NS && !pside)?0:leeway))) { + case 0: /* ref unchanged, item inserted */ + break; + case 2: /* ref updated, cache is superior */ + if(region) { + struct ub_packed_rrset_key* ck; + lock_rw_rdlock(&rep->ref[i].key->entry.lock); + /* if deleted rrset, do not copy it */ + if(rep->ref[i].key->id == 0) + ck = NULL; + else ck = packed_rrset_copy_region( + rep->ref[i].key, region, now); + lock_rw_unlock(&rep->ref[i].key->entry.lock); + if(ck) { + /* use cached copy if memory allows */ + qrep->rrsets[i] = ck; + } + } + /* no break: also copy key item */ + case 1: /* ref updated, item inserted */ + rep->rrsets[i] = rep->ref[i].key; + } + } +} + +void +dns_cache_store_msg(struct module_env* env, struct query_info* qinfo, + hashvalue_t hash, struct reply_info* rep, time_t leeway, int pside, + struct reply_info* qrep, struct regional* region) +{ + struct msgreply_entry* e; + time_t ttl = rep->ttl; + size_t i; + + /* store RRsets */ + for(i=0; i<rep->rrset_count; i++) { + rep->ref[i].key = rep->rrsets[i]; + rep->ref[i].id = rep->rrsets[i]->id; + } + + /* there was a reply_info_sortref(rep) here but it seems to be + * unnecessary, because the cache gets locked per rrset. */ + reply_info_set_ttls(rep, *env->now); + store_rrsets(env, rep, *env->now, leeway, pside, qrep, region); + if(ttl == 0) { + /* we do not store the message, but we did store the RRs, + * which could be useful for delegation information */ + verbose(VERB_ALGO, "TTL 0: dropped msg from cache"); + free(rep); + return; + } + + /* store msg in the cache */ + reply_info_sortref(rep); + if(!(e = query_info_entrysetup(qinfo, rep, hash))) { + log_err("store_msg: malloc failed"); + return; + } + slabhash_insert(env->msg_cache, hash, &e->entry, rep, env->alloc); +} + +/** find closest NS or DNAME and returns the rrset (locked) */ +static struct ub_packed_rrset_key* +find_closest_of_type(struct module_env* env, uint8_t* qname, size_t qnamelen, + uint16_t qclass, time_t now, uint16_t searchtype, int stripfront) +{ + struct ub_packed_rrset_key *rrset; + uint8_t lablen; + + if(stripfront) { + /* strip off so that DNAMEs have strict subdomain match */ + lablen = *qname; + qname += lablen + 1; + qnamelen -= lablen + 1; + } + + /* snip off front part of qname until the type is found */ + while(qnamelen > 0) { + if((rrset = rrset_cache_lookup(env->rrset_cache, qname, + qnamelen, searchtype, qclass, 0, now, 0))) + return rrset; + + /* snip off front label */ + lablen = *qname; + qname += lablen + 1; + qnamelen -= lablen + 1; + } + return NULL; +} + +/** add addr to additional section */ +static void +addr_to_additional(struct ub_packed_rrset_key* rrset, struct regional* region, + struct dns_msg* msg, time_t now) +{ + if((msg->rep->rrsets[msg->rep->rrset_count] = + packed_rrset_copy_region(rrset, region, now))) { + msg->rep->ar_numrrsets++; + msg->rep->rrset_count++; + } +} + +/** lookup message in message cache */ +static struct msgreply_entry* +msg_cache_lookup(struct module_env* env, uint8_t* qname, size_t qnamelen, + uint16_t qtype, uint16_t qclass, time_t now, int wr) +{ + struct lruhash_entry* e; + struct query_info k; + hashvalue_t h; + + k.qname = qname; + k.qname_len = qnamelen; + k.qtype = qtype; + k.qclass = qclass; + h = query_info_hash(&k); + e = 
slabhash_lookup(env->msg_cache, h, &k, wr); + + if(!e) return NULL; + if( now > ((struct reply_info*)e->data)->ttl ) { + lock_rw_unlock(&e->lock); + return NULL; + } + return (struct msgreply_entry*)e->key; +} + +/** find and add A and AAAA records for nameservers in delegpt */ +static int +find_add_addrs(struct module_env* env, uint16_t qclass, + struct regional* region, struct delegpt* dp, time_t now, + struct dns_msg** msg) +{ + struct delegpt_ns* ns; + struct msgreply_entry* neg; + struct ub_packed_rrset_key* akey; + for(ns = dp->nslist; ns; ns = ns->next) { + akey = rrset_cache_lookup(env->rrset_cache, ns->name, + ns->namelen, LDNS_RR_TYPE_A, qclass, 0, now, 0); + if(akey) { + if(!delegpt_add_rrset_A(dp, region, akey, 0)) { + lock_rw_unlock(&akey->entry.lock); + return 0; + } + if(msg) + addr_to_additional(akey, region, *msg, now); + lock_rw_unlock(&akey->entry.lock); + } else { + neg = msg_cache_lookup(env, ns->name, ns->namelen, + LDNS_RR_TYPE_A, qclass, now, 0); + if(neg) { + delegpt_add_neg_msg(dp, neg); + lock_rw_unlock(&neg->entry.lock); + } + } + akey = rrset_cache_lookup(env->rrset_cache, ns->name, + ns->namelen, LDNS_RR_TYPE_AAAA, qclass, 0, now, 0); + if(akey) { + if(!delegpt_add_rrset_AAAA(dp, region, akey, 0)) { + lock_rw_unlock(&akey->entry.lock); + return 0; + } + if(msg) + addr_to_additional(akey, region, *msg, now); + lock_rw_unlock(&akey->entry.lock); + } else { + neg = msg_cache_lookup(env, ns->name, ns->namelen, + LDNS_RR_TYPE_AAAA, qclass, now, 0); + if(neg) { + delegpt_add_neg_msg(dp, neg); + lock_rw_unlock(&neg->entry.lock); + } + } + } + return 1; +} + +/** find and add A and AAAA records for missing nameservers in delegpt */ +int +cache_fill_missing(struct module_env* env, uint16_t qclass, + struct regional* region, struct delegpt* dp) +{ + struct delegpt_ns* ns; + struct msgreply_entry* neg; + struct ub_packed_rrset_key* akey; + time_t now = *env->now; + for(ns = dp->nslist; ns; ns = ns->next) { + akey = rrset_cache_lookup(env->rrset_cache, ns->name, + ns->namelen, LDNS_RR_TYPE_A, qclass, 0, now, 0); + if(akey) { + if(!delegpt_add_rrset_A(dp, region, akey, ns->lame)) { + lock_rw_unlock(&akey->entry.lock); + return 0; + } + log_nametypeclass(VERB_ALGO, "found in cache", + ns->name, LDNS_RR_TYPE_A, qclass); + lock_rw_unlock(&akey->entry.lock); + } else { + neg = msg_cache_lookup(env, ns->name, ns->namelen, + LDNS_RR_TYPE_A, qclass, now, 0); + if(neg) { + delegpt_add_neg_msg(dp, neg); + lock_rw_unlock(&neg->entry.lock); + } + } + akey = rrset_cache_lookup(env->rrset_cache, ns->name, + ns->namelen, LDNS_RR_TYPE_AAAA, qclass, 0, now, 0); + if(akey) { + if(!delegpt_add_rrset_AAAA(dp, region, akey, ns->lame)) { + lock_rw_unlock(&akey->entry.lock); + return 0; + } + log_nametypeclass(VERB_ALGO, "found in cache", + ns->name, LDNS_RR_TYPE_AAAA, qclass); + lock_rw_unlock(&akey->entry.lock); + } else { + neg = msg_cache_lookup(env, ns->name, ns->namelen, + LDNS_RR_TYPE_AAAA, qclass, now, 0); + if(neg) { + delegpt_add_neg_msg(dp, neg); + lock_rw_unlock(&neg->entry.lock); + } + } + } + return 1; +} + +/** find and add DS or NSEC to delegation msg */ +static void +find_add_ds(struct module_env* env, struct regional* region, + struct dns_msg* msg, struct delegpt* dp, time_t now) +{ + /* Lookup the DS or NSEC at the delegation point. 
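+	 * A cached DS marks a signed delegation; otherwise an NSEC whose
+	 * type bitmap lacks DS (checked with nsec_has_type below) proves
+	 * the delegation is unsigned.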
*/ + struct ub_packed_rrset_key* rrset = rrset_cache_lookup( + env->rrset_cache, dp->name, dp->namelen, LDNS_RR_TYPE_DS, + msg->qinfo.qclass, 0, now, 0); + if(!rrset) { + /* NOTE: this won't work for alternate NSEC schemes + * (opt-in, NSEC3) */ + rrset = rrset_cache_lookup(env->rrset_cache, dp->name, + dp->namelen, LDNS_RR_TYPE_NSEC, msg->qinfo.qclass, + 0, now, 0); + /* Note: the PACKED_RRSET_NSEC_AT_APEX flag is not used. + * since this is a referral, we need the NSEC at the parent + * side of the zone cut, not the NSEC at apex side. */ + if(rrset && nsec_has_type(rrset, LDNS_RR_TYPE_DS)) { + lock_rw_unlock(&rrset->entry.lock); + rrset = NULL; /* discard wrong NSEC */ + } + } + if(rrset) { + /* add it to auth section. This is the second rrset. */ + if((msg->rep->rrsets[msg->rep->rrset_count] = + packed_rrset_copy_region(rrset, region, now))) { + msg->rep->ns_numrrsets++; + msg->rep->rrset_count++; + } + lock_rw_unlock(&rrset->entry.lock); + } +} + +struct dns_msg* +dns_msg_create(uint8_t* qname, size_t qnamelen, uint16_t qtype, + uint16_t qclass, struct regional* region, size_t capacity) +{ + struct dns_msg* msg = (struct dns_msg*)regional_alloc(region, + sizeof(struct dns_msg)); + if(!msg) + return NULL; + msg->qinfo.qname = regional_alloc_init(region, qname, qnamelen); + if(!msg->qinfo.qname) + return NULL; + msg->qinfo.qname_len = qnamelen; + msg->qinfo.qtype = qtype; + msg->qinfo.qclass = qclass; + /* non-packed reply_info, because it needs to grow the array */ + msg->rep = (struct reply_info*)regional_alloc_zero(region, + sizeof(struct reply_info)-sizeof(struct rrset_ref)); + if(!msg->rep) + return NULL; + msg->rep->flags = BIT_QR; /* with QR, no AA */ + msg->rep->qdcount = 1; + msg->rep->rrsets = (struct ub_packed_rrset_key**) + regional_alloc(region, + capacity*sizeof(struct ub_packed_rrset_key*)); + if(!msg->rep->rrsets) + return NULL; + return msg; +} + +int +dns_msg_authadd(struct dns_msg* msg, struct regional* region, + struct ub_packed_rrset_key* rrset, time_t now) +{ + if(!(msg->rep->rrsets[msg->rep->rrset_count++] = + packed_rrset_copy_region(rrset, region, now))) + return 0; + msg->rep->ns_numrrsets++; + return 1; +} + +struct delegpt* +dns_cache_find_delegation(struct module_env* env, uint8_t* qname, + size_t qnamelen, uint16_t qtype, uint16_t qclass, + struct regional* region, struct dns_msg** msg, time_t now) +{ + /* try to find closest NS rrset */ + struct ub_packed_rrset_key* nskey; + struct packed_rrset_data* nsdata; + struct delegpt* dp; + + nskey = find_closest_of_type(env, qname, qnamelen, qclass, now, + LDNS_RR_TYPE_NS, 0); + if(!nskey) /* hope the caller has hints to prime or something */ + return NULL; + nsdata = (struct packed_rrset_data*)nskey->entry.data; + /* got the NS key, create delegation point */ + dp = delegpt_create(region); + if(!dp || !delegpt_set_name(dp, region, nskey->rk.dname)) { + lock_rw_unlock(&nskey->entry.lock); + log_err("find_delegation: out of memory"); + return NULL; + } + /* create referral message */ + if(msg) { + /* allocate the array to as much as we could need: + * NS rrset + DS/NSEC rrset + + * A rrset for every NS RR + * AAAA rrset for every NS RR + */ + *msg = dns_msg_create(qname, qnamelen, qtype, qclass, region, + 2 + nsdata->count*2); + if(!*msg || !dns_msg_authadd(*msg, region, nskey, now)) { + lock_rw_unlock(&nskey->entry.lock); + log_err("find_delegation: out of memory"); + return NULL; + } + } + if(!delegpt_rrset_add_ns(dp, region, nskey, 0)) + log_err("find_delegation: addns out of memory"); + 
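+	/* only one rrset lock is held at a time, so release the NS
+	 * rrset before the DS/NSEC and address lookups that follow */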
lock_rw_unlock(&nskey->entry.lock); /* first unlock before next lookup*/ + /* find and add DS/NSEC (if any) */ + if(msg) + find_add_ds(env, region, *msg, dp, now); + /* find and add A entries */ + if(!find_add_addrs(env, qclass, region, dp, now, msg)) + log_err("find_delegation: addrs out of memory"); + return dp; +} + +/** allocate dns_msg from query_info and reply_info */ +static struct dns_msg* +gen_dns_msg(struct regional* region, struct query_info* q, size_t num) +{ + struct dns_msg* msg = (struct dns_msg*)regional_alloc(region, + sizeof(struct dns_msg)); + if(!msg) + return NULL; + memcpy(&msg->qinfo, q, sizeof(struct query_info)); + msg->qinfo.qname = regional_alloc_init(region, q->qname, q->qname_len); + if(!msg->qinfo.qname) + return NULL; + /* allocate replyinfo struct and rrset key array separately */ + msg->rep = (struct reply_info*)regional_alloc(region, + sizeof(struct reply_info) - sizeof(struct rrset_ref)); + if(!msg->rep) + return NULL; + msg->rep->rrsets = (struct ub_packed_rrset_key**) + regional_alloc(region, + num * sizeof(struct ub_packed_rrset_key*)); + if(!msg->rep->rrsets) + return NULL; + return msg; +} + +/** generate dns_msg from cached message */ +static struct dns_msg* +tomsg(struct module_env* env, struct query_info* q, struct reply_info* r, + struct regional* region, time_t now, struct regional* scratch) +{ + struct dns_msg* msg; + size_t i; + if(now > r->ttl) + return NULL; + msg = gen_dns_msg(region, q, r->rrset_count); + if(!msg) + return NULL; + msg->rep->flags = r->flags; + msg->rep->qdcount = r->qdcount; + msg->rep->ttl = r->ttl - now; + if(r->prefetch_ttl > now) + msg->rep->prefetch_ttl = r->prefetch_ttl - now; + else msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(msg->rep->ttl); + msg->rep->security = r->security; + msg->rep->an_numrrsets = r->an_numrrsets; + msg->rep->ns_numrrsets = r->ns_numrrsets; + msg->rep->ar_numrrsets = r->ar_numrrsets; + msg->rep->rrset_count = r->rrset_count; + msg->rep->authoritative = r->authoritative; + if(!rrset_array_lock(r->ref, r->rrset_count, now)) + return NULL; + if(r->an_numrrsets > 0 && (r->rrsets[0]->rk.type == htons( + LDNS_RR_TYPE_CNAME) || r->rrsets[0]->rk.type == htons( + LDNS_RR_TYPE_DNAME)) && !reply_check_cname_chain(r)) { + /* cname chain is now invalid, reconstruct msg */ + rrset_array_unlock(r->ref, r->rrset_count); + return NULL; + } + if(r->security == sec_status_secure && !reply_all_rrsets_secure(r)) { + /* message rrsets have changed status, revalidate */ + rrset_array_unlock(r->ref, r->rrset_count); + return NULL; + } + for(i=0; i<msg->rep->rrset_count; i++) { + msg->rep->rrsets[i] = packed_rrset_copy_region(r->rrsets[i], + region, now); + if(!msg->rep->rrsets[i]) { + rrset_array_unlock(r->ref, r->rrset_count); + return NULL; + } + } + rrset_array_unlock_touch(env->rrset_cache, scratch, r->ref, + r->rrset_count); + return msg; +} + +/** synthesize RRset-only response from cached RRset item */ +static struct dns_msg* +rrset_msg(struct ub_packed_rrset_key* rrset, struct regional* region, + time_t now, struct query_info* q) +{ + struct dns_msg* msg; + struct packed_rrset_data* d = (struct packed_rrset_data*) + rrset->entry.data; + if(now > d->ttl) + return NULL; + msg = gen_dns_msg(region, q, 1); /* only the CNAME (or other) RRset */ + if(!msg) + return NULL; + msg->rep->flags = BIT_QR; /* reply, no AA, no error */ + msg->rep->authoritative = 0; /* reply stored in cache can't be authoritative */ + msg->rep->qdcount = 1; + msg->rep->ttl = d->ttl - now; + msg->rep->prefetch_ttl = 
PREFETCH_TTL_CALC(msg->rep->ttl); + msg->rep->security = sec_status_unchecked; + msg->rep->an_numrrsets = 1; + msg->rep->ns_numrrsets = 0; + msg->rep->ar_numrrsets = 0; + msg->rep->rrset_count = 1; + msg->rep->rrsets[0] = packed_rrset_copy_region(rrset, region, now); + if(!msg->rep->rrsets[0]) /* copy CNAME */ + return NULL; + return msg; +} + +/** synthesize DNAME+CNAME response from cached DNAME item */ +static struct dns_msg* +synth_dname_msg(struct ub_packed_rrset_key* rrset, struct regional* region, + time_t now, struct query_info* q) +{ + struct dns_msg* msg; + struct ub_packed_rrset_key* ck; + struct packed_rrset_data* newd, *d = (struct packed_rrset_data*) + rrset->entry.data; + uint8_t* newname, *dtarg = NULL; + size_t newlen, dtarglen; + if(now > d->ttl) + return NULL; + /* only allow validated (with DNSSEC) DNAMEs used from cache + * for insecure DNAMEs, query again. */ + if(d->security != sec_status_secure) + return NULL; + msg = gen_dns_msg(region, q, 2); /* DNAME + CNAME RRset */ + if(!msg) + return NULL; + msg->rep->flags = BIT_QR; /* reply, no AA, no error */ + msg->rep->authoritative = 0; /* reply stored in cache can't be authoritative */ + msg->rep->qdcount = 1; + msg->rep->ttl = d->ttl - now; + msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(msg->rep->ttl); + msg->rep->security = sec_status_unchecked; + msg->rep->an_numrrsets = 1; + msg->rep->ns_numrrsets = 0; + msg->rep->ar_numrrsets = 0; + msg->rep->rrset_count = 1; + msg->rep->rrsets[0] = packed_rrset_copy_region(rrset, region, now); + if(!msg->rep->rrsets[0]) /* copy DNAME */ + return NULL; + /* synth CNAME rrset */ + get_cname_target(rrset, &dtarg, &dtarglen); + if(!dtarg) + return NULL; + newlen = q->qname_len + dtarglen - rrset->rk.dname_len; + if(newlen > LDNS_MAX_DOMAINLEN) { + msg->rep->flags |= LDNS_RCODE_YXDOMAIN; + return msg; + } + newname = (uint8_t*)regional_alloc(region, newlen); + if(!newname) + return NULL; + /* new name is concatenation of qname front (without DNAME owner) + * and DNAME target name */ + memcpy(newname, q->qname, q->qname_len-rrset->rk.dname_len); + memmove(newname+(q->qname_len-rrset->rk.dname_len), dtarg, dtarglen); + /* create rest of CNAME rrset */ + ck = (struct ub_packed_rrset_key*)regional_alloc(region, + sizeof(struct ub_packed_rrset_key)); + if(!ck) + return NULL; + memset(&ck->entry, 0, sizeof(ck->entry)); + msg->rep->rrsets[1] = ck; + ck->entry.key = ck; + ck->rk.type = htons(LDNS_RR_TYPE_CNAME); + ck->rk.rrset_class = rrset->rk.rrset_class; + ck->rk.flags = 0; + ck->rk.dname = regional_alloc_init(region, q->qname, q->qname_len); + if(!ck->rk.dname) + return NULL; + ck->rk.dname_len = q->qname_len; + ck->entry.hash = rrset_key_hash(&ck->rk); + newd = (struct packed_rrset_data*)regional_alloc_zero(region, + sizeof(struct packed_rrset_data) + sizeof(size_t) + + sizeof(uint8_t*) + sizeof(time_t) + sizeof(uint16_t) + + newlen); + if(!newd) + return NULL; + ck->entry.data = newd; + newd->ttl = 0; /* 0 for synthesized CNAME TTL */ + newd->count = 1; + newd->rrsig_count = 0; + newd->trust = rrset_trust_ans_noAA; + newd->rr_len = (size_t*)((uint8_t*)newd + + sizeof(struct packed_rrset_data)); + newd->rr_len[0] = newlen + sizeof(uint16_t); + packed_rrset_ptr_fixup(newd); + newd->rr_ttl[0] = newd->ttl; + msg->rep->ttl = newd->ttl; + msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(newd->ttl); + sldns_write_uint16(newd->rr_data[0], newlen); + memmove(newd->rr_data[0] + sizeof(uint16_t), newname, newlen); + msg->rep->an_numrrsets ++; + msg->rep->rrset_count ++; + return msg; +} + +struct dns_msg* 
+dns_cache_lookup(struct module_env* env, + uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass, + struct regional* region, struct regional* scratch) +{ + struct lruhash_entry* e; + struct query_info k; + hashvalue_t h; + time_t now = *env->now; + struct ub_packed_rrset_key* rrset; + + /* lookup first, this has both NXdomains and ANSWER responses */ + k.qname = qname; + k.qname_len = qnamelen; + k.qtype = qtype; + k.qclass = qclass; + h = query_info_hash(&k); + e = slabhash_lookup(env->msg_cache, h, &k, 0); + if(e) { + struct msgreply_entry* key = (struct msgreply_entry*)e->key; + struct reply_info* data = (struct reply_info*)e->data; + struct dns_msg* msg = tomsg(env, &key->key, data, region, now, + scratch); + if(msg) { + lock_rw_unlock(&e->lock); + return msg; + } + /* could be msg==NULL; due to TTL or not all rrsets available */ + lock_rw_unlock(&e->lock); + } + + /* see if a DNAME exists. Checked for first, to enforce that DNAMEs + * are more important, the CNAME is resynthesized and thus + * consistent with the DNAME */ + if( (rrset=find_closest_of_type(env, qname, qnamelen, qclass, now, + LDNS_RR_TYPE_DNAME, 1))) { + /* synthesize a DNAME+CNAME message based on this */ + struct dns_msg* msg = synth_dname_msg(rrset, region, now, &k); + if(msg) { + lock_rw_unlock(&rrset->entry.lock); + return msg; + } + lock_rw_unlock(&rrset->entry.lock); + } + + /* see if we have CNAME for this domain, + * but not for DS records (which are part of the parent) */ + if( qtype != LDNS_RR_TYPE_DS && + (rrset=rrset_cache_lookup(env->rrset_cache, qname, qnamelen, + LDNS_RR_TYPE_CNAME, qclass, 0, now, 0))) { + struct dns_msg* msg = rrset_msg(rrset, region, now, &k); + if(msg) { + lock_rw_unlock(&rrset->entry.lock); + return msg; + } + lock_rw_unlock(&rrset->entry.lock); + } + + /* construct DS, DNSKEY, DLV messages from rrset cache. */ + if((qtype == LDNS_RR_TYPE_DS || qtype == LDNS_RR_TYPE_DNSKEY || + qtype == LDNS_RR_TYPE_DLV) && + (rrset=rrset_cache_lookup(env->rrset_cache, qname, qnamelen, + qtype, qclass, 0, now, 0))) { + /* if the rrset is from the additional section, and the + * signatures have fallen off, then do not synthesize a msg + * instead, allow a full query for signed results to happen. + * Forego all rrset data from additional section, because + * some signatures may not be present and cause validation + * failure. + */ + struct packed_rrset_data *d = (struct packed_rrset_data*) + rrset->entry.data; + if(d->trust != rrset_trust_add_noAA && + d->trust != rrset_trust_add_AA && + (qtype == LDNS_RR_TYPE_DS || + (d->trust != rrset_trust_auth_noAA + && d->trust != rrset_trust_auth_AA) )) { + struct dns_msg* msg = rrset_msg(rrset, region, now, &k); + if(msg) { + lock_rw_unlock(&rrset->entry.lock); + return msg; + } + } + lock_rw_unlock(&rrset->entry.lock); + } + + /* stop downwards cache search on NXDOMAIN. + * Empty nonterminals are NOERROR, so an NXDOMAIN for foo + * means bla.foo also does not exist. The DNSSEC proofs are + * the same. We search upwards for NXDOMAINs. 
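+	 * Only cached NXDOMAINs that validated as secure are reused here,
+	 * and the returned message is marked sec_status_unchecked so the
+	 * validator re-checks the proof for the actual qname.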
*/ + if(env->cfg->harden_below_nxdomain) + while(!dname_is_root(k.qname)) { + dname_remove_label(&k.qname, &k.qname_len); + h = query_info_hash(&k); + e = slabhash_lookup(env->msg_cache, h, &k, 0); + if(e) { + struct reply_info* data = (struct reply_info*)e->data; + struct dns_msg* msg; + if(FLAGS_GET_RCODE(data->flags) == LDNS_RCODE_NXDOMAIN + && data->security == sec_status_secure + && (msg=tomsg(env, &k, data, region, now, scratch))){ + lock_rw_unlock(&e->lock); + msg->qinfo.qname=qname; + msg->qinfo.qname_len=qnamelen; + /* check that DNSSEC really works out */ + msg->rep->security = sec_status_unchecked; + return msg; + } + lock_rw_unlock(&e->lock); + } + } + + return NULL; +} + +int +dns_cache_store(struct module_env* env, struct query_info* msgqinf, + struct reply_info* msgrep, int is_referral, time_t leeway, int pside, + struct regional* region) +{ + struct reply_info* rep = NULL; + /* alloc, malloc properly (not in region, like msg is) */ + rep = reply_info_copy(msgrep, env->alloc, NULL); + if(!rep) + return 0; + /* ttl must be relative ;i.e. 0..86400 not time(0)+86400. + * the env->now is added to message and RRsets in this routine. */ + /* the leeway is used to invalidate other rrsets earlier */ + + if(is_referral) { + /* store rrsets */ + struct rrset_ref ref; + size_t i; + for(i=0; i<rep->rrset_count; i++) { + packed_rrset_ttl_add((struct packed_rrset_data*) + rep->rrsets[i]->entry.data, *env->now); + ref.key = rep->rrsets[i]; + ref.id = rep->rrsets[i]->id; + /*ignore ret: it was in the cache, ref updated */ + /* no leeway for typeNS */ + (void)rrset_cache_update(env->rrset_cache, &ref, + env->alloc, *env->now + + ((ntohs(ref.key->rk.type)==LDNS_RR_TYPE_NS + && !pside) ? 0:leeway)); + } + free(rep); + return 1; + } else { + /* store msg, and rrsets */ + struct query_info qinf; + hashvalue_t h; + + qinf = *msgqinf; + qinf.qname = memdup(msgqinf->qname, msgqinf->qname_len); + if(!qinf.qname) { + reply_info_parsedelete(rep, env->alloc); + return 0; + } + /* fixup flags to be sensible for a reply based on the cache */ + /* this module means that RA is available. It is an answer QR. + * Not AA from cache. Not CD in cache (depends on client bit). */ + rep->flags |= (BIT_RA | BIT_QR); + rep->flags &= ~(BIT_AA | BIT_CD); + h = query_info_hash(&qinf); + dns_cache_store_msg(env, &qinf, h, rep, leeway, pside, msgrep, + region); + /* qname is used inside query_info_entrysetup, and set to + * NULL. If it has not been used, free it. free(0) is safe. */ + free(qinf.qname); + } + return 1; +} + +int +dns_cache_prefetch_adjust(struct module_env* env, struct query_info* qinfo, + time_t adjust) +{ + struct msgreply_entry* msg; + msg = msg_cache_lookup(env, qinfo->qname, qinfo->qname_len, + qinfo->qtype, qinfo->qclass, *env->now, 1); + if(msg) { + struct reply_info* rep = (struct reply_info*)msg->entry.data; + if(rep) { + rep->prefetch_ttl += adjust; + lock_rw_unlock(&msg->entry.lock); + return 1; + } + lock_rw_unlock(&msg->entry.lock); + } + return 0; +} diff --git a/external/unbound/services/cache/dns.h b/external/unbound/services/cache/dns.h new file mode 100644 index 000000000..05a3e6296 --- /dev/null +++ b/external/unbound/services/cache/dns.h @@ -0,0 +1,194 @@ +/* + * services/cache/dns.h - Cache services for DNS using msg and rrset caches. + * + * Copyright (c) 2007, NLnet Labs. All rights reserved. + * + * This software is open source. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of the NLNET LABS nor the names of its contributors may + * be used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * \file + * + * This file contains the DNS cache. + */ + +#ifndef SERVICES_CACHE_DNS_H +#define SERVICES_CACHE_DNS_H +#include "util/storage/lruhash.h" +#include "util/data/msgreply.h" +struct module_env; +struct query_info; +struct reply_info; +struct regional; +struct delegpt; + +/** + * Region allocated message reply + */ +struct dns_msg { + /** query info */ + struct query_info qinfo; + /** reply info - ptr to packed repinfo structure */ + struct reply_info *rep; +}; + +/** + * Allocate a dns_msg with malloc/alloc structure and store in dns cache. + * + * @param env: environment, with alloc structure and dns cache. + * @param qinf: query info, the query for which answer is stored. + * this is allocated in a region, and will be copied to malloc area + * before insertion. + * @param rep: reply in dns_msg from dns_alloc_msg for example. + * this is allocated in a region, and will be copied to malloc area + * before insertion. + * @param is_referral: If true, then the given message to be stored is a + * referral. The cache implementation may use this as a hint. + * It will store only the RRsets, not the message. + * @param leeway: TTL value, if not 0, other rrsets are considered expired + * that many seconds before actual TTL expiry. + * @param pside: if true, information came from a server which was fetched + * from the parentside of the zonecut. This means that the type NS + * can be updated to full TTL even in prefetch situations. + * @param region: region to allocate better entries from cache into. + * (used when is_referral is false). + * @return 0 on alloc error (out of memory). + */ +int dns_cache_store(struct module_env* env, struct query_info* qinf, + struct reply_info* rep, int is_referral, time_t leeway, int pside, + struct regional* region); + +/** + * Store message in the cache. Stores in message cache and rrset cache. + * Both qinfo and rep should be malloced and are put in the cache. + * They should not be used after this call, as they are then in shared cache. 
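+ * A reply with TTL 0 is not stored as a message, though its rrsets
+ * still enter the rrset cache.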
+ * Does not return errors, they are logged and only lead to less cache. + * + * @param env: module environment with the DNS cache. + * @param qinfo: query info + * @param hash: hash over qinfo. + * @param rep: reply info, together with qinfo makes up the message. + * Adjusts the reply info TTLs to absolute time. + * @param leeway: TTL value, if not 0, other rrsets are considered expired + * that many seconds before actual TTL expiry. + * @param pside: if true, information came from a server which was fetched + * from the parentside of the zonecut. This means that the type NS + * can be updated to full TTL even in prefetch situations. + * @param qrep: message that can be altered with better rrs from cache. + * @param region: to allocate into for qmsg. + */ +void dns_cache_store_msg(struct module_env* env, struct query_info* qinfo, + hashvalue_t hash, struct reply_info* rep, time_t leeway, int pside, + struct reply_info* qrep, struct regional* region); + +/** + * Find a delegation from the cache. + * @param env: module environment with the DNS cache. + * @param qname: query name. + * @param qnamelen: length of qname. + * @param qtype: query type. + * @param qclass: query class. + * @param region: where to allocate result delegation. + * @param msg: if not NULL, delegation message is returned here, synthesized + * from the cache. + * @param timenow: the time now, for checking if TTL on cache entries is OK. + * @return new delegation or NULL on error or if not found in cache. + */ +struct delegpt* dns_cache_find_delegation(struct module_env* env, + uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass, + struct regional* region, struct dns_msg** msg, time_t timenow); + +/** + * Find cached message + * @param env: module environment with the DNS cache. + * @param qname: query name. + * @param qnamelen: length of qname. + * @param qtype: query type. + * @param qclass: query class. + * @param region: where to allocate result. + * @param scratch: where to allocate temporary data. + * @return new response message (alloced in region, rrsets do not have IDs). + * or NULL on error or if not found in cache. + * TTLs are made relative to the current time. + */ +struct dns_msg* dns_cache_lookup(struct module_env* env, + uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass, + struct regional* region, struct regional* scratch); + +/** + * find and add A and AAAA records for missing nameservers in delegpt + * @param env: module environment with rrset cache + * @param qclass: which class to look in. + * @param region: where to store new dp info. + * @param dp: delegation point to fill missing entries. + * @return false on alloc failure. + */ +int cache_fill_missing(struct module_env* env, uint16_t qclass, + struct regional* region, struct delegpt* dp); + +/** + * Utility, create new, unpacked data structure for cache response. + * QR bit set, no AA. Query set as indicated. Space for number of rrsets. + * @param qname: query section name + * @param qnamelen: len of qname + * @param qtype: query section type + * @param qclass: query section class + * @param region: where to alloc. + * @param capacity: number of rrsets space to create in the array. + * @return new dns_msg struct or NULL on mem fail. + */ +struct dns_msg* dns_msg_create(uint8_t* qname, size_t qnamelen, uint16_t qtype, + uint16_t qclass, struct regional* region, size_t capacity); + +/** + * Add rrset to authority section in unpacked dns_msg message. Must have enough + * space left, does not grow the array. 
+ * @param msg: msg to put it in. + * @param region: region to alloc in + * @param rrset: to add in authority section + * @param now: now. + * @return true if worked, false on fail + */ +int dns_msg_authadd(struct dns_msg* msg, struct regional* region, + struct ub_packed_rrset_key* rrset, time_t now); + +/** + * Adjust the prefetch_ttl for a cached message. This adds a value to the + * prefetch ttl - postponing the time when it will be prefetched for future + * incoming queries. + * @param env: module environment with caches and time. + * @param qinfo: query info for the query that needs adjustment. + * @param adjust: time in seconds to add to the prefetch_leeway. + * @return false if not in cache. true if added. + */ +int dns_cache_prefetch_adjust(struct module_env* env, struct query_info* qinfo, + time_t adjust); + +#endif /* SERVICES_CACHE_DNS_H */ diff --git a/external/unbound/services/cache/infra.c b/external/unbound/services/cache/infra.c new file mode 100644 index 000000000..07f2103d7 --- /dev/null +++ b/external/unbound/services/cache/infra.c @@ -0,0 +1,569 @@ +/* + * services/cache/infra.c - infrastructure cache, server rtt and capabilities + * + * Copyright (c) 2007, NLnet Labs. All rights reserved. + * + * This software is open source. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of the NLNET LABS nor the names of its contributors may + * be used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * \file + * + * This file contains the infrastructure cache. + */ +#include "config.h" +#include "ldns/rrdef.h" +#include "services/cache/infra.h" +#include "util/storage/slabhash.h" +#include "util/storage/lookup3.h" +#include "util/data/dname.h" +#include "util/log.h" +#include "util/net_help.h" +#include "util/config_file.h" +#include "iterator/iterator.h" + +/** Timeout when only a single probe query per IP is allowed. 
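+ * When the retransmission timeout grows past this value (in msec), the
+ * host is sent one probe query at a time while other queries to it are
+ * delayed.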
*/ +#define PROBE_MAXRTO 12000 /* in msec */ + +/** number of timeouts for a type when the domain can be blocked ; + * even if another type has completely rtt maxed it, the different type + * can do this number of packets (until those all timeout too) */ +#define TIMEOUT_COUNT_MAX 3 + +size_t +infra_sizefunc(void* k, void* ATTR_UNUSED(d)) +{ + struct infra_key* key = (struct infra_key*)k; + return sizeof(*key) + sizeof(struct infra_data) + key->namelen + + lock_get_mem(&key->entry.lock); +} + +int +infra_compfunc(void* key1, void* key2) +{ + struct infra_key* k1 = (struct infra_key*)key1; + struct infra_key* k2 = (struct infra_key*)key2; + int r = sockaddr_cmp(&k1->addr, k1->addrlen, &k2->addr, k2->addrlen); + if(r != 0) + return r; + if(k1->namelen != k2->namelen) { + if(k1->namelen < k2->namelen) + return -1; + return 1; + } + return query_dname_compare(k1->zonename, k2->zonename); +} + +void +infra_delkeyfunc(void* k, void* ATTR_UNUSED(arg)) +{ + struct infra_key* key = (struct infra_key*)k; + if(!key) + return; + lock_rw_destroy(&key->entry.lock); + free(key->zonename); + free(key); +} + +void +infra_deldatafunc(void* d, void* ATTR_UNUSED(arg)) +{ + struct infra_data* data = (struct infra_data*)d; + free(data); +} + +struct infra_cache* +infra_create(struct config_file* cfg) +{ + struct infra_cache* infra = (struct infra_cache*)calloc(1, + sizeof(struct infra_cache)); + size_t maxmem = cfg->infra_cache_numhosts * (sizeof(struct infra_key)+ + sizeof(struct infra_data)+INFRA_BYTES_NAME); + infra->hosts = slabhash_create(cfg->infra_cache_slabs, + INFRA_HOST_STARTSIZE, maxmem, &infra_sizefunc, &infra_compfunc, + &infra_delkeyfunc, &infra_deldatafunc, NULL); + if(!infra->hosts) { + free(infra); + return NULL; + } + infra->host_ttl = cfg->host_ttl; + return infra; +} + +void +infra_delete(struct infra_cache* infra) +{ + if(!infra) + return; + slabhash_delete(infra->hosts); + free(infra); +} + +struct infra_cache* +infra_adjust(struct infra_cache* infra, struct config_file* cfg) +{ + size_t maxmem; + if(!infra) + return infra_create(cfg); + infra->host_ttl = cfg->host_ttl; + maxmem = cfg->infra_cache_numhosts * (sizeof(struct infra_key)+ + sizeof(struct infra_data)+INFRA_BYTES_NAME); + if(maxmem != slabhash_get_size(infra->hosts) || + cfg->infra_cache_slabs != infra->hosts->size) { + infra_delete(infra); + infra = infra_create(cfg); + } + return infra; +} + +/** calculate the hash value for a host key */ +static hashvalue_t +hash_addr(struct sockaddr_storage* addr, socklen_t addrlen) +{ + hashvalue_t h = 0xab; + /* select the pieces to hash, some OS have changing data inside */ + if(addr_is_ip6(addr, addrlen)) { + struct sockaddr_in6* in6 = (struct sockaddr_in6*)addr; + h = hashlittle(&in6->sin6_family, sizeof(in6->sin6_family), h); + h = hashlittle(&in6->sin6_port, sizeof(in6->sin6_port), h); + h = hashlittle(&in6->sin6_addr, INET6_SIZE, h); + } else { + struct sockaddr_in* in = (struct sockaddr_in*)addr; + h = hashlittle(&in->sin_family, sizeof(in->sin_family), h); + h = hashlittle(&in->sin_port, sizeof(in->sin_port), h); + h = hashlittle(&in->sin_addr, INET_SIZE, h); + } + return h; +} + +/** calculate infra hash for a key */ +static hashvalue_t +hash_infra(struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* name) +{ + return dname_query_hash(name, hash_addr(addr, addrlen)); +} + +/** lookup version that does not check host ttl (you check it) */ +struct lruhash_entry* +infra_lookup_nottl(struct infra_cache* infra, struct sockaddr_storage* addr, + socklen_t addrlen, uint8_t* name, 
size_t namelen, int wr) +{ + struct infra_key k; + k.addrlen = addrlen; + memcpy(&k.addr, addr, addrlen); + k.namelen = namelen; + k.zonename = name; + k.entry.hash = hash_infra(addr, addrlen, name); + k.entry.key = (void*)&k; + k.entry.data = NULL; + return slabhash_lookup(infra->hosts, k.entry.hash, &k, wr); +} + +/** init the data elements */ +static void +data_entry_init(struct infra_cache* infra, struct lruhash_entry* e, + time_t timenow) +{ + struct infra_data* data = (struct infra_data*)e->data; + data->ttl = timenow + infra->host_ttl; + rtt_init(&data->rtt); + data->edns_version = 0; + data->edns_lame_known = 0; + data->probedelay = 0; + data->isdnsseclame = 0; + data->rec_lame = 0; + data->lame_type_A = 0; + data->lame_other = 0; + data->timeout_A = 0; + data->timeout_AAAA = 0; + data->timeout_other = 0; +} + +/** + * Create and init a new entry for a host + * @param infra: infra structure with config parameters. + * @param addr: host address. + * @param addrlen: length of addr. + * @param name: name of zone + * @param namelen: length of name. + * @param tm: time now. + * @return: the new entry or NULL on malloc failure. + */ +static struct lruhash_entry* +new_entry(struct infra_cache* infra, struct sockaddr_storage* addr, + socklen_t addrlen, uint8_t* name, size_t namelen, time_t tm) +{ + struct infra_data* data; + struct infra_key* key = (struct infra_key*)malloc(sizeof(*key)); + if(!key) + return NULL; + data = (struct infra_data*)malloc(sizeof(struct infra_data)); + if(!data) { + free(key); + return NULL; + } + key->zonename = memdup(name, namelen); + if(!key->zonename) { + free(key); + free(data); + return NULL; + } + key->namelen = namelen; + lock_rw_init(&key->entry.lock); + key->entry.hash = hash_infra(addr, addrlen, name); + key->entry.key = (void*)key; + key->entry.data = (void*)data; + key->addrlen = addrlen; + memcpy(&key->addr, addr, addrlen); + data_entry_init(infra, &key->entry, tm); + return &key->entry; +} + +int +infra_host(struct infra_cache* infra, struct sockaddr_storage* addr, + socklen_t addrlen, uint8_t* nm, size_t nmlen, time_t timenow, + int* edns_vs, uint8_t* edns_lame_known, int* to) +{ + struct lruhash_entry* e = infra_lookup_nottl(infra, addr, addrlen, + nm, nmlen, 0); + struct infra_data* data; + int wr = 0; + if(e && ((struct infra_data*)e->data)->ttl < timenow) { + /* it expired, try to reuse existing entry */ + int old = ((struct infra_data*)e->data)->rtt.rto; + uint8_t tA = ((struct infra_data*)e->data)->timeout_A; + uint8_t tAAAA = ((struct infra_data*)e->data)->timeout_AAAA; + uint8_t tother = ((struct infra_data*)e->data)->timeout_other; + lock_rw_unlock(&e->lock); + e = infra_lookup_nottl(infra, addr, addrlen, nm, nmlen, 1); + if(e) { + /* if its still there we have a writelock, init */ + /* re-initialise */ + /* do not touch lameness, it may be valid still */ + data_entry_init(infra, e, timenow); + wr = 1; + /* TOP_TIMEOUT remains on reuse */ + if(old >= USEFUL_SERVER_TOP_TIMEOUT) { + ((struct infra_data*)e->data)->rtt.rto + = USEFUL_SERVER_TOP_TIMEOUT; + ((struct infra_data*)e->data)->timeout_A = tA; + ((struct infra_data*)e->data)->timeout_AAAA = tAAAA; + ((struct infra_data*)e->data)->timeout_other = tother; + } + } + } + if(!e) { + /* insert new entry */ + if(!(e = new_entry(infra, addr, addrlen, nm, nmlen, timenow))) + return 0; + data = (struct infra_data*)e->data; + *edns_vs = data->edns_version; + *edns_lame_known = data->edns_lame_known; + *to = rtt_timeout(&data->rtt); + slabhash_insert(infra->hosts, e->hash, e, data, NULL); + 
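+		/* the new entry now belongs to the hashtable; the edns
+		 * and timeout values were copied out above, before insert */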
return 1; + } + /* use existing entry */ + data = (struct infra_data*)e->data; + *edns_vs = data->edns_version; + *edns_lame_known = data->edns_lame_known; + *to = rtt_timeout(&data->rtt); + if(*to >= PROBE_MAXRTO && rtt_notimeout(&data->rtt)*4 <= *to) { + /* delay other queries, this is the probe query */ + if(!wr) { + lock_rw_unlock(&e->lock); + e = infra_lookup_nottl(infra, addr,addrlen,nm,nmlen, 1); + if(!e) { /* flushed from cache real fast, no use to + allocate just for the probedelay */ + return 1; + } + data = (struct infra_data*)e->data; + } + /* add 999 to round up the timeout value from msec to sec, + * then add a whole second so it is certain that this probe + * has timed out before the next is allowed */ + data->probedelay = timenow + ((*to)+1999)/1000; + } + lock_rw_unlock(&e->lock); + return 1; +} + +int +infra_set_lame(struct infra_cache* infra, struct sockaddr_storage* addr, + socklen_t addrlen, uint8_t* nm, size_t nmlen, time_t timenow, + int dnsseclame, int reclame, uint16_t qtype) +{ + struct infra_data* data; + struct lruhash_entry* e; + int needtoinsert = 0; + e = infra_lookup_nottl(infra, addr, addrlen, nm, nmlen, 1); + if(!e) { + /* insert it */ + if(!(e = new_entry(infra, addr, addrlen, nm, nmlen, timenow))) { + log_err("set_lame: malloc failure"); + return 0; + } + needtoinsert = 1; + } else if( ((struct infra_data*)e->data)->ttl < timenow) { + /* expired, reuse existing entry */ + data_entry_init(infra, e, timenow); + } + /* got an entry, now set the zone lame */ + data = (struct infra_data*)e->data; + /* merge data (if any) */ + if(dnsseclame) + data->isdnsseclame = 1; + if(reclame) + data->rec_lame = 1; + if(!dnsseclame && !reclame && qtype == LDNS_RR_TYPE_A) + data->lame_type_A = 1; + if(!dnsseclame && !reclame && qtype != LDNS_RR_TYPE_A) + data->lame_other = 1; + /* done */ + if(needtoinsert) + slabhash_insert(infra->hosts, e->hash, e, e->data, NULL); + else { lock_rw_unlock(&e->lock); } + return 1; +} + +void +infra_update_tcp_works(struct infra_cache* infra, + struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* nm, + size_t nmlen) +{ + struct lruhash_entry* e = infra_lookup_nottl(infra, addr, addrlen, + nm, nmlen, 1); + struct infra_data* data; + if(!e) + return; /* doesn't exist */ + data = (struct infra_data*)e->data; + if(data->rtt.rto >= RTT_MAX_TIMEOUT) + /* do not disqualify this server altogether, it is better + * than nothing */ + data->rtt.rto = RTT_MAX_TIMEOUT-1000; + lock_rw_unlock(&e->lock); +} + +int +infra_rtt_update(struct infra_cache* infra, struct sockaddr_storage* addr, + socklen_t addrlen, uint8_t* nm, size_t nmlen, int qtype, + int roundtrip, int orig_rtt, time_t timenow) +{ + struct lruhash_entry* e = infra_lookup_nottl(infra, addr, addrlen, + nm, nmlen, 1); + struct infra_data* data; + int needtoinsert = 0; + int rto = 1; + if(!e) { + if(!(e = new_entry(infra, addr, addrlen, nm, nmlen, timenow))) + return 0; + needtoinsert = 1; + } else if(((struct infra_data*)e->data)->ttl < timenow) { + data_entry_init(infra, e, timenow); + } + /* have an entry, update the rtt */ + data = (struct infra_data*)e->data; + if(roundtrip == -1) { + rtt_lost(&data->rtt, orig_rtt); + if(qtype == LDNS_RR_TYPE_A) { + if(data->timeout_A < TIMEOUT_COUNT_MAX) + data->timeout_A++; + } else if(qtype == LDNS_RR_TYPE_AAAA) { + if(data->timeout_AAAA < TIMEOUT_COUNT_MAX) + data->timeout_AAAA++; + } else { + if(data->timeout_other < TIMEOUT_COUNT_MAX) + data->timeout_other++; + } + } else { + /* if we got a reply, but the old timeout was above server + * 
selection height, delete the timeout so the server is + * fully available again */ + if(rtt_unclamped(&data->rtt) >= USEFUL_SERVER_TOP_TIMEOUT) + rtt_init(&data->rtt); + rtt_update(&data->rtt, roundtrip); + data->probedelay = 0; + if(qtype == LDNS_RR_TYPE_A) + data->timeout_A = 0; + else if(qtype == LDNS_RR_TYPE_AAAA) + data->timeout_AAAA = 0; + else data->timeout_other = 0; + } + if(data->rtt.rto > 0) + rto = data->rtt.rto; + + if(needtoinsert) + slabhash_insert(infra->hosts, e->hash, e, e->data, NULL); + else { lock_rw_unlock(&e->lock); } + return rto; +} + +long long infra_get_host_rto(struct infra_cache* infra, + struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* nm, + size_t nmlen, struct rtt_info* rtt, int* delay, time_t timenow, + int* tA, int* tAAAA, int* tother) +{ + struct lruhash_entry* e = infra_lookup_nottl(infra, addr, addrlen, + nm, nmlen, 0); + struct infra_data* data; + long long ttl = -2; + if(!e) return -1; + data = (struct infra_data*)e->data; + if(data->ttl >= timenow) { + ttl = (long long)(data->ttl - timenow); + memmove(rtt, &data->rtt, sizeof(*rtt)); + if(timenow < data->probedelay) + *delay = (int)(data->probedelay - timenow); + else *delay = 0; + } + *tA = (int)data->timeout_A; + *tAAAA = (int)data->timeout_AAAA; + *tother = (int)data->timeout_other; + lock_rw_unlock(&e->lock); + return ttl; +} + +int +infra_edns_update(struct infra_cache* infra, struct sockaddr_storage* addr, + socklen_t addrlen, uint8_t* nm, size_t nmlen, int edns_version, + time_t timenow) +{ + struct lruhash_entry* e = infra_lookup_nottl(infra, addr, addrlen, + nm, nmlen, 1); + struct infra_data* data; + int needtoinsert = 0; + if(!e) { + if(!(e = new_entry(infra, addr, addrlen, nm, nmlen, timenow))) + return 0; + needtoinsert = 1; + } else if(((struct infra_data*)e->data)->ttl < timenow) { + data_entry_init(infra, e, timenow); + } + /* have an entry, update the rtt, and the ttl */ + data = (struct infra_data*)e->data; + /* do not update if noEDNS and stored is yesEDNS */ + if(!(edns_version == -1 && (data->edns_version != -1 && + data->edns_lame_known))) { + data->edns_version = edns_version; + data->edns_lame_known = 1; + } + + if(needtoinsert) + slabhash_insert(infra->hosts, e->hash, e, e->data, NULL); + else { lock_rw_unlock(&e->lock); } + return 1; +} + +int +infra_get_lame_rtt(struct infra_cache* infra, + struct sockaddr_storage* addr, socklen_t addrlen, + uint8_t* name, size_t namelen, uint16_t qtype, + int* lame, int* dnsseclame, int* reclame, int* rtt, time_t timenow) +{ + struct infra_data* host; + struct lruhash_entry* e = infra_lookup_nottl(infra, addr, addrlen, + name, namelen, 0); + if(!e) + return 0; + host = (struct infra_data*)e->data; + *rtt = rtt_unclamped(&host->rtt); + if(host->rtt.rto >= PROBE_MAXRTO && timenow < host->probedelay + && rtt_notimeout(&host->rtt)*4 <= host->rtt.rto) { + /* single probe for this domain, and we are not probing */ + /* unless the query type allows a probe to happen */ + if(qtype == LDNS_RR_TYPE_A) { + if(host->timeout_A >= TIMEOUT_COUNT_MAX) + *rtt = USEFUL_SERVER_TOP_TIMEOUT; + else *rtt = USEFUL_SERVER_TOP_TIMEOUT-1000; + } else if(qtype == LDNS_RR_TYPE_AAAA) { + if(host->timeout_AAAA >= TIMEOUT_COUNT_MAX) + *rtt = USEFUL_SERVER_TOP_TIMEOUT; + else *rtt = USEFUL_SERVER_TOP_TIMEOUT-1000; + } else { + if(host->timeout_other >= TIMEOUT_COUNT_MAX) + *rtt = USEFUL_SERVER_TOP_TIMEOUT; + else *rtt = USEFUL_SERVER_TOP_TIMEOUT-1000; + } + } + if(timenow > host->ttl) { + /* expired entry */ + /* see if this can be a re-probe of an unresponsive 
server */ + /* minus 1000 because that is outside of the RTTBAND, so + * blacklisted servers stay blacklisted if this is chosen */ + if(host->rtt.rto >= USEFUL_SERVER_TOP_TIMEOUT) { + lock_rw_unlock(&e->lock); + *rtt = USEFUL_SERVER_TOP_TIMEOUT-1000; + *lame = 0; + *dnsseclame = 0; + *reclame = 0; + return 1; + } + lock_rw_unlock(&e->lock); + return 0; + } + /* check lameness first */ + if(host->lame_type_A && qtype == LDNS_RR_TYPE_A) { + lock_rw_unlock(&e->lock); + *lame = 1; + *dnsseclame = 0; + *reclame = 0; + return 1; + } else if(host->lame_other && qtype != LDNS_RR_TYPE_A) { + lock_rw_unlock(&e->lock); + *lame = 1; + *dnsseclame = 0; + *reclame = 0; + return 1; + } else if(host->isdnsseclame) { + lock_rw_unlock(&e->lock); + *lame = 0; + *dnsseclame = 1; + *reclame = 0; + return 1; + } else if(host->rec_lame) { + lock_rw_unlock(&e->lock); + *lame = 0; + *dnsseclame = 0; + *reclame = 1; + return 1; + } + /* no lameness for this type of query */ + lock_rw_unlock(&e->lock); + *lame = 0; + *dnsseclame = 0; + *reclame = 0; + return 1; +} + +size_t +infra_get_mem(struct infra_cache* infra) +{ + return sizeof(*infra) + slabhash_get_mem(infra->hosts); +} diff --git a/external/unbound/services/cache/infra.h b/external/unbound/services/cache/infra.h new file mode 100644 index 000000000..fc54f7f0d --- /dev/null +++ b/external/unbound/services/cache/infra.h @@ -0,0 +1,309 @@ +/* + * services/cache/infra.h - infrastructure cache, server rtt and capabilities + * + * Copyright (c) 2007, NLnet Labs. All rights reserved. + * + * This software is open source. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of the NLNET LABS nor the names of its contributors may + * be used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * \file + * + * This file contains the infrastructure cache. + */ + +#ifndef SERVICES_CACHE_INFRA_H +#define SERVICES_CACHE_INFRA_H +#include "util/storage/lruhash.h" +#include "util/rtt.h" +struct slabhash; +struct config_file; + +/** + * Host information kept for every server, per zone. + */ +struct infra_key { + /** the host address. */ + struct sockaddr_storage addr; + /** length of addr. 
*/ + socklen_t addrlen; + /** zone name in wireformat */ + uint8_t* zonename; + /** length of zonename */ + size_t namelen; + /** hash table entry, data of type infra_data. */ + struct lruhash_entry entry; +}; + +/** + * Host information encompasses host capabilities and retransmission timeouts. + * And lameness information (notAuthoritative, noEDNS, Recursive) + */ +struct infra_data { + /** TTL value for this entry. absolute time. */ + time_t ttl; + + /** time in seconds (absolute) when probing re-commences, 0 disabled */ + time_t probedelay; + /** round trip times for timeout calculation */ + struct rtt_info rtt; + + /** edns version that the host supports, -1 means no EDNS */ + int edns_version; + /** if the EDNS lameness is already known or not. + * EDNS lame is when EDNS queries or replies are dropped, + * and cause a timeout */ + uint8_t edns_lame_known; + + /** is the host lame (does not serve the zone authoritatively), + * or is the host dnssec lame (does not serve DNSSEC data) */ + uint8_t isdnsseclame; + /** is the host recursion lame (not AA, but RA) */ + uint8_t rec_lame; + /** the host is lame (not authoritative) for A records */ + uint8_t lame_type_A; + /** the host is lame (not authoritative) for other query types */ + uint8_t lame_other; + + /** timeouts counter for type A */ + uint8_t timeout_A; + /** timeouts counter for type AAAA */ + uint8_t timeout_AAAA; + /** timeouts counter for others */ + uint8_t timeout_other; +}; + +/** + * Infra cache + */ +struct infra_cache { + /** The hash table with hosts */ + struct slabhash* hosts; + /** TTL value for host information, in seconds */ + int host_ttl; +}; + +/** infra host cache default hash lookup size */ +#define INFRA_HOST_STARTSIZE 32 +/** bytes per zonename reserved in the hostcache, dnamelen(zonename.com.) */ +#define INFRA_BYTES_NAME 14 + +/** + * Create infra cache. + * @param cfg: config parameters or NULL for defaults. + * @return: new infra cache, or NULL. + */ +struct infra_cache* infra_create(struct config_file* cfg); + +/** + * Delete infra cache. + * @param infra: infrastructure cache to delete. + */ +void infra_delete(struct infra_cache* infra); + +/** + * Adjust infra cache to use updated configuration settings. + * This may clean the cache. Operates a bit like realloc. + * There may be no threading or use by other threads. + * @param infra: existing cache. If NULL a new infra cache is returned. + * @param cfg: config options. + * @return the new infra cache pointer or NULL on error. + */ +struct infra_cache* infra_adjust(struct infra_cache* infra, + struct config_file* cfg); + +/** + * Plain find infra data function (used by the the other functions) + * @param infra: infrastructure cache. + * @param addr: host address. + * @param addrlen: length of addr. + * @param name: domain name of zone. + * @param namelen: length of domain name. + * @param wr: if true, writelock, else readlock. + * @return the entry, could be expired (this is not checked) or NULL. + */ +struct lruhash_entry* infra_lookup_nottl(struct infra_cache* infra, + struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* name, + size_t namelen, int wr); + +/** + * Find host information to send a packet. Creates new entry if not found. + * Lameness is empty. EDNS is 0 (try with first), and rtt is returned for + * the first message to it. + * Use this to send a packet only, because it also locks out others when + * probing is restricted. + * @param infra: infrastructure cache. + * @param addr: host address. + * @param addrlen: length of addr. 
+ +/** + * Find host information to send a packet. Creates new entry if not found. + * Lameness is empty. EDNS is 0 (try with first), and rtt is returned for + * the first message to it. + * Use this to send a packet only, because it also locks out others when + * probing is restricted. + * @param infra: infrastructure cache. + * @param addr: host address. + * @param addrlen: length of addr. + * @param name: domain name of zone. + * @param namelen: length of domain name. + * @param timenow: what time it is now. + * @param edns_vs: edns version it supports, is returned. + * @param edns_lame_known: if EDNS lame (EDNS is dropped in transit) has + * already been probed, is returned. + * @param to: timeout to use, is returned. + * @return: 0 on error. + */ +int infra_host(struct infra_cache* infra, struct sockaddr_storage* addr, + socklen_t addrlen, uint8_t* name, size_t namelen, + time_t timenow, int* edns_vs, uint8_t* edns_lame_known, int* to); + +/** + * Set a host to be lame for the given zone. + * @param infra: infrastructure cache. + * @param addr: host address. + * @param addrlen: length of addr. + * @param name: domain name of zone apex. + * @param namelen: length of domain name. + * @param timenow: what time it is now. + * @param dnsseclame: if true the host is set dnssec lame. + * if false, the host is marked lame (not serving the zone). + * @param reclame: if true the host is marked a recursor, not an AA server. + * if false, the dnsseclame or plain-lame marking applies. + * @param qtype: the query type for which it is lame. + * @return: 0 on error. + */ +int infra_set_lame(struct infra_cache* infra, + struct sockaddr_storage* addr, socklen_t addrlen, + uint8_t* name, size_t namelen, time_t timenow, int dnsseclame, + int reclame, uint16_t qtype); + +/** + * Update rtt information for the host. + * @param infra: infrastructure cache. + * @param addr: host address. + * @param addrlen: length of addr. + * @param name: zone name + * @param namelen: zone name length + * @param qtype: query type. + * @param roundtrip: estimate of roundtrip time in milliseconds or -1 for + * timeout. + * @param orig_rtt: original rtt for the query that timed out (roundtrip==-1). + * ignored if roundtrip != -1. + * @param timenow: what time it is now. + * @return: 0 on error. new rto otherwise. + */ +int infra_rtt_update(struct infra_cache* infra, struct sockaddr_storage* addr, + socklen_t addrlen, uint8_t* name, size_t namelen, int qtype, + int roundtrip, int orig_rtt, time_t timenow); + +/** + * Update information for the host, store that a TCP transaction works. + * @param infra: infrastructure cache. + * @param addr: host address. + * @param addrlen: length of addr. + * @param name: name of zone + * @param namelen: length of name + */ +void infra_update_tcp_works(struct infra_cache* infra, + struct sockaddr_storage* addr, socklen_t addrlen, + uint8_t* name, size_t namelen); + +/** + * Update edns information for the host. + * @param infra: infrastructure cache. + * @param addr: host address. + * @param addrlen: length of addr. + * @param name: name of zone + * @param namelen: length of name + * @param edns_version: the version that it publishes. + * If it is known to support EDNS then no-EDNS is not stored over it. + * @param timenow: what time it is now. + * @return: 0 on error. + */ +int infra_edns_update(struct infra_cache* infra, + struct sockaddr_storage* addr, socklen_t addrlen, + uint8_t* name, size_t namelen, int edns_version, time_t timenow);
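A hedged sketch of the bookkeeping these declarations suggest once a query to a server completes; the names example_after_query, got_reply and used_rto_ms are hypothetical, and LDNS_RR_TYPE_A comes from ldns/rrdef.h:

static void example_after_query(struct infra_cache* infra,
	struct sockaddr_storage* addr, socklen_t addrlen,
	uint8_t* zone, size_t zonelen, time_t now,
	int got_reply, int roundtrip_ms, int used_rto_ms)
{
	if(got_reply) {
		/* record the measured roundtrip for this server and zone */
		(void)infra_rtt_update(infra, addr, addrlen, zone, zonelen,
			LDNS_RR_TYPE_A, roundtrip_ms, used_rto_ms, now);
		/* the reply carried EDNS, say version 0 */
		(void)infra_edns_update(infra, addr, addrlen, zone, zonelen,
			0, now);
	} else {
		/* roundtrip -1 signals a timeout; pass the rto that was used */
		(void)infra_rtt_update(infra, addr, addrlen, zone, zonelen,
			LDNS_RR_TYPE_A, -1, used_rto_ms, now);
	}
}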
+ +/** + * Get lameness information and average RTT if host is in the cache. + * This information is to be used for server selection. + * @param infra: infrastructure cache. + * @param addr: host address. + * @param addrlen: length of addr. + * @param name: zone name. + * @param namelen: zone name length. + * @param qtype: the query to be made. + * @param lame: if function returns true, this returns lameness of the zone. + * @param dnsseclame: if function returns true, this returns if the zone + * is dnssec-lame. + * @param reclame: if function returns true, this is if it is recursion lame. + * @param rtt: if function returns true, this returns avg rtt of the server. + * The rtt value is unclamped and reflects recent timeouts. + * @param timenow: what time it is now. + * @return true if found in cache, false if not (or the TTL was bad). + */ +int infra_get_lame_rtt(struct infra_cache* infra, + struct sockaddr_storage* addr, socklen_t addrlen, + uint8_t* name, size_t namelen, uint16_t qtype, + int* lame, int* dnsseclame, int* reclame, int* rtt, time_t timenow); + +/** + * Get additional (debug) info on timing. + * @param infra: infra cache. + * @param addr: host address. + * @param addrlen: length of addr. + * @param name: zone name + * @param namelen: zone name length + * @param rtt: the rtt_info is copied into here (caller alloced return struct). + * @param delay: probe delay (if any). + * @param timenow: what time it is now. + * @param tA: timeout counter on type A. + * @param tAAAA: timeout counter on type AAAA. + * @param tother: timeout counter on type other. + * @return TTL the infra host element is valid for. If -1: not found in cache. + * TTL -2: found but expired. + */ +long long infra_get_host_rto(struct infra_cache* infra, + struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* name, + size_t namelen, struct rtt_info* rtt, int* delay, time_t timenow, + int* tA, int* tAAAA, int* tother); + +/** + * Get memory used by the infra cache. + * @param infra: infrastructure cache. + * @return memory in use in bytes. + */ +size_t infra_get_mem(struct infra_cache* infra); + +/** calculate size for the hashtable, does not count size of lameness, + * so the hashtable is a fixed number of items */ +size_t infra_sizefunc(void* k, void* d); + +/** compare two infra cache keys (address and zone name); + * returns -1, 0, or +1 */ +int infra_compfunc(void* key1, void* key2); + +/** delete key, and destroy the lock */ +void infra_delkeyfunc(void* k, void* arg); + +/** delete the infra data */ +void infra_deldatafunc(void* d, void* arg); + +#endif /* SERVICES_CACHE_INFRA_H */ diff --git a/external/unbound/services/cache/rrset.c b/external/unbound/services/cache/rrset.c new file mode 100644 index 000000000..5f52dbce1 --- /dev/null +++ b/external/unbound/services/cache/rrset.c @@ -0,0 +1,417 @@ +/* + * services/cache/rrset.c - Resource record set cache. + * + * Copyright (c) 2007, NLnet Labs. All rights reserved. + * + * This software is open source. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of the NLNET LABS nor the names of its contributors may + * be used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * \file + * + * This file contains the rrset cache. + */ +#include "config.h" +#include "services/cache/rrset.h" +#include "ldns/rrdef.h" +#include "util/storage/slabhash.h" +#include "util/config_file.h" +#include "util/data/packed_rrset.h" +#include "util/data/msgreply.h" +#include "util/regional.h" +#include "util/alloc.h" + +void +rrset_markdel(void* key) +{ + struct ub_packed_rrset_key* r = (struct ub_packed_rrset_key*)key; + r->id = 0; +} + +struct rrset_cache* rrset_cache_create(struct config_file* cfg, + struct alloc_cache* alloc) +{ + size_t slabs = (cfg?cfg->rrset_cache_slabs:HASH_DEFAULT_SLABS); + size_t startarray = HASH_DEFAULT_STARTARRAY; + size_t maxmem = (cfg?cfg->rrset_cache_size:HASH_DEFAULT_MAXMEM); + + struct rrset_cache *r = (struct rrset_cache*)slabhash_create(slabs, + startarray, maxmem, ub_rrset_sizefunc, ub_rrset_compare, + ub_rrset_key_delete, rrset_data_delete, alloc); + slabhash_setmarkdel(&r->table, &rrset_markdel); + return r; +} + +void rrset_cache_delete(struct rrset_cache* r) +{ + if(!r) + return; + slabhash_delete(&r->table); + /* slabhash delete also does free(r), since table is first in struct*/ +} + +struct rrset_cache* rrset_cache_adjust(struct rrset_cache *r, + struct config_file* cfg, struct alloc_cache* alloc) +{ + if(!r || !cfg || cfg->rrset_cache_slabs != r->table.size || + cfg->rrset_cache_size != slabhash_get_size(&r->table)) + { + rrset_cache_delete(r); + r = rrset_cache_create(cfg, alloc); + } + return r; +} + +void +rrset_cache_touch(struct rrset_cache* r, struct ub_packed_rrset_key* key, + hashvalue_t hash, rrset_id_t id) +{ + struct lruhash* table = slabhash_gettable(&r->table, hash); + /* + * This leads to locking problems, deadlocks, if the caller is + * holding any other rrset lock. + * Because a lookup through the hashtable does: + * tablelock -> entrylock (for that entry caller holds) + * And this would do + * entrylock(already held) -> tablelock + * And if two threads do this, it results in deadlock. + * So, the caller must not hold entrylock. + */ + lock_quick_lock(&table->lock); + /* we have locked the hash table, the item can still be deleted. + * because it could already have been reclaimed, but not yet set id=0. + * This is because some lruhash routines have lazy deletion. + * so, we must acquire a lock on the item to verify the id != 0. + * also, with hash not changed, we are using the right slab. 
+ */ + lock_rw_rdlock(&key->entry.lock); + if(key->id == id && key->entry.hash == hash) { + lru_touch(table, &key->entry); + } + lock_rw_unlock(&key->entry.lock); + lock_quick_unlock(&table->lock); +} + +/** see if rrset needs to be updated in the cache */ +static int +need_to_update_rrset(void* nd, void* cd, time_t timenow, int equal, int ns) +{ + struct packed_rrset_data* newd = (struct packed_rrset_data*)nd; + struct packed_rrset_data* cached = (struct packed_rrset_data*)cd; + /* o store if rrset has been validated + * everything better than bogus data + * secure is preferred */ + if( newd->security == sec_status_secure && + cached->security != sec_status_secure) + return 1; + if( cached->security == sec_status_bogus && + newd->security != sec_status_bogus && !equal) + return 1; + /* o if current RRset is more trustworthy - insert it */ + if( newd->trust > cached->trust ) { + /* if the cached rrset is bogus, and this one equal, + * do not update the TTL - let it expire. */ + if(equal && cached->ttl >= timenow && + cached->security == sec_status_bogus) + return 0; + return 1; + } + /* o item in cache has expired */ + if( cached->ttl < timenow ) + return 1; + /* o same trust, but different in data - insert it */ + if( newd->trust == cached->trust && !equal ) { + /* if this is type NS, do not 'stick' to owner that changes + * the NS RRset, but use the old TTL for the new data, and + * update to fetch the latest data. ttl is not expired, because + * that check was before this one. */ + if(ns) { + size_t i; + newd->ttl = cached->ttl; + for(i=0; i<(newd->count+newd->rrsig_count); i++) + if(newd->rr_ttl[i] > newd->ttl) + newd->rr_ttl[i] = newd->ttl; + } + return 1; + } + return 0; +} + +/** Update RRSet special key ID */ +static void +rrset_update_id(struct rrset_ref* ref, struct alloc_cache* alloc) +{ + /* this may clear the cache and invalidate lock below */ + uint64_t newid = alloc_get_id(alloc); + /* obtain writelock */ + lock_rw_wrlock(&ref->key->entry.lock); + /* check if it was deleted in the meantime, if so, skip update */ + if(ref->key->id == ref->id) { + ref->key->id = newid; + ref->id = newid; + } + lock_rw_unlock(&ref->key->entry.lock); +} + +int +rrset_cache_update(struct rrset_cache* r, struct rrset_ref* ref, + struct alloc_cache* alloc, time_t timenow) +{ + struct lruhash_entry* e; + struct ub_packed_rrset_key* k = ref->key; + hashvalue_t h = k->entry.hash; + uint16_t rrset_type = ntohs(k->rk.type); + int equal = 0; + log_assert(ref->id != 0 && k->id != 0); + /* looks up item with a readlock - no editing! */ + if((e=slabhash_lookup(&r->table, h, k, 0)) != 0) { + /* return id and key as they will be used in the cache + * since the lruhash_insert, if item already exists, deallocs + * the passed key in favor of the already stored key. + * because of the small gap (see below) this key ptr and id + * may prove later to be already deleted, which is no problem + * as it only makes a cache miss. + */ + ref->key = (struct ub_packed_rrset_key*)e->key; + ref->id = ref->key->id; + equal = rrsetdata_equal((struct packed_rrset_data*)k->entry. + data, (struct packed_rrset_data*)e->data); + if(!need_to_update_rrset(k->entry.data, e->data, timenow, + equal, (rrset_type==LDNS_RR_TYPE_NS))) { + /* cache is superior, return that value */ + lock_rw_unlock(&e->lock); + ub_packed_rrset_parsedelete(k, alloc); + if(equal) return 2; + return 1; + } + lock_rw_unlock(&e->lock); + /* Go on and insert the passed item. + * small gap here, where entry is not locked. 
+ * possibly entry is updated with something else. + * we then overwrite that with our data. + * this is just too bad, its cache anyway. */ + /* use insert to update entry to manage lruhash + * cache size values nicely. */ + } + log_assert(ref->key->id != 0); + slabhash_insert(&r->table, h, &k->entry, k->entry.data, alloc); + if(e) { + /* For NSEC, NSEC3, DNAME, when rdata is updated, update + * the ID number so that proofs in message cache are + * invalidated */ + if((rrset_type == LDNS_RR_TYPE_NSEC + || rrset_type == LDNS_RR_TYPE_NSEC3 + || rrset_type == LDNS_RR_TYPE_DNAME) && !equal) { + rrset_update_id(ref, alloc); + } + return 1; + } + return 0; +} + +struct ub_packed_rrset_key* +rrset_cache_lookup(struct rrset_cache* r, uint8_t* qname, size_t qnamelen, + uint16_t qtype, uint16_t qclass, uint32_t flags, time_t timenow, + int wr) +{ + struct lruhash_entry* e; + struct ub_packed_rrset_key key; + + key.entry.key = &key; + key.entry.data = NULL; + key.rk.dname = qname; + key.rk.dname_len = qnamelen; + key.rk.type = htons(qtype); + key.rk.rrset_class = htons(qclass); + key.rk.flags = flags; + + key.entry.hash = rrset_key_hash(&key.rk); + + if((e = slabhash_lookup(&r->table, key.entry.hash, &key, wr))) { + /* check TTL */ + struct packed_rrset_data* data = + (struct packed_rrset_data*)e->data; + if(timenow > data->ttl) { + lock_rw_unlock(&e->lock); + return NULL; + } + /* we're done */ + return (struct ub_packed_rrset_key*)e->key; + } + return NULL; +} + +int +rrset_array_lock(struct rrset_ref* ref, size_t count, time_t timenow) +{ + size_t i; + for(i=0; i<count; i++) { + if(i>0 && ref[i].key == ref[i-1].key) + continue; /* only lock items once */ + lock_rw_rdlock(&ref[i].key->entry.lock); + if(ref[i].id != ref[i].key->id || timenow > + ((struct packed_rrset_data*)(ref[i].key->entry.data)) + ->ttl) { + /* failure! 
rollback our readlocks */ + rrset_array_unlock(ref, i+1); + return 0; + } + } + return 1; +} + +void +rrset_array_unlock(struct rrset_ref* ref, size_t count) +{ + size_t i; + for(i=0; i<count; i++) { + if(i>0 && ref[i].key == ref[i-1].key) + continue; /* only unlock items once */ + lock_rw_unlock(&ref[i].key->entry.lock); + } +} + +void +rrset_array_unlock_touch(struct rrset_cache* r, struct regional* scratch, + struct rrset_ref* ref, size_t count) +{ + hashvalue_t* h; + size_t i; + if(!(h = (hashvalue_t*)regional_alloc(scratch, + sizeof(hashvalue_t)*count))) + log_warn("rrset LRU: memory allocation failed"); + else /* store hash values */ + for(i=0; i<count; i++) + h[i] = ref[i].key->entry.hash; + /* unlock */ + for(i=0; i<count; i++) { + if(i>0 && ref[i].key == ref[i-1].key) + continue; /* only unlock items once */ + lock_rw_unlock(&ref[i].key->entry.lock); + } + if(h) { + /* LRU touch, with no rrset locks held */ + for(i=0; i<count; i++) { + if(i>0 && ref[i].key == ref[i-1].key) + continue; /* only touch items once */ + rrset_cache_touch(r, ref[i].key, h[i], ref[i].id); + } + } +} + +void +rrset_update_sec_status(struct rrset_cache* r, + struct ub_packed_rrset_key* rrset, time_t now) +{ + struct packed_rrset_data* updata = + (struct packed_rrset_data*)rrset->entry.data; + struct lruhash_entry* e; + struct packed_rrset_data* cachedata; + + /* hash it again to make sure it has a hash */ + rrset->entry.hash = rrset_key_hash(&rrset->rk); + + e = slabhash_lookup(&r->table, rrset->entry.hash, rrset, 1); + if(!e) + return; /* not in the cache anymore */ + cachedata = (struct packed_rrset_data*)e->data; + if(!rrsetdata_equal(updata, cachedata)) { + lock_rw_unlock(&e->lock); + return; /* rrset has changed in the meantime */ + } + /* update the cached rrset */ + if(updata->security > cachedata->security) { + size_t i; + if(updata->trust > cachedata->trust) + cachedata->trust = updata->trust; + cachedata->security = updata->security; + /* for NS records only shorter TTLs, other types: update it */ + if(ntohs(rrset->rk.type) != LDNS_RR_TYPE_NS || + updata->ttl+now < cachedata->ttl || + cachedata->ttl < now || + updata->security == sec_status_bogus) { + cachedata->ttl = updata->ttl + now; + for(i=0; i<cachedata->count+cachedata->rrsig_count; i++) + cachedata->rr_ttl[i] = updata->rr_ttl[i]+now; + } + } + lock_rw_unlock(&e->lock); +} + +void +rrset_check_sec_status(struct rrset_cache* r, + struct ub_packed_rrset_key* rrset, time_t now) +{ + struct packed_rrset_data* updata = + (struct packed_rrset_data*)rrset->entry.data; + struct lruhash_entry* e; + struct packed_rrset_data* cachedata; + + /* hash it again to make sure it has a hash */ + rrset->entry.hash = rrset_key_hash(&rrset->rk); + + e = slabhash_lookup(&r->table, rrset->entry.hash, rrset, 0); + if(!e) + return; /* not in the cache anymore */ + cachedata = (struct packed_rrset_data*)e->data; + if(now > cachedata->ttl || !rrsetdata_equal(updata, cachedata)) { + lock_rw_unlock(&e->lock); + return; /* expired, or rrset has changed in the meantime */ + } + if(cachedata->security > updata->security) { + updata->security = cachedata->security; + if(cachedata->security == sec_status_bogus) { + size_t i; + updata->ttl = cachedata->ttl - now; + for(i=0; i<cachedata->count+cachedata->rrsig_count; i++) + if(cachedata->rr_ttl[i] < now) + updata->rr_ttl[i] = 0; + else updata->rr_ttl[i] = + cachedata->rr_ttl[i]-now; + } + if(cachedata->trust > updata->trust) + updata->trust = cachedata->trust; + } + lock_rw_unlock(&e->lock); +} + +void 
rrset_cache_remove(struct rrset_cache* r, uint8_t* nm, size_t nmlen, + uint16_t type, uint16_t dclass, uint32_t flags) +{ + struct ub_packed_rrset_key key; + key.entry.key = &key; + key.rk.dname = nm; + key.rk.dname_len = nmlen; + key.rk.rrset_class = htons(dclass); + key.rk.type = htons(type); + key.rk.flags = flags; + key.entry.hash = rrset_key_hash(&key.rk); + slabhash_remove(&r->table, key.entry.hash, &key); +} diff --git a/external/unbound/services/cache/rrset.h b/external/unbound/services/cache/rrset.h new file mode 100644 index 000000000..98e44a4e5 --- /dev/null +++ b/external/unbound/services/cache/rrset.h @@ -0,0 +1,231 @@ +/* + * services/cache/rrset.h - Resource record set cache. + * + * Copyright (c) 2007, NLnet Labs. All rights reserved. + * + * This software is open source. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of the NLNET LABS nor the names of its contributors may + * be used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * \file + * + * This file contains the rrset cache. + */ + +#ifndef SERVICES_CACHE_RRSET_H +#define SERVICES_CACHE_RRSET_H +#include "util/storage/lruhash.h" +#include "util/storage/slabhash.h" +#include "util/data/packed_rrset.h" +struct config_file; +struct alloc_cache; +struct rrset_ref; +struct regional; + +/** + * The rrset cache + * Thin wrapper around hashtable, like a typedef. + */ +struct rrset_cache { + /** uses partitioned hash table */ + struct slabhash table; +}; + +/** + * Create rrset cache + * @param cfg: config settings or NULL for defaults. + * @param alloc: initial default rrset key allocation. + * @return: NULL on error. + */ +struct rrset_cache* rrset_cache_create(struct config_file* cfg, + struct alloc_cache* alloc); + +/** + * Delete rrset cache + * @param r: rrset cache to delete. + */ +void rrset_cache_delete(struct rrset_cache* r); + +/** + * Adjust settings of the cache to settings from the config file. + * May purge the cache. May recreate the cache. + * There may be no threading or use by other threads. + * @param r: rrset cache to adjust (like realloc). + * @param cfg: config settings or NULL for defaults. + * @param alloc: initial default rrset key allocation. 
+ * @return NULL on error, or the new rrset cache pointer on success. + */ +struct rrset_cache* rrset_cache_adjust(struct rrset_cache* r, + struct config_file* cfg, struct alloc_cache* alloc); + +/** + * Touch rrset, with given pointer and id. + * Caller may not hold a lock on ANY rrset, this could give deadlock. + * + * This routine is faster than a hashtable lookup: + * o no bin_lock is acquired. + * o no walk through the bin-overflow-list. + * o no comparison of the entry key to find it. + * + * @param r: rrset cache. + * @param key: rrset key. Marked recently used (if it was not deleted + * before the lock is acquired, in that case nothing happens). + * @param hash: hash value of the item. Please read it from the key when + * you have it locked. Used to find slab from slabhash. + * @param id: used to check that the item is unchanged and not deleted. + */ +void rrset_cache_touch(struct rrset_cache* r, struct ub_packed_rrset_key* key, + hashvalue_t hash, rrset_id_t id); + +/** + * Update an rrset in the rrset cache. Stores the information for later use. + * Will lookup if the rrset is in the cache and perform an update if necessary. + * If the item was present, and superior, references are returned to that. + * The passed item is then deallocated with rrset_parsedelete. + * + * A superior rrset is: + * o rrset with better trust value. + * o same trust value, different rdata, newly passed rrset is inserted. + * If rdata is the same, TTL in the cache is updated. + * + * @param r: the rrset cache. + * @param ref: reference (ptr and id) to the rrset. Pass reference setup for + * the new rrset. The reference may be changed if the cached rrset is + * superior. + * Before calling the rrset is presumed newly allocated and changeable. + * After calling you do not hold a lock, and the rrset is inserted in + * the hashtable so you need a lock to change it. + * @param alloc: how to allocate (and deallocate) the special rrset key. + * @param timenow: current time (to see if ttl in cache is expired). + * @return: true if the passed reference is updated, false if it is unchanged. + * 0: reference unchanged, inserted in cache. + * 1: reference updated, item is inserted in cache. + * 2: reference updated, item in cache is considered superior. + * also the rdata is equal (but other parameters in cache are superior). + */ +int rrset_cache_update(struct rrset_cache* r, struct rrset_ref* ref, + struct alloc_cache* alloc, time_t timenow); + +/** + * Lookup rrset. You obtain a read/write lock. You must unlock before you + * look up anything else. + * @param r: the rrset cache. + * @param qname: name of rrset to lookup. + * @param qnamelen: length of name of rrset to lookup. + * @param qtype: type of rrset to lookup (host order). + * @param qclass: class of rrset to lookup (host order). + * @param flags: rrset flags, or 0. + * @param timenow: used to compare with TTL. + * @param wr: set true to get writelock. + * @return packed rrset key pointer, or NULL if it could not be found or + * it has timed out. Remember to unlock the key.entry.lock. + */ +struct ub_packed_rrset_key* rrset_cache_lookup(struct rrset_cache* r, + uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass, + uint32_t flags, time_t timenow, int wr); + +/** + * Obtain readlock on a (sorted) list of rrset references. + * Checks TTLs and IDs of the rrsets and rolls the locking back if not OK. + * @param ref: array of rrset references (key pointer and ID value). + * duplicate references are allowed and handled. + * @param count: size of array. + * @param timenow: used to compare with TTL. + * @return true on success, false on a failure, which can be that some + * RRsets have timed out, or that they do not exist any more, the + * RRsets have been purged from the cache. + * If true, you hold readlocks on all the ref items. + */ +int rrset_array_lock(struct rrset_ref* ref, size_t count, time_t timenow);
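The lock/use/unlock contract of rrset_array_lock() pairs naturally with rrset_array_unlock_touch(), declared just below; a sketch under that assumption, with example_use_refs as a hypothetical caller name:

static int example_use_refs(struct rrset_cache* r, struct regional* scratch,
	struct rrset_ref* ref, size_t count, time_t now)
{
	if(!rrset_array_lock(ref, count, now))
		return 0; /* an rrset expired or was purged: treat as a miss */
	/* ... read the locked rrsets here; readlock means no modification ... */
	rrset_array_unlock_touch(r, scratch, ref, count);
	return 1;
}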
+ +/** + * Unlock array (sorted) of rrset references. + * @param ref: array of rrset references (key pointer and ID value). + * duplicate references are allowed and handled. + * @param count: size of array. + */ +void rrset_array_unlock(struct rrset_ref* ref, size_t count); + +/** + * Unlock array (sorted) of rrset references and at the same time + * touch LRU on the rrsets. It needs the scratch region for temporary + * storage as it uses the initial locks to obtain hash values. + * @param r: the rrset cache. In this cache LRU is updated. + * @param scratch: region for temporary storage of hash values. + * if memory allocation fails, the lru touch fails silently, + * but locks are released. memory errors are logged. + * @param ref: array of rrset references (key pointer and ID value). + * duplicate references are allowed and handled. + * @param count: size of array. + */ +void rrset_array_unlock_touch(struct rrset_cache* r, struct regional* scratch, + struct rrset_ref* ref, size_t count); + +/** + * Update security status of an rrset. Looks up the rrset. + * If found, checks if rdata is equal. + * If so, it will update the security, trust and rrset-ttl values. + * The values are only updated if security is increased (towards secure). + * @param r: the rrset cache. + * @param rrset: which rrset to attempt to update. This rrset is left + * untouched. The rrset in the cache is updated in-place. + * @param now: current time. + */ +void rrset_update_sec_status(struct rrset_cache* r, + struct ub_packed_rrset_key* rrset, time_t now); + +/** + * Look up the security status of an rrset. + * If found, checks if rdata is equal, and the entry did not expire. + * If so, it will update the security, trust and rrset-ttl values. + * @param r: the rrset cache. + * @param rrset: This rrset may change security status due to the cache. + * But its status will only improve, towards secure. + * @param now: current time. + */ +void rrset_check_sec_status(struct rrset_cache* r, + struct ub_packed_rrset_key* rrset, time_t now); + +/** + * Remove an rrset from the cache, by name, type and flags. + * @param r: rrset cache + * @param nm: name of rrset + * @param nmlen: length of name + * @param type: type of rrset + * @param dclass: class of rrset, host order + * @param flags: flags of rrset, host order + */ +void rrset_cache_remove(struct rrset_cache* r, uint8_t* nm, size_t nmlen, + uint16_t type, uint16_t dclass, uint32_t flags); + +/** mark rrset to be deleted, set id=0 */ +void rrset_markdel(void* key); + +#endif /* SERVICES_CACHE_RRSET_H */
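To close, a sketch of the rrset_cache_lookup() contract documented above (an illustration, not part of the diff); the caller name is hypothetical, and LDNS_RR_TYPE_A and LDNS_RR_CLASS_IN come from ldns/rrdef.h:

static void example_lookup(struct rrset_cache* r, uint8_t* qname,
	size_t qnamelen, time_t now)
{
	struct ub_packed_rrset_key* k = rrset_cache_lookup(r, qname,
		qnamelen, LDNS_RR_TYPE_A, LDNS_RR_CLASS_IN, 0 /* flags */,
		now, 0 /* readlock */);
	if(!k)
		return; /* not cached, or its TTL has expired */
	/* while locked, k->entry.data points to a struct packed_rrset_data */
	lock_rw_unlock(&k->entry.lock);
}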