author    Rich Salz <rsalz@openssl.org>  2017-06-07 11:23:37 -0400
committer Rich Salz <rsalz@openssl.org>  2017-06-07 13:06:55 -0400
commit    1ec7eff1b2d761592df564d7ee92f65e08da4cd6 (patch)
tree      a6cb87a9d109897e8b3262846f0a44eae3d224b9 /crypto/lhash
parent    c1abfde735eca6346eb2c0641b67b11d0e68b94c (diff)
Add a lock around the OBJ_NAME table
Various initialization functions modify this table, which can cause
heap corruption in the absence of external synchronization. Some stats
are modified from OPENSSL_LH_retrieve, where callers aren't expecting
to have to take out an exclusive lock. Switch to using atomic
operations for those stats.

Reviewed-by: Matt Caswell <matt@openssl.org>
Reviewed-by: Rich Salz <rsalz@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/3525)
(cherry picked from commit be606c013d31847718ceb5d97c567988a771c2e5)
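Every counter update in this patch goes through CRYPTO_atomic_add(),
whose OpenSSL 1.1.0 signature is
int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock):
it adds amount to *val, stores the new value in *ret, and only takes
lock on builds without native atomic support. A minimal sketch of the
same idiom outside lhash (hits, hits_lock and the helper names are
illustrative, not part of the patch):

    #include <openssl/crypto.h>

    static int hits;
    static CRYPTO_RWLOCK *hits_lock;

    int stats_init(void)
    {
        /* The lock is only a fallback for platforms without native
         * atomics; where atomics exist it is never taken. */
        hits_lock = CRYPTO_THREAD_lock_new();
        return hits_lock != NULL;
    }

    int record_hit(void)
    {
        int result;

        /* Adds 1 to hits and writes the new value to result; the out
         * parameter is mandatory even when the caller discards it,
         * which is why the patch threads a throwaway "scratch" int
         * through getrn() and OPENSSL_LH_retrieve(). */
        if (!CRYPTO_atomic_add(&hits, 1, &result, hits_lock))
            return -1;
        return result;
    }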
Diffstat (limited to 'crypto/lhash')
-rw-r--r--  crypto/lhash/lh_stats.c   | 22
-rw-r--r--  crypto/lhash/lhash.c      | 29
-rw-r--r--  crypto/lhash/lhash_lcl.h  | 19

3 files changed, 47 insertions(+), 23 deletions(-)
diff --git a/crypto/lhash/lh_stats.c b/crypto/lhash/lh_stats.c
index 7337832422..5586afa0d8 100644
--- a/crypto/lhash/lh_stats.c
+++ b/crypto/lhash/lh_stats.c
@@ -61,6 +61,9 @@ void OPENSSL_LH_node_usage_stats(const OPENSSL_LHASH *lh, FILE *fp)
void OPENSSL_LH_stats_bio(const OPENSSL_LHASH *lh, BIO *out)
{
+ OPENSSL_LHASH *lh_mut = (OPENSSL_LHASH *) lh;
+ int ret;
+
BIO_printf(out, "num_items = %lu\n", lh->num_items);
BIO_printf(out, "num_nodes = %u\n", lh->num_nodes);
BIO_printf(out, "num_alloc_nodes = %u\n", lh->num_alloc_nodes);
@@ -69,15 +72,24 @@ void OPENSSL_LH_stats_bio(const OPENSSL_LHASH *lh, BIO *out)
BIO_printf(out, "num_contracts = %lu\n", lh->num_contracts);
BIO_printf(out, "num_contract_reallocs = %lu\n",
lh->num_contract_reallocs);
- BIO_printf(out, "num_hash_calls = %lu\n", lh->num_hash_calls);
- BIO_printf(out, "num_comp_calls = %lu\n", lh->num_comp_calls);
+ CRYPTO_atomic_add(&lh_mut->num_hash_calls, 0, &ret,
+ lh->retrieve_stats_lock);
+ BIO_printf(out, "num_hash_calls = %d\n", ret);
+ CRYPTO_atomic_add(&lh_mut->num_comp_calls, 0, &ret,
+ lh->retrieve_stats_lock);
+ BIO_printf(out, "num_comp_calls = %d\n", ret);
BIO_printf(out, "num_insert = %lu\n", lh->num_insert);
BIO_printf(out, "num_replace = %lu\n", lh->num_replace);
BIO_printf(out, "num_delete = %lu\n", lh->num_delete);
BIO_printf(out, "num_no_delete = %lu\n", lh->num_no_delete);
- BIO_printf(out, "num_retrieve = %lu\n", lh->num_retrieve);
- BIO_printf(out, "num_retrieve_miss = %lu\n", lh->num_retrieve_miss);
- BIO_printf(out, "num_hash_comps = %lu\n", lh->num_hash_comps);
+ CRYPTO_atomic_add(&lh_mut->num_retrieve, 0, &ret, lh->retrieve_stats_lock);
+ BIO_printf(out, "num_retrieve = %d\n", ret);
+ CRYPTO_atomic_add(&lh_mut->num_retrieve_miss, 0, &ret,
+ lh->retrieve_stats_lock);
+ BIO_printf(out, "num_retrieve_miss = %d\n", ret);
+ CRYPTO_atomic_add(&lh_mut->num_hash_comps, 0, &ret,
+ lh->retrieve_stats_lock);
+ BIO_printf(out, "num_hash_comps = %d\n", ret);
}
void OPENSSL_LH_node_stats_bio(const OPENSSL_LHASH *lh, BIO *out)
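Two details of the hunk above are easy to miss: lh is const-qualified,
so the code introduces lh_mut because CRYPTO_atomic_add() needs a
writable pointer even for a pure read, and the read itself is spelled
as adding 0, since the API has no separate atomic-load call. The same
read idiom in isolation (illustrative helper, not from the patch):

    /* Atomically read a stats counter by adding 0 to it. */
    static int read_counter(const int *counter, CRYPTO_RWLOCK *lock)
    {
        int result;

        /* Cast away const: adding 0 never changes the value, but the
         * prototype still requires an int *. */
        if (!CRYPTO_atomic_add((int *)counter, 0, &result, lock))
            return -1;
        return result;
    }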
diff --git a/crypto/lhash/lhash.c b/crypto/lhash/lhash.c
index adde832cc4..f24d0c4c8a 100644
--- a/crypto/lhash/lhash.c
+++ b/crypto/lhash/lhash.c
@@ -29,9 +29,11 @@ OPENSSL_LHASH *OPENSSL_LH_new(OPENSSL_LH_HASHFUNC h, OPENSSL_LH_COMPFUNC c)
OPENSSL_LHASH *ret;
if ((ret = OPENSSL_zalloc(sizeof(*ret))) == NULL)
- goto err0;
+ return NULL;
if ((ret->b = OPENSSL_zalloc(sizeof(*ret->b) * MIN_NODES)) == NULL)
- goto err1;
+ goto err;
+ if ((ret->retrieve_stats_lock = CRYPTO_THREAD_lock_new()) == NULL)
+ goto err;
ret->comp = ((c == NULL) ? (OPENSSL_LH_COMPFUNC)strcmp : c);
ret->hash = ((h == NULL) ? (OPENSSL_LH_HASHFUNC)OPENSSL_LH_strhash : h);
ret->num_nodes = MIN_NODES / 2;
@@ -41,10 +43,10 @@ OPENSSL_LHASH *OPENSSL_LH_new(OPENSSL_LH_HASHFUNC h, OPENSSL_LH_COMPFUNC c)
ret->down_load = DOWN_LOAD;
return (ret);
- err1:
+err:
+ OPENSSL_free(ret->b);
OPENSSL_free(ret);
- err0:
- return (NULL);
+ return NULL;
}
void OPENSSL_LH_free(OPENSSL_LHASH *lh)
@@ -63,6 +65,7 @@ void OPENSSL_LH_free(OPENSSL_LHASH *lh)
n = nn;
}
}
+ CRYPTO_THREAD_lock_free(lh->retrieve_stats_lock);
OPENSSL_free(lh->b);
OPENSSL_free(lh);
}
@@ -133,18 +136,19 @@ void *OPENSSL_LH_retrieve(OPENSSL_LHASH *lh, const void *data)
unsigned long hash;
OPENSSL_LH_NODE **rn;
void *ret;
+ int scratch;
lh->error = 0;
rn = getrn(lh, data, &hash);
if (*rn == NULL) {
- lh->num_retrieve_miss++;
- return (NULL);
+ CRYPTO_atomic_add(&lh->num_retrieve_miss, 1, &scratch, lh->retrieve_stats_lock);
+ return NULL;
} else {
ret = (*rn)->data;
- lh->num_retrieve++;
+ CRYPTO_atomic_add(&lh->num_retrieve, 1, &scratch, lh->retrieve_stats_lock);
}
- return (ret);
+ return ret;
}
static void doall_util_fn(OPENSSL_LHASH *lh, int use_arg,
@@ -270,9 +274,10 @@ static OPENSSL_LH_NODE **getrn(OPENSSL_LHASH *lh,
OPENSSL_LH_NODE **ret, *n1;
unsigned long hash, nn;
OPENSSL_LH_COMPFUNC cf;
+ int scratch;
hash = (*(lh->hash)) (data);
- lh->num_hash_calls++;
+ CRYPTO_atomic_add(&lh->num_hash_calls, 1, &scratch, lh->retrieve_stats_lock);
*rhash = hash;
nn = hash % lh->pmax;
@@ -282,12 +287,12 @@ static OPENSSL_LH_NODE **getrn(OPENSSL_LHASH *lh,
cf = lh->comp;
ret = &(lh->b[(int)nn]);
for (n1 = *ret; n1 != NULL; n1 = n1->next) {
- lh->num_hash_comps++;
+ CRYPTO_atomic_add(&lh->num_hash_comps, 1, &scratch, lh->retrieve_stats_lock);
if (n1->hash != hash) {
ret = &(n1->next);
continue;
}
- lh->num_comp_calls++;
+ CRYPTO_atomic_add(&lh->num_comp_calls, 1, &scratch, lh->retrieve_stats_lock);
if (cf(n1->data, data) == 0)
break;
ret = &(n1->next);
diff --git a/crypto/lhash/lhash_lcl.h b/crypto/lhash/lhash_lcl.h
index eb4a1a3f65..01d463fb36 100644
--- a/crypto/lhash/lhash_lcl.h
+++ b/crypto/lhash/lhash_lcl.h
@@ -6,7 +6,7 @@
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
-
+#include <openssl/crypto.h>
struct lhash_node_st {
void *data;
@@ -18,6 +18,13 @@ struct lhash_st {
OPENSSL_LH_NODE **b;
OPENSSL_LH_COMPFUNC comp;
OPENSSL_LH_HASHFUNC hash;
+ /*
+ * some stats are updated on lookup, which callers aren't expecting to have
+ * to take an exclusive lock around. This lock protects them on platforms
+ * without atomics, and their types are int rather than unsigned long below
+ * so they can be adjusted with CRYPTO_atomic_add.
+ */
+ CRYPTO_RWLOCK *retrieve_stats_lock;
unsigned int num_nodes;
unsigned int num_alloc_nodes;
unsigned int p;
@@ -29,14 +36,14 @@ struct lhash_st {
unsigned long num_expand_reallocs;
unsigned long num_contracts;
unsigned long num_contract_reallocs;
- unsigned long num_hash_calls;
- unsigned long num_comp_calls;
+ int num_hash_calls;
+ int num_comp_calls;
unsigned long num_insert;
unsigned long num_replace;
unsigned long num_delete;
unsigned long num_no_delete;
- unsigned long num_retrieve;
- unsigned long num_retrieve_miss;
- unsigned long num_hash_comps;
+ int num_retrieve;
+ int num_retrieve_miss;
+ int num_hash_comps;
int error;
};
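Finally, a short usage sketch that exercises the changed paths end to
end, assuming OpenSSL 1.1.x headers (the strings and the stdout BIO are
illustrative): OPENSSL_LH_new() now also allocates retrieve_stats_lock,
OPENSSL_LH_retrieve() bumps the lookup counters atomically, and
OPENSSL_LH_stats_bio() reads them back through the same
CRYPTO_atomic_add() calls.

    #include <stdio.h>
    #include <openssl/lhash.h>
    #include <openssl/bio.h>

    int main(void)
    {
        static char alpha[] = "alpha";
        OPENSSL_LHASH *lh;
        BIO *out;

        /* NULL hash/compare callbacks default to OPENSSL_LH_strhash
         * and strcmp, so NUL-terminated strings work as entries. */
        if ((lh = OPENSSL_LH_new(NULL, NULL)) == NULL)
            return 1;

        OPENSSL_LH_insert(lh, alpha);
        OPENSSL_LH_retrieve(lh, "alpha"); /* hit: bumps num_retrieve */
        OPENSSL_LH_retrieve(lh, "beta");  /* miss: bumps num_retrieve_miss */

        if ((out = BIO_new_fp(stdout, BIO_NOCLOSE)) != NULL) {
            OPENSSL_LH_stats_bio(lh, out); /* reads counters via add-of-0 */
            BIO_free(out);
        }
        OPENSSL_LH_free(lh);
        return 0;
    }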