[NET]: More kzalloc conversions.
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 77d04bd957
parent 31380de95c
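The conversion applied throughout this commit is mechanical: a kmalloc() immediately followed by a memset() of the same size becomes a single kzalloc(), which returns already-zeroed memory. A minimal kernel-style sketch of the before/after pattern; struct foo and the foo_alloc_*() helpers are hypothetical names for illustration, not code from this commit:

#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical structure, for illustration only. */
struct foo {
	int a;
	void *b;
};

/* Before: allocate, then clear the memory by hand. */
static struct foo *foo_alloc_old(void)
{
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

	if (f == NULL)
		return NULL;
	memset(f, 0, sizeof(*f));
	return f;
}

/* After: kzalloc() hands back zero-filled memory in one step. */
static struct foo *foo_alloc_new(void)
{
	return kzalloc(sizeof(struct foo), GFP_KERNEL);
}

Besides dropping a call, this removes the window in which the object exists unzeroed and avoids size mismatches between the allocation and the memset.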
@@ -55,15 +55,12 @@ int alloc_divert_blk(struct net_device *dev)
 	dev->divert = NULL;
 	if (dev->type == ARPHRD_ETHER) {
-		dev->divert = (struct divert_blk *)
-			kmalloc(alloc_size, GFP_KERNEL);
+		dev->divert = kzalloc(alloc_size, GFP_KERNEL);
 		if (dev->divert == NULL) {
 			printk(KERN_INFO "divert: unable to allocate divert_blk for %s\n",
 			       dev->name);
 			return -ENOMEM;
 		}
-
-		memset(dev->divert, 0, sizeof(struct divert_blk));
 		dev_hold(dev);
 	}
@@ -318,12 +318,10 @@ static void __devinit flow_cache_cpu_prepare(int cpu)
 		/* NOTHING */;
 
 	flow_table(cpu) = (struct flow_cache_entry **)
-		__get_free_pages(GFP_KERNEL, order);
+		__get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
 	if (!flow_table(cpu))
 		panic("NET: failed to allocate flow cache order %lu\n", order);
 
-	memset(flow_table(cpu), 0, PAGE_SIZE << order);
-
 	flow_hash_rnd_recalc(cpu) = 1;
 	flow_count(cpu) = 0;
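Where the buffer is sized in whole pages, as in the flow cache table above and the neighbour hash table further down, the same idea is expressed by passing __GFP_ZERO to the page allocator instead of switching to kzalloc(). A small sketch of that variant; the table_alloc_*() helpers are hypothetical names, not code from this commit:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Before: grab 2^order pages, then zero PAGE_SIZE << order bytes by hand. */
static unsigned long table_alloc_old(unsigned int order)
{
	unsigned long addr = __get_free_pages(GFP_KERNEL, order);

	if (addr)
		memset((void *)addr, 0, PAGE_SIZE << order);
	return addr;
}

/* After: __GFP_ZERO makes the page allocator return zeroed pages. */
static unsigned long table_alloc_new(unsigned int order)
{
	return __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
}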
@@ -159,11 +159,10 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
 	if (parm->interval < -2 || parm->interval > 3)
 		return -EINVAL;
 
-	est = kmalloc(sizeof(*est), GFP_KERNEL);
+	est = kzalloc(sizeof(*est), GFP_KERNEL);
 	if (est == NULL)
 		return -ENOBUFS;
 
-	memset(est, 0, sizeof(*est));
 	est->interval = parm->interval + 2;
 	est->bstats = bstats;
 	est->rate_est = rate_est;
@@ -284,14 +284,11 @@ static struct neighbour **neigh_hash_alloc(unsigned int entries)
 	struct neighbour **ret;
 
 	if (size <= PAGE_SIZE) {
-		ret = kmalloc(size, GFP_ATOMIC);
+		ret = kzalloc(size, GFP_ATOMIC);
 	} else {
 		ret = (struct neighbour **)
-		      __get_free_pages(GFP_ATOMIC, get_order(size));
+		      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
 	}
-	if (ret)
-		memset(ret, 0, size);
-
 	return ret;
 }
@@ -1089,8 +1086,7 @@ static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
 		if (hh->hh_type == protocol)
 			break;
 
-	if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
-		memset(hh, 0, sizeof(struct hh_cache));
+	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
 		rwlock_init(&hh->hh_lock);
 		hh->hh_type = protocol;
 		atomic_set(&hh->hh_refcnt, 0);
@@ -1366,13 +1362,11 @@ void neigh_table_init(struct neigh_table *tbl)
 	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
 
 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
-	tbl->phash_buckets = kmalloc(phsize, GFP_KERNEL);
+	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
 
 	if (!tbl->hash_buckets || !tbl->phash_buckets)
 		panic("cannot allocate neighbour cache hashes");
 
-	memset(tbl->phash_buckets, 0, phsize);
-
 	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
 
 	rwlock_init(&tbl->lock);
@@ -38,13 +38,11 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 {
 	const int lopt_size = sizeof(struct listen_sock) +
 			      nr_table_entries * sizeof(struct request_sock *);
-	struct listen_sock *lopt = kmalloc(lopt_size, GFP_KERNEL);
+	struct listen_sock *lopt = kzalloc(lopt_size, GFP_KERNEL);
 
 	if (lopt == NULL)
 		return -ENOMEM;
 
-	memset(lopt, 0, lopt_size);
-
 	for (lopt->max_qlen_log = 6;
 	     (1 << lopt->max_qlen_log) < sysctl_max_syn_backlog;
 	     lopt->max_qlen_log++);