Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (22 commits)
  [IPCONFIG]: The kernel gets no IP from some DHCP servers
  b43legacy: Fix module init message
  rndis_wlan: fix broken data copy
  libertas: compare the current command with response
  libertas: fix sanity check on sequence number in command response
  p54: fix eeprom parser length sanity checks
  p54: fix EEPROM structure endianness
  ssb: Add pcibios_enable_device() return value check
  rc80211-pid: fix rate adjustment
  [ESP]: Add select on AUTHENC
  [TCP]: Improve ipv4 established hash function.
  [NETPOLL]: Revert two bogus cleanups that broke netconsole.
  [PPPOL2TP]: Add missing sock_put() in pppol2tp_tunnel_closeall()
  Subject: [PPPOL2TP] add missing sock_put() in pppol2tp_recv_dequeue()
  [BLUETOOTH]: l2cap info_timer delete fix in hci_conn_del
  [NET]: Fix race in generic address resolution.
  iucv: fix build error on !SMP
  [TCP]: Must count fack_count also when skipping
  [TUN]: Fix RTNL-locking in tun/tap driver
  [SCTP]: Use proc_create to setup de->proc_fops.
  ...
commit 27d0483aa1
@@ -455,6 +455,7 @@ static void pppol2tp_recv_dequeue(struct pppol2tp_session *session)
                       skb_queue_len(&session->reorder_q));
            __skb_unlink(skb, &session->reorder_q);
            kfree_skb(skb);
+           sock_put(session->sock);
            continue;
        }
 
@@ -1110,6 +1111,8 @@ static void pppol2tp_tunnel_closeall(struct pppol2tp_tunnel *tunnel)
    for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) {
 again:
        hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
+           struct sk_buff *skb;
+
            session = hlist_entry(walk, struct pppol2tp_session, hlist);
 
            sk = session->sock;
@@ -1138,7 +1141,10 @@ static void pppol2tp_tunnel_closeall(struct pppol2tp_tunnel *tunnel)
            /* Purge any queued data */
            skb_queue_purge(&sk->sk_receive_queue);
            skb_queue_purge(&sk->sk_write_queue);
-           skb_queue_purge(&session->reorder_q);
+           while ((skb = skb_dequeue(&session->reorder_q))) {
+               kfree_skb(skb);
+               sock_put(sk);
+           }
 
            release_sock(sk);
            sock_put(sk);

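The reorder queue above holds one socket reference per queued skb, which is why the bulk skb_queue_purge() is replaced by a dequeue loop that drops one reference for every freed skb. A minimal stand-alone sketch of that refcount-balanced drain, with toy types standing in for the socket and queue (not the kernel structures):

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct pkt { struct pkt *next; };
    struct sock_like { int refcnt; };        /* stand-in for the refcounted socket */
    struct queue { struct pkt *head; };

    static struct pkt *dequeue(struct queue *q)
    {
        struct pkt *p = q->head;
        if (p)
            q->head = p->next;
        return p;
    }

    int main(void)
    {
        struct sock_like sk = { .refcnt = 1 };   /* the caller's own reference */
        struct queue reorder_q = { NULL };
        struct pkt *p;
        int i;

        /* queue three packets; each queued packet pins the socket with a reference */
        for (i = 0; i < 3; i++) {
            p = malloc(sizeof(*p));
            p->next = reorder_q.head;
            reorder_q.head = p;
            sk.refcnt++;
        }

        /* drain: free each packet AND drop the reference it was holding */
        while ((p = dequeue(&reorder_q))) {
            free(p);
            sk.refcnt--;
        }

        assert(sk.refcnt == 1);   /* only the caller's reference remains */
        printf("refcnt back to %d\n", sk.refcnt);
        return 0;
    }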
@@ -663,7 +663,11 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
    case SIOCSIFHWADDR:
    {
        /* try to set the actual net device's hw address */
-       int ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
+       int ret;
+
+       rtnl_lock();
+       ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
+       rtnl_unlock();
 
        if (ret == 0) {
            /** Set the character device's hardware address. This is used when

@@ -3829,7 +3829,7 @@ static void b43legacy_print_driverinfo(void)
 #ifdef CONFIG_B43LEGACY_DMA
    feat_dma = "D";
 #endif
-   printk(KERN_INFO "Broadcom 43xx driver loaded "
+   printk(KERN_INFO "Broadcom 43xx-legacy driver loaded "
           "[ Features: %s%s%s%s%s, Firmware-ID: "
           B43legacy_SUPPORTED_FIRMWARE_ID " ]\n",
           feat_pci, feat_leds, feat_rfkill, feat_pio, feat_dma);

@@ -562,9 +562,7 @@ int lbs_process_rx_command(struct lbs_private *priv)
    }
 
    resp = (void *)priv->upld_buf;
-
-   curcmd = le16_to_cpu(resp->command);
-
+   curcmd = le16_to_cpu(priv->cur_cmd->cmdbuf->command);
    respcmd = le16_to_cpu(resp->command);
    result = le16_to_cpu(resp->result);
 
@@ -572,9 +570,9 @@ int lbs_process_rx_command(struct lbs_private *priv)
        respcmd, le16_to_cpu(resp->seqnum), priv->upld_len, jiffies);
    lbs_deb_hex(LBS_DEB_HOST, "CMD_RESP", (void *) resp, priv->upld_len);
 
-   if (resp->seqnum != resp->seqnum) {
+   if (resp->seqnum != priv->cur_cmd->cmdbuf->seqnum) {
        lbs_pr_info("Received CMD_RESP with invalid sequence %d (expected %d)\n",
-           le16_to_cpu(resp->seqnum), le16_to_cpu(resp->seqnum));
+           le16_to_cpu(resp->seqnum), le16_to_cpu(priv->cur_cmd->cmdbuf->seqnum));
        spin_unlock_irqrestore(&priv->driver_lock, flags);
        ret = -1;
        goto done;

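The old sanity check compared resp->seqnum with itself, so it could never fire; the fix compares the response against the sequence number stored with the command that is actually outstanding. A small stand-alone sketch of that matching step (structure and field names invented for the example, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    struct cmd_hdr { uint16_t command; uint16_t seqnum; };

    /* Return 0 if the response matches the outstanding command, -1 otherwise. */
    static int check_response(const struct cmd_hdr *sent, const struct cmd_hdr *resp)
    {
        if (resp->seqnum != sent->seqnum) {   /* compare against the request... */
            fprintf(stderr, "invalid sequence %u (expected %u)\n",
                    resp->seqnum, sent->seqnum);
            return -1;
        }
        return 0;                             /* ...never against resp itself */
    }

    int main(void)
    {
        struct cmd_hdr sent  = { .command = 0x0006, .seqnum = 42 };
        struct cmd_hdr good  = { .command = 0x8006, .seqnum = 42 };
        struct cmd_hdr stale = { .command = 0x8006, .seqnum = 41 };

        printf("good:  %d\n", check_response(&sent, &good));   /* prints 0 */
        printf("stale: %d\n", check_response(&sent, &stale));  /* prints -1 */
        return 0;
    }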
@@ -166,18 +166,23 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
    struct p54_common *priv = dev->priv;
    struct eeprom_pda_wrap *wrap = NULL;
    struct pda_entry *entry;
-   int i = 0;
    unsigned int data_len, entry_len;
    void *tmp;
    int err;
+   u8 *end = (u8 *)eeprom + len;
 
    wrap = (struct eeprom_pda_wrap *) eeprom;
-   entry = (void *)wrap->data + wrap->len;
-   i += 2;
-   i += le16_to_cpu(entry->len)*2;
-   while (i < len) {
+   entry = (void *)wrap->data + le16_to_cpu(wrap->len);
+
+   /* verify that at least the entry length/code fits */
+   while ((u8 *)entry <= end - sizeof(*entry)) {
        entry_len = le16_to_cpu(entry->len);
        data_len = ((entry_len - 1) << 1);
+
+       /* abort if entry exceeds whole structure */
+       if ((u8 *)entry + sizeof(*entry) + data_len > end)
+           break;
+
        switch (le16_to_cpu(entry->code)) {
        case PDR_MAC_ADDRESS:
            SET_IEEE80211_PERM_ADDR(dev, entry->data);
@@ -249,13 +254,12 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
            priv->version = *(u8 *)(entry->data + 1);
            break;
        case PDR_END:
-           i = len;
+           /* make it overrun */
+           entry_len = len;
            break;
        }
 
        entry = (void *)entry + (entry_len + 1)*2;
-       i += 2;
-       i += entry_len*2;
    }
 
    if (!priv->iq_autocal || !priv->output_limit || !priv->curve_data) {

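The reworked loop validates every PDA entry against the end of the buffer before dereferencing it, instead of trusting an accumulated word count. A self-contained sketch of the same bounded walk over length-prefixed records (record layout and values invented for the example, with an extra zero-length guard that the kernel code does not need here):

    #include <stdint.h>
    #include <stdio.h>

    struct entry {            /* length-prefixed record: len counts 16-bit words */
        uint16_t len;         /* including the code word, excluding len itself   */
        uint16_t code;
        uint8_t data[];
    };

    static void parse(const uint8_t *buf, size_t buflen)
    {
        const uint8_t *end = buf + buflen;
        const struct entry *e = (const void *)buf;

        /* verify that at least the length/code header fits */
        while ((const uint8_t *)e <= end - sizeof(*e)) {
            size_t entry_len = e->len;
            size_t data_len;

            if (entry_len == 0)      /* malformed record, stop */
                break;
            data_len = (entry_len - 1) * 2;

            /* abort if the payload would run past the buffer */
            if ((const uint8_t *)e + sizeof(*e) + data_len > end)
                break;

            printf("entry code=%u, %zu data bytes\n", e->code, data_len);

            /* advance by the whole record: the len word plus entry_len words */
            e = (const void *)((const uint8_t *)e + (entry_len + 1) * 2);
        }
    }

    int main(void)
    {
        /* two records: (len=1, code=7) and (len=3, code=9, two data words) */
        uint16_t words[] = { 1, 7, 3, 9, 0xbbaa, 0xddcc };
        parse((const uint8_t *)words, sizeof(words));
        return 0;
    }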
@@ -53,10 +53,10 @@ struct pda_entry {
 } __attribute__ ((packed));
 
 struct eeprom_pda_wrap {
-   u32 magic;
-   u16 pad;
-   u16 len;
-   u32 arm_opcode;
+   __le32 magic;
+   __le16 pad;
+   __le16 len;
+   __le32 arm_opcode;
    u8 data[0];
 } __attribute__ ((packed));
 

@@ -260,7 +260,7 @@ struct NDIS_802_11_KEY {
    __le32 KeyLength;
    u8 Bssid[6];
    u8 Padding[6];
-   __le64 KeyRSC;
+   u8 KeyRSC[8];
    u8 KeyMaterial[32];
 } __attribute__((packed));
 
@@ -1508,7 +1508,7 @@ static int rndis_iw_set_encode_ext(struct net_device *dev,
    struct usbnet *usbdev = dev->priv;
    struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
    struct NDIS_802_11_KEY ndis_key;
-   int i, keyidx, ret;
+   int keyidx, ret;
    u8 *addr;
 
    keyidx = wrqu->encoding.flags & IW_ENCODE_INDEX;
@@ -1543,9 +1543,7 @@ static int rndis_iw_set_encode_ext(struct net_device *dev,
    ndis_key.KeyIndex = cpu_to_le32(keyidx);
 
    if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
-       for (i = 0; i < 6; i++)
-           ndis_key.KeyRSC |=
-               cpu_to_le64(ext->rx_seq[i] << (i * 8));
+       memcpy(ndis_key.KeyRSC, ext->rx_seq, 6);
        ndis_key.KeyIndex |= cpu_to_le32(1 << 29);
    }
 

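KeyRSC is now declared as a plain byte array, matching ext->rx_seq, so a 6-byte memcpy does the whole job; the removed loop shifted byte values that are only promoted to 32-bit int, so the shifts for the upper bytes overflowed. A small stand-alone sketch contrasting the two approaches (simplified, not the driver code):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint8_t rx_seq[6] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
        uint8_t key_rsc[8] = { 0 };
        uint64_t value = 0;
        int i;

        /* Correct: the receive sequence counter is just bytes; copy them. */
        memcpy(key_rsc, rx_seq, sizeof(rx_seq));

        /* Building an integer instead needs a 64-bit cast BEFORE shifting;
         * rx_seq[i] << (i * 8) promotes only to int, so i >= 4 overflows. */
        for (i = 0; i < 6; i++)
            value |= (uint64_t)rx_seq[i] << (i * 8);

        for (i = 0; i < 8; i++)
            printf("%02x ", key_rsc[i]);
        printf("\n%016llx\n", (unsigned long long)value);
        return 0;
    }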
@@ -111,7 +111,10 @@ static void __init ssb_fixup_pcibridge(struct pci_dev *dev)
 
    /* Enable PCI bridge bus mastering and memory space */
    pci_set_master(dev);
-   pcibios_enable_device(dev, ~0);
+   if (pcibios_enable_device(dev, ~0) < 0) {
+       ssb_printk(KERN_ERR "PCI: SSB bridge enable failed\n");
+       return;
+   }
 
    /* Enable PCI bridge BAR1 prefetch and burst */
    pci_write_config_dword(dev, SSB_BAR1_CONTROL, 3);

@@ -25,6 +25,7 @@ struct netpoll {
 
 struct netpoll_info {
    atomic_t refcnt;
+   int rx_flags;
    spinlock_t rx_lock;
    struct netpoll *rx_np; /* netpoll that registered an rx_hook */
    struct sk_buff_head arp_tx; /* list of arp requests to reply to */
@@ -50,12 +51,12 @@ static inline int netpoll_rx(struct sk_buff *skb)
    unsigned long flags;
    int ret = 0;
 
-   if (!npinfo || !npinfo->rx_np)
+   if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
        return 0;
 
    spin_lock_irqsave(&npinfo->rx_lock, flags);
-   /* check rx_np again with the lock held */
-   if (npinfo->rx_np && __netpoll_rx(skb))
+   /* check rx_flags again with the lock held */
+   if (npinfo->rx_flags && __netpoll_rx(skb))
        ret = 1;
    spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 

@@ -175,7 +175,8 @@ extern void build_ehash_secret(void);
 static inline unsigned int inet_ehashfn(const __be32 laddr, const __u16 lport,
                                         const __be32 faddr, const __be16 fport)
 {
-   return jhash_2words((__force __u32) laddr ^ (__force __u32) faddr,
+   return jhash_3words((__force __u32) laddr,
+                       (__force __u32) faddr,
                        ((__u32) lport) << 16 | (__force __u32)fport,
                        inet_ehash_secret);
 }

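Folding the two addresses together with XOR before hashing discards information: any two connections whose local and foreign addresses XOR to the same value (for instance the same pair of hosts with the roles swapped) land in the same hash chain no matter what the secret is. Keeping the addresses as separate hash inputs, as jhash_3words() does above, avoids that. A small stand-alone illustration of the folding problem, using a toy mix function rather than the kernel's jhash:

    #include <stdint.h>
    #include <stdio.h>

    /* toy stand-ins for a keyed mix; the point is only what gets fed in */
    static uint32_t mix2(uint32_t a, uint32_t b, uint32_t secret)
    {
        uint32_t h = secret;
        h ^= a; h *= 0x9e3779b1u;
        h ^= b; h *= 0x85ebca77u;
        return h ^ (h >> 16);
    }

    static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c, uint32_t secret)
    {
        uint32_t h = secret;
        h ^= a; h *= 0x9e3779b1u;
        h ^= b; h *= 0x85ebca77u;
        h ^= c; h *= 0xc2b2ae3du;
        return h ^ (h >> 16);
    }

    int main(void)
    {
        /* connection A: 10.0.0.1:4242 -> 10.0.0.2:80, connection B: roles swapped */
        uint32_t laddr1 = 0x0a000001, faddr1 = 0x0a000002;
        uint32_t laddr2 = 0x0a000002, faddr2 = 0x0a000001;
        uint32_t ports  = (4242u << 16) | 80u;
        uint32_t secret = 0xdeadbeef;

        /* 2-word scheme: the XOR of the addresses is identical, so the inputs collide */
        printf("folded:   %08x %08x\n",
               mix2(laddr1 ^ faddr1, ports, secret),
               mix2(laddr2 ^ faddr2, ports, secret));

        /* 3-word scheme: the addresses stay distinct inputs, so the hashes differ */
        printf("separate: %08x %08x\n",
               mix3(laddr1, faddr1, ports, secret),
               mix3(laddr2, faddr2, ports, secret));
        return 0;
    }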
@@ -417,7 +417,8 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
        l2cap_sock_kill(sk);
    }
 
-   del_timer_sync(&conn->info_timer);
+   if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
+       del_timer_sync(&conn->info_timer);
 
    hcon->l2cap_data = NULL;
    kfree(conn);

@@ -839,7 +839,7 @@ static void neigh_timer_handler(unsigned long arg)
        struct sk_buff *skb = skb_peek(&neigh->arp_queue);
        /* keep skb alive even if arp_queue overflows */
        if (skb)
-           skb_get(skb);
+           skb = skb_copy(skb, GFP_ATOMIC);
        write_unlock(&neigh->lock);
        neigh->ops->solicit(neigh, skb);
        atomic_inc(&neigh->probes);

@@ -39,6 +39,8 @@ static struct sk_buff_head skb_pool;
 static atomic_t trapped;
 
 #define USEC_PER_POLL 50
+#define NETPOLL_RX_ENABLED 1
+#define NETPOLL_RX_DROP 2
 
 #define MAX_SKB_SIZE \
        (MAX_UDP_CHUNK + sizeof(struct udphdr) + \
@@ -126,11 +128,13 @@ static int poll_one_napi(struct netpoll_info *npinfo,
    if (!test_bit(NAPI_STATE_SCHED, &napi->state))
        return budget;
 
+   npinfo->rx_flags |= NETPOLL_RX_DROP;
    atomic_inc(&trapped);
 
    work = napi->poll(napi, budget);
 
    atomic_dec(&trapped);
+   npinfo->rx_flags &= ~NETPOLL_RX_DROP;
 
    return budget - work;
 }
@@ -472,7 +476,7 @@ int __netpoll_rx(struct sk_buff *skb)
    if (skb->dev->type != ARPHRD_ETHER)
        goto out;
 
-   /* if receive ARP during middle of NAPI poll, then queue */
+   /* check if netpoll clients need ARP */
    if (skb->protocol == htons(ETH_P_ARP) &&
        atomic_read(&trapped)) {
        skb_queue_tail(&npi->arp_tx, skb);
@@ -534,9 +538,6 @@ int __netpoll_rx(struct sk_buff *skb)
    return 1;
 
 out:
-   /* If packet received while already in poll then just
-    * silently drop.
-    */
    if (atomic_read(&trapped)) {
        kfree_skb(skb);
        return 1;
@@ -675,6 +676,7 @@ int netpoll_setup(struct netpoll *np)
            goto release;
        }
 
+       npinfo->rx_flags = 0;
        npinfo->rx_np = NULL;
 
        spin_lock_init(&npinfo->rx_lock);
@@ -756,6 +758,7 @@ int netpoll_setup(struct netpoll *np)
 
    if (np->rx_hook) {
        spin_lock_irqsave(&npinfo->rx_lock, flags);
+       npinfo->rx_flags |= NETPOLL_RX_ENABLED;
        npinfo->rx_np = np;
        spin_unlock_irqrestore(&npinfo->rx_lock, flags);
    }
@@ -797,6 +800,7 @@ void netpoll_cleanup(struct netpoll *np)
            if (npinfo->rx_np == np) {
                spin_lock_irqsave(&npinfo->rx_lock, flags);
                npinfo->rx_np = NULL;
+               npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
                spin_unlock_irqrestore(&npinfo->rx_lock, flags);
            }
 

@@ -343,7 +343,7 @@ config INET_ESP
    tristate "IP: ESP transformation"
    select XFRM
    select CRYPTO
-   select CRYPTO_AEAD
+   select CRYPTO_AUTHENC
    select CRYPTO_HMAC
    select CRYPTO_MD5
    select CRYPTO_CBC

@@ -753,9 +753,9 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_diff)
        printk("Unknown ARP type 0x%04x for device %s\n", dev->type, dev->name);
        b->htype = dev->type; /* can cause undefined behavior */
    }
+
+   /* server_ip and your_ip address are both already zero per RFC2131 */
    b->hlen = dev->addr_len;
-   b->your_ip = NONE;
-   b->server_ip = NONE;
    memcpy(b->hw_addr, dev->dev_addr, dev->addr_len);
    b->secs = htons(jiffies_diff / HZ);
    b->xid = d->xid;

@@ -1,12 +1,13 @@
 /*
  * Binary Increase Congestion control for TCP
- *
+ * Home page:
+ *      http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC
  * This is from the implementation of BICTCP in
  * Lison-Xu, Kahaled Harfoush, and Injong Rhee.
  *  "Binary Increase Congestion Control for Fast, Long Distance
  *  Networks" in InfoComm 2004
  * Available from:
- *  http://www.csc.ncsu.edu/faculty/rhee/export/bitcp.pdf
+ *  http://netsrv.csc.ncsu.edu/export/bitcp.pdf
  *
  * Unless BIC is enabled and congestion window is large
  * this behaves the same as the original Reno.

@@ -1367,7 +1367,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
  * a normal way
  */
 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
-                                        u32 skip_to_seq)
+                                        u32 skip_to_seq, int *fack_count)
 {
    tcp_for_write_queue_from(skb, sk) {
        if (skb == tcp_send_head(sk))
@@ -1375,6 +1375,8 @@ static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
 
        if (!before(TCP_SKB_CB(skb)->end_seq, skip_to_seq))
            break;
+
+       *fack_count += tcp_skb_pcount(skb);
    }
    return skb;
 }
@@ -1390,7 +1392,7 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
        return skb;
 
    if (before(next_dup->start_seq, skip_to_seq)) {
-       skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq);
+       skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq, fack_count);
        tcp_sacktag_walk(skb, sk, NULL,
                         next_dup->start_seq, next_dup->end_seq,
                         1, fack_count, reord, flag);
@@ -1537,7 +1539,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
 
            /* Head todo? */
            if (before(start_seq, cache->start_seq)) {
-               skb = tcp_sacktag_skip(skb, sk, start_seq);
+               skb = tcp_sacktag_skip(skb, sk, start_seq,
+                                      &fack_count);
                skb = tcp_sacktag_walk(skb, sk, next_dup,
                                       start_seq,
                                       cache->start_seq,
@@ -1565,7 +1568,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
                goto walk;
            }
 
-           skb = tcp_sacktag_skip(skb, sk, cache->end_seq);
+           skb = tcp_sacktag_skip(skb, sk, cache->end_seq,
+                                  &fack_count);
            /* Check overlap against next cached too (past this one already) */
            cache++;
            continue;
@@ -1577,7 +1581,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
                break;
            fack_count = tp->fackets_out;
        }
-       skb = tcp_sacktag_skip(skb, sk, start_seq);
+       skb = tcp_sacktag_skip(skb, sk, start_seq, &fack_count);
 
 walk:
        skb = tcp_sacktag_walk(skb, sk, next_dup, start_seq, end_seq,

@@ -85,7 +85,7 @@ config INET6_ESP
    depends on IPV6
    select XFRM
    select CRYPTO
-   select CRYPTO_AEAD
+   select CRYPTO_AUTHENC
    select CRYPTO_HMAC
    select CRYPTO_MD5
    select CRYPTO_CBC

@@ -76,9 +76,11 @@ static int __init ircomm_init(void)
 
 #ifdef CONFIG_PROC_FS
    { struct proc_dir_entry *ent;
-   ent = create_proc_entry("ircomm", 0, proc_irda);
-   if (ent)
-       ent->proc_fops = &ircomm_proc_fops;
+   ent = proc_create("ircomm", 0, proc_irda, &ircomm_proc_fops);
+   if (!ent) {
+       printk(KERN_ERR "ircomm_init: can't create /proc entry!\n");
+       return -ENODEV;
+   }
    }
 #endif /* CONFIG_PROC_FS */
 

@@ -128,13 +128,11 @@ static int __init irlan_init(void)
 
 #ifdef CONFIG_PROC_FS
    { struct proc_dir_entry *proc;
-   proc = create_proc_entry("irlan", 0, proc_irda);
+   proc = proc_create("irlan", 0, proc_irda, &irlan_fops);
    if (!proc) {
        printk(KERN_ERR "irlan_init: can't create /proc entry!\n");
        return -ENODEV;
    }
-
-   proc->proc_fops = &irlan_fops;
    }
 #endif /* CONFIG_PROC_FS */
 

@@ -72,11 +72,9 @@ void __init irda_proc_register(void)
        return;
    proc_irda->owner = THIS_MODULE;
 
-   for (i=0; i<ARRAY_SIZE(irda_dirs); i++) {
-       d = create_proc_entry(irda_dirs[i].name, 0, proc_irda);
-       if (d)
-           d->proc_fops = irda_dirs[i].fops;
-   }
+   for (i = 0; i < ARRAY_SIZE(irda_dirs); i++)
+       d = proc_create(irda_dirs[i].name, 0, proc_irda,
+                       irda_dirs[i].fops);
 }
 
 /*

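In the three proc_create() conversions above, the point is ordering: create_proc_entry() made the entry visible before the caller filled in proc_fops, so a reader could open it while the ops pointer was still NULL, whereas proc_create() takes the ops up front and publishes a fully initialized entry. A stand-alone sketch of that initialize-before-publish shape (toy registry, not the procfs API):

    #include <stdio.h>
    #include <stdlib.h>

    struct entry {
        const char *name;
        void (*show)(void);        /* stands in for proc_fops */
    };

    static struct entry *registry;     /* what concurrent readers would see */

    static void show_ircomm(void) { puts("ircomm stats"); }

    /* Racy shape: the entry becomes visible before its ops are filled in. */
    static struct entry *register_then_init(const char *name)
    {
        struct entry *e = calloc(1, sizeof(*e));
        e->name = name;
        registry = e;                  /* published with e->show still NULL */
        return e;
    }

    /* Safe shape: fully initialize, then publish. */
    static struct entry *init_then_register(const char *name, void (*show)(void))
    {
        struct entry *e = calloc(1, sizeof(*e));
        e->name = name;
        e->show = show;
        registry = e;                  /* readers never see a NULL show */
        return e;
    }

    int main(void)
    {
        struct entry *bad = register_then_init("ircomm");
        printf("after publish, ops %s\n", registry->show ? "set" : "NULL");
        bad->show = show_ircomm;       /* the window a reader could hit */

        struct entry *good = init_then_register("ircomm", show_ircomm);
        printf("after publish, ops %s\n", registry->show ? "set" : "NULL");
        registry->show();

        free(bad);
        free(good);
        return 0;
    }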
@@ -621,7 +621,6 @@ static int iucv_sever_pathid(u16 pathid, u8 userdata[16])
    return iucv_call_b2f0(IUCV_SEVER, parm);
 }
 
-#ifdef CONFIG_SMP
 /**
  * __iucv_cleanup_queue
  * @dummy: unused dummy argument
@@ -632,7 +631,6 @@ static int iucv_sever_pathid(u16 pathid, u8 userdata[16])
 static void __iucv_cleanup_queue(void *dummy)
 {
 }
-#endif
 
 /**
  * iucv_cleanup_queue

@@ -2,7 +2,7 @@
  * Copyright 2002-2005, Instant802 Networks, Inc.
  * Copyright 2005, Devicescape Software, Inc.
  * Copyright 2007, Mattias Nissler <mattias.nissler@gmx.de>
- * Copyright 2007, Stefano Brivio <stefano.brivio@polimi.it>
+ * Copyright 2007-2008, Stefano Brivio <stefano.brivio@polimi.it>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -63,72 +63,66 @@
  * RC_PID_ARITH_SHIFT.
  */
 
-
-
-/* Shift the adjustment so that we won't switch to a lower rate if it exhibited
- * a worse failed frames behaviour and we'll choose the highest rate whose
- * failed frames behaviour is not worse than the one of the original rate
- * target. While at it, check that the adjustment is within the ranges. Then,
- * provide the new rate index. */
-static int rate_control_pid_shift_adjust(struct rc_pid_rateinfo *r,
-                                         int adj, int cur, int l)
-{
-   int i, j, k, tmp;
-
-   j = r[cur].rev_index;
-   i = j + adj;
-
-   if (i < 0)
-       return r[0].index;
-   if (i >= l - 1)
-       return r[l - 1].index;
-
-   tmp = i;
-
-   if (adj < 0) {
-       for (k = j; k >= i; k--)
-           if (r[k].diff <= r[j].diff)
-               tmp = k;
-   } else {
-       for (k = i + 1; k + i < l; k++)
-           if (r[k].diff <= r[i].diff)
-               tmp = k;
-   }
-
-   return r[tmp].index;
-}
-
+/* Adjust the rate while ensuring that we won't switch to a lower rate if it
+ * exhibited a worse failed frames behaviour and we'll choose the highest rate
+ * whose failed frames behaviour is not worse than the one of the original rate
+ * target. While at it, check that the new rate is valid. */
 static void rate_control_pid_adjust_rate(struct ieee80211_local *local,
                                          struct sta_info *sta, int adj,
                                          struct rc_pid_rateinfo *rinfo)
 {
    struct ieee80211_sub_if_data *sdata;
    struct ieee80211_hw_mode *mode;
-   int newidx;
-   int maxrate;
-   int back = (adj > 0) ? 1 : -1;
+   int cur_sorted, new_sorted, probe, tmp, n_bitrates;
+   int cur = sta->txrate;
 
    sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev);
 
    mode = local->oper_hw_mode;
-   maxrate = sdata->bss ? sdata->bss->max_ratectrl_rateidx : -1;
+   n_bitrates = mode->num_rates;
 
-   newidx = rate_control_pid_shift_adjust(rinfo, adj, sta->txrate,
-                                          mode->num_rates);
+   /* Map passed arguments to sorted values. */
+   cur_sorted = rinfo[cur].rev_index;
+   new_sorted = cur_sorted + adj;
 
-   while (newidx != sta->txrate) {
-       if (rate_supported(sta, mode, newidx) &&
-           (maxrate < 0 || newidx <= maxrate)) {
-           sta->txrate = newidx;
+   /* Check limits. */
+   if (new_sorted < 0)
+       new_sorted = rinfo[0].rev_index;
+   else if (new_sorted >= n_bitrates)
+       new_sorted = rinfo[n_bitrates - 1].rev_index;
+
+   tmp = new_sorted;
+
+   if (adj < 0) {
+       /* Ensure that the rate decrease isn't disadvantageous. */
+       for (probe = cur_sorted; probe >= new_sorted; probe--)
+           if (rinfo[probe].diff <= rinfo[cur_sorted].diff &&
+               rate_supported(sta, mode, rinfo[probe].index))
+               tmp = probe;
+   } else {
+       /* Look for rate increase with zero (or below) cost. */
+       for (probe = new_sorted + 1; probe < n_bitrates; probe++)
+           if (rinfo[probe].diff <= rinfo[new_sorted].diff &&
+               rate_supported(sta, mode, rinfo[probe].index))
+               tmp = probe;
+   }
+
+   /* Fit the rate found to the nearest supported rate. */
+   do {
+       if (rate_supported(sta, mode, rinfo[tmp].index)) {
+           sta->txrate = rinfo[tmp].index;
            break;
        }
 
-       newidx += back;
-   }
+       if (adj < 0)
+           tmp--;
+       else
+           tmp++;
+   } while (tmp < n_bitrates && tmp >= 0);
 
 #ifdef CONFIG_MAC80211_DEBUGFS
    rate_control_pid_event_rate_change(
        &((struct rc_pid_sta_info *)sta->rate_ctrl_priv)->events,
-       newidx, mode->rates[newidx].rate);
+       cur, mode->rates[cur].rate);
 #endif
 }

@@ -256,12 +256,10 @@ int __init sctp_eps_proc_init(void)
 {
    struct proc_dir_entry *p;
 
-   p = create_proc_entry("eps", S_IRUGO, proc_net_sctp);
+   p = proc_create("eps", S_IRUGO, proc_net_sctp, &sctp_eps_seq_fops);
    if (!p)
        return -ENOMEM;
 
-   p->proc_fops = &sctp_eps_seq_fops;
-
    return 0;
 }
 
@@ -367,12 +365,11 @@ int __init sctp_assocs_proc_init(void)
 {
    struct proc_dir_entry *p;
 
-   p = create_proc_entry("assocs", S_IRUGO, proc_net_sctp);
+   p = proc_create("assocs", S_IRUGO, proc_net_sctp,
+                   &sctp_assocs_seq_fops);
    if (!p)
        return -ENOMEM;
 
-   p->proc_fops = &sctp_assocs_seq_fops;
-
    return 0;
 }
 