1 | /* $NetBSD: ath.c,v 1.122 2016/06/10 13:27:13 ozaki-r Exp $ */ |
2 | |
3 | /*- |
4 | * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting |
5 | * All rights reserved. |
6 | * |
7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions |
9 | * are met: |
10 | * 1. Redistributions of source code must retain the above copyright |
11 | * notice, this list of conditions and the following disclaimer, |
12 | * without modification. |
13 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer |
14 | * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any |
15 | * redistribution must be conditioned upon including a substantially |
16 | * similar Disclaimer requirement for further binary redistribution. |
17 | * 3. Neither the names of the above-listed copyright holders nor the names |
18 | * of any contributors may be used to endorse or promote products derived |
19 | * from this software without specific prior written permission. |
20 | * |
21 | * Alternatively, this software may be distributed under the terms of the |
22 | * GNU General Public License ("GPL") version 2 as published by the Free |
23 | * Software Foundation. |
24 | * |
25 | * NO WARRANTY |
26 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
27 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
28 | * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY |
29 | * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL |
30 | * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, |
31 | * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
32 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
33 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER |
34 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
35 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
36 | * THE POSSIBILITY OF SUCH DAMAGES. |
37 | */ |
38 | |
39 | #include <sys/cdefs.h> |
40 | #ifdef __FreeBSD__ |
41 | __FBSDID("$FreeBSD: src/sys/dev/ath/if_ath.c,v 1.104 2005/09/16 10:09:23 ru Exp $" ); |
42 | #endif |
43 | #ifdef __NetBSD__ |
44 | __KERNEL_RCSID(0, "$NetBSD: ath.c,v 1.122 2016/06/10 13:27:13 ozaki-r Exp $" ); |
45 | #endif |
46 | |
47 | /* |
48 | * Driver for the Atheros Wireless LAN controller. |
49 | * |
50 | * This software is derived from work of Atsushi Onoe; his contribution |
51 | * is greatly appreciated. |
52 | */ |
53 | |
54 | #ifdef _KERNEL_OPT |
55 | #include "opt_inet.h" |
56 | #endif |
57 | |
58 | #include <sys/param.h> |
59 | #include <sys/reboot.h> |
60 | #include <sys/systm.h> |
61 | #include <sys/types.h> |
62 | #include <sys/sysctl.h> |
63 | #include <sys/mbuf.h> |
64 | #include <sys/malloc.h> |
65 | #include <sys/kernel.h> |
66 | #include <sys/socket.h> |
67 | #include <sys/sockio.h> |
68 | #include <sys/errno.h> |
69 | #include <sys/callout.h> |
70 | #include <sys/bus.h> |
71 | #include <sys/endian.h> |
72 | |
73 | #include <net/if.h> |
74 | #include <net/if_dl.h> |
75 | #include <net/if_media.h> |
76 | #include <net/if_types.h> |
77 | #include <net/if_arp.h> |
78 | #include <net/if_ether.h> |
79 | #include <net/if_llc.h> |
80 | |
81 | #include <net80211/ieee80211_netbsd.h> |
82 | #include <net80211/ieee80211_var.h> |
83 | |
84 | #include <net/bpf.h> |
85 | |
86 | #ifdef INET |
87 | #include <netinet/in.h> |
88 | #endif |
89 | |
90 | #include <sys/device.h> |
91 | #include <dev/ic/ath_netbsd.h> |
92 | |
93 | #define AR_DEBUG |
94 | #include <dev/ic/athvar.h> |
95 | #include "ah_desc.h" |
96 | #include "ah_devid.h" /* XXX for softled */ |
97 | #include "opt_ah.h" |
98 | |
99 | #ifdef ATH_TX99_DIAG |
100 | #include <dev/ath/ath_tx99/ath_tx99.h> |
101 | #endif |
102 | |
103 | /* unaligned little endian access */ |
104 | #define LE_READ_2(p) \ |
105 | ((u_int16_t) \ |
106 | ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8))) |
107 | #define LE_READ_4(p) \ |
108 | ((u_int32_t) \ |
109 | ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8) | \ |
110 | (((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24))) |
111 | |
112 | enum { |
113 | ATH_LED_TX, |
114 | ATH_LED_RX, |
115 | ATH_LED_POLL, |
116 | }; |
117 | |
118 | #ifdef AH_NEED_DESC_SWAP |
119 | #define HTOAH32(x) htole32(x) |
120 | #else |
121 | #define HTOAH32(x) (x) |
122 | #endif |
123 | |
124 | static int ath_ifinit(struct ifnet *); |
125 | static int ath_init(struct ath_softc *); |
126 | static void ath_stop_locked(struct ifnet *, int); |
127 | static void ath_stop(struct ifnet *, int); |
128 | static void ath_start(struct ifnet *); |
129 | static int ath_media_change(struct ifnet *); |
130 | static void ath_watchdog(struct ifnet *); |
131 | static int ath_ioctl(struct ifnet *, u_long, void *); |
132 | static void ath_fatal_proc(void *, int); |
133 | static void ath_rxorn_proc(void *, int); |
134 | static void ath_bmiss_proc(void *, int); |
135 | static void ath_radar_proc(void *, int); |
136 | static int ath_key_alloc(struct ieee80211com *, |
137 | const struct ieee80211_key *, |
138 | ieee80211_keyix *, ieee80211_keyix *); |
139 | static int ath_key_delete(struct ieee80211com *, |
140 | const struct ieee80211_key *); |
141 | static int ath_key_set(struct ieee80211com *, const struct ieee80211_key *, |
142 | const u_int8_t mac[IEEE80211_ADDR_LEN]); |
143 | static void ath_key_update_begin(struct ieee80211com *); |
144 | static void ath_key_update_end(struct ieee80211com *); |
145 | static void ath_mode_init(struct ath_softc *); |
146 | static void ath_setslottime(struct ath_softc *); |
147 | static void ath_updateslot(struct ifnet *); |
148 | static int ath_beaconq_setup(struct ath_hal *); |
149 | static int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *); |
150 | static void ath_beacon_setup(struct ath_softc *, struct ath_buf *); |
151 | static void ath_beacon_proc(void *, int); |
152 | static void ath_bstuck_proc(void *, int); |
153 | static void ath_beacon_free(struct ath_softc *); |
154 | static void ath_beacon_config(struct ath_softc *); |
155 | static void ath_descdma_cleanup(struct ath_softc *sc, |
156 | struct ath_descdma *, ath_bufhead *); |
157 | static int ath_desc_alloc(struct ath_softc *); |
158 | static void ath_desc_free(struct ath_softc *); |
159 | static struct ieee80211_node *ath_node_alloc(struct ieee80211_node_table *); |
160 | static void ath_node_free(struct ieee80211_node *); |
161 | static u_int8_t ath_node_getrssi(const struct ieee80211_node *); |
162 | static int ath_rxbuf_init(struct ath_softc *, struct ath_buf *); |
163 | static void ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m, |
164 | struct ieee80211_node *ni, |
165 | int subtype, int , u_int32_t rstamp); |
166 | static void ath_setdefantenna(struct ath_softc *, u_int); |
167 | static void ath_rx_proc(void *, int); |
168 | static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype); |
169 | static int ath_tx_setup(struct ath_softc *, int, int); |
170 | static int ath_wme_update(struct ieee80211com *); |
171 | static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *); |
172 | static void ath_tx_cleanup(struct ath_softc *); |
173 | static int ath_tx_start(struct ath_softc *, struct ieee80211_node *, |
174 | struct ath_buf *, struct mbuf *); |
175 | static void ath_tx_proc_q0(void *, int); |
176 | static void ath_tx_proc_q0123(void *, int); |
177 | static void ath_tx_proc(void *, int); |
178 | static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); |
179 | static void ath_draintxq(struct ath_softc *); |
180 | static void ath_stoprecv(struct ath_softc *); |
181 | static int ath_startrecv(struct ath_softc *); |
182 | static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *); |
183 | static void ath_next_scan(void *); |
184 | static void ath_calibrate(void *); |
185 | static int ath_newstate(struct ieee80211com *, enum ieee80211_state, int); |
186 | static void ath_setup_stationkey(struct ieee80211_node *); |
187 | static void ath_newassoc(struct ieee80211_node *, int); |
188 | static int ath_getchannels(struct ath_softc *, u_int cc, |
189 | HAL_BOOL outdoor, HAL_BOOL xchanmode); |
190 | static void ath_led_event(struct ath_softc *, int); |
191 | static void ath_update_txpow(struct ath_softc *); |
192 | static void ath_freetx(struct mbuf *); |
193 | static void ath_restore_diversity(struct ath_softc *); |
194 | |
195 | static int ath_rate_setup(struct ath_softc *, u_int mode); |
196 | static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); |
197 | |
198 | static void ath_bpfattach(struct ath_softc *); |
199 | static void ath_announce(struct ath_softc *); |
200 | |
201 | int ath_dwelltime = 200; /* 5 channels/second */ |
202 | int ath_calinterval = 30; /* calibrate every 30 secs */ |
203 | int ath_outdoor = AH_TRUE; /* outdoor operation */ |
204 | int ath_xchanmode = AH_TRUE; /* enable extended channels */ |
205 | int ath_countrycode = CTRY_DEFAULT; /* country code */ |
206 | int ath_regdomain = 0; /* regulatory domain */ |
207 | int ath_debug = 0; |
208 | int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */ |
209 | int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */ |
210 | |
211 | #ifdef AR_DEBUG |
212 | enum { |
213 | ATH_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ |
214 | ATH_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */ |
215 | ATH_DEBUG_RECV = 0x00000004, /* basic recv operation */ |
216 | ATH_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */ |
217 | ATH_DEBUG_RATE = 0x00000010, /* rate control */ |
218 | ATH_DEBUG_RESET = 0x00000020, /* reset processing */ |
219 | ATH_DEBUG_MODE = 0x00000040, /* mode init/setup */ |
220 | ATH_DEBUG_BEACON = 0x00000080, /* beacon handling */ |
221 | ATH_DEBUG_WATCHDOG = 0x00000100, /* watchdog timeout */ |
222 | ATH_DEBUG_INTR = 0x00001000, /* ISR */ |
223 | ATH_DEBUG_TX_PROC = 0x00002000, /* tx ISR proc */ |
224 | ATH_DEBUG_RX_PROC = 0x00004000, /* rx ISR proc */ |
225 | ATH_DEBUG_BEACON_PROC = 0x00008000, /* beacon ISR proc */ |
226 | ATH_DEBUG_CALIBRATE = 0x00010000, /* periodic calibration */ |
227 | ATH_DEBUG_KEYCACHE = 0x00020000, /* key cache management */ |
228 | ATH_DEBUG_STATE = 0x00040000, /* 802.11 state transitions */ |
229 | ATH_DEBUG_NODE = 0x00080000, /* node management */ |
230 | ATH_DEBUG_LED = 0x00100000, /* led management */ |
231 | ATH_DEBUG_FF = 0x00200000, /* fast frames */ |
232 | ATH_DEBUG_DFS = 0x00400000, /* DFS processing */ |
233 | ATH_DEBUG_FATAL = 0x80000000, /* fatal errors */ |
234 | ATH_DEBUG_ANY = 0xffffffff |
235 | }; |
236 | #define IFF_DUMPPKTS(sc, m) \ |
237 | ((sc->sc_debug & (m)) || \ |
238 | (sc->sc_if.if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2)) |
239 | #define DPRINTF(sc, m, fmt, ...) do { \ |
240 | if (sc->sc_debug & (m)) \ |
241 | printf(fmt, __VA_ARGS__); \ |
242 | } while (0) |
243 | #define KEYPRINTF(sc, ix, hk, mac) do { \ |
244 | if (sc->sc_debug & ATH_DEBUG_KEYCACHE) \ |
245 | ath_keyprint(__func__, ix, hk, mac); \ |
246 | } while (0) |
247 | static void ath_printrxbuf(struct ath_buf *bf, int); |
248 | static void ath_printtxbuf(struct ath_buf *bf, int); |
249 | #else |
250 | #define IFF_DUMPPKTS(sc, m) \ |
251 | ((sc->sc_if.if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2)) |
252 | #define DPRINTF(m, fmt, ...) |
253 | #define KEYPRINTF(sc, k, ix, mac) |
254 | #endif |
255 | |
256 | MALLOC_DEFINE(M_ATHDEV, "athdev" , "ath driver dma buffers" ); |
257 | |
258 | int |
259 | ath_attach(u_int16_t devid, struct ath_softc *sc) |
260 | { |
261 | struct ifnet *ifp = &sc->sc_if; |
262 | struct ieee80211com *ic = &sc->sc_ic; |
263 | struct ath_hal *ah = NULL; |
264 | HAL_STATUS status; |
265 | int error = 0, i; |
266 | |
267 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n" , __func__, devid); |
268 | |
269 | pmf_self_suspensor_init(sc->sc_dev, &sc->sc_suspensor, &sc->sc_qual); |
270 | |
271 | memcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); |
272 | |
273 | ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, &status); |
274 | if (ah == NULL) { |
275 | if_printf(ifp, "unable to attach hardware; HAL status %u\n" , |
276 | status); |
277 | error = ENXIO; |
278 | goto bad; |
279 | } |
280 | if (ah->ah_abi != HAL_ABI_VERSION) { |
281 | if_printf(ifp, "HAL ABI mismatch detected " |
282 | "(HAL:0x%x != driver:0x%x)\n" , |
283 | ah->ah_abi, HAL_ABI_VERSION); |
284 | error = ENXIO; |
285 | goto bad; |
286 | } |
287 | sc->sc_ah = ah; |
288 | |
289 | if (!prop_dictionary_set_bool(device_properties(sc->sc_dev), |
290 | "pmf-powerdown" , false)) |
291 | goto bad; |
292 | |
293 | /* |
294 | * Check if the MAC has multi-rate retry support. |
295 | * We do this by trying to setup a fake extended |
296 | * descriptor. MAC's that don't have support will |
297 | * return false w/o doing anything. MAC's that do |
298 | * support it will return true w/o doing anything. |
299 | */ |
300 | sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0); |
301 | |
302 | /* |
303 | * Check if the device has hardware counters for PHY |
304 | * errors. If so we need to enable the MIB interrupt |
305 | * so we can act on stat triggers. |
306 | */ |
307 | if (ath_hal_hwphycounters(ah)) |
308 | sc->sc_needmib = 1; |
309 | |
310 | /* |
311 | * Get the hardware key cache size. |
312 | */ |
313 | sc->sc_keymax = ath_hal_keycachesize(ah); |
314 | if (sc->sc_keymax > ATH_KEYMAX) { |
315 | if_printf(ifp, "Warning, using only %u of %u key cache slots\n" , |
316 | ATH_KEYMAX, sc->sc_keymax); |
317 | sc->sc_keymax = ATH_KEYMAX; |
318 | } |
319 | /* |
320 | * Reset the key cache since some parts do not |
321 | * reset the contents on initial power up. |
322 | */ |
323 | for (i = 0; i < sc->sc_keymax; i++) |
324 | ath_hal_keyreset(ah, i); |
325 | /* |
326 | * Mark key cache slots associated with global keys |
327 | * as in use. If we knew TKIP was not to be used we |
328 | * could leave the +32, +64, and +32+64 slots free. |
329 | * XXX only for splitmic. |
330 | */ |
331 | for (i = 0; i < IEEE80211_WEP_NKID; i++) { |
332 | setbit(sc->sc_keymap, i); |
333 | setbit(sc->sc_keymap, i+32); |
334 | setbit(sc->sc_keymap, i+64); |
335 | setbit(sc->sc_keymap, i+32+64); |
336 | } |
337 | |
338 | /* |
339 | * Collect the channel list using the default country |
340 | * code and including outdoor channels. The 802.11 layer |
341 | * is resposible for filtering this list based on settings |
342 | * like the phy mode. |
343 | */ |
344 | error = ath_getchannels(sc, ath_countrycode, |
345 | ath_outdoor, ath_xchanmode); |
346 | if (error != 0) |
347 | goto bad; |
348 | |
349 | /* |
350 | * Setup rate tables for all potential media types. |
351 | */ |
352 | ath_rate_setup(sc, IEEE80211_MODE_11A); |
353 | ath_rate_setup(sc, IEEE80211_MODE_11B); |
354 | ath_rate_setup(sc, IEEE80211_MODE_11G); |
355 | ath_rate_setup(sc, IEEE80211_MODE_TURBO_A); |
356 | ath_rate_setup(sc, IEEE80211_MODE_TURBO_G); |
357 | /* NB: setup here so ath_rate_update is happy */ |
358 | ath_setcurmode(sc, IEEE80211_MODE_11A); |
359 | |
360 | /* |
361 | * Allocate tx+rx descriptors and populate the lists. |
362 | */ |
363 | error = ath_desc_alloc(sc); |
364 | if (error != 0) { |
365 | if_printf(ifp, "failed to allocate descriptors: %d\n" , error); |
366 | goto bad; |
367 | } |
368 | ATH_CALLOUT_INIT(&sc->sc_scan_ch, debug_mpsafenet ? CALLOUT_MPSAFE : 0); |
369 | ATH_CALLOUT_INIT(&sc->sc_cal_ch, CALLOUT_MPSAFE); |
370 | #if 0 |
371 | ATH_CALLOUT_INIT(&sc->sc_dfs_ch, CALLOUT_MPSAFE); |
372 | #endif |
373 | |
374 | ATH_TXBUF_LOCK_INIT(sc); |
375 | |
376 | TASK_INIT(&sc->sc_rxtask, 0, ath_rx_proc, sc); |
377 | TASK_INIT(&sc->sc_rxorntask, 0, ath_rxorn_proc, sc); |
378 | TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc); |
379 | TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc); |
380 | TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc); |
381 | TASK_INIT(&sc->sc_radartask, 0, ath_radar_proc, sc); |
382 | |
383 | /* |
384 | * Allocate hardware transmit queues: one queue for |
385 | * beacon frames and one data queue for each QoS |
386 | * priority. Note that the hal handles reseting |
387 | * these queues at the needed time. |
388 | * |
389 | * XXX PS-Poll |
390 | */ |
391 | sc->sc_bhalq = ath_beaconq_setup(ah); |
392 | if (sc->sc_bhalq == (u_int) -1) { |
393 | if_printf(ifp, "unable to setup a beacon xmit queue!\n" ); |
394 | error = EIO; |
395 | goto bad2; |
396 | } |
397 | sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0); |
398 | if (sc->sc_cabq == NULL) { |
399 | if_printf(ifp, "unable to setup CAB xmit queue!\n" ); |
400 | error = EIO; |
401 | goto bad2; |
402 | } |
403 | /* NB: insure BK queue is the lowest priority h/w queue */ |
404 | if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) { |
405 | if_printf(ifp, "unable to setup xmit queue for %s traffic!\n" , |
406 | ieee80211_wme_acnames[WME_AC_BK]); |
407 | error = EIO; |
408 | goto bad2; |
409 | } |
410 | if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) || |
411 | !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) || |
412 | !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) { |
413 | /* |
414 | * Not enough hardware tx queues to properly do WME; |
415 | * just punt and assign them all to the same h/w queue. |
416 | * We could do a better job of this if, for example, |
417 | * we allocate queues when we switch from station to |
418 | * AP mode. |
419 | */ |
420 | if (sc->sc_ac2q[WME_AC_VI] != NULL) |
421 | ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]); |
422 | if (sc->sc_ac2q[WME_AC_BE] != NULL) |
423 | ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]); |
424 | sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK]; |
425 | sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK]; |
426 | sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK]; |
427 | } |
428 | |
429 | /* |
430 | * Special case certain configurations. Note the |
431 | * CAB queue is handled by these specially so don't |
432 | * include them when checking the txq setup mask. |
433 | */ |
434 | switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) { |
435 | case 0x01: |
436 | TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc); |
437 | break; |
438 | case 0x0f: |
439 | TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc); |
440 | break; |
441 | default: |
442 | TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc); |
443 | break; |
444 | } |
445 | |
446 | /* |
447 | * Setup rate control. Some rate control modules |
448 | * call back to change the anntena state so expose |
449 | * the necessary entry points. |
450 | * XXX maybe belongs in struct ath_ratectrl? |
451 | */ |
452 | sc->sc_setdefantenna = ath_setdefantenna; |
453 | sc->sc_rc = ath_rate_attach(sc); |
454 | if (sc->sc_rc == NULL) { |
455 | error = EIO; |
456 | goto bad2; |
457 | } |
458 | |
459 | sc->sc_blinking = 0; |
460 | sc->sc_ledstate = 1; |
461 | sc->sc_ledon = 0; /* low true */ |
462 | sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */ |
463 | ATH_CALLOUT_INIT(&sc->sc_ledtimer, CALLOUT_MPSAFE); |
464 | /* |
465 | * Auto-enable soft led processing for IBM cards and for |
466 | * 5211 minipci cards. Users can also manually enable/disable |
467 | * support with a sysctl. |
468 | */ |
469 | sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID); |
470 | if (sc->sc_softled) { |
471 | ath_hal_gpioCfgOutput(ah, sc->sc_ledpin, |
472 | HAL_GPIO_MUX_MAC_NETWORK_LED); |
473 | ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon); |
474 | } |
475 | |
476 | ifp->if_softc = sc; |
477 | ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; |
478 | ifp->if_start = ath_start; |
479 | ifp->if_stop = ath_stop; |
480 | ifp->if_watchdog = ath_watchdog; |
481 | ifp->if_ioctl = ath_ioctl; |
482 | ifp->if_init = ath_ifinit; |
483 | IFQ_SET_READY(&ifp->if_snd); |
484 | |
485 | ic->ic_ifp = ifp; |
486 | ic->ic_reset = ath_reset; |
487 | ic->ic_newassoc = ath_newassoc; |
488 | ic->ic_updateslot = ath_updateslot; |
489 | ic->ic_wme.wme_update = ath_wme_update; |
490 | /* XXX not right but it's not used anywhere important */ |
491 | ic->ic_phytype = IEEE80211_T_OFDM; |
492 | ic->ic_opmode = IEEE80211_M_STA; |
493 | ic->ic_caps = |
494 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ |
495 | | IEEE80211_C_HOSTAP /* hostap mode */ |
496 | | IEEE80211_C_MONITOR /* monitor mode */ |
497 | | IEEE80211_C_SHPREAMBLE /* short preamble supported */ |
498 | | IEEE80211_C_SHSLOT /* short slot time supported */ |
499 | | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ |
500 | | IEEE80211_C_TXFRAG /* handle tx frags */ |
501 | ; |
502 | /* |
503 | * Query the hal to figure out h/w crypto support. |
504 | */ |
505 | if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP)) |
506 | ic->ic_caps |= IEEE80211_C_WEP; |
507 | if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB)) |
508 | ic->ic_caps |= IEEE80211_C_AES; |
509 | if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM)) |
510 | ic->ic_caps |= IEEE80211_C_AES_CCM; |
511 | if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP)) |
512 | ic->ic_caps |= IEEE80211_C_CKIP; |
513 | if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) { |
514 | ic->ic_caps |= IEEE80211_C_TKIP; |
515 | /* |
516 | * Check if h/w does the MIC and/or whether the |
517 | * separate key cache entries are required to |
518 | * handle both tx+rx MIC keys. |
519 | */ |
520 | if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC)) |
521 | ic->ic_caps |= IEEE80211_C_TKIPMIC; |
522 | |
523 | /* |
524 | * If the h/w supports storing tx+rx MIC keys |
525 | * in one cache slot automatically enable use. |
526 | */ |
527 | if (ath_hal_hastkipsplit(ah) || |
528 | !ath_hal_settkipsplit(ah, AH_FALSE)) |
529 | sc->sc_splitmic = 1; |
530 | |
531 | /* |
532 | * If the h/w can do TKIP MIC together with WME then |
533 | * we use it; otherwise we force the MIC to be done |
534 | * in software by the net80211 layer. |
535 | */ |
536 | if (ath_hal_haswmetkipmic(ah)) |
537 | ic->ic_caps |= IEEE80211_C_WME_TKIPMIC; |
538 | } |
539 | sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR); |
540 | sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah); |
541 | /* |
542 | * Mark key cache slots associated with global keys |
543 | * as in use. If we knew TKIP was not to be used we |
544 | * could leave the +32, +64, and +32+64 slots free. |
545 | */ |
546 | for (i = 0; i < IEEE80211_WEP_NKID; i++) { |
547 | setbit(sc->sc_keymap, i); |
548 | setbit(sc->sc_keymap, i+64); |
549 | if (sc->sc_splitmic) { |
550 | setbit(sc->sc_keymap, i+32); |
551 | setbit(sc->sc_keymap, i+32+64); |
552 | } |
553 | } |
554 | /* |
555 | * TPC support can be done either with a global cap or |
556 | * per-packet support. The latter is not available on |
557 | * all parts. We're a bit pedantic here as all parts |
558 | * support a global cap. |
559 | */ |
560 | if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah)) |
561 | ic->ic_caps |= IEEE80211_C_TXPMGT; |
562 | |
563 | /* |
564 | * Mark WME capability only if we have sufficient |
565 | * hardware queues to do proper priority scheduling. |
566 | */ |
567 | if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK]) |
568 | ic->ic_caps |= IEEE80211_C_WME; |
569 | /* |
570 | * Check for misc other capabilities. |
571 | */ |
572 | if (ath_hal_hasbursting(ah)) |
573 | ic->ic_caps |= IEEE80211_C_BURST; |
574 | |
575 | /* |
576 | * Indicate we need the 802.11 header padded to a |
577 | * 32-bit boundary for 4-address and QoS frames. |
578 | */ |
579 | ic->ic_flags |= IEEE80211_F_DATAPAD; |
580 | |
581 | /* |
582 | * Query the hal about antenna support. |
583 | */ |
584 | sc->sc_defant = ath_hal_getdefantenna(ah); |
585 | |
586 | /* |
587 | * Not all chips have the VEOL support we want to |
588 | * use with IBSS beacons; check here for it. |
589 | */ |
590 | sc->sc_hasveol = ath_hal_hasveol(ah); |
591 | |
592 | /* get mac address from hardware */ |
593 | ath_hal_getmac(ah, ic->ic_myaddr); |
594 | |
595 | if_attach(ifp); |
596 | /* call MI attach routine. */ |
597 | ieee80211_ifattach(ic); |
598 | /* override default methods */ |
599 | ic->ic_node_alloc = ath_node_alloc; |
600 | sc->sc_node_free = ic->ic_node_free; |
601 | ic->ic_node_free = ath_node_free; |
602 | ic->ic_node_getrssi = ath_node_getrssi; |
603 | sc->sc_recv_mgmt = ic->ic_recv_mgmt; |
604 | ic->ic_recv_mgmt = ath_recv_mgmt; |
605 | sc->sc_newstate = ic->ic_newstate; |
606 | ic->ic_newstate = ath_newstate; |
607 | ic->ic_crypto.cs_max_keyix = sc->sc_keymax; |
608 | ic->ic_crypto.cs_key_alloc = ath_key_alloc; |
609 | ic->ic_crypto.cs_key_delete = ath_key_delete; |
610 | ic->ic_crypto.cs_key_set = ath_key_set; |
611 | ic->ic_crypto.cs_key_update_begin = ath_key_update_begin; |
612 | ic->ic_crypto.cs_key_update_end = ath_key_update_end; |
613 | /* complete initialization */ |
614 | ieee80211_media_init(ic, ath_media_change, ieee80211_media_status); |
615 | |
616 | ath_bpfattach(sc); |
617 | |
618 | sc->sc_flags |= ATH_ATTACHED; |
619 | |
620 | /* |
621 | * Setup dynamic sysctl's now that country code and |
622 | * regdomain are available from the hal. |
623 | */ |
624 | ath_sysctlattach(sc); |
625 | |
626 | ieee80211_announce(ic); |
627 | ath_announce(sc); |
628 | return 0; |
629 | bad2: |
630 | ath_tx_cleanup(sc); |
631 | ath_desc_free(sc); |
632 | bad: |
633 | if (ah) |
634 | ath_hal_detach(ah); |
635 | /* XXX don't get under the abstraction like this */ |
636 | sc->sc_dev->dv_flags &= ~DVF_ACTIVE; |
637 | return error; |
638 | } |
639 | |
640 | int |
641 | ath_detach(struct ath_softc *sc) |
642 | { |
643 | struct ifnet *ifp = &sc->sc_if; |
644 | int s; |
645 | |
646 | if ((sc->sc_flags & ATH_ATTACHED) == 0) |
647 | return (0); |
648 | |
649 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n" , |
650 | __func__, ifp->if_flags); |
651 | |
652 | s = splnet(); |
653 | ath_stop(ifp, 1); |
654 | bpf_detach(ifp); |
655 | /* |
656 | * NB: the order of these is important: |
657 | * o call the 802.11 layer before detaching the hal to |
658 | * insure callbacks into the driver to delete global |
659 | * key cache entries can be handled |
660 | * o reclaim the tx queue data structures after calling |
661 | * the 802.11 layer as we'll get called back to reclaim |
662 | * node state and potentially want to use them |
663 | * o to cleanup the tx queues the hal is called, so detach |
664 | * it last |
665 | * Other than that, it's straightforward... |
666 | */ |
667 | ieee80211_ifdetach(&sc->sc_ic); |
668 | #ifdef ATH_TX99_DIAG |
669 | if (sc->sc_tx99 != NULL) |
670 | sc->sc_tx99->detach(sc->sc_tx99); |
671 | #endif |
672 | ath_rate_detach(sc->sc_rc); |
673 | ath_desc_free(sc); |
674 | ath_tx_cleanup(sc); |
675 | sysctl_teardown(&sc->sc_sysctllog); |
676 | ath_hal_detach(sc->sc_ah); |
677 | if_detach(ifp); |
678 | splx(s); |
679 | |
680 | return 0; |
681 | } |
682 | |
683 | void |
684 | ath_suspend(struct ath_softc *sc) |
685 | { |
686 | #if notyet |
687 | /* |
688 | * Set the chip in full sleep mode. Note that we are |
689 | * careful to do this only when bringing the interface |
690 | * completely to a stop. When the chip is in this state |
691 | * it must be carefully woken up or references to |
692 | * registers in the PCI clock domain may freeze the bus |
693 | * (and system). This varies by chip and is mostly an |
694 | * issue with newer parts that go to sleep more quickly. |
695 | */ |
696 | ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP); |
697 | #endif |
698 | } |
699 | |
700 | bool |
701 | ath_resume(struct ath_softc *sc) |
702 | { |
703 | struct ath_hal *ah = sc->sc_ah; |
704 | struct ieee80211com *ic = &sc->sc_ic; |
705 | HAL_STATUS status; |
706 | int i; |
707 | |
708 | #if notyet |
709 | ath_hal_setpower(ah, HAL_PM_AWAKE); |
710 | #else |
711 | ath_hal_reset(ah, ic->ic_opmode, &sc->sc_curchan, AH_FALSE, &status); |
712 | #endif |
713 | |
714 | /* |
715 | * Reset the key cache since some parts do not |
716 | * reset the contents on initial power up. |
717 | */ |
718 | for (i = 0; i < sc->sc_keymax; i++) |
719 | ath_hal_keyreset(ah, i); |
720 | |
721 | ath_hal_resettxqueue(ah, sc->sc_bhalq); |
722 | for (i = 0; i < HAL_NUM_TX_QUEUES; i++) |
723 | if (ATH_TXQ_SETUP(sc, i)) |
724 | ath_hal_resettxqueue(ah, i); |
725 | |
726 | if (sc->sc_softled) { |
727 | ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin, |
728 | HAL_GPIO_MUX_MAC_NETWORK_LED); |
729 | ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon); |
730 | } |
731 | return true; |
732 | } |
733 | |
734 | /* |
735 | * Interrupt handler. Most of the actual processing is deferred. |
736 | */ |
737 | int |
738 | ath_intr(void *arg) |
739 | { |
740 | struct ath_softc *sc = arg; |
741 | struct ifnet *ifp = &sc->sc_if; |
742 | struct ath_hal *ah = sc->sc_ah; |
743 | HAL_INT status = 0; |
744 | |
745 | if (!device_activation(sc->sc_dev, DEVACT_LEVEL_DRIVER)) { |
746 | /* |
747 | * The hardware is not ready/present, don't touch anything. |
748 | * Note this can happen early on if the IRQ is shared. |
749 | */ |
750 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n" , __func__); |
751 | return 0; |
752 | } |
753 | |
754 | if (!ath_hal_intrpend(ah)) /* shared irq, not for us */ |
755 | return 0; |
756 | |
757 | if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) != (IFF_RUNNING|IFF_UP)) { |
758 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n" , |
759 | __func__, ifp->if_flags); |
760 | ath_hal_getisr(ah, &status); /* clear ISR */ |
761 | ath_hal_intrset(ah, 0); /* disable further intr's */ |
762 | return 1; /* XXX */ |
763 | } |
764 | /* |
765 | * Figure out the reason(s) for the interrupt. Note |
766 | * that the hal returns a pseudo-ISR that may include |
767 | * bits we haven't explicitly enabled so we mask the |
768 | * value to insure we only process bits we requested. |
769 | */ |
770 | ath_hal_getisr(ah, &status); /* NB: clears ISR too */ |
771 | DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n" , __func__, status); |
772 | status &= sc->sc_imask; /* discard unasked for bits */ |
773 | if (status & HAL_INT_FATAL) { |
774 | /* |
775 | * Fatal errors are unrecoverable. Typically |
776 | * these are caused by DMA errors. Unfortunately |
777 | * the exact reason is not (presently) returned |
778 | * by the hal. |
779 | */ |
780 | sc->sc_stats.ast_hardware++; |
781 | ath_hal_intrset(ah, 0); /* disable intr's until reset */ |
782 | TASK_RUN_OR_ENQUEUE(&sc->sc_fataltask); |
783 | } else if (status & HAL_INT_RXORN) { |
784 | sc->sc_stats.ast_rxorn++; |
785 | ath_hal_intrset(ah, 0); /* disable intr's until reset */ |
786 | TASK_RUN_OR_ENQUEUE(&sc->sc_rxorntask); |
787 | } else { |
788 | if (status & HAL_INT_SWBA) { |
789 | /* |
790 | * Software beacon alert--time to send a beacon. |
791 | * Handle beacon transmission directly; deferring |
792 | * this is too slow to meet timing constraints |
793 | * under load. |
794 | */ |
795 | ath_beacon_proc(sc, 0); |
796 | } |
797 | if (status & HAL_INT_RXEOL) { |
798 | /* |
799 | * NB: the hardware should re-read the link when |
800 | * RXE bit is written, but it doesn't work at |
801 | * least on older hardware revs. |
802 | */ |
803 | sc->sc_stats.ast_rxeol++; |
804 | sc->sc_rxlink = NULL; |
805 | } |
806 | if (status & HAL_INT_TXURN) { |
807 | sc->sc_stats.ast_txurn++; |
808 | /* bump tx trigger level */ |
809 | ath_hal_updatetxtriglevel(ah, AH_TRUE); |
810 | } |
811 | if (status & HAL_INT_RX) |
812 | TASK_RUN_OR_ENQUEUE(&sc->sc_rxtask); |
813 | if (status & HAL_INT_TX) |
814 | TASK_RUN_OR_ENQUEUE(&sc->sc_txtask); |
815 | if (status & HAL_INT_BMISS) { |
816 | sc->sc_stats.ast_bmiss++; |
817 | TASK_RUN_OR_ENQUEUE(&sc->sc_bmisstask); |
818 | } |
819 | if (status & HAL_INT_MIB) { |
820 | sc->sc_stats.ast_mib++; |
821 | /* |
822 | * Disable interrupts until we service the MIB |
823 | * interrupt; otherwise it will continue to fire. |
824 | */ |
825 | ath_hal_intrset(ah, 0); |
826 | /* |
827 | * Let the hal handle the event. We assume it will |
828 | * clear whatever condition caused the interrupt. |
829 | */ |
830 | ath_hal_mibevent(ah, &sc->sc_halstats); |
831 | ath_hal_intrset(ah, sc->sc_imask); |
832 | } |
833 | } |
834 | return 1; |
835 | } |
836 | |
837 | /* Swap transmit descriptor. |
838 | * if AH_NEED_DESC_SWAP flag is not defined this becomes a "null" |
839 | * function. |
840 | */ |
841 | static inline void |
842 | ath_desc_swap(struct ath_desc *ds) |
843 | { |
844 | #ifdef AH_NEED_DESC_SWAP |
845 | ds->ds_link = htole32(ds->ds_link); |
846 | ds->ds_data = htole32(ds->ds_data); |
847 | ds->ds_ctl0 = htole32(ds->ds_ctl0); |
848 | ds->ds_ctl1 = htole32(ds->ds_ctl1); |
849 | ds->ds_hw[0] = htole32(ds->ds_hw[0]); |
850 | ds->ds_hw[1] = htole32(ds->ds_hw[1]); |
851 | #endif |
852 | } |
853 | |
854 | static void |
855 | ath_fatal_proc(void *arg, int pending) |
856 | { |
857 | struct ath_softc *sc = arg; |
858 | struct ifnet *ifp = &sc->sc_if; |
859 | |
860 | if_printf(ifp, "hardware error; resetting\n" ); |
861 | ath_reset(ifp); |
862 | } |
863 | |
864 | static void |
865 | ath_rxorn_proc(void *arg, int pending) |
866 | { |
867 | struct ath_softc *sc = arg; |
868 | struct ifnet *ifp = &sc->sc_if; |
869 | |
870 | if_printf(ifp, "rx FIFO overrun; resetting\n" ); |
871 | ath_reset(ifp); |
872 | } |
873 | |
874 | static void |
875 | ath_bmiss_proc(void *arg, int pending) |
876 | { |
877 | struct ath_softc *sc = arg; |
878 | struct ieee80211com *ic = &sc->sc_ic; |
879 | |
880 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n" , __func__, pending); |
881 | KASSERTMSG(ic->ic_opmode == IEEE80211_M_STA, |
882 | "unexpect operating mode %u" , ic->ic_opmode); |
883 | if (ic->ic_state == IEEE80211_S_RUN) { |
884 | u_int64_t lastrx = sc->sc_lastrx; |
885 | u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah); |
886 | |
887 | DPRINTF(sc, ATH_DEBUG_BEACON, |
888 | "%s: tsf %" PRIu64 " lastrx %" PRId64 |
889 | " (%" PRIu64 ") bmiss %u\n" , |
890 | __func__, tsf, tsf - lastrx, lastrx, |
891 | ic->ic_bmisstimeout*1024); |
892 | /* |
893 | * Workaround phantom bmiss interrupts by sanity-checking |
894 | * the time of our last rx'd frame. If it is within the |
895 | * beacon miss interval then ignore the interrupt. If it's |
896 | * truly a bmiss we'll get another interrupt soon and that'll |
897 | * be dispatched up for processing. |
898 | */ |
899 | if (tsf - lastrx > ic->ic_bmisstimeout*1024) { |
900 | NET_LOCK_GIANT(); |
901 | ieee80211_beacon_miss(ic); |
902 | NET_UNLOCK_GIANT(); |
903 | } else |
904 | sc->sc_stats.ast_bmiss_phantom++; |
905 | } |
906 | } |
907 | |
908 | static void |
909 | ath_radar_proc(void *arg, int pending) |
910 | { |
911 | #if 0 |
912 | struct ath_softc *sc = arg; |
913 | struct ifnet *ifp = &sc->sc_if; |
914 | struct ath_hal *ah = sc->sc_ah; |
915 | HAL_CHANNEL hchan; |
916 | |
917 | if (ath_hal_procdfs(ah, &hchan)) { |
918 | if_printf(ifp, "radar detected on channel %u/0x%x/0x%x\n" , |
919 | hchan.channel, hchan.channelFlags, hchan.privFlags); |
920 | /* |
921 | * Initiate channel change. |
922 | */ |
923 | /* XXX not yet */ |
924 | } |
925 | #endif |
926 | } |
927 | |
928 | static u_int |
929 | ath_chan2flags(struct ieee80211com *ic, struct ieee80211_channel *chan) |
930 | { |
931 | #define N(a) (sizeof(a) / sizeof(a[0])) |
932 | static const u_int modeflags[] = { |
933 | 0, /* IEEE80211_MODE_AUTO */ |
934 | CHANNEL_A, /* IEEE80211_MODE_11A */ |
935 | CHANNEL_B, /* IEEE80211_MODE_11B */ |
936 | CHANNEL_PUREG, /* IEEE80211_MODE_11G */ |
937 | 0, /* IEEE80211_MODE_FH */ |
938 | CHANNEL_ST, /* IEEE80211_MODE_TURBO_A */ |
939 | CHANNEL_108G /* IEEE80211_MODE_TURBO_G */ |
940 | }; |
941 | enum ieee80211_phymode mode = ieee80211_chan2mode(ic, chan); |
942 | |
943 | KASSERTMSG(mode < N(modeflags), "unexpected phy mode %u" , mode); |
944 | KASSERTMSG(modeflags[mode] != 0, "mode %u undefined" , mode); |
945 | return modeflags[mode]; |
946 | #undef N |
947 | } |
948 | |
949 | static int |
950 | ath_ifinit(struct ifnet *ifp) |
951 | { |
952 | struct ath_softc *sc = (struct ath_softc *)ifp->if_softc; |
953 | |
954 | return ath_init(sc); |
955 | } |
956 | |
957 | static void |
958 | ath_settkipmic(struct ath_softc *sc) |
959 | { |
960 | struct ieee80211com *ic = &sc->sc_ic; |
961 | struct ath_hal *ah = sc->sc_ah; |
962 | |
963 | if ((ic->ic_caps & IEEE80211_C_TKIP) && |
964 | !(ic->ic_caps & IEEE80211_C_WME_TKIPMIC)) { |
965 | if (ic->ic_flags & IEEE80211_F_WME) { |
966 | (void)ath_hal_settkipmic(ah, AH_FALSE); |
967 | ic->ic_caps &= ~IEEE80211_C_TKIPMIC; |
968 | } else { |
969 | (void)ath_hal_settkipmic(ah, AH_TRUE); |
970 | ic->ic_caps |= IEEE80211_C_TKIPMIC; |
971 | } |
972 | } |
973 | } |
974 | |
975 | static int |
976 | ath_init(struct ath_softc *sc) |
977 | { |
978 | struct ifnet *ifp = &sc->sc_if; |
979 | struct ieee80211com *ic = &sc->sc_ic; |
980 | struct ath_hal *ah = sc->sc_ah; |
981 | HAL_STATUS status; |
982 | int error = 0, s; |
983 | |
984 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n" , |
985 | __func__, ifp->if_flags); |
986 | |
987 | if (device_is_active(sc->sc_dev)) { |
988 | s = splnet(); |
989 | } else if (!pmf_device_subtree_resume(sc->sc_dev, &sc->sc_qual) || |
990 | !device_is_active(sc->sc_dev)) |
991 | return 0; |
992 | else |
993 | s = splnet(); |
994 | |
995 | /* |
996 | * Stop anything previously setup. This is safe |
997 | * whether this is the first time through or not. |
998 | */ |
999 | ath_stop_locked(ifp, 0); |
1000 | |
1001 | /* |
1002 | * The basic interface to setting the hardware in a good |
1003 | * state is ``reset''. On return the hardware is known to |
1004 | * be powered up and with interrupts disabled. This must |
1005 | * be followed by initialization of the appropriate bits |
1006 | * and then setup of the interrupt mask. |
1007 | */ |
1008 | ath_settkipmic(sc); |
1009 | sc->sc_curchan.channel = ic->ic_curchan->ic_freq; |
1010 | sc->sc_curchan.channelFlags = ath_chan2flags(ic, ic->ic_curchan); |
1011 | if (!ath_hal_reset(ah, ic->ic_opmode, &sc->sc_curchan, AH_FALSE, &status)) { |
1012 | if_printf(ifp, "unable to reset hardware; hal status %u\n" , |
1013 | status); |
1014 | error = EIO; |
1015 | goto done; |
1016 | } |
1017 | |
1018 | /* |
1019 | * This is needed only to setup initial state |
1020 | * but it's best done after a reset. |
1021 | */ |
1022 | ath_update_txpow(sc); |
1023 | /* |
1024 | * Likewise this is set during reset so update |
1025 | * state cached in the driver. |
1026 | */ |
1027 | ath_restore_diversity(sc); |
1028 | sc->sc_calinterval = 1; |
1029 | sc->sc_caltries = 0; |
1030 | |
1031 | /* |
1032 | * Setup the hardware after reset: the key cache |
1033 | * is filled as needed and the receive engine is |
1034 | * set going. Frame transmit is handled entirely |
1035 | * in the frame output path; there's nothing to do |
1036 | * here except setup the interrupt mask. |
1037 | */ |
1038 | if ((error = ath_startrecv(sc)) != 0) { |
1039 | if_printf(ifp, "unable to start recv logic\n" ); |
1040 | goto done; |
1041 | } |
1042 | |
1043 | /* |
1044 | * Enable interrupts. |
1045 | */ |
1046 | sc->sc_imask = HAL_INT_RX | HAL_INT_TX |
1047 | | HAL_INT_RXEOL | HAL_INT_RXORN |
1048 | | HAL_INT_FATAL | HAL_INT_GLOBAL; |
1049 | /* |
1050 | * Enable MIB interrupts when there are hardware phy counters. |
1051 | * Note we only do this (at the moment) for station mode. |
1052 | */ |
1053 | if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA) |
1054 | sc->sc_imask |= HAL_INT_MIB; |
1055 | ath_hal_intrset(ah, sc->sc_imask); |
1056 | |
1057 | ifp->if_flags |= IFF_RUNNING; |
1058 | ic->ic_state = IEEE80211_S_INIT; |
1059 | |
1060 | /* |
1061 | * The hardware should be ready to go now so it's safe |
1062 | * to kick the 802.11 state machine as it's likely to |
1063 | * immediately call back to us to send mgmt frames. |
1064 | */ |
1065 | ath_chan_change(sc, ic->ic_curchan); |
1066 | #ifdef ATH_TX99_DIAG |
1067 | if (sc->sc_tx99 != NULL) |
1068 | sc->sc_tx99->start(sc->sc_tx99); |
1069 | else |
1070 | #endif |
1071 | if (ic->ic_opmode != IEEE80211_M_MONITOR) { |
1072 | if (ic->ic_roaming != IEEE80211_ROAMING_MANUAL) |
1073 | ieee80211_new_state(ic, IEEE80211_S_SCAN, -1); |
1074 | } else |
1075 | ieee80211_new_state(ic, IEEE80211_S_RUN, -1); |
1076 | done: |
1077 | splx(s); |
1078 | return error; |
1079 | } |
1080 | |
1081 | static void |
1082 | ath_stop_locked(struct ifnet *ifp, int disable) |
1083 | { |
1084 | struct ath_softc *sc = ifp->if_softc; |
1085 | struct ieee80211com *ic = &sc->sc_ic; |
1086 | struct ath_hal *ah = sc->sc_ah; |
1087 | |
1088 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %d if_flags 0x%x\n" , |
1089 | __func__, !device_is_enabled(sc->sc_dev), ifp->if_flags); |
1090 | |
1091 | /* KASSERT() IPL_NET */ |
1092 | if (ifp->if_flags & IFF_RUNNING) { |
1093 | /* |
1094 | * Shutdown the hardware and driver: |
1095 | * reset 802.11 state machine |
1096 | * turn off timers |
1097 | * disable interrupts |
1098 | * turn off the radio |
1099 | * clear transmit machinery |
1100 | * clear receive machinery |
1101 | * drain and release tx queues |
1102 | * reclaim beacon resources |
1103 | * power down hardware |
1104 | * |
1105 | * Note that some of this work is not possible if the |
1106 | * hardware is gone (invalid). |
1107 | */ |
1108 | #ifdef ATH_TX99_DIAG |
1109 | if (sc->sc_tx99 != NULL) |
1110 | sc->sc_tx99->stop(sc->sc_tx99); |
1111 | #endif |
1112 | ieee80211_new_state(ic, IEEE80211_S_INIT, -1); |
1113 | ifp->if_flags &= ~IFF_RUNNING; |
1114 | ifp->if_timer = 0; |
1115 | if (device_is_enabled(sc->sc_dev)) { |
1116 | if (sc->sc_softled) { |
1117 | callout_stop(&sc->sc_ledtimer); |
1118 | ath_hal_gpioset(ah, sc->sc_ledpin, |
1119 | !sc->sc_ledon); |
1120 | sc->sc_blinking = 0; |
1121 | } |
1122 | ath_hal_intrset(ah, 0); |
1123 | } |
1124 | ath_draintxq(sc); |
1125 | if (device_is_enabled(sc->sc_dev)) { |
1126 | ath_stoprecv(sc); |
1127 | ath_hal_phydisable(ah); |
1128 | } else |
1129 | sc->sc_rxlink = NULL; |
1130 | IF_PURGE(&ifp->if_snd); |
1131 | ath_beacon_free(sc); |
1132 | } |
1133 | if (disable) |
1134 | pmf_device_suspend(sc->sc_dev, &sc->sc_qual); |
1135 | } |
1136 | |
1137 | static void |
1138 | ath_stop(struct ifnet *ifp, int disable) |
1139 | { |
1140 | int s; |
1141 | |
1142 | s = splnet(); |
1143 | ath_stop_locked(ifp, disable); |
1144 | splx(s); |
1145 | } |
1146 | |
1147 | static void |
1148 | ath_restore_diversity(struct ath_softc *sc) |
1149 | { |
1150 | struct ifnet *ifp = &sc->sc_if; |
1151 | struct ath_hal *ah = sc->sc_ah; |
1152 | |
1153 | if (!ath_hal_setdiversity(sc->sc_ah, sc->sc_diversity) || |
1154 | sc->sc_diversity != ath_hal_getdiversity(ah)) { |
1155 | if_printf(ifp, "could not restore diversity setting %d\n" , |
1156 | sc->sc_diversity); |
1157 | sc->sc_diversity = ath_hal_getdiversity(ah); |
1158 | } |
1159 | } |
1160 | |
1161 | /* |
1162 | * Reset the hardware w/o losing operational state. This is |
1163 | * basically a more efficient way of doing ath_stop, ath_init, |
1164 | * followed by state transitions to the current 802.11 |
1165 | * operational state. Used to recover from various errors and |
1166 | * to reset or reload hardware state. |
1167 | */ |
1168 | int |
1169 | ath_reset(struct ifnet *ifp) |
1170 | { |
1171 | struct ath_softc *sc = ifp->if_softc; |
1172 | struct ieee80211com *ic = &sc->sc_ic; |
1173 | struct ath_hal *ah = sc->sc_ah; |
1174 | struct ieee80211_channel *c; |
1175 | HAL_STATUS status; |
1176 | |
1177 | /* |
1178 | * Convert to a HAL channel description with the flags |
1179 | * constrained to reflect the current operating mode. |
1180 | */ |
1181 | c = ic->ic_curchan; |
1182 | sc->sc_curchan.channel = c->ic_freq; |
1183 | sc->sc_curchan.channelFlags = ath_chan2flags(ic, c); |
1184 | |
1185 | ath_hal_intrset(ah, 0); /* disable interrupts */ |
1186 | ath_draintxq(sc); /* stop xmit side */ |
1187 | ath_stoprecv(sc); /* stop recv side */ |
1188 | ath_settkipmic(sc); /* configure TKIP MIC handling */ |
1189 | /* NB: indicate channel change so we do a full reset */ |
1190 | if (!ath_hal_reset(ah, ic->ic_opmode, &sc->sc_curchan, AH_TRUE, &status)) |
1191 | if_printf(ifp, "%s: unable to reset hardware; hal status %u\n" , |
1192 | __func__, status); |
1193 | ath_update_txpow(sc); /* update tx power state */ |
1194 | ath_restore_diversity(sc); |
1195 | sc->sc_calinterval = 1; |
1196 | sc->sc_caltries = 0; |
1197 | if (ath_startrecv(sc) != 0) /* restart recv */ |
1198 | if_printf(ifp, "%s: unable to start recv logic\n" , __func__); |
1199 | /* |
1200 | * We may be doing a reset in response to an ioctl |
1201 | * that changes the channel so update any state that |
1202 | * might change as a result. |
1203 | */ |
1204 | ath_chan_change(sc, c); |
1205 | if (ic->ic_state == IEEE80211_S_RUN) |
1206 | ath_beacon_config(sc); /* restart beacons */ |
1207 | ath_hal_intrset(ah, sc->sc_imask); |
1208 | |
1209 | ath_start(ifp); /* restart xmit */ |
1210 | return 0; |
1211 | } |
1212 | |
1213 | /* |
1214 | * Cleanup driver resources when we run out of buffers |
1215 | * while processing fragments; return the tx buffers |
1216 | * allocated and drop node references. |
1217 | */ |
1218 | static void |
1219 | ath_txfrag_cleanup(struct ath_softc *sc, |
1220 | ath_bufhead *frags, struct ieee80211_node *ni) |
1221 | { |
1222 | struct ath_buf *bf; |
1223 | |
1224 | ATH_TXBUF_LOCK_ASSERT(sc); |
1225 | |
1226 | while ((bf = STAILQ_FIRST(frags)) != NULL) { |
1227 | STAILQ_REMOVE_HEAD(frags, bf_list); |
1228 | STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); |
1229 | sc->sc_if.if_flags &= ~IFF_OACTIVE; |
1230 | ieee80211_node_decref(ni); |
1231 | } |
1232 | } |
1233 | |
1234 | /* |
1235 | * Setup xmit of a fragmented frame. Allocate a buffer |
1236 | * for each frag and bump the node reference count to |
1237 | * reflect the held reference to be setup by ath_tx_start. |
1238 | */ |
1239 | static int |
1240 | ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags, |
1241 | struct mbuf *m0, struct ieee80211_node *ni) |
1242 | { |
1243 | struct mbuf *m; |
1244 | struct ath_buf *bf; |
1245 | |
1246 | ATH_TXBUF_LOCK(sc); |
1247 | for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) { |
1248 | bf = STAILQ_FIRST(&sc->sc_txbuf); |
1249 | if (bf == NULL) { /* out of buffers, cleanup */ |
1250 | DPRINTF(sc, ATH_DEBUG_XMIT, "%s: out of xmit buffers\n" , |
1251 | __func__); |
1252 | sc->sc_if.if_flags |= IFF_OACTIVE; |
1253 | ath_txfrag_cleanup(sc, frags, ni); |
1254 | break; |
1255 | } |
1256 | STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list); |
1257 | ieee80211_node_incref(ni); |
1258 | STAILQ_INSERT_TAIL(frags, bf, bf_list); |
1259 | } |
1260 | ATH_TXBUF_UNLOCK(sc); |
1261 | |
1262 | return !STAILQ_EMPTY(frags); |
1263 | } |
1264 | |
1265 | static void |
1266 | ath_start(struct ifnet *ifp) |
1267 | { |
1268 | struct ath_softc *sc = ifp->if_softc; |
1269 | struct ath_hal *ah = sc->sc_ah; |
1270 | struct ieee80211com *ic = &sc->sc_ic; |
1271 | struct ieee80211_node *ni; |
1272 | struct ath_buf *bf; |
1273 | struct mbuf *m, *next; |
1274 | struct ieee80211_frame *wh; |
1275 | struct ether_header *eh; |
1276 | ath_bufhead frags; |
1277 | |
1278 | if ((ifp->if_flags & IFF_RUNNING) == 0 || |
1279 | !device_is_active(sc->sc_dev)) |
1280 | return; |
1281 | |
1282 | if (sc->sc_flags & ATH_KEY_UPDATING) |
1283 | return; |
1284 | |
1285 | for (;;) { |
1286 | /* |
1287 | * Grab a TX buffer and associated resources. |
1288 | */ |
1289 | ATH_TXBUF_LOCK(sc); |
1290 | bf = STAILQ_FIRST(&sc->sc_txbuf); |
1291 | if (bf != NULL) |
1292 | STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list); |
1293 | ATH_TXBUF_UNLOCK(sc); |
1294 | if (bf == NULL) { |
1295 | DPRINTF(sc, ATH_DEBUG_XMIT, "%s: out of xmit buffers\n" , |
1296 | __func__); |
1297 | sc->sc_stats.ast_tx_qstop++; |
1298 | ifp->if_flags |= IFF_OACTIVE; |
1299 | break; |
1300 | } |
1301 | /* |
1302 | * Poll the management queue for frames; they |
1303 | * have priority over normal data frames. |
1304 | */ |
1305 | IF_DEQUEUE(&ic->ic_mgtq, m); |
1306 | if (m == NULL) { |
1307 | /* |
1308 | * No data frames go out unless we're associated. |
1309 | */ |
1310 | if (ic->ic_state != IEEE80211_S_RUN) { |
1311 | DPRINTF(sc, ATH_DEBUG_XMIT, |
1312 | "%s: discard data packet, state %s\n" , |
1313 | __func__, |
1314 | ieee80211_state_name[ic->ic_state]); |
1315 | sc->sc_stats.ast_tx_discard++; |
1316 | ATH_TXBUF_LOCK(sc); |
1317 | STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); |
1318 | ATH_TXBUF_UNLOCK(sc); |
1319 | break; |
1320 | } |
1321 | IFQ_DEQUEUE(&ifp->if_snd, m); /* XXX: LOCK */ |
1322 | if (m == NULL) { |
1323 | ATH_TXBUF_LOCK(sc); |
1324 | STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); |
1325 | ATH_TXBUF_UNLOCK(sc); |
1326 | break; |
1327 | } |
1328 | STAILQ_INIT(&frags); |
1329 | /* |
1330 | * Find the node for the destination so we can do |
1331 | * things like power save and fast frames aggregation. |
1332 | */ |
1333 | if (m->m_len < sizeof(struct ether_header) && |
1334 | (m = m_pullup(m, sizeof(struct ether_header))) == NULL) { |
1335 | ic->ic_stats.is_tx_nobuf++; /* XXX */ |
1336 | ni = NULL; |
1337 | goto bad; |
1338 | } |
1339 | eh = mtod(m, struct ether_header *); |
1340 | ni = ieee80211_find_txnode(ic, eh->ether_dhost); |
1341 | if (ni == NULL) { |
1342 | /* NB: ieee80211_find_txnode does stat+msg */ |
1343 | m_freem(m); |
1344 | goto bad; |
1345 | } |
1346 | if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) && |
1347 | (m->m_flags & M_PWR_SAV) == 0) { |
1348 | /* |
1349 | * Station in power save mode; pass the frame |
1350 | * to the 802.11 layer and continue. We'll get |
1351 | * the frame back when the time is right. |
1352 | */ |
1353 | ieee80211_pwrsave(ic, ni, m); |
1354 | goto reclaim; |
1355 | } |
1356 | /* calculate priority so we can find the tx queue */ |
1357 | if (ieee80211_classify(ic, m, ni)) { |
1358 | DPRINTF(sc, ATH_DEBUG_XMIT, |
1359 | "%s: discard, classification failure\n" , |
1360 | __func__); |
1361 | m_freem(m); |
1362 | goto bad; |
1363 | } |
1364 | ifp->if_opackets++; |
1365 | |
1366 | bpf_mtap(ifp, m); |
1367 | /* |
1368 | * Encapsulate the packet in prep for transmission. |
1369 | */ |
1370 | m = ieee80211_encap(ic, m, ni); |
1371 | if (m == NULL) { |
1372 | DPRINTF(sc, ATH_DEBUG_XMIT, |
1373 | "%s: encapsulation failure\n" , |
1374 | __func__); |
1375 | sc->sc_stats.ast_tx_encap++; |
1376 | goto bad; |
1377 | } |
1378 | /* |
1379 | * Check for fragmentation. If this has frame |
1380 | * has been broken up verify we have enough |
1381 | * buffers to send all the fragments so all |
1382 | * go out or none... |
1383 | */ |
1384 | if ((m->m_flags & M_FRAG) && |
1385 | !ath_txfrag_setup(sc, &frags, m, ni)) { |
1386 | DPRINTF(sc, ATH_DEBUG_ANY, |
1387 | "%s: out of txfrag buffers\n" , __func__); |
1388 | ic->ic_stats.is_tx_nobuf++; /* XXX */ |
1389 | ath_freetx(m); |
1390 | goto bad; |
1391 | } |
1392 | } else { |
1393 | /* |
1394 | * Hack! The referenced node pointer is in the |
1395 | * rcvif field of the packet header. This is |
1396 | * placed there by ieee80211_mgmt_output because |
1397 | * we need to hold the reference with the frame |
1398 | * and there's no other way (other than packet |
1399 | * tags which we consider too expensive to use) |
1400 | * to pass it along. |
1401 | */ |
1402 | ni = M_GETCTX(m, struct ieee80211_node *); |
1403 | M_CLEARCTX(m); |
1404 | |
1405 | wh = mtod(m, struct ieee80211_frame *); |
1406 | if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == |
1407 | IEEE80211_FC0_SUBTYPE_PROBE_RESP) { |
1408 | /* fill time stamp */ |
1409 | u_int64_t tsf; |
1410 | u_int32_t *tstamp; |
1411 | |
1412 | tsf = ath_hal_gettsf64(ah); |
1413 | /* XXX: adjust 100us delay to xmit */ |
1414 | tsf += 100; |
1415 | tstamp = (u_int32_t *)&wh[1]; |
1416 | tstamp[0] = htole32(tsf & 0xffffffff); |
1417 | tstamp[1] = htole32(tsf >> 32); |
1418 | } |
1419 | sc->sc_stats.ast_tx_mgmt++; |
1420 | } |
1421 | |
1422 | nextfrag: |
1423 | next = m->m_nextpkt; |
1424 | if (ath_tx_start(sc, ni, bf, m)) { |
1425 | bad: |
1426 | ifp->if_oerrors++; |
1427 | reclaim: |
1428 | ATH_TXBUF_LOCK(sc); |
1429 | STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); |
1430 | ath_txfrag_cleanup(sc, &frags, ni); |
1431 | ATH_TXBUF_UNLOCK(sc); |
1432 | if (ni != NULL) |
1433 | ieee80211_free_node(ni); |
1434 | continue; |
1435 | } |
1436 | if (next != NULL) { |
1437 | m = next; |
1438 | bf = STAILQ_FIRST(&frags); |
1439 | KASSERTMSG(bf != NULL, "no buf for txfrag" ); |
1440 | STAILQ_REMOVE_HEAD(&frags, bf_list); |
1441 | goto nextfrag; |
1442 | } |
1443 | |
1444 | ifp->if_timer = 1; |
1445 | } |
1446 | } |
1447 | |
1448 | static int |
1449 | ath_media_change(struct ifnet *ifp) |
1450 | { |
1451 | #define IS_UP(ifp) \ |
1452 | ((ifp->if_flags & IFF_UP) && (ifp->if_flags & IFF_RUNNING)) |
1453 | int error; |
1454 | |
1455 | error = ieee80211_media_change(ifp); |
1456 | if (error == ENETRESET) { |
1457 | if (IS_UP(ifp)) |
1458 | ath_init(ifp->if_softc); /* XXX lose error */ |
1459 | error = 0; |
1460 | } |
1461 | return error; |
1462 | #undef IS_UP |
1463 | } |
1464 | |
1465 | #ifdef AR_DEBUG |
1466 | static void |
1467 | ath_keyprint(const char *tag, u_int ix, |
1468 | const HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN]) |
1469 | { |
1470 | static const char *ciphers[] = { |
1471 | "WEP" , |
1472 | "AES-OCB" , |
1473 | "AES-CCM" , |
1474 | "CKIP" , |
1475 | "TKIP" , |
1476 | "CLR" , |
1477 | }; |
1478 | int i, n; |
1479 | |
1480 | printf("%s: [%02u] %-7s " , tag, ix, ciphers[hk->kv_type]); |
1481 | for (i = 0, n = hk->kv_len; i < n; i++) |
1482 | printf("%02x" , hk->kv_val[i]); |
1483 | printf(" mac %s" , ether_sprintf(mac)); |
1484 | if (hk->kv_type == HAL_CIPHER_TKIP) { |
1485 | printf(" mic " ); |
1486 | for (i = 0; i < sizeof(hk->kv_mic); i++) |
1487 | printf("%02x" , hk->kv_mic[i]); |
1488 | } |
1489 | printf("\n" ); |
1490 | } |
1491 | #endif |
1492 | |
1493 | /* |
1494 | * Set a TKIP key into the hardware. This handles the |
1495 | * potential distribution of key state to multiple key |
1496 | * cache slots for TKIP. |
1497 | */ |
1498 | static int |
1499 | ath_keyset_tkip(struct ath_softc *sc, const struct ieee80211_key *k, |
1500 | HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN]) |
1501 | { |
1502 | #define IEEE80211_KEY_XR (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV) |
1503 | static const u_int8_t zerobssid[IEEE80211_ADDR_LEN]; |
1504 | struct ath_hal *ah = sc->sc_ah; |
1505 | |
1506 | KASSERTMSG(k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP, |
1507 | "got a non-TKIP key, cipher %u" , k->wk_cipher->ic_cipher); |
1508 | if ((k->wk_flags & IEEE80211_KEY_XR) == IEEE80211_KEY_XR) { |
1509 | if (sc->sc_splitmic) { |
1510 | /* |
1511 | * TX key goes at first index, RX key at the rx index. |
1512 | * The hal handles the MIC keys at index+64. |
1513 | */ |
1514 | memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_mic)); |
1515 | KEYPRINTF(sc, k->wk_keyix, hk, zerobssid); |
1516 | if (!ath_hal_keyset(ah, ATH_KEY(k->wk_keyix), hk, |
1517 | zerobssid)) |
1518 | return 0; |
1519 | |
1520 | memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic)); |
1521 | KEYPRINTF(sc, k->wk_keyix+32, hk, mac); |
1522 | /* XXX delete tx key on failure? */ |
1523 | return ath_hal_keyset(ah, ATH_KEY(k->wk_keyix+32), |
1524 | hk, mac); |
1525 | } else { |
1526 | /* |
1527 | * Room for both TX+RX MIC keys in one key cache |
1528 | * slot, just set key at the first index; the HAL |
1529 | * will handle the reset. |
1530 | */ |
1531 | memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic)); |
1532 | memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic)); |
1533 | KEYPRINTF(sc, k->wk_keyix, hk, mac); |
1534 | return ath_hal_keyset(ah, ATH_KEY(k->wk_keyix), hk, mac); |
1535 | } |
1536 | } else if (k->wk_flags & IEEE80211_KEY_XMIT) { |
1537 | if (sc->sc_splitmic) { |
1538 | /* |
1539 | * NB: must pass MIC key in expected location when |
1540 | * the keycache only holds one MIC key per entry. |
1541 | */ |
1542 | memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_txmic)); |
1543 | } else |
1544 | memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic)); |
1545 | KEYPRINTF(sc, k->wk_keyix, hk, mac); |
1546 | return ath_hal_keyset(ah, ATH_KEY(k->wk_keyix), hk, mac); |
1547 | } else if (k->wk_flags & IEEE80211_KEY_RECV) { |
1548 | memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic)); |
1549 | KEYPRINTF(sc, k->wk_keyix, hk, mac); |
1550 | return ath_hal_keyset(ah, k->wk_keyix, hk, mac); |
1551 | } |
1552 | return 0; |
1553 | #undef IEEE80211_KEY_XR |
1554 | } |
1555 | |
1556 | /* |
1557 | * Set a net80211 key into the hardware. This handles the |
1558 | * potential distribution of key state to multiple key |
1559 | * cache slots for TKIP with hardware MIC support. |
1560 | */ |
1561 | static int |
1562 | ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k, |
1563 | const u_int8_t mac0[IEEE80211_ADDR_LEN], |
1564 | struct ieee80211_node *bss) |
1565 | { |
1566 | #define N(a) (sizeof(a)/sizeof(a[0])) |
1567 | static const u_int8_t ciphermap[] = { |
1568 | HAL_CIPHER_WEP, /* IEEE80211_CIPHER_WEP */ |
1569 | HAL_CIPHER_TKIP, /* IEEE80211_CIPHER_TKIP */ |
1570 | HAL_CIPHER_AES_OCB, /* IEEE80211_CIPHER_AES_OCB */ |
1571 | HAL_CIPHER_AES_CCM, /* IEEE80211_CIPHER_AES_CCM */ |
1572 | (u_int8_t) -1, /* 4 is not allocated */ |
1573 | HAL_CIPHER_CKIP, /* IEEE80211_CIPHER_CKIP */ |
1574 | HAL_CIPHER_CLR, /* IEEE80211_CIPHER_NONE */ |
1575 | }; |
1576 | struct ath_hal *ah = sc->sc_ah; |
1577 | const struct ieee80211_cipher *cip = k->wk_cipher; |
1578 | u_int8_t gmac[IEEE80211_ADDR_LEN]; |
1579 | const u_int8_t *mac; |
1580 | HAL_KEYVAL hk; |
1581 | |
1582 | memset(&hk, 0, sizeof(hk)); |
1583 | /* |
1584 | * Software crypto uses a "clear key" so non-crypto |
1585 | * state kept in the key cache are maintained and |
1586 | * so that rx frames have an entry to match. |
1587 | */ |
1588 | if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) { |
1589 | KASSERTMSG(cip->ic_cipher < N(ciphermap), |
1590 | "invalid cipher type %u" , cip->ic_cipher); |
1591 | hk.kv_type = ciphermap[cip->ic_cipher]; |
1592 | hk.kv_len = k->wk_keylen; |
1593 | memcpy(hk.kv_val, k->wk_key, k->wk_keylen); |
1594 | } else |
1595 | hk.kv_type = HAL_CIPHER_CLR; |
1596 | |
1597 | if ((k->wk_flags & IEEE80211_KEY_GROUP) && sc->sc_mcastkey) { |
1598 | /* |
1599 | * Group keys on hardware that supports multicast frame |
1600 | * key search use a mac that is the sender's address with |
1601 | * the high bit set instead of the app-specified address. |
1602 | */ |
1603 | IEEE80211_ADDR_COPY(gmac, bss->ni_macaddr); |
1604 | gmac[0] |= 0x80; |
1605 | mac = gmac; |
1606 | } else |
1607 | mac = mac0; |
1608 | |
1609 | if ((hk.kv_type == HAL_CIPHER_TKIP && |
1610 | (k->wk_flags & IEEE80211_KEY_SWMIC) == 0)) { |
1611 | return ath_keyset_tkip(sc, k, &hk, mac); |
1612 | } else { |
1613 | KEYPRINTF(sc, k->wk_keyix, &hk, mac); |
1614 | return ath_hal_keyset(ah, ATH_KEY(k->wk_keyix), &hk, mac); |
1615 | } |
1616 | #undef N |
1617 | } |
1618 | |
1619 | /* |
1620 | * Allocate tx/rx key slots for TKIP. We allocate two slots for |
1621 | * each key, one for decrypt/encrypt and the other for the MIC. |
1622 | */ |
1623 | static u_int16_t |
1624 | key_alloc_2pair(struct ath_softc *sc, |
1625 | ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix) |
1626 | { |
1627 | #define N(a) (sizeof(a)/sizeof(a[0])) |
1628 | u_int i, keyix; |
1629 | |
1630 | KASSERTMSG(sc->sc_splitmic, "key cache !split" ); |
1631 | /* XXX could optimize */ |
1632 | for (i = 0; i < N(sc->sc_keymap)/4; i++) { |
1633 | u_int8_t b = sc->sc_keymap[i]; |
1634 | if (b != 0xff) { |
1635 | /* |
1636 | * One or more slots in this byte are free. |
1637 | */ |
1638 | keyix = i*NBBY; |
1639 | while (b & 1) { |
1640 | again: |
1641 | keyix++; |
1642 | b >>= 1; |
1643 | } |
1644 | /* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */ |
1645 | if (isset(sc->sc_keymap, keyix+32) || |
1646 | isset(sc->sc_keymap, keyix+64) || |
1647 | isset(sc->sc_keymap, keyix+32+64)) { |
1648 | /* full pair unavailable */ |
1649 | /* XXX statistic */ |
1650 | if (keyix == (i+1)*NBBY) { |
1651 | /* no slots were appropriate, advance */ |
1652 | continue; |
1653 | } |
1654 | goto again; |
1655 | } |
1656 | setbit(sc->sc_keymap, keyix); |
1657 | setbit(sc->sc_keymap, keyix+64); |
1658 | setbit(sc->sc_keymap, keyix+32); |
1659 | setbit(sc->sc_keymap, keyix+32+64); |
1660 | DPRINTF(sc, ATH_DEBUG_KEYCACHE, |
1661 | "%s: key pair %u,%u %u,%u\n" , |
1662 | __func__, keyix, keyix+64, |
1663 | keyix+32, keyix+32+64); |
1664 | *txkeyix = keyix; |
1665 | *rxkeyix = keyix+32; |
1666 | return keyix; |
1667 | } |
1668 | } |
1669 | DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n" , __func__); |
1670 | return IEEE80211_KEYIX_NONE; |
1671 | #undef N |
1672 | } |
1673 | |
1674 | /* |
1675 | * Allocate tx/rx key slots for TKIP. We allocate two slots for |
1676 | * each key, one for decrypt/encrypt and the other for the MIC. |
1677 | */ |
1678 | static int |
1679 | key_alloc_pair(struct ath_softc *sc, ieee80211_keyix *txkeyix, |
1680 | ieee80211_keyix *rxkeyix) |
1681 | { |
1682 | #define N(a) (sizeof(a)/sizeof(a[0])) |
1683 | u_int i, keyix; |
1684 | |
1685 | KASSERTMSG(!sc->sc_splitmic, "key cache split" ); |
1686 | /* XXX could optimize */ |
1687 | for (i = 0; i < N(sc->sc_keymap)/4; i++) { |
1688 | uint8_t b = sc->sc_keymap[i]; |
1689 | if (b != 0xff) { |
1690 | /* |
1691 | * One or more slots in this byte are free. |
1692 | */ |
1693 | keyix = i*NBBY; |
1694 | while (b & 1) { |
1695 | again: |
1696 | keyix++; |
1697 | b >>= 1; |
1698 | } |
1699 | if (isset(sc->sc_keymap, keyix+64)) { |
1700 | /* full pair unavailable */ |
1701 | /* XXX statistic */ |
1702 | if (keyix == (i+1)*NBBY) { |
1703 | /* no slots were appropriate, advance */ |
1704 | continue; |
1705 | } |
1706 | goto again; |
1707 | } |
1708 | setbit(sc->sc_keymap, keyix); |
1709 | setbit(sc->sc_keymap, keyix+64); |
1710 | DPRINTF(sc, ATH_DEBUG_KEYCACHE, |
1711 | "%s: key pair %u,%u\n" , |
1712 | __func__, keyix, keyix+64); |
1713 | *txkeyix = *rxkeyix = keyix; |
1714 | return 1; |
1715 | } |
1716 | } |
1717 | DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n" , __func__); |
1718 | return 0; |
1719 | #undef N |
1720 | } |
1721 | |
1722 | /* |
1723 | * Allocate a single key cache slot. |
1724 | */ |
1725 | static int |
1726 | key_alloc_single(struct ath_softc *sc, |
1727 | ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix) |
1728 | { |
1729 | #define N(a) (sizeof(a)/sizeof(a[0])) |
1730 | u_int i, keyix; |
1731 | |
1732 | /* XXX try i,i+32,i+64,i+32+64 to minimize key pair conflicts */ |
1733 | for (i = 0; i < N(sc->sc_keymap); i++) { |
1734 | u_int8_t b = sc->sc_keymap[i]; |
1735 | if (b != 0xff) { |
1736 | /* |
1737 | * One or more slots are free. |
1738 | */ |
1739 | keyix = i*NBBY; |
1740 | while (b & 1) |
1741 | keyix++, b >>= 1; |
1742 | setbit(sc->sc_keymap, keyix); |
1743 | DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key %u\n" , |
1744 | __func__, keyix); |
1745 | *txkeyix = *rxkeyix = keyix; |
1746 | return 1; |
1747 | } |
1748 | } |
1749 | DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of space\n" , __func__); |
1750 | return 0; |
1751 | #undef N |
1752 | } |
1753 | |
1754 | /* |
1755 | * Allocate one or more key cache slots for a uniacst key. The |
1756 | * key itself is needed only to identify the cipher. For hardware |
1757 | * TKIP with split cipher+MIC keys we allocate two key cache slot |
1758 | * pairs so that we can setup separate TX and RX MIC keys. Note |
1759 | * that the MIC key for a TKIP key at slot i is assumed by the |
1760 | * hardware to be at slot i+64. This limits TKIP keys to the first |
1761 | * 64 entries. |
1762 | */ |
1763 | static int |
1764 | ath_key_alloc(struct ieee80211com *ic, const struct ieee80211_key *k, |
1765 | ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix) |
1766 | { |
1767 | struct ath_softc *sc = ic->ic_ifp->if_softc; |
1768 | |
1769 | /* |
1770 | * Group key allocation must be handled specially for |
1771 | * parts that do not support multicast key cache search |
1772 | * functionality. For those parts the key id must match |
1773 | * the h/w key index so lookups find the right key. On |
1774 | * parts w/ the key search facility we install the sender's |
1775 | * mac address (with the high bit set) and let the hardware |
1776 | * find the key w/o using the key id. This is preferred as |
1777 | * it permits us to support multiple users for adhoc and/or |
1778 | * multi-station operation. |
1779 | */ |
1780 | if ((k->wk_flags & IEEE80211_KEY_GROUP) && !sc->sc_mcastkey) { |
1781 | if (!(&ic->ic_nw_keys[0] <= k && |
1782 | k < &ic->ic_nw_keys[IEEE80211_WEP_NKID])) { |
1783 | /* should not happen */ |
1784 | DPRINTF(sc, ATH_DEBUG_KEYCACHE, |
1785 | "%s: bogus group key\n" , __func__); |
1786 | return 0; |
1787 | } |
1788 | /* |
1789 | * XXX we pre-allocate the global keys so |
1790 | * have no way to check if they've already been allocated. |
1791 | */ |
1792 | *keyix = *rxkeyix = k - ic->ic_nw_keys; |
1793 | return 1; |
1794 | } |
1795 | |
1796 | /* |
1797 | * We allocate two pair for TKIP when using the h/w to do |
1798 | * the MIC. For everything else, including software crypto, |
1799 | * we allocate a single entry. Note that s/w crypto requires |
1800 | * a pass-through slot on the 5211 and 5212. The 5210 does |
1801 | * not support pass-through cache entries and we map all |
1802 | * those requests to slot 0. |
1803 | */ |
1804 | if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { |
1805 | return key_alloc_single(sc, keyix, rxkeyix); |
1806 | } else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP && |
1807 | (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) { |
1808 | if (sc->sc_splitmic) |
1809 | return key_alloc_2pair(sc, keyix, rxkeyix); |
1810 | else |
1811 | return key_alloc_pair(sc, keyix, rxkeyix); |
1812 | } else { |
1813 | return key_alloc_single(sc, keyix, rxkeyix); |
1814 | } |
1815 | } |
1816 | |
1817 | /* |
1818 | * Delete an entry in the key cache allocated by ath_key_alloc. |
1819 | */ |
1820 | static int |
1821 | ath_key_delete(struct ieee80211com *ic, const struct ieee80211_key *k) |
1822 | { |
1823 | struct ath_softc *sc = ic->ic_ifp->if_softc; |
1824 | struct ath_hal *ah = sc->sc_ah; |
1825 | const struct ieee80211_cipher *cip = k->wk_cipher; |
1826 | u_int keyix = k->wk_keyix; |
1827 | |
1828 | DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: delete key %u\n" , __func__, keyix); |
1829 | |
1830 | if (!device_has_power(sc->sc_dev)) { |
1831 | aprint_error_dev(sc->sc_dev, "deleting keyix %d w/o power\n" , |
1832 | k->wk_keyix); |
1833 | } |
1834 | |
1835 | ath_hal_keyreset(ah, keyix); |
1836 | /* |
1837 | * Handle split tx/rx keying required for TKIP with h/w MIC. |
1838 | */ |
1839 | if (cip->ic_cipher == IEEE80211_CIPHER_TKIP && |
1840 | (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic) |
1841 | ath_hal_keyreset(ah, keyix+32); /* RX key */ |
1842 | if (keyix >= IEEE80211_WEP_NKID) { |
1843 | /* |
1844 | * Don't touch keymap entries for global keys so |
1845 | * they are never considered for dynamic allocation. |
1846 | */ |
1847 | clrbit(sc->sc_keymap, keyix); |
1848 | if (cip->ic_cipher == IEEE80211_CIPHER_TKIP && |
1849 | (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) { |
1850 | clrbit(sc->sc_keymap, keyix+64); /* TX key MIC */ |
1851 | if (sc->sc_splitmic) { |
1852 | /* +32 for RX key, +32+64 for RX key MIC */ |
1853 | clrbit(sc->sc_keymap, keyix+32); |
1854 | clrbit(sc->sc_keymap, keyix+32+64); |
1855 | } |
1856 | } |
1857 | } |
1858 | return 1; |
1859 | } |
1860 | |
1861 | /* |
1862 | * Set the key cache contents for the specified key. Key cache |
1863 | * slot(s) must already have been allocated by ath_key_alloc. |
1864 | */ |
1865 | static int |
1866 | ath_key_set(struct ieee80211com *ic, const struct ieee80211_key *k, |
1867 | const u_int8_t mac[IEEE80211_ADDR_LEN]) |
1868 | { |
1869 | struct ath_softc *sc = ic->ic_ifp->if_softc; |
1870 | |
1871 | if (!device_has_power(sc->sc_dev)) { |
1872 | aprint_error_dev(sc->sc_dev, "setting keyix %d w/o power\n" , |
1873 | k->wk_keyix); |
1874 | } |
1875 | return ath_keyset(sc, k, mac, ic->ic_bss); |
1876 | } |
1877 | |
1878 | /* |
1879 | * Block/unblock tx+rx processing while a key change is done. |
1880 | * We assume the caller serializes key management operations |
1881 | * so we only need to worry about synchronization with other |
1882 | * uses that originate in the driver. |
1883 | */ |
1884 | static void |
1885 | ath_key_update_begin(struct ieee80211com *ic) |
1886 | { |
1887 | struct ifnet *ifp = ic->ic_ifp; |
1888 | struct ath_softc *sc = ifp->if_softc; |
1889 | |
1890 | DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n" , __func__); |
1891 | #if 0 |
1892 | tasklet_disable(&sc->sc_rxtq); |
1893 | #endif |
1894 | sc->sc_flags |= ATH_KEY_UPDATING; |
1895 | } |
1896 | |
1897 | static void |
1898 | ath_key_update_end(struct ieee80211com *ic) |
1899 | { |
1900 | struct ifnet *ifp = ic->ic_ifp; |
1901 | struct ath_softc *sc = ifp->if_softc; |
1902 | |
1903 | DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n" , __func__); |
1904 | sc->sc_flags &= ~ATH_KEY_UPDATING; |
1905 | #if 0 |
1906 | tasklet_enable(&sc->sc_rxtq); |
1907 | #endif |
1908 | } |
1909 | |
1910 | /* |
1911 | * Calculate the receive filter according to the |
1912 | * operating mode and state: |
1913 | * |
1914 | * o always accept unicast, broadcast, and multicast traffic |
1915 | * o maintain current state of phy error reception (the hal |
1916 | * may enable phy error frames for noise immunity work) |
1917 | * o probe request frames are accepted only when operating in |
1918 | * hostap, adhoc, or monitor modes |
1919 | * o enable promiscuous mode according to the interface state |
1920 | * o accept beacons: |
1921 | * - when operating in adhoc mode so the 802.11 layer creates |
1922 | * node table entries for peers, |
1923 | * - when operating in station mode for collecting rssi data when |
1924 | * the station is otherwise quiet, or |
1925 | * - when scanning |
1926 | */ |
1927 | static u_int32_t |
1928 | ath_calcrxfilter(struct ath_softc *sc, enum ieee80211_state state) |
1929 | { |
1930 | struct ieee80211com *ic = &sc->sc_ic; |
1931 | struct ath_hal *ah = sc->sc_ah; |
1932 | struct ifnet *ifp = &sc->sc_if; |
1933 | u_int32_t rfilt; |
1934 | |
1935 | rfilt = (ath_hal_getrxfilter(ah) & HAL_RX_FILTER_PHYERR) |
1936 | | HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST; |
1937 | if (ic->ic_opmode != IEEE80211_M_STA) |
1938 | rfilt |= HAL_RX_FILTER_PROBEREQ; |
1939 | if (ic->ic_opmode != IEEE80211_M_HOSTAP && |
1940 | (ifp->if_flags & IFF_PROMISC)) |
1941 | rfilt |= HAL_RX_FILTER_PROM; |
1942 | if (ifp->if_flags & IFF_PROMISC) |
1943 | rfilt |= HAL_RX_FILTER_CONTROL | HAL_RX_FILTER_PROBEREQ; |
1944 | if (ic->ic_opmode == IEEE80211_M_STA || |
1945 | ic->ic_opmode == IEEE80211_M_IBSS || |
1946 | state == IEEE80211_S_SCAN) |
1947 | rfilt |= HAL_RX_FILTER_BEACON; |
1948 | return rfilt; |
1949 | } |
1950 | |
1951 | static void |
1952 | ath_mode_init(struct ath_softc *sc) |
1953 | { |
1954 | struct ifnet *ifp = &sc->sc_if; |
1955 | struct ieee80211com *ic = &sc->sc_ic; |
1956 | struct ath_hal *ah = sc->sc_ah; |
1957 | struct ether_multi *enm; |
1958 | struct ether_multistep estep; |
1959 | u_int32_t rfilt, mfilt[2], val; |
1960 | int i; |
1961 | uint8_t pos; |
1962 | |
1963 | /* configure rx filter */ |
1964 | rfilt = ath_calcrxfilter(sc, ic->ic_state); |
1965 | ath_hal_setrxfilter(ah, rfilt); |
1966 | |
1967 | /* configure operational mode */ |
1968 | ath_hal_setopmode(ah); |
1969 | |
1970 | /* Write keys to hardware; it may have been powered down. */ |
1971 | ath_key_update_begin(ic); |
1972 | for (i = 0; i < IEEE80211_WEP_NKID; i++) { |
1973 | ath_key_set(ic, |
1974 | &ic->ic_crypto.cs_nw_keys[i], |
1975 | ic->ic_myaddr); |
1976 | } |
1977 | ath_key_update_end(ic); |
1978 | |
1979 | /* |
1980 | * Handle any link-level address change. Note that we only |
1981 | * need to force ic_myaddr; any other addresses are handled |
1982 | * as a byproduct of the ifnet code marking the interface |
1983 | * down then up. |
1984 | * |
1985 | * XXX should get from lladdr instead of arpcom but that's more work |
1986 | */ |
1987 | IEEE80211_ADDR_COPY(ic->ic_myaddr, CLLADDR(sc->sc_if.if_sadl)); |
1988 | ath_hal_setmac(ah, ic->ic_myaddr); |
1989 | |
1990 | /* calculate and install multicast filter */ |
1991 | ifp->if_flags &= ~IFF_ALLMULTI; |
1992 | mfilt[0] = mfilt[1] = 0; |
1993 | ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm); |
1994 | while (enm != NULL) { |
1995 | void *dl; |
1996 | /* XXX Punt on ranges. */ |
1997 | if (!IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) { |
1998 | mfilt[0] = mfilt[1] = 0xffffffff; |
1999 | ifp->if_flags |= IFF_ALLMULTI; |
2000 | break; |
2001 | } |
2002 | dl = enm->enm_addrlo; |
2003 | val = LE_READ_4((char *)dl + 0); |
2004 | pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; |
2005 | val = LE_READ_4((char *)dl + 3); |
2006 | pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; |
2007 | pos &= 0x3f; |
2008 | mfilt[pos / 32] |= (1 << (pos % 32)); |
2009 | |
2010 | ETHER_NEXT_MULTI(estep, enm); |
2011 | } |
2012 | |
2013 | ath_hal_setmcastfilter(ah, mfilt[0], mfilt[1]); |
2014 | DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, MC filter %08x:%08x\n" , |
2015 | __func__, rfilt, mfilt[0], mfilt[1]); |
2016 | } |
2017 | |
2018 | /* |
2019 | * Set the slot time based on the current setting. |
2020 | */ |
2021 | static void |
2022 | ath_setslottime(struct ath_softc *sc) |
2023 | { |
2024 | struct ieee80211com *ic = &sc->sc_ic; |
2025 | struct ath_hal *ah = sc->sc_ah; |
2026 | |
2027 | if (ic->ic_flags & IEEE80211_F_SHSLOT) |
2028 | ath_hal_setslottime(ah, HAL_SLOT_TIME_9); |
2029 | else |
2030 | ath_hal_setslottime(ah, HAL_SLOT_TIME_20); |
2031 | sc->sc_updateslot = OK; |
2032 | } |
2033 | |
2034 | /* |
2035 | * Callback from the 802.11 layer to update the |
2036 | * slot time based on the current setting. |
2037 | */ |
2038 | static void |
2039 | ath_updateslot(struct ifnet *ifp) |
2040 | { |
2041 | struct ath_softc *sc = ifp->if_softc; |
2042 | struct ieee80211com *ic = &sc->sc_ic; |
2043 | |
2044 | /* |
2045 | * When not coordinating the BSS, change the hardware |
2046 | * immediately. For other operation we defer the change |
2047 | * until beacon updates have propagated to the stations. |
2048 | */ |
2049 | if (ic->ic_opmode == IEEE80211_M_HOSTAP) |
2050 | sc->sc_updateslot = UPDATE; |
2051 | else |
2052 | ath_setslottime(sc); |
2053 | } |
2054 | |
2055 | /* |
2056 | * Setup a h/w transmit queue for beacons. |
2057 | */ |
2058 | static int |
2059 | ath_beaconq_setup(struct ath_hal *ah) |
2060 | { |
2061 | HAL_TXQ_INFO qi; |
2062 | |
2063 | memset(&qi, 0, sizeof(qi)); |
2064 | qi.tqi_aifs = HAL_TXQ_USEDEFAULT; |
2065 | qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; |
2066 | qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; |
2067 | /* NB: for dynamic turbo, don't enable any other interrupts */ |
2068 | qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE; |
2069 | return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi); |
2070 | } |
2071 | |
2072 | /* |
2073 | * Setup the transmit queue parameters for the beacon queue. |
2074 | */ |
2075 | static int |
2076 | ath_beaconq_config(struct ath_softc *sc) |
2077 | { |
2078 | #define ATH_EXPONENT_TO_VALUE(v) ((1<<(v))-1) |
2079 | struct ieee80211com *ic = &sc->sc_ic; |
2080 | struct ath_hal *ah = sc->sc_ah; |
2081 | HAL_TXQ_INFO qi; |
2082 | |
2083 | ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi); |
2084 | if (ic->ic_opmode == IEEE80211_M_HOSTAP) { |
2085 | /* |
2086 | * Always burst out beacon and CAB traffic. |
2087 | */ |
2088 | qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT; |
2089 | qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT; |
2090 | qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT; |
2091 | } else { |
2092 | struct wmeParams *wmep = |
2093 | &ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE]; |
2094 | /* |
2095 | * Adhoc mode; important thing is to use 2x cwmin. |
2096 | */ |
2097 | qi.tqi_aifs = wmep->wmep_aifsn; |
2098 | qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); |
2099 | qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); |
2100 | } |
2101 | |
2102 | if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) { |
2103 | device_printf(sc->sc_dev, "unable to update parameters for " |
2104 | "beacon hardware queue!\n" ); |
2105 | return 0; |
2106 | } else { |
2107 | ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */ |
2108 | return 1; |
2109 | } |
2110 | #undef ATH_EXPONENT_TO_VALUE |
2111 | } |
2112 | |
2113 | /* |
2114 | * Allocate and setup an initial beacon frame. |
2115 | */ |
2116 | static int |
2117 | ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni) |
2118 | { |
2119 | struct ieee80211com *ic = ni->ni_ic; |
2120 | struct ath_buf *bf; |
2121 | struct mbuf *m; |
2122 | int error; |
2123 | |
2124 | bf = STAILQ_FIRST(&sc->sc_bbuf); |
2125 | if (bf == NULL) { |
2126 | DPRINTF(sc, ATH_DEBUG_BEACON, "%s: no dma buffers\n" , __func__); |
2127 | sc->sc_stats.ast_be_nombuf++; /* XXX */ |
2128 | return ENOMEM; /* XXX */ |
2129 | } |
2130 | /* |
2131 | * NB: the beacon data buffer must be 32-bit aligned; |
2132 | * we assume the mbuf routines will return us something |
2133 | * with this alignment (perhaps should assert). |
2134 | */ |
2135 | m = ieee80211_beacon_alloc(ic, ni, &sc->sc_boff); |
2136 | if (m == NULL) { |
2137 | DPRINTF(sc, ATH_DEBUG_BEACON, "%s: cannot get mbuf\n" , |
2138 | __func__); |
2139 | sc->sc_stats.ast_be_nombuf++; |
2140 | return ENOMEM; |
2141 | } |
2142 | error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m, |
2143 | BUS_DMA_NOWAIT); |
2144 | if (error == 0) { |
2145 | bf->bf_m = m; |
2146 | bf->bf_node = ieee80211_ref_node(ni); |
2147 | } else { |
2148 | m_freem(m); |
2149 | } |
2150 | return error; |
2151 | } |
2152 | |
2153 | /* |
2154 | * Setup the beacon frame for transmit. |
2155 | */ |
2156 | static void |
2157 | ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf) |
2158 | { |
2159 | #define USE_SHPREAMBLE(_ic) \ |
2160 | (((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\ |
2161 | == IEEE80211_F_SHPREAMBLE) |
2162 | struct ieee80211_node *ni = bf->bf_node; |
2163 | struct ieee80211com *ic = ni->ni_ic; |
2164 | struct mbuf *m = bf->bf_m; |
2165 | struct ath_hal *ah = sc->sc_ah; |
2166 | struct ath_desc *ds; |
2167 | int flags, antenna; |
2168 | const HAL_RATE_TABLE *rt; |
2169 | u_int8_t rix, rate; |
2170 | |
2171 | DPRINTF(sc, ATH_DEBUG_BEACON, "%s: m %p len %u\n" , |
2172 | __func__, m, m->m_len); |
2173 | |
2174 | /* setup descriptors */ |
2175 | ds = bf->bf_desc; |
2176 | |
2177 | flags = HAL_TXDESC_NOACK; |
2178 | if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) { |
2179 | ds->ds_link = HTOAH32(bf->bf_daddr); /* self-linked */ |
2180 | flags |= HAL_TXDESC_VEOL; |
2181 | /* |
2182 | * Let hardware handle antenna switching unless |
2183 | * the user has selected a transmit antenna |
2184 | * (sc_txantenna is not 0). |
2185 | */ |
2186 | antenna = sc->sc_txantenna; |
2187 | } else { |
2188 | ds->ds_link = 0; |
2189 | /* |
2190 | * Switch antenna every 4 beacons, unless the user |
2191 | * has selected a transmit antenna (sc_txantenna |
2192 | * is not 0). |
2193 | * |
2194 | * XXX assumes two antenna |
2195 | */ |
2196 | if (sc->sc_txantenna == 0) |
2197 | antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1); |
2198 | else |
2199 | antenna = sc->sc_txantenna; |
2200 | } |
2201 | |
2202 | KASSERTMSG(bf->bf_nseg == 1, |
2203 | "multi-segment beacon frame; nseg %u" , bf->bf_nseg); |
2204 | ds->ds_data = bf->bf_segs[0].ds_addr; |
2205 | /* |
2206 | * Calculate rate code. |
2207 | * XXX everything at min xmit rate |
2208 | */ |
2209 | rix = sc->sc_minrateix; |
2210 | rt = sc->sc_currates; |
2211 | rate = rt->info[rix].rateCode; |
2212 | if (USE_SHPREAMBLE(ic)) |
2213 | rate |= rt->info[rix].shortPreamble; |
2214 | ath_hal_setuptxdesc(ah, ds |
2215 | , m->m_len + IEEE80211_CRC_LEN /* frame length */ |
2216 | , sizeof(struct ieee80211_frame)/* header length */ |
2217 | , HAL_PKT_TYPE_BEACON /* Atheros packet type */ |
2218 | , ni->ni_txpower /* txpower XXX */ |
2219 | , rate, 1 /* series 0 rate/tries */ |
2220 | , HAL_TXKEYIX_INVALID /* no encryption */ |
2221 | , antenna /* antenna mode */ |
2222 | , flags /* no ack, veol for beacons */ |
2223 | , 0 /* rts/cts rate */ |
2224 | , 0 /* rts/cts duration */ |
2225 | ); |
2226 | /* NB: beacon's BufLen must be a multiple of 4 bytes */ |
2227 | ath_hal_filltxdesc(ah, ds |
2228 | , roundup(m->m_len, 4) /* buffer length */ |
2229 | , AH_TRUE /* first segment */ |
2230 | , AH_TRUE /* last segment */ |
2231 | , ds /* first descriptor */ |
2232 | ); |
2233 | |
2234 | /* NB: The desc swap function becomes void, if descriptor swapping |
2235 | * is not enabled |
2236 | */ |
2237 | ath_desc_swap(ds); |
2238 | |
2239 | #undef USE_SHPREAMBLE |
2240 | } |
2241 | |
2242 | /* |
2243 | * Transmit a beacon frame at SWBA. Dynamic updates to the |
2244 | * frame contents are done as needed and the slot time is |
2245 | * also adjusted based on current state. |
2246 | */ |
2247 | static void |
2248 | ath_beacon_proc(void *arg, int pending) |
2249 | { |
2250 | struct ath_softc *sc = arg; |
2251 | struct ath_buf *bf = STAILQ_FIRST(&sc->sc_bbuf); |
2252 | struct ieee80211_node *ni = bf->bf_node; |
2253 | struct ieee80211com *ic = ni->ni_ic; |
2254 | struct ath_hal *ah = sc->sc_ah; |
2255 | struct mbuf *m; |
2256 | int ncabq, error, otherant; |
2257 | |
2258 | DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n" , |
2259 | __func__, pending); |
2260 | |
2261 | if (ic->ic_opmode == IEEE80211_M_STA || |
2262 | ic->ic_opmode == IEEE80211_M_MONITOR || |
2263 | bf == NULL || bf->bf_m == NULL) { |
2264 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: ic_flags=%x bf=%p bf_m=%p\n" , |
2265 | __func__, ic->ic_flags, bf, bf ? bf->bf_m : NULL); |
2266 | return; |
2267 | } |
2268 | /* |
2269 | * Check if the previous beacon has gone out. If |
2270 | * not don't try to post another, skip this period |
2271 | * and wait for the next. Missed beacons indicate |
2272 | * a problem and should not occur. If we miss too |
2273 | * many consecutive beacons reset the device. |
2274 | */ |
2275 | if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) { |
2276 | sc->sc_bmisscount++; |
2277 | DPRINTF(sc, ATH_DEBUG_BEACON_PROC, |
2278 | "%s: missed %u consecutive beacons\n" , |
2279 | __func__, sc->sc_bmisscount); |
2280 | if (sc->sc_bmisscount > 3) /* NB: 3 is a guess */ |
2281 | TASK_RUN_OR_ENQUEUE(&sc->sc_bstucktask); |
2282 | return; |
2283 | } |
2284 | if (sc->sc_bmisscount != 0) { |
2285 | DPRINTF(sc, ATH_DEBUG_BEACON, |
2286 | "%s: resume beacon xmit after %u misses\n" , |
2287 | __func__, sc->sc_bmisscount); |
2288 | sc->sc_bmisscount = 0; |
2289 | } |
2290 | |
2291 | /* |
2292 | * Update dynamic beacon contents. If this returns |
2293 | * non-zero then we need to remap the memory because |
2294 | * the beacon frame changed size (probably because |
2295 | * of the TIM bitmap). |
2296 | */ |
2297 | m = bf->bf_m; |
2298 | ncabq = ath_hal_numtxpending(ah, sc->sc_cabq->axq_qnum); |
2299 | if (ieee80211_beacon_update(ic, bf->bf_node, &sc->sc_boff, m, ncabq)) { |
2300 | /* XXX too conservative? */ |
2301 | bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); |
2302 | error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m, |
2303 | BUS_DMA_NOWAIT); |
2304 | if (error != 0) { |
2305 | if_printf(&sc->sc_if, |
2306 | "%s: bus_dmamap_load_mbuf failed, error %u\n" , |
2307 | __func__, error); |
2308 | return; |
2309 | } |
2310 | } |
2311 | |
2312 | /* |
2313 | * Handle slot time change when a non-ERP station joins/leaves |
2314 | * an 11g network. The 802.11 layer notifies us via callback, |
2315 | * we mark updateslot, then wait one beacon before effecting |
2316 | * the change. This gives associated stations at least one |
2317 | * beacon interval to note the state change. |
2318 | */ |
2319 | /* XXX locking */ |
2320 | if (sc->sc_updateslot == UPDATE) |
2321 | sc->sc_updateslot = COMMIT; /* commit next beacon */ |
2322 | else if (sc->sc_updateslot == COMMIT) |
2323 | ath_setslottime(sc); /* commit change to h/w */ |
2324 | |
2325 | /* |
2326 | * Check recent per-antenna transmit statistics and flip |
2327 | * the default antenna if noticeably more frames went out |
2328 | * on the non-default antenna. |
2329 | * XXX assumes 2 anntenae |
2330 | */ |
2331 | otherant = sc->sc_defant & 1 ? 2 : 1; |
2332 | if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2) |
2333 | ath_setdefantenna(sc, otherant); |
2334 | sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0; |
2335 | |
2336 | /* |
2337 | * Construct tx descriptor. |
2338 | */ |
2339 | ath_beacon_setup(sc, bf); |
2340 | |
2341 | /* |
2342 | * Stop any current dma and put the new frame on the queue. |
2343 | * This should never fail since we check above that no frames |
2344 | * are still pending on the queue. |
2345 | */ |
2346 | if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) { |
2347 | DPRINTF(sc, ATH_DEBUG_ANY, |
2348 | "%s: beacon queue %u did not stop?\n" , |
2349 | __func__, sc->sc_bhalq); |
2350 | } |
2351 | bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, |
2352 | bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); |
2353 | |
2354 | /* |
2355 | * Enable the CAB queue before the beacon queue to |
2356 | * insure cab frames are triggered by this beacon. |
2357 | */ |
2358 | if (ncabq != 0 && (sc->sc_boff.bo_tim[4] & 1)) /* NB: only at DTIM */ |
2359 | ath_hal_txstart(ah, sc->sc_cabq->axq_qnum); |
2360 | ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr); |
2361 | ath_hal_txstart(ah, sc->sc_bhalq); |
2362 | DPRINTF(sc, ATH_DEBUG_BEACON_PROC, |
2363 | "%s: TXDP[%u] = %" PRIx64 " (%p)\n" , __func__, |
2364 | sc->sc_bhalq, (uint64_t)bf->bf_daddr, bf->bf_desc); |
2365 | |
2366 | sc->sc_stats.ast_be_xmit++; |
2367 | } |
2368 | |
2369 | /* |
2370 | * Reset the hardware after detecting beacons have stopped. |
2371 | */ |
2372 | static void |
2373 | ath_bstuck_proc(void *arg, int pending) |
2374 | { |
2375 | struct ath_softc *sc = arg; |
2376 | struct ifnet *ifp = &sc->sc_if; |
2377 | |
2378 | if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n" , |
2379 | sc->sc_bmisscount); |
2380 | ath_reset(ifp); |
2381 | } |
2382 | |
2383 | /* |
2384 | * Reclaim beacon resources. |
2385 | */ |
2386 | static void |
2387 | ath_beacon_free(struct ath_softc *sc) |
2388 | { |
2389 | struct ath_buf *bf; |
2390 | |
2391 | STAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) { |
2392 | if (bf->bf_m != NULL) { |
2393 | bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); |
2394 | m_freem(bf->bf_m); |
2395 | bf->bf_m = NULL; |
2396 | } |
2397 | if (bf->bf_node != NULL) { |
2398 | ieee80211_free_node(bf->bf_node); |
2399 | bf->bf_node = NULL; |
2400 | } |
2401 | } |
2402 | } |
2403 | |
2404 | /* |
2405 | * Configure the beacon and sleep timers. |
2406 | * |
2407 | * When operating as an AP this resets the TSF and sets |
2408 | * up the hardware to notify us when we need to issue beacons. |
2409 | * |
2410 | * When operating in station mode this sets up the beacon |
2411 | * timers according to the timestamp of the last received |
2412 | * beacon and the current TSF, configures PCF and DTIM |
2413 | * handling, programs the sleep registers so the hardware |
2414 | * will wakeup in time to receive beacons, and configures |
2415 | * the beacon miss handling so we'll receive a BMISS |
2416 | * interrupt when we stop seeing beacons from the AP |
2417 | * we've associated with. |
2418 | */ |
2419 | static void |
2420 | ath_beacon_config(struct ath_softc *sc) |
2421 | { |
2422 | #define TSF_TO_TU(_h,_l) \ |
2423 | ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10)) |
2424 | #define FUDGE 2 |
2425 | struct ath_hal *ah = sc->sc_ah; |
2426 | struct ieee80211com *ic = &sc->sc_ic; |
2427 | struct ieee80211_node *ni = ic->ic_bss; |
2428 | u_int32_t nexttbtt, intval, tsftu; |
2429 | u_int64_t tsf; |
2430 | |
2431 | /* extract tstamp from last beacon and convert to TU */ |
2432 | nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4), |
2433 | LE_READ_4(ni->ni_tstamp.data)); |
2434 | /* NB: the beacon interval is kept internally in TU's */ |
2435 | intval = ni->ni_intval & HAL_BEACON_PERIOD; |
2436 | if (nexttbtt == 0) /* e.g. for ap mode */ |
2437 | nexttbtt = intval; |
2438 | else if (intval) /* NB: can be 0 for monitor mode */ |
2439 | nexttbtt = roundup(nexttbtt, intval); |
2440 | DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n" , |
2441 | __func__, nexttbtt, intval, ni->ni_intval); |
2442 | if (ic->ic_opmode == IEEE80211_M_STA) { |
2443 | HAL_BEACON_STATE bs; |
2444 | int dtimperiod, dtimcount; |
2445 | int cfpperiod, cfpcount; |
2446 | |
2447 | /* |
2448 | * Setup dtim and cfp parameters according to |
2449 | * last beacon we received (which may be none). |
2450 | */ |
2451 | dtimperiod = ni->ni_dtim_period; |
2452 | if (dtimperiod <= 0) /* NB: 0 if not known */ |
2453 | dtimperiod = 1; |
2454 | dtimcount = ni->ni_dtim_count; |
2455 | if (dtimcount >= dtimperiod) /* NB: sanity check */ |
2456 | dtimcount = 0; /* XXX? */ |
2457 | cfpperiod = 1; /* NB: no PCF support yet */ |
2458 | cfpcount = 0; |
2459 | /* |
2460 | * Pull nexttbtt forward to reflect the current |
2461 | * TSF and calculate dtim+cfp state for the result. |
2462 | */ |
2463 | tsf = ath_hal_gettsf64(ah); |
2464 | tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE; |
2465 | do { |
2466 | nexttbtt += intval; |
2467 | if (--dtimcount < 0) { |
2468 | dtimcount = dtimperiod - 1; |
2469 | if (--cfpcount < 0) |
2470 | cfpcount = cfpperiod - 1; |
2471 | } |
2472 | } while (nexttbtt < tsftu); |
2473 | memset(&bs, 0, sizeof(bs)); |
2474 | bs.bs_intval = intval; |
2475 | bs.bs_nexttbtt = nexttbtt; |
2476 | bs.bs_dtimperiod = dtimperiod*intval; |
2477 | bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval; |
2478 | bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod; |
2479 | bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod; |
2480 | bs.bs_cfpmaxduration = 0; |
2481 | #if 0 |
2482 | /* |
2483 | * The 802.11 layer records the offset to the DTIM |
2484 | * bitmap while receiving beacons; use it here to |
2485 | * enable h/w detection of our AID being marked in |
2486 | * the bitmap vector (to indicate frames for us are |
2487 | * pending at the AP). |
2488 | * XXX do DTIM handling in s/w to WAR old h/w bugs |
2489 | * XXX enable based on h/w rev for newer chips |
2490 | */ |
2491 | bs.bs_timoffset = ni->ni_timoff; |
2492 | #endif |
2493 | /* |
2494 | * Calculate the number of consecutive beacons to miss |
2495 | * before taking a BMISS interrupt. The configuration |
2496 | * is specified in ms, so we need to convert that to |
2497 | * TU's and then calculate based on the beacon interval. |
2498 | * Note that we clamp the result to at most 10 beacons. |
2499 | */ |
2500 | bs.bs_bmissthreshold = howmany(ic->ic_bmisstimeout, intval); |
2501 | if (bs.bs_bmissthreshold > 10) |
2502 | bs.bs_bmissthreshold = 10; |
2503 | else if (bs.bs_bmissthreshold <= 0) |
2504 | bs.bs_bmissthreshold = 1; |
2505 | |
2506 | /* |
2507 | * Calculate sleep duration. The configuration is |
2508 | * given in ms. We insure a multiple of the beacon |
2509 | * period is used. Also, if the sleep duration is |
2510 | * greater than the DTIM period then it makes senses |
2511 | * to make it a multiple of that. |
2512 | * |
2513 | * XXX fixed at 100ms |
2514 | */ |
2515 | bs.bs_sleepduration = |
2516 | roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval); |
2517 | if (bs.bs_sleepduration > bs.bs_dtimperiod) |
2518 | bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod); |
2519 | |
2520 | DPRINTF(sc, ATH_DEBUG_BEACON, |
2521 | "%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n" |
2522 | , __func__ |
2523 | , tsf, tsftu |
2524 | , bs.bs_intval |
2525 | , bs.bs_nexttbtt |
2526 | , bs.bs_dtimperiod |
2527 | , bs.bs_nextdtim |
2528 | , bs.bs_bmissthreshold |
2529 | , bs.bs_sleepduration |
2530 | , bs.bs_cfpperiod |
2531 | , bs.bs_cfpmaxduration |
2532 | , bs.bs_cfpnext |
2533 | , bs.bs_timoffset |
2534 | ); |
2535 | ath_hal_intrset(ah, 0); |
2536 | ath_hal_beacontimers(ah, &bs); |
2537 | sc->sc_imask |= HAL_INT_BMISS; |
2538 | ath_hal_intrset(ah, sc->sc_imask); |
2539 | } else { |
2540 | ath_hal_intrset(ah, 0); |
2541 | if (nexttbtt == intval) |
2542 | intval |= HAL_BEACON_RESET_TSF; |
2543 | if (ic->ic_opmode == IEEE80211_M_IBSS) { |
2544 | /* |
2545 | * In IBSS mode enable the beacon timers but only |
2546 | * enable SWBA interrupts if we need to manually |
2547 | * prepare beacon frames. Otherwise we use a |
2548 | * self-linked tx descriptor and let the hardware |
2549 | * deal with things. |
2550 | */ |
2551 | intval |= HAL_BEACON_ENA; |
2552 | if (!sc->sc_hasveol) |
2553 | sc->sc_imask |= HAL_INT_SWBA; |
2554 | if ((intval & HAL_BEACON_RESET_TSF) == 0) { |
2555 | /* |
2556 | * Pull nexttbtt forward to reflect |
2557 | * the current TSF. |
2558 | */ |
2559 | tsf = ath_hal_gettsf64(ah); |
2560 | tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE; |
2561 | do { |
2562 | nexttbtt += intval; |
2563 | } while (nexttbtt < tsftu); |
2564 | } |
2565 | ath_beaconq_config(sc); |
2566 | } else if (ic->ic_opmode == IEEE80211_M_HOSTAP) { |
2567 | /* |
2568 | * In AP mode we enable the beacon timers and |
2569 | * SWBA interrupts to prepare beacon frames. |
2570 | */ |
2571 | intval |= HAL_BEACON_ENA; |
2572 | sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */ |
2573 | ath_beaconq_config(sc); |
2574 | } |
2575 | ath_hal_beaconinit(ah, nexttbtt, intval); |
2576 | sc->sc_bmisscount = 0; |
2577 | ath_hal_intrset(ah, sc->sc_imask); |
2578 | /* |
2579 | * When using a self-linked beacon descriptor in |
2580 | * ibss mode load it once here. |
2581 | */ |
2582 | if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) |
2583 | ath_beacon_proc(sc, 0); |
2584 | } |
2585 | sc->sc_syncbeacon = 0; |
2586 | #undef UNDEF |
2587 | #undef TSF_TO_TU |
2588 | } |
2589 | |
2590 | static int |
2591 | ath_descdma_setup(struct ath_softc *sc, |
2592 | struct ath_descdma *dd, ath_bufhead *head, |
2593 | const char *name, int nbuf, int ndesc) |
2594 | { |
2595 | #define DS2PHYS(_dd, _ds) \ |
2596 | ((_dd)->dd_desc_paddr + ((char *)(_ds) - (char *)(_dd)->dd_desc)) |
2597 | struct ifnet *ifp = &sc->sc_if; |
2598 | struct ath_desc *ds; |
2599 | struct ath_buf *bf; |
2600 | int i, bsize, error; |
2601 | |
2602 | DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n" , |
2603 | __func__, name, nbuf, ndesc); |
2604 | |
2605 | dd->dd_name = name; |
2606 | dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc; |
2607 | |
2608 | /* |
2609 | * Setup DMA descriptor area. |
2610 | */ |
2611 | dd->dd_dmat = sc->sc_dmat; |
2612 | |
2613 | error = bus_dmamem_alloc(dd->dd_dmat, dd->dd_desc_len, PAGE_SIZE, |
2614 | 0, &dd->dd_dseg, 1, &dd->dd_dnseg, 0); |
2615 | |
2616 | if (error != 0) { |
2617 | if_printf(ifp, "unable to alloc memory for %u %s descriptors, " |
2618 | "error %u\n" , nbuf * ndesc, dd->dd_name, error); |
2619 | goto fail0; |
2620 | } |
2621 | |
2622 | error = bus_dmamem_map(dd->dd_dmat, &dd->dd_dseg, dd->dd_dnseg, |
2623 | dd->dd_desc_len, (void **)&dd->dd_desc, BUS_DMA_COHERENT); |
2624 | if (error != 0) { |
2625 | if_printf(ifp, "unable to map %u %s descriptors, error = %u\n" , |
2626 | nbuf * ndesc, dd->dd_name, error); |
2627 | goto fail1; |
2628 | } |
2629 | |
2630 | /* allocate descriptors */ |
2631 | error = bus_dmamap_create(dd->dd_dmat, dd->dd_desc_len, 1, |
2632 | dd->dd_desc_len, 0, BUS_DMA_NOWAIT, &dd->dd_dmamap); |
2633 | if (error != 0) { |
2634 | if_printf(ifp, "unable to create dmamap for %s descriptors, " |
2635 | "error %u\n" , dd->dd_name, error); |
2636 | goto fail2; |
2637 | } |
2638 | |
2639 | error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, dd->dd_desc, |
2640 | dd->dd_desc_len, NULL, BUS_DMA_NOWAIT); |
2641 | if (error != 0) { |
2642 | if_printf(ifp, "unable to map %s descriptors, error %u\n" , |
2643 | dd->dd_name, error); |
2644 | goto fail3; |
2645 | } |
2646 | |
2647 | ds = dd->dd_desc; |
2648 | dd->dd_desc_paddr = dd->dd_dmamap->dm_segs[0].ds_addr; |
2649 | DPRINTF(sc, ATH_DEBUG_RESET, |
2650 | "%s: %s DMA map: %p (%lu) -> %" PRIx64 " (%lu)\n" , |
2651 | __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, |
2652 | (uint64_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); |
2653 | |
2654 | /* allocate rx buffers */ |
2655 | bsize = sizeof(struct ath_buf) * nbuf; |
2656 | bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); |
2657 | if (bf == NULL) { |
2658 | if_printf(ifp, "malloc of %s buffers failed, size %u\n" , |
2659 | dd->dd_name, bsize); |
2660 | goto fail4; |
2661 | } |
2662 | dd->dd_bufptr = bf; |
2663 | |
2664 | STAILQ_INIT(head); |
2665 | for (i = 0; i < nbuf; i++, bf++, ds += ndesc) { |
2666 | bf->bf_desc = ds; |
2667 | bf->bf_daddr = DS2PHYS(dd, ds); |
2668 | error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, ndesc, |
2669 | MCLBYTES, 0, BUS_DMA_NOWAIT, &bf->bf_dmamap); |
2670 | if (error != 0) { |
2671 | if_printf(ifp, "unable to create dmamap for %s " |
2672 | "buffer %u, error %u\n" , dd->dd_name, i, error); |
2673 | ath_descdma_cleanup(sc, dd, head); |
2674 | return error; |
2675 | } |
2676 | STAILQ_INSERT_TAIL(head, bf, bf_list); |
2677 | } |
2678 | return 0; |
2679 | fail4: |
2680 | bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); |
2681 | fail3: |
2682 | bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); |
2683 | fail2: |
2684 | bus_dmamem_unmap(dd->dd_dmat, (void *)dd->dd_desc, dd->dd_desc_len); |
2685 | fail1: |
2686 | bus_dmamem_free(dd->dd_dmat, &dd->dd_dseg, dd->dd_dnseg); |
2687 | fail0: |
2688 | memset(dd, 0, sizeof(*dd)); |
2689 | return error; |
2690 | #undef DS2PHYS |
2691 | } |
2692 | |
2693 | static void |
2694 | ath_descdma_cleanup(struct ath_softc *sc, |
2695 | struct ath_descdma *dd, ath_bufhead *head) |
2696 | { |
2697 | struct ath_buf *bf; |
2698 | struct ieee80211_node *ni; |
2699 | |
2700 | bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); |
2701 | bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); |
2702 | bus_dmamem_unmap(dd->dd_dmat, (void *)dd->dd_desc, dd->dd_desc_len); |
2703 | bus_dmamem_free(dd->dd_dmat, &dd->dd_dseg, dd->dd_dnseg); |
2704 | |
2705 | STAILQ_FOREACH(bf, head, bf_list) { |
2706 | if (bf->bf_m) { |
2707 | m_freem(bf->bf_m); |
2708 | bf->bf_m = NULL; |
2709 | } |
2710 | if (bf->bf_dmamap != NULL) { |
2711 | bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); |
2712 | bf->bf_dmamap = NULL; |
2713 | } |
2714 | ni = bf->bf_node; |
2715 | bf->bf_node = NULL; |
2716 | if (ni != NULL) { |
2717 | /* |
2718 | * Reclaim node reference. |
2719 | */ |
2720 | ieee80211_free_node(ni); |
2721 | } |
2722 | } |
2723 | |
2724 | STAILQ_INIT(head); |
2725 | free(dd->dd_bufptr, M_ATHDEV); |
2726 | memset(dd, 0, sizeof(*dd)); |
2727 | } |
2728 | |
2729 | static int |
2730 | ath_desc_alloc(struct ath_softc *sc) |
2731 | { |
2732 | int error; |
2733 | |
2734 | error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, |
2735 | "rx" , ath_rxbuf, 1); |
2736 | if (error != 0) |
2737 | return error; |
2738 | |
2739 | error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, |
2740 | "tx" , ath_txbuf, ATH_TXDESC); |
2741 | if (error != 0) { |
2742 | ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); |
2743 | return error; |
2744 | } |
2745 | |
2746 | error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, |
2747 | "beacon" , 1, 1); |
2748 | if (error != 0) { |
2749 | ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); |
2750 | ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); |
2751 | return error; |
2752 | } |
2753 | return 0; |
2754 | } |
2755 | |
2756 | static void |
2757 | ath_desc_free(struct ath_softc *sc) |
2758 | { |
2759 | |
2760 | if (sc->sc_bdma.dd_desc_len != 0) |
2761 | ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); |
2762 | if (sc->sc_txdma.dd_desc_len != 0) |
2763 | ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); |
2764 | if (sc->sc_rxdma.dd_desc_len != 0) |
2765 | ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); |
2766 | } |
2767 | |
2768 | static struct ieee80211_node * |
2769 | ath_node_alloc(struct ieee80211_node_table *nt) |
2770 | { |
2771 | struct ieee80211com *ic = nt->nt_ic; |
2772 | struct ath_softc *sc = ic->ic_ifp->if_softc; |
2773 | const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; |
2774 | struct ath_node *an; |
2775 | |
2776 | an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); |
2777 | if (an == NULL) { |
2778 | /* XXX stat+msg */ |
2779 | return NULL; |
2780 | } |
2781 | an->an_avgrssi = ATH_RSSI_DUMMY_MARKER; |
2782 | ath_rate_node_init(sc, an); |
2783 | |
2784 | DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n" , __func__, an); |
2785 | return &an->an_node; |
2786 | } |
2787 | |
2788 | static void |
2789 | ath_node_free(struct ieee80211_node *ni) |
2790 | { |
2791 | struct ieee80211com *ic = ni->ni_ic; |
2792 | struct ath_softc *sc = ic->ic_ifp->if_softc; |
2793 | |
2794 | DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n" , __func__, ni); |
2795 | |
2796 | ath_rate_node_cleanup(sc, ATH_NODE(ni)); |
2797 | sc->sc_node_free(ni); |
2798 | } |
2799 | |
2800 | static u_int8_t |
2801 | (const struct ieee80211_node *ni) |
2802 | { |
2803 | #define HAL_EP_RND(x, mul) \ |
2804 | ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul)) |
2805 | u_int32_t = ATH_NODE_CONST(ni)->an_avgrssi; |
2806 | int32_t ; |
2807 | |
2808 | /* |
2809 | * When only one frame is received there will be no state in |
2810 | * avgrssi so fallback on the value recorded by the 802.11 layer. |
2811 | */ |
2812 | if (avgrssi != ATH_RSSI_DUMMY_MARKER) |
2813 | rssi = HAL_EP_RND(avgrssi, HAL_RSSI_EP_MULTIPLIER); |
2814 | else |
2815 | rssi = ni->ni_rssi; |
2816 | return rssi < 0 ? 0 : rssi > 127 ? 127 : rssi; |
2817 | #undef HAL_EP_RND |
2818 | } |
2819 | |
2820 | static int |
2821 | ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf) |
2822 | { |
2823 | struct ath_hal *ah = sc->sc_ah; |
2824 | int error; |
2825 | struct mbuf *m; |
2826 | struct ath_desc *ds; |
2827 | |
2828 | m = bf->bf_m; |
2829 | if (m == NULL) { |
2830 | /* |
2831 | * NB: by assigning a page to the rx dma buffer we |
2832 | * implicitly satisfy the Atheros requirement that |
2833 | * this buffer be cache-line-aligned and sized to be |
2834 | * multiple of the cache line size. Not doing this |
2835 | * causes weird stuff to happen (for the 5210 at least). |
2836 | */ |
2837 | m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); |
2838 | if (m == NULL) { |
2839 | DPRINTF(sc, ATH_DEBUG_ANY, |
2840 | "%s: no mbuf/cluster\n" , __func__); |
2841 | sc->sc_stats.ast_rx_nombuf++; |
2842 | return ENOMEM; |
2843 | } |
2844 | bf->bf_m = m; |
2845 | m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; |
2846 | |
2847 | error = bus_dmamap_load_mbuf(sc->sc_dmat, |
2848 | bf->bf_dmamap, m, |
2849 | BUS_DMA_NOWAIT); |
2850 | if (error != 0) { |
2851 | DPRINTF(sc, ATH_DEBUG_ANY, |
2852 | "%s: bus_dmamap_load_mbuf failed; error %d\n" , |
2853 | __func__, error); |
2854 | sc->sc_stats.ast_rx_busdma++; |
2855 | return error; |
2856 | } |
2857 | KASSERTMSG(bf->bf_nseg == 1, |
2858 | "multi-segment packet; nseg %u" , bf->bf_nseg); |
2859 | } |
2860 | bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, |
2861 | bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); |
2862 | |
2863 | /* |
2864 | * Setup descriptors. For receive we always terminate |
2865 | * the descriptor list with a self-linked entry so we'll |
2866 | * not get overrun under high load (as can happen with a |
2867 | * 5212 when ANI processing enables PHY error frames). |
2868 | * |
2869 | * To insure the last descriptor is self-linked we create |
2870 | * each descriptor as self-linked and add it to the end. As |
2871 | * each additional descriptor is added the previous self-linked |
2872 | * entry is ``fixed'' naturally. This should be safe even |
2873 | * if DMA is happening. When processing RX interrupts we |
2874 | * never remove/process the last, self-linked, entry on the |
2875 | * descriptor list. This insures the hardware always has |
2876 | * someplace to write a new frame. |
2877 | */ |
2878 | ds = bf->bf_desc; |
2879 | ds->ds_link = HTOAH32(bf->bf_daddr); /* link to self */ |
2880 | ds->ds_data = bf->bf_segs[0].ds_addr; |
2881 | /* ds->ds_vdata = mtod(m, void *); for radar */ |
2882 | ath_hal_setuprxdesc(ah, ds |
2883 | , m->m_len /* buffer size */ |
2884 | , 0 |
2885 | ); |
2886 | |
2887 | if (sc->sc_rxlink != NULL) |
2888 | *sc->sc_rxlink = bf->bf_daddr; |
2889 | sc->sc_rxlink = &ds->ds_link; |
2890 | return 0; |
2891 | } |
2892 | |
2893 | /* |
2894 | * Extend 15-bit time stamp from rx descriptor to |
2895 | * a full 64-bit TSF using the specified TSF. |
2896 | */ |
2897 | static inline u_int64_t |
2898 | ath_extend_tsf(u_int32_t rstamp, u_int64_t tsf) |
2899 | { |
2900 | if ((tsf & 0x7fff) < rstamp) |
2901 | tsf -= 0x8000; |
2902 | return ((tsf &~ 0x7fff) | rstamp); |
2903 | } |
2904 | |
2905 | /* |
2906 | * Intercept management frames to collect beacon rssi data |
2907 | * and to do ibss merges. |
2908 | */ |
2909 | static void |
2910 | ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m, |
2911 | struct ieee80211_node *ni, |
2912 | int subtype, int , u_int32_t rstamp) |
2913 | { |
2914 | struct ath_softc *sc = ic->ic_ifp->if_softc; |
2915 | |
2916 | /* |
2917 | * Call up first so subsequent work can use information |
2918 | * potentially stored in the node (e.g. for ibss merge). |
2919 | */ |
2920 | sc->sc_recv_mgmt(ic, m, ni, subtype, rssi, rstamp); |
2921 | switch (subtype) { |
2922 | case IEEE80211_FC0_SUBTYPE_BEACON: |
2923 | /* update rssi statistics for use by the hal */ |
2924 | ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi); |
2925 | if (sc->sc_syncbeacon && |
2926 | ni == ic->ic_bss && ic->ic_state == IEEE80211_S_RUN) { |
2927 | /* |
2928 | * Resync beacon timers using the tsf of the beacon |
2929 | * frame we just received. |
2930 | */ |
2931 | ath_beacon_config(sc); |
2932 | } |
2933 | /* fall thru... */ |
2934 | case IEEE80211_FC0_SUBTYPE_PROBE_RESP: |
2935 | if (ic->ic_opmode == IEEE80211_M_IBSS && |
2936 | ic->ic_state == IEEE80211_S_RUN) { |
2937 | u_int64_t tsf = ath_extend_tsf(rstamp, |
2938 | ath_hal_gettsf64(sc->sc_ah)); |
2939 | |
2940 | /* |
2941 | * Handle ibss merge as needed; check the tsf on the |
2942 | * frame before attempting the merge. The 802.11 spec |
2943 | * says the station should change its bssid to match |
2944 | * the oldest station with the same ssid, where oldest |
2945 | * is determined by the tsf. Note that hardware |
2946 | * reconfiguration happens through callback to |
2947 | * ath_newstate as the state machine will go from |
2948 | * RUN -> RUN when this happens. |
2949 | */ |
2950 | if (le64toh(ni->ni_tstamp.tsf) >= tsf) { |
2951 | DPRINTF(sc, ATH_DEBUG_STATE, |
2952 | "ibss merge, rstamp %u tsf %ju " |
2953 | "tstamp %ju\n" , rstamp, (uintmax_t)tsf, |
2954 | (uintmax_t)ni->ni_tstamp.tsf); |
2955 | (void) ieee80211_ibss_merge(ni); |
2956 | } |
2957 | } |
2958 | break; |
2959 | } |
2960 | } |
2961 | |
2962 | /* |
2963 | * Set the default antenna. |
2964 | */ |
2965 | static void |
2966 | ath_setdefantenna(struct ath_softc *sc, u_int antenna) |
2967 | { |
2968 | struct ath_hal *ah = sc->sc_ah; |
2969 | |
2970 | /* XXX block beacon interrupts */ |
2971 | ath_hal_setdefantenna(ah, antenna); |
2972 | if (sc->sc_defant != antenna) |
2973 | sc->sc_stats.ast_ant_defswitch++; |
2974 | sc->sc_defant = antenna; |
2975 | sc->sc_rxotherant = 0; |
2976 | } |
2977 | |
2978 | static void |
2979 | ath_handle_micerror(struct ieee80211com *ic, |
2980 | struct ieee80211_frame *wh, int keyix) |
2981 | { |
2982 | struct ieee80211_node *ni; |
2983 | |
2984 | /* XXX recheck MIC to deal w/ chips that lie */ |
2985 | /* XXX discard MIC errors on !data frames */ |
2986 | ni = ieee80211_find_rxnode_withkey(ic, (const struct ieee80211_frame_min *) wh, keyix); |
2987 | if (ni != NULL) { |
2988 | ieee80211_notify_michael_failure(ic, wh, keyix); |
2989 | ieee80211_free_node(ni); |
2990 | } |
2991 | } |
2992 | |
2993 | static void |
2994 | ath_rx_proc(void *arg, int npending) |
2995 | { |
2996 | #define PA2DESC(_sc, _pa) \ |
2997 | ((struct ath_desc *)((char *)(_sc)->sc_rxdma.dd_desc + \ |
2998 | ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) |
2999 | struct ath_softc *sc = arg; |
3000 | struct ath_buf *bf; |
3001 | struct ieee80211com *ic = &sc->sc_ic; |
3002 | struct ifnet *ifp = &sc->sc_if; |
3003 | struct ath_hal *ah = sc->sc_ah; |
3004 | struct ath_desc *ds; |
3005 | struct mbuf *m; |
3006 | struct ieee80211_node *ni; |
3007 | struct ath_node *an; |
3008 | int len, ngood, type; |
3009 | u_int phyerr; |
3010 | HAL_STATUS status; |
3011 | int16_t nf; |
3012 | u_int64_t tsf; |
3013 | uint8_t rxerr_tap, rxerr_mon; |
3014 | |
3015 | NET_LOCK_GIANT(); /* XXX */ |
3016 | |
3017 | rxerr_tap = |
3018 | (ifp->if_flags & IFF_PROMISC) ? HAL_RXERR_CRC|HAL_RXERR_PHY : 0; |
3019 | |
3020 | if (sc->sc_ic.ic_opmode == IEEE80211_M_MONITOR) |
3021 | rxerr_mon = HAL_RXERR_DECRYPT|HAL_RXERR_MIC; |
3022 | else if (ifp->if_flags & IFF_PROMISC) |
3023 | rxerr_tap |= HAL_RXERR_DECRYPT|HAL_RXERR_MIC; |
3024 | |
3025 | DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n" , __func__, npending); |
3026 | ngood = 0; |
3027 | nf = ath_hal_getchannoise(ah, &sc->sc_curchan); |
3028 | tsf = ath_hal_gettsf64(ah); |
3029 | do { |
3030 | bf = STAILQ_FIRST(&sc->sc_rxbuf); |
3031 | if (bf == NULL) { /* NB: shouldn't happen */ |
3032 | if_printf(ifp, "%s: no buffer!\n" , __func__); |
3033 | break; |
3034 | } |
3035 | ds = bf->bf_desc; |
3036 | if (ds->ds_link == bf->bf_daddr) { |
3037 | /* NB: never process the self-linked entry at the end */ |
3038 | break; |
3039 | } |
3040 | m = bf->bf_m; |
3041 | if (m == NULL) { /* NB: shouldn't happen */ |
3042 | if_printf(ifp, "%s: no mbuf!\n" , __func__); |
3043 | break; |
3044 | } |
3045 | /* XXX sync descriptor memory */ |
3046 | /* |
3047 | * Must provide the virtual address of the current |
3048 | * descriptor, the physical address, and the virtual |
3049 | * address of the next descriptor in the h/w chain. |
3050 | * This allows the HAL to look ahead to see if the |
3051 | * hardware is done with a descriptor by checking the |
3052 | * done bit in the following descriptor and the address |
3053 | * of the current descriptor the DMA engine is working |
3054 | * on. All this is necessary because of our use of |
3055 | * a self-linked list to avoid rx overruns. |
3056 | */ |
3057 | status = ath_hal_rxprocdesc(ah, ds, |
3058 | bf->bf_daddr, PA2DESC(sc, ds->ds_link), |
3059 | &ds->ds_rxstat); |
3060 | #ifdef AR_DEBUG |
3061 | if (sc->sc_debug & ATH_DEBUG_RECV_DESC) |
3062 | ath_printrxbuf(bf, status == HAL_OK); |
3063 | #endif |
3064 | if (status == HAL_EINPROGRESS) |
3065 | break; |
3066 | STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list); |
3067 | if (ds->ds_rxstat.rs_more) { |
3068 | /* |
3069 | * Frame spans multiple descriptors; this |
3070 | * cannot happen yet as we don't support |
3071 | * jumbograms. If not in monitor mode, |
3072 | * discard the frame. |
3073 | */ |
3074 | if (ic->ic_opmode != IEEE80211_M_MONITOR) { |
3075 | sc->sc_stats.ast_rx_toobig++; |
3076 | goto rx_next; |
3077 | } |
3078 | /* fall thru for monitor mode handling... */ |
3079 | } else if (ds->ds_rxstat.rs_status != 0) { |
3080 | if (ds->ds_rxstat.rs_status & HAL_RXERR_CRC) |
3081 | sc->sc_stats.ast_rx_crcerr++; |
3082 | if (ds->ds_rxstat.rs_status & HAL_RXERR_FIFO) |
3083 | sc->sc_stats.ast_rx_fifoerr++; |
3084 | if (ds->ds_rxstat.rs_status & HAL_RXERR_PHY) { |
3085 | sc->sc_stats.ast_rx_phyerr++; |
3086 | phyerr = ds->ds_rxstat.rs_phyerr & 0x1f; |
3087 | sc->sc_stats.ast_rx_phy[phyerr]++; |
3088 | goto rx_next; |
3089 | } |
3090 | if (ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT) { |
3091 | /* |
3092 | * Decrypt error. If the error occurred |
3093 | * because there was no hardware key, then |
3094 | * let the frame through so the upper layers |
3095 | * can process it. This is necessary for 5210 |
3096 | * parts which have no way to setup a ``clear'' |
3097 | * key cache entry. |
3098 | * |
3099 | * XXX do key cache faulting |
3100 | */ |
3101 | if (ds->ds_rxstat.rs_keyix == HAL_RXKEYIX_INVALID) |
3102 | goto rx_accept; |
3103 | sc->sc_stats.ast_rx_badcrypt++; |
3104 | } |
3105 | if (ds->ds_rxstat.rs_status & HAL_RXERR_MIC) { |
3106 | sc->sc_stats.ast_rx_badmic++; |
3107 | /* |
3108 | * Do minimal work required to hand off |
3109 | * the 802.11 header for notifcation. |
3110 | */ |
3111 | /* XXX frag's and qos frames */ |
3112 | len = ds->ds_rxstat.rs_datalen; |
3113 | if (len >= sizeof (struct ieee80211_frame)) { |
3114 | bus_dmamap_sync(sc->sc_dmat, |
3115 | bf->bf_dmamap, |
3116 | 0, bf->bf_dmamap->dm_mapsize, |
3117 | BUS_DMASYNC_POSTREAD); |
3118 | ath_handle_micerror(ic, |
3119 | mtod(m, struct ieee80211_frame *), |
3120 | sc->sc_splitmic ? |
3121 | ds->ds_rxstat.rs_keyix-32 : ds->ds_rxstat.rs_keyix); |
3122 | } |
3123 | } |
3124 | ifp->if_ierrors++; |
3125 | /* |
3126 | * Reject error frames, we normally don't want |
3127 | * to see them in monitor mode (in monitor mode |
3128 | * allow through packets that have crypto problems). |
3129 | */ |
3130 | |
3131 | if (ds->ds_rxstat.rs_status &~ (rxerr_tap|rxerr_mon)) |
3132 | goto rx_next; |
3133 | } |
3134 | rx_accept: |
3135 | /* |
3136 | * Sync and unmap the frame. At this point we're |
3137 | * committed to passing the mbuf somewhere so clear |
3138 | * bf_m; this means a new sk_buff must be allocated |
3139 | * when the rx descriptor is setup again to receive |
3140 | * another frame. |
3141 | */ |
3142 | bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, |
3143 | 0, bf->bf_dmamap->dm_mapsize, |
3144 | BUS_DMASYNC_POSTREAD); |
3145 | bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); |
3146 | bf->bf_m = NULL; |
3147 | |
3148 | m_set_rcvif(m, ifp); |
3149 | len = ds->ds_rxstat.rs_datalen; |
3150 | m->m_pkthdr.len = m->m_len = len; |
3151 | |
3152 | sc->sc_stats.ast_ant_rx[ds->ds_rxstat.rs_antenna]++; |
3153 | |
3154 | if (sc->sc_drvbpf) { |
3155 | u_int8_t rix; |
3156 | |
3157 | /* |
3158 | * Discard anything shorter than an ack or cts. |
3159 | */ |
3160 | if (len < IEEE80211_ACK_LEN) { |
3161 | DPRINTF(sc, ATH_DEBUG_RECV, |
3162 | "%s: runt packet %d\n" , |
3163 | __func__, len); |
3164 | sc->sc_stats.ast_rx_tooshort++; |
3165 | m_freem(m); |
3166 | goto rx_next; |
3167 | } |
3168 | rix = ds->ds_rxstat.rs_rate; |
3169 | sc->sc_rx_th.wr_tsf = htole64( |
3170 | ath_extend_tsf(ds->ds_rxstat.rs_tstamp, tsf)); |
3171 | sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags; |
3172 | if (ds->ds_rxstat.rs_status & |
3173 | (HAL_RXERR_CRC|HAL_RXERR_PHY)) { |
3174 | sc->sc_rx_th.wr_flags |= |
3175 | IEEE80211_RADIOTAP_F_BADFCS; |
3176 | } |
3177 | sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate; |
3178 | sc->sc_rx_th.wr_antsignal = ds->ds_rxstat.rs_rssi + nf; |
3179 | sc->sc_rx_th.wr_antnoise = nf; |
3180 | sc->sc_rx_th.wr_antenna = ds->ds_rxstat.rs_antenna; |
3181 | |
3182 | bpf_mtap2(sc->sc_drvbpf, &sc->sc_rx_th, |
3183 | sc->sc_rx_th_len, m); |
3184 | } |
3185 | |
3186 | if (ds->ds_rxstat.rs_status & rxerr_tap) { |
3187 | m_freem(m); |
3188 | goto rx_next; |
3189 | } |
3190 | /* |
3191 | * From this point on we assume the frame is at least |
3192 | * as large as ieee80211_frame_min; verify that. |
3193 | */ |
3194 | if (len < IEEE80211_MIN_LEN) { |
3195 | DPRINTF(sc, ATH_DEBUG_RECV, "%s: short packet %d\n" , |
3196 | __func__, len); |
3197 | sc->sc_stats.ast_rx_tooshort++; |
3198 | m_freem(m); |
3199 | goto rx_next; |
3200 | } |
3201 | |
3202 | if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) { |
3203 | ieee80211_dump_pkt(mtod(m, void *), len, |
3204 | sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate, |
3205 | ds->ds_rxstat.rs_rssi); |
3206 | } |
3207 | |
3208 | m_adj(m, -IEEE80211_CRC_LEN); |
3209 | |
3210 | /* |
3211 | * Locate the node for sender, track state, and then |
3212 | * pass the (referenced) node up to the 802.11 layer |
3213 | * for its use. |
3214 | */ |
3215 | ni = ieee80211_find_rxnode_withkey(ic, |
3216 | mtod(m, const struct ieee80211_frame_min *), |
3217 | ds->ds_rxstat.rs_keyix == HAL_RXKEYIX_INVALID ? |
3218 | IEEE80211_KEYIX_NONE : ds->ds_rxstat.rs_keyix); |
3219 | /* |
3220 | * Track rx rssi and do any rx antenna management. |
3221 | */ |
3222 | an = ATH_NODE(ni); |
3223 | ATH_RSSI_LPF(an->an_avgrssi, ds->ds_rxstat.rs_rssi); |
3224 | ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, ds->ds_rxstat.rs_rssi); |
3225 | /* |
3226 | * Send frame up for processing. |
3227 | */ |
3228 | type = ieee80211_input(ic, m, ni, |
3229 | ds->ds_rxstat.rs_rssi, ds->ds_rxstat.rs_tstamp); |
3230 | ieee80211_free_node(ni); |
3231 | if (sc->sc_diversity) { |
3232 | /* |
3233 | * When using fast diversity, change the default rx |
3234 | * antenna if diversity chooses the other antenna 3 |
3235 | * times in a row. |
3236 | */ |
3237 | if (sc->sc_defant != ds->ds_rxstat.rs_antenna) { |
3238 | if (++sc->sc_rxotherant >= 3) |
3239 | ath_setdefantenna(sc, |
3240 | ds->ds_rxstat.rs_antenna); |
3241 | } else |
3242 | sc->sc_rxotherant = 0; |
3243 | } |
3244 | if (sc->sc_softled) { |
3245 | /* |
3246 | * Blink for any data frame. Otherwise do a |
3247 | * heartbeat-style blink when idle. The latter |
3248 | * is mainly for station mode where we depend on |
3249 | * periodic beacon frames to trigger the poll event. |
3250 | */ |
3251 | if (type == IEEE80211_FC0_TYPE_DATA) { |
3252 | sc->sc_rxrate = ds->ds_rxstat.rs_rate; |
3253 | ath_led_event(sc, ATH_LED_RX); |
3254 | } else if (ticks - sc->sc_ledevent >= sc->sc_ledidle) |
3255 | ath_led_event(sc, ATH_LED_POLL); |
3256 | } |
3257 | /* |
3258 | * Arrange to update the last rx timestamp only for |
3259 | * frames from our ap when operating in station mode. |
3260 | * This assumes the rx key is always setup when associated. |
3261 | */ |
3262 | if (ic->ic_opmode == IEEE80211_M_STA && |
3263 | ds->ds_rxstat.rs_keyix != HAL_RXKEYIX_INVALID) |
3264 | ngood++; |
3265 | rx_next: |
3266 | STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); |
3267 | } while (ath_rxbuf_init(sc, bf) == 0); |
3268 | |
3269 | /* rx signal state monitoring */ |
3270 | ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan); |
3271 | #if 0 |
3272 | if (ath_hal_radar_event(ah)) |
3273 | TASK_RUN_OR_ENQUEUE(&sc->sc_radartask); |
3274 | #endif |
3275 | if (ngood) |
3276 | sc->sc_lastrx = tsf; |
3277 | |
3278 | #ifdef __NetBSD__ |
3279 | /* XXX Why isn't this necessary in FreeBSD? */ |
3280 | if ((ifp->if_flags & IFF_OACTIVE) == 0 && !IFQ_IS_EMPTY(&ifp->if_snd)) |
3281 | ath_start(ifp); |
3282 | #endif /* __NetBSD__ */ |
3283 | |
3284 | NET_UNLOCK_GIANT(); /* XXX */ |
3285 | #undef PA2DESC |
3286 | } |
3287 | |
3288 | /* |
3289 | * Setup a h/w transmit queue. |
3290 | */ |
3291 | static struct ath_txq * |
3292 | ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) |
3293 | { |
3294 | #define N(a) (sizeof(a)/sizeof(a[0])) |
3295 | struct ath_hal *ah = sc->sc_ah; |
3296 | HAL_TXQ_INFO qi; |
3297 | int qnum; |
3298 | |
3299 | memset(&qi, 0, sizeof(qi)); |
3300 | qi.tqi_subtype = subtype; |
3301 | qi.tqi_aifs = HAL_TXQ_USEDEFAULT; |
3302 | qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; |
3303 | qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; |
3304 | /* |
3305 | * Enable interrupts only for EOL and DESC conditions. |
3306 | * We mark tx descriptors to receive a DESC interrupt |
3307 | * when a tx queue gets deep; otherwise waiting for the |
3308 | * EOL to reap descriptors. Note that this is done to |
3309 | * reduce interrupt load and this only defers reaping |
3310 | * descriptors, never transmitting frames. Aside from |
3311 | * reducing interrupts this also permits more concurrency. |
3312 | * The only potential downside is if the tx queue backs |
3313 | * up in which case the top half of the kernel may backup |
3314 | * due to a lack of tx descriptors. |
3315 | */ |
3316 | qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE; |
3317 | qnum = ath_hal_setuptxqueue(ah, qtype, &qi); |
3318 | if (qnum == -1) { |
3319 | /* |
3320 | * NB: don't print a message, this happens |
3321 | * normally on parts with too few tx queues |
3322 | */ |
3323 | return NULL; |
3324 | } |
3325 | if (qnum >= N(sc->sc_txq)) { |
3326 | device_printf(sc->sc_dev, |
3327 | "hal qnum %u out of range, max %zu!\n" , |
3328 | qnum, N(sc->sc_txq)); |
3329 | ath_hal_releasetxqueue(ah, qnum); |
3330 | return NULL; |
3331 | } |
3332 | if (!ATH_TXQ_SETUP(sc, qnum)) { |
3333 | struct ath_txq *txq = &sc->sc_txq[qnum]; |
3334 | |
3335 | txq->axq_qnum = qnum; |
3336 | txq->axq_depth = 0; |
3337 | txq->axq_intrcnt = 0; |
3338 | txq->axq_link = NULL; |
3339 | STAILQ_INIT(&txq->axq_q); |
3340 | ATH_TXQ_LOCK_INIT(sc, txq); |
3341 | sc->sc_txqsetup |= 1<<qnum; |
3342 | } |
3343 | return &sc->sc_txq[qnum]; |
3344 | #undef N |
3345 | } |
3346 | |
3347 | /* |
3348 | * Setup a hardware data transmit queue for the specified |
3349 | * access control. The hal may not support all requested |
3350 | * queues in which case it will return a reference to a |
3351 | * previously setup queue. We record the mapping from ac's |
3352 | * to h/w queues for use by ath_tx_start and also track |
3353 | * the set of h/w queues being used to optimize work in the |
3354 | * transmit interrupt handler and related routines. |
3355 | */ |
3356 | static int |
3357 | ath_tx_setup(struct ath_softc *sc, int ac, int haltype) |
3358 | { |
3359 | #define N(a) (sizeof(a)/sizeof(a[0])) |
3360 | struct ath_txq *txq; |
3361 | |
3362 | if (ac >= N(sc->sc_ac2q)) { |
3363 | device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n" , |
3364 | ac, N(sc->sc_ac2q)); |
3365 | return 0; |
3366 | } |
3367 | txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); |
3368 | if (txq != NULL) { |
3369 | sc->sc_ac2q[ac] = txq; |
3370 | return 1; |
3371 | } else |
3372 | return 0; |
3373 | #undef N |
3374 | } |
3375 | |
3376 | /* |
3377 | * Update WME parameters for a transmit queue. |
3378 | */ |
3379 | static int |
3380 | ath_txq_update(struct ath_softc *sc, int ac) |
3381 | { |
3382 | #define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) |
3383 | #define ATH_TXOP_TO_US(v) (v<<5) |
3384 | struct ieee80211com *ic = &sc->sc_ic; |
3385 | struct ath_txq *txq = sc->sc_ac2q[ac]; |
3386 | struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; |
3387 | struct ath_hal *ah = sc->sc_ah; |
3388 | HAL_TXQ_INFO qi; |
3389 | |
3390 | ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); |
3391 | qi.tqi_aifs = wmep->wmep_aifsn; |
3392 | qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); |
3393 | qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); |
3394 | qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit); |
3395 | |
3396 | if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { |
3397 | device_printf(sc->sc_dev, "unable to update hardware queue " |
3398 | "parameters for %s traffic!\n" , |
3399 | ieee80211_wme_acnames[ac]); |
3400 | return 0; |
3401 | } else { |
3402 | ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ |
3403 | return 1; |
3404 | } |
3405 | #undef ATH_TXOP_TO_US |
3406 | #undef ATH_EXPONENT_TO_VALUE |
3407 | } |
3408 | |
3409 | /* |
3410 | * Callback from the 802.11 layer to update WME parameters. |
3411 | */ |
3412 | static int |
3413 | ath_wme_update(struct ieee80211com *ic) |
3414 | { |
3415 | struct ath_softc *sc = ic->ic_ifp->if_softc; |
3416 | |
3417 | return !ath_txq_update(sc, WME_AC_BE) || |
3418 | !ath_txq_update(sc, WME_AC_BK) || |
3419 | !ath_txq_update(sc, WME_AC_VI) || |
3420 | !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; |
3421 | } |
3422 | |
3423 | /* |
3424 | * Reclaim resources for a setup queue. |
3425 | */ |
3426 | static void |
3427 | ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) |
3428 | { |
3429 | |
3430 | ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); |
3431 | ATH_TXQ_LOCK_DESTROY(txq); |
3432 | sc->sc_txqsetup &= ~(1<<txq->axq_qnum); |
3433 | } |
3434 | |
3435 | /* |
3436 | * Reclaim all tx queue resources. |
3437 | */ |
3438 | static void |
3439 | ath_tx_cleanup(struct ath_softc *sc) |
3440 | { |
3441 | int i; |
3442 | |
3443 | ATH_TXBUF_LOCK_DESTROY(sc); |
3444 | for (i = 0; i < HAL_NUM_TX_QUEUES; i++) |
3445 | if (ATH_TXQ_SETUP(sc, i)) |
3446 | ath_tx_cleanupq(sc, &sc->sc_txq[i]); |
3447 | } |
3448 | |
3449 | /* |
3450 | * Defragment an mbuf chain, returning at most maxfrags separate |
3451 | * mbufs+clusters. If this is not possible NULL is returned and |
3452 | * the original mbuf chain is left in its present (potentially |
3453 | * modified) state. We use two techniques: collapsing consecutive |
3454 | * mbufs and replacing consecutive mbufs by a cluster. |
3455 | */ |
3456 | static struct mbuf * |
3457 | ath_defrag(struct mbuf *m0, int how, int maxfrags) |
3458 | { |
3459 | struct mbuf *m, *n, *n2, **prev; |
3460 | u_int curfrags; |
3461 | |
3462 | /* |
3463 | * Calculate the current number of frags. |
3464 | */ |
3465 | curfrags = 0; |
3466 | for (m = m0; m != NULL; m = m->m_next) |
3467 | curfrags++; |
3468 | /* |
3469 | * First, try to collapse mbufs. Note that we always collapse |
3470 | * towards the front so we don't need to deal with moving the |
3471 | * pkthdr. This may be suboptimal if the first mbuf has much |
3472 | * less data than the following. |
3473 | */ |
3474 | m = m0; |
3475 | again: |
3476 | for (;;) { |
3477 | n = m->m_next; |
3478 | if (n == NULL) |
3479 | break; |
3480 | if (n->m_len < M_TRAILINGSPACE(m)) { |
3481 | memcpy(mtod(m, char *) + m->m_len, mtod(n, void *), |
3482 | n->m_len); |
3483 | m->m_len += n->m_len; |
3484 | m->m_next = n->m_next; |
3485 | m_free(n); |
3486 | if (--curfrags <= maxfrags) |
3487 | return m0; |
3488 | } else |
3489 | m = n; |
3490 | } |
3491 | KASSERTMSG(maxfrags > 1, |
3492 | "maxfrags %u, but normal collapse failed" , maxfrags); |
3493 | /* |
3494 | * Collapse consecutive mbufs to a cluster. |
3495 | */ |
3496 | prev = &m0->m_next; /* NB: not the first mbuf */ |
3497 | while ((n = *prev) != NULL) { |
3498 | if ((n2 = n->m_next) != NULL && |
3499 | n->m_len + n2->m_len < MCLBYTES) { |
3500 | m = m_getcl(how, MT_DATA, 0); |
3501 | if (m == NULL) |
3502 | goto bad; |
3503 | bcopy(mtod(n, void *), mtod(m, void *), n->m_len); |
3504 | bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len, |
3505 | n2->m_len); |
3506 | m->m_len = n->m_len + n2->m_len; |
3507 | m->m_next = n2->m_next; |
3508 | *prev = m; |
3509 | m_free(n); |
3510 | m_free(n2); |
3511 | if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */ |
3512 | return m0; |
3513 | /* |
3514 | * Still not there, try the normal collapse |
3515 | * again before we allocate another cluster. |
3516 | */ |
3517 | goto again; |
3518 | } |
3519 | prev = &n->m_next; |
3520 | } |
3521 | /* |
3522 | * No place where we can collapse to a cluster; punt. |
3523 | * This can occur if, for example, you request 2 frags |
3524 | * but the packet requires that both be clusters (we |
3525 | * never reallocate the first mbuf to avoid moving the |
3526 | * packet header). |
3527 | */ |
3528 | bad: |
3529 | return NULL; |
3530 | } |
3531 | |
3532 | /* |
3533 | * Return h/w rate index for an IEEE rate (w/o basic rate bit). |
3534 | */ |
3535 | static int |
3536 | ath_tx_findrix(const HAL_RATE_TABLE *rt, int rate) |
3537 | { |
3538 | int i; |
3539 | |
3540 | for (i = 0; i < rt->rateCount; i++) |
3541 | if ((rt->info[i].dot11Rate & IEEE80211_RATE_VAL) == rate) |
3542 | return i; |
3543 | return 0; /* NB: lowest rate */ |
3544 | } |
3545 | |
3546 | static void |
3547 | ath_freetx(struct mbuf *m) |
3548 | { |
3549 | struct mbuf *next; |
3550 | |
3551 | do { |
3552 | next = m->m_nextpkt; |
3553 | m->m_nextpkt = NULL; |
3554 | m_freem(m); |
3555 | } while ((m = next) != NULL); |
3556 | } |
3557 | |
3558 | static int |
3559 | deduct_pad_bytes(int len, int hdrlen) |
3560 | { |
3561 | /* XXX I am suspicious that this code, which I extracted |
3562 | * XXX from ath_tx_start() for reuse, does the right thing. |
3563 | */ |
3564 | return len - (hdrlen & 3); |
3565 | } |
3566 | |
3567 | static int |
3568 | ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf, |
3569 | struct mbuf *m0) |
3570 | { |
3571 | struct ieee80211com *ic = &sc->sc_ic; |
3572 | struct ath_hal *ah = sc->sc_ah; |
3573 | struct ifnet *ifp = &sc->sc_if; |
3574 | const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams; |
3575 | int i, error, iswep, ismcast, isfrag, ismrr; |
3576 | int keyix, hdrlen, pktlen, try0; |
3577 | u_int8_t rix, txrate, ctsrate; |
3578 | u_int8_t cix = 0xff; /* NB: silence compiler */ |
3579 | struct ath_desc *ds, *ds0; |
3580 | struct ath_txq *txq; |
3581 | struct ieee80211_frame *wh; |
3582 | u_int subtype, flags, ctsduration; |
3583 | HAL_PKT_TYPE atype; |
3584 | const HAL_RATE_TABLE *rt; |
3585 | HAL_BOOL shortPreamble; |
3586 | struct ath_node *an; |
3587 | struct mbuf *m; |
3588 | u_int pri; |
3589 | |
3590 | wh = mtod(m0, struct ieee80211_frame *); |
3591 | iswep = wh->i_fc[1] & IEEE80211_FC1_WEP; |
3592 | ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); |
3593 | isfrag = m0->m_flags & M_FRAG; |
3594 | hdrlen = ieee80211_anyhdrsize(wh); |
3595 | /* |
3596 | * Packet length must not include any |
3597 | * pad bytes; deduct them here. |
3598 | */ |
3599 | pktlen = deduct_pad_bytes(m0->m_pkthdr.len, hdrlen); |
3600 | |
3601 | if (iswep) { |
3602 | const struct ieee80211_cipher *cip; |
3603 | struct ieee80211_key *k; |
3604 | |
3605 | /* |
3606 | * Construct the 802.11 header+trailer for an encrypted |
3607 | * frame. The only reason this can fail is because of an |
3608 | * unknown or unsupported cipher/key type. |
3609 | */ |
3610 | k = ieee80211_crypto_encap(ic, ni, m0); |
3611 | if (k == NULL) { |
3612 | /* |
3613 | * This can happen when the key is yanked after the |
3614 | * frame was queued. Just discard the frame; the |
3615 | * 802.11 layer counts failures and provides |
3616 | * debugging/diagnostics. |
3617 | */ |
3618 | ath_freetx(m0); |
3619 | return EIO; |
3620 | } |
3621 | /* |
3622 | * Adjust the packet + header lengths for the crypto |
3623 | * additions and calculate the h/w key index. When |
3624 | * a s/w mic is done the frame will have had any mic |
3625 | * added to it prior to entry so m0->m_pkthdr.len above will |
3626 | * account for it. Otherwise we need to add it to the |
3627 | * packet length. |
3628 | */ |
3629 | cip = k->wk_cipher; |
3630 | hdrlen += cip->ic_header; |
3631 | pktlen += cip->ic_header + cip->ic_trailer; |
3632 | /* NB: frags always have any TKIP MIC done in s/w */ |
3633 | if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag) |
3634 | pktlen += cip->ic_miclen; |
3635 | keyix = k->wk_keyix; |
3636 | |
3637 | /* packet header may have moved, reset our local pointer */ |
3638 | wh = mtod(m0, struct ieee80211_frame *); |
3639 | } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) { |
3640 | /* |
3641 | * Use station key cache slot, if assigned. |
3642 | */ |
3643 | keyix = ni->ni_ucastkey.wk_keyix; |
3644 | if (keyix == IEEE80211_KEYIX_NONE) |
3645 | keyix = HAL_TXKEYIX_INVALID; |
3646 | } else |
3647 | keyix = HAL_TXKEYIX_INVALID; |
3648 | |
3649 | pktlen += IEEE80211_CRC_LEN; |
3650 | |
3651 | /* |
3652 | * Load the DMA map so any coalescing is done. This |
3653 | * also calculates the number of descriptors we need. |
3654 | */ |
3655 | error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0, |
3656 | BUS_DMA_NOWAIT); |
3657 | if (error == EFBIG) { |
3658 | /* XXX packet requires too many descriptors */ |
3659 | bf->bf_nseg = ATH_TXDESC+1; |
3660 | } else if (error != 0) { |
3661 | sc->sc_stats.ast_tx_busdma++; |
3662 | ath_freetx(m0); |
3663 | return error; |
3664 | } |
3665 | /* |
3666 | * Discard null packets and check for packets that |
3667 | * require too many TX descriptors. We try to convert |
3668 | * the latter to a cluster. |
3669 | */ |
3670 | if (error == EFBIG) { /* too many desc's, linearize */ |
3671 | sc->sc_stats.ast_tx_linear++; |
3672 | m = ath_defrag(m0, M_DONTWAIT, ATH_TXDESC); |
3673 | if (m == NULL) { |
3674 | ath_freetx(m0); |
3675 | sc->sc_stats.ast_tx_nombuf++; |
3676 | return ENOMEM; |
3677 | } |
3678 | m0 = m; |
3679 | error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0, |
3680 | BUS_DMA_NOWAIT); |
3681 | if (error != 0) { |
3682 | sc->sc_stats.ast_tx_busdma++; |
3683 | ath_freetx(m0); |
3684 | return error; |
3685 | } |
3686 | KASSERTMSG(bf->bf_nseg <= ATH_TXDESC, |
3687 | "too many segments after defrag; nseg %u" , bf->bf_nseg); |
3688 | } else if (bf->bf_nseg == 0) { /* null packet, discard */ |
3689 | sc->sc_stats.ast_tx_nodata++; |
3690 | ath_freetx(m0); |
3691 | return EIO; |
3692 | } |
3693 | DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n" , __func__, m0, pktlen); |
3694 | bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, |
3695 | bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); |
3696 | bf->bf_m = m0; |
3697 | bf->bf_node = ni; /* NB: held reference */ |
3698 | |
3699 | /* setup descriptors */ |
3700 | ds = bf->bf_desc; |
3701 | rt = sc->sc_currates; |
3702 | KASSERTMSG(rt != NULL, "no rate table, mode %u" , sc->sc_curmode); |
3703 | |
3704 | /* |
3705 | * NB: the 802.11 layer marks whether or not we should |
3706 | * use short preamble based on the current mode and |
3707 | * negotiated parameters. |
3708 | */ |
3709 | if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && |
3710 | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE) && !ismcast) { |
3711 | shortPreamble = AH_TRUE; |
3712 | sc->sc_stats.ast_tx_shortpre++; |
3713 | } else { |
3714 | shortPreamble = AH_FALSE; |
3715 | } |
3716 | |
3717 | an = ATH_NODE(ni); |
3718 | flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ |
3719 | ismrr = 0; /* default no multi-rate retry*/ |
3720 | /* |
3721 | * Calculate Atheros packet type from IEEE80211 packet header, |
3722 | * setup for rate calculations, and select h/w transmit queue. |
3723 | */ |
3724 | switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { |
3725 | case IEEE80211_FC0_TYPE_MGT: |
3726 | subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; |
3727 | if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) |
3728 | atype = HAL_PKT_TYPE_BEACON; |
3729 | else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) |
3730 | atype = HAL_PKT_TYPE_PROBE_RESP; |
3731 | else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) |
3732 | atype = HAL_PKT_TYPE_ATIM; |
3733 | else |
3734 | atype = HAL_PKT_TYPE_NORMAL; /* XXX */ |
3735 | rix = sc->sc_minrateix; |
3736 | txrate = rt->info[rix].rateCode; |
3737 | if (shortPreamble) |
3738 | txrate |= rt->info[rix].shortPreamble; |
3739 | try0 = ATH_TXMGTTRY; |
3740 | /* NB: force all management frames to highest queue */ |
3741 | if (ni->ni_flags & IEEE80211_NODE_QOS) { |
3742 | /* NB: force all management frames to highest queue */ |
3743 | pri = WME_AC_VO; |
3744 | } else |
3745 | pri = WME_AC_BE; |
3746 | flags |= HAL_TXDESC_INTREQ; /* force interrupt */ |
3747 | break; |
3748 | case IEEE80211_FC0_TYPE_CTL: |
3749 | atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */ |
3750 | rix = sc->sc_minrateix; |
3751 | txrate = rt->info[rix].rateCode; |
3752 | if (shortPreamble) |
3753 | txrate |= rt->info[rix].shortPreamble; |
3754 | try0 = ATH_TXMGTTRY; |
3755 | /* NB: force all ctl frames to highest queue */ |
3756 | if (ni->ni_flags & IEEE80211_NODE_QOS) { |
3757 | /* NB: force all ctl frames to highest queue */ |
3758 | pri = WME_AC_VO; |
3759 | } else |
3760 | pri = WME_AC_BE; |
3761 | flags |= HAL_TXDESC_INTREQ; /* force interrupt */ |
3762 | break; |
3763 | case IEEE80211_FC0_TYPE_DATA: |
3764 | atype = HAL_PKT_TYPE_NORMAL; /* default */ |
3765 | /* |
3766 | * Data frames: multicast frames go out at a fixed rate, |
3767 | * otherwise consult the rate control module for the |
3768 | * rate to use. |
3769 | */ |
3770 | if (ismcast) { |
3771 | /* |
3772 | * Check mcast rate setting in case it's changed. |
3773 | * XXX move out of fastpath |
3774 | */ |
3775 | if (ic->ic_mcast_rate != sc->sc_mcastrate) { |
3776 | sc->sc_mcastrix = |
3777 | ath_tx_findrix(rt, ic->ic_mcast_rate); |
3778 | sc->sc_mcastrate = ic->ic_mcast_rate; |
3779 | } |
3780 | rix = sc->sc_mcastrix; |
3781 | txrate = rt->info[rix].rateCode; |
3782 | try0 = 1; |
3783 | } else { |
3784 | ath_rate_findrate(sc, an, shortPreamble, pktlen, |
3785 | &rix, &try0, &txrate); |
3786 | sc->sc_txrate = txrate; /* for LED blinking */ |
3787 | if (try0 != ATH_TXMAXTRY) |
3788 | ismrr = 1; |
3789 | } |
3790 | pri = M_WME_GETAC(m0); |
3791 | if (cap->cap_wmeParams[pri].wmep_noackPolicy) |
3792 | flags |= HAL_TXDESC_NOACK; |
3793 | break; |
3794 | default: |
3795 | if_printf(ifp, "bogus frame type 0x%x (%s)\n" , |
3796 | wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); |
3797 | /* XXX statistic */ |
3798 | ath_freetx(m0); |
3799 | return EIO; |
3800 | } |
3801 | txq = sc->sc_ac2q[pri]; |
3802 | |
3803 | /* |
3804 | * When servicing one or more stations in power-save mode |
3805 | * multicast frames must be buffered until after the beacon. |
3806 | * We use the CAB queue for that. |
3807 | */ |
3808 | if (ismcast && ic->ic_ps_sta) { |
3809 | txq = sc->sc_cabq; |
3810 | /* XXX? more bit in 802.11 frame header */ |
3811 | } |
3812 | |
3813 | /* |
3814 | * Calculate miscellaneous flags. |
3815 | */ |
3816 | if (ismcast) { |
3817 | flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */ |
3818 | } else if (pktlen > ic->ic_rtsthreshold) { |
3819 | flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */ |
3820 | cix = rt->info[rix].controlRate; |
3821 | sc->sc_stats.ast_tx_rts++; |
3822 | } |
3823 | if (flags & HAL_TXDESC_NOACK) /* NB: avoid double counting */ |
3824 | sc->sc_stats.ast_tx_noack++; |
3825 | |
3826 | /* |
3827 | * If 802.11g protection is enabled, determine whether |
3828 | * to use RTS/CTS or just CTS. Note that this is only |
3829 | * done for OFDM unicast frames. |
3830 | */ |
3831 | if ((ic->ic_flags & IEEE80211_F_USEPROT) && |
3832 | rt->info[rix].phy == IEEE80211_T_OFDM && |
3833 | (flags & HAL_TXDESC_NOACK) == 0) { |
3834 | /* XXX fragments must use CCK rates w/ protection */ |
3835 | if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) |
3836 | flags |= HAL_TXDESC_RTSENA; |
3837 | else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) |
3838 | flags |= HAL_TXDESC_CTSENA; |
3839 | if (isfrag) { |
3840 | /* |
3841 | * For frags it would be desirable to use the |
3842 | * highest CCK rate for RTS/CTS. But stations |
3843 | * farther away may detect it at a lower CCK rate |
3844 | * so use the configured protection rate instead |
3845 | * (for now). |
3846 | */ |
3847 | cix = rt->info[sc->sc_protrix].controlRate; |
3848 | } else |
3849 | cix = rt->info[sc->sc_protrix].controlRate; |
3850 | sc->sc_stats.ast_tx_protect++; |
3851 | } |
3852 | |
3853 | /* |
3854 | * Calculate duration. This logically belongs in the 802.11 |
3855 | * layer but it lacks sufficient information to calculate it. |
3856 | */ |
3857 | if ((flags & HAL_TXDESC_NOACK) == 0 && |
3858 | (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { |
3859 | u_int16_t dur; |
3860 | /* |
3861 | * XXX not right with fragmentation. |
3862 | */ |
3863 | if (shortPreamble) |
3864 | dur = rt->info[rix].spAckDuration; |
3865 | else |
3866 | dur = rt->info[rix].lpAckDuration; |
3867 | if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { |
3868 | dur += dur; /* additional SIFS+ACK */ |
3869 | KASSERTMSG(m0->m_nextpkt != NULL, "no fragment" ); |
3870 | /* |
3871 | * Include the size of next fragment so NAV is |
3872 | * updated properly. The last fragment uses only |
3873 | * the ACK duration |
3874 | */ |
3875 | dur += ath_hal_computetxtime(ah, rt, |
3876 | deduct_pad_bytes(m0->m_nextpkt->m_pkthdr.len, |
3877 | hdrlen) - |
3878 | deduct_pad_bytes(m0->m_pkthdr.len, hdrlen) + pktlen, |
3879 | rix, shortPreamble); |
3880 | } |
3881 | if (isfrag) { |
3882 | /* |
3883 | * Force hardware to use computed duration for next |
3884 | * fragment by disabling multi-rate retry which updates |
3885 | * duration based on the multi-rate duration table. |
3886 | */ |
3887 | try0 = ATH_TXMAXTRY; |
3888 | } |
3889 | *(u_int16_t *)wh->i_dur = htole16(dur); |
3890 | } |
3891 | |
3892 | /* |
3893 | * Calculate RTS/CTS rate and duration if needed. |
3894 | */ |
3895 | ctsduration = 0; |
3896 | if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) { |
3897 | /* |
3898 | * CTS transmit rate is derived from the transmit rate |
3899 | * by looking in the h/w rate table. We must also factor |
3900 | * in whether or not a short preamble is to be used. |
3901 | */ |
3902 | /* NB: cix is set above where RTS/CTS is enabled */ |
3903 | KASSERTMSG(cix != 0xff, "cix not setup" ); |
3904 | ctsrate = rt->info[cix].rateCode; |
3905 | /* |
3906 | * Compute the transmit duration based on the frame |
3907 | * size and the size of an ACK frame. We call into the |
3908 | * HAL to do the computation since it depends on the |
3909 | * characteristics of the actual PHY being used. |
3910 | * |
3911 | * NB: CTS is assumed the same size as an ACK so we can |
3912 | * use the precalculated ACK durations. |
3913 | */ |
3914 | if (shortPreamble) { |
3915 | ctsrate |= rt->info[cix].shortPreamble; |
3916 | if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ |
3917 | ctsduration += rt->info[cix].spAckDuration; |
3918 | ctsduration += ath_hal_computetxtime(ah, |
3919 | rt, pktlen, rix, AH_TRUE); |
3920 | if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ |
3921 | ctsduration += rt->info[rix].spAckDuration; |
3922 | } else { |
3923 | if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ |
3924 | ctsduration += rt->info[cix].lpAckDuration; |
3925 | ctsduration += ath_hal_computetxtime(ah, |
3926 | rt, pktlen, rix, AH_FALSE); |
3927 | if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ |
3928 | ctsduration += rt->info[rix].lpAckDuration; |
3929 | } |
3930 | /* |
3931 | * Must disable multi-rate retry when using RTS/CTS. |
3932 | */ |
3933 | ismrr = 0; |
3934 | try0 = ATH_TXMGTTRY; /* XXX */ |
3935 | } else |
3936 | ctsrate = 0; |
3937 | |
3938 | if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) |
3939 | ieee80211_dump_pkt(mtod(m0, void *), m0->m_len, |
3940 | sc->sc_hwmap[txrate].ieeerate, -1); |
3941 | bpf_mtap3(ic->ic_rawbpf, m0); |
3942 | if (sc->sc_drvbpf) { |
3943 | u_int64_t tsf = ath_hal_gettsf64(ah); |
3944 | |
3945 | sc->sc_tx_th.wt_tsf = htole64(tsf); |
3946 | sc->sc_tx_th.wt_flags = sc->sc_hwmap[txrate].txflags; |
3947 | if (iswep) |
3948 | sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; |
3949 | if (isfrag) |
3950 | sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; |
3951 | sc->sc_tx_th.wt_rate = sc->sc_hwmap[txrate].ieeerate; |
3952 | sc->sc_tx_th.wt_txpower = ni->ni_txpower; |
3953 | sc->sc_tx_th.wt_antenna = sc->sc_txantenna; |
3954 | |
3955 | bpf_mtap2(sc->sc_drvbpf, &sc->sc_tx_th, sc->sc_tx_th_len, m0); |
3956 | } |
3957 | |
3958 | /* |
3959 | * Determine if a tx interrupt should be generated for |
3960 | * this descriptor. We take a tx interrupt to reap |
3961 | * descriptors when the h/w hits an EOL condition or |
3962 | * when the descriptor is specifically marked to generate |
3963 | * an interrupt. We periodically mark descriptors in this |
3964 | * way to insure timely replenishing of the supply needed |
3965 | * for sending frames. Defering interrupts reduces system |
3966 | * load and potentially allows more concurrent work to be |
3967 | * done but if done to aggressively can cause senders to |
3968 | * backup. |
3969 | * |
3970 | * NB: use >= to deal with sc_txintrperiod changing |
3971 | * dynamically through sysctl. |
3972 | */ |
3973 | if (flags & HAL_TXDESC_INTREQ) { |
3974 | txq->axq_intrcnt = 0; |
3975 | } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) { |
3976 | flags |= HAL_TXDESC_INTREQ; |
3977 | txq->axq_intrcnt = 0; |
3978 | } |
3979 | |
3980 | /* |
3981 | * Formulate first tx descriptor with tx controls. |
3982 | */ |
3983 | /* XXX check return value? */ |
3984 | ath_hal_setuptxdesc(ah, ds |
3985 | , pktlen /* packet length */ |
3986 | , hdrlen /* header length */ |
3987 | , atype /* Atheros packet type */ |
3988 | , ni->ni_txpower /* txpower */ |
3989 | , txrate, try0 /* series 0 rate/tries */ |
3990 | , keyix /* key cache index */ |
3991 | , sc->sc_txantenna /* antenna mode */ |
3992 | , flags /* flags */ |
3993 | , ctsrate /* rts/cts rate */ |
3994 | , ctsduration /* rts/cts duration */ |
3995 | ); |
3996 | bf->bf_flags = flags; |
3997 | /* |
3998 | * Setup the multi-rate retry state only when we're |
3999 | * going to use it. This assumes ath_hal_setuptxdesc |
4000 | * initializes the descriptors (so we don't have to) |
4001 | * when the hardware supports multi-rate retry and |
4002 | * we don't use it. |
4003 | */ |
4004 | if (ismrr) |
4005 | ath_rate_setupxtxdesc(sc, an, ds, shortPreamble, rix); |
4006 | |
4007 | /* |
4008 | * Fillin the remainder of the descriptor info. |
4009 | */ |
4010 | ds0 = ds; |
4011 | for (i = 0; i < bf->bf_nseg; i++, ds++) { |
4012 | ds->ds_data = bf->bf_segs[i].ds_addr; |
4013 | if (i == bf->bf_nseg - 1) |
4014 | ds->ds_link = 0; |
4015 | else |
4016 | ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1); |
4017 | ath_hal_filltxdesc(ah, ds |
4018 | , bf->bf_segs[i].ds_len /* segment length */ |
4019 | , i == 0 /* first segment */ |
4020 | , i == bf->bf_nseg - 1 /* last segment */ |
4021 | , ds0 /* first descriptor */ |
4022 | ); |
4023 | |
4024 | /* NB: The desc swap function becomes void, |
4025 | * if descriptor swapping is not enabled |
4026 | */ |
4027 | ath_desc_swap(ds); |
4028 | |
4029 | DPRINTF(sc, ATH_DEBUG_XMIT, |
4030 | "%s: %d: %08x %08x %08x %08x %08x %08x\n" , |
4031 | __func__, i, ds->ds_link, ds->ds_data, |
4032 | ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]); |
4033 | } |
4034 | /* |
4035 | * Insert the frame on the outbound list and |
4036 | * pass it on to the hardware. |
4037 | */ |
4038 | ATH_TXQ_LOCK(txq); |
4039 | ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); |
4040 | if (txq->axq_link == NULL) { |
4041 | ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); |
4042 | DPRINTF(sc, ATH_DEBUG_XMIT, |
4043 | "%s: TXDP[%u] = %" PRIx64 " (%p) depth %d\n" , __func__, |
4044 | txq->axq_qnum, (uint64_t)bf->bf_daddr, bf->bf_desc, |
4045 | txq->axq_depth); |
4046 | } else { |
4047 | *txq->axq_link = HTOAH32(bf->bf_daddr); |
4048 | DPRINTF(sc, ATH_DEBUG_XMIT, |
4049 | "%s: link[%u](%p)=%" PRIx64 " (%p) depth %d\n" , |
4050 | __func__, txq->axq_qnum, txq->axq_link, |
4051 | (uint64_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth); |
4052 | } |
4053 | txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link; |
4054 | /* |
4055 | * The CAB queue is started from the SWBA handler since |
4056 | * frames only go out on DTIM and to avoid possible races. |
4057 | */ |
4058 | if (txq != sc->sc_cabq) |
4059 | ath_hal_txstart(ah, txq->axq_qnum); |
4060 | ATH_TXQ_UNLOCK(txq); |
4061 | |
4062 | return 0; |
4063 | } |
4064 | |
4065 | /* |
4066 | * Process completed xmit descriptors from the specified queue. |
4067 | */ |
4068 | static int |
4069 | ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) |
4070 | { |
4071 | struct ath_hal *ah = sc->sc_ah; |
4072 | struct ieee80211com *ic = &sc->sc_ic; |
4073 | struct ath_buf *bf; |
4074 | struct ath_desc *ds, *ds0; |
4075 | struct ieee80211_node *ni; |
4076 | struct ath_node *an; |
4077 | int sr, lr, pri, nacked; |
4078 | HAL_STATUS status; |
4079 | |
4080 | DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n" , |
4081 | __func__, txq->axq_qnum, |
4082 | (void *)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), |
4083 | txq->axq_link); |
4084 | nacked = 0; |
4085 | for (;;) { |
4086 | ATH_TXQ_LOCK(txq); |
4087 | txq->axq_intrcnt = 0; /* reset periodic desc intr count */ |
4088 | bf = STAILQ_FIRST(&txq->axq_q); |
4089 | if (bf == NULL) { |
4090 | txq->axq_link = NULL; |
4091 | ATH_TXQ_UNLOCK(txq); |
4092 | break; |
4093 | } |
4094 | ds0 = &bf->bf_desc[0]; |
4095 | ds = &bf->bf_desc[bf->bf_nseg - 1]; |
4096 | status = ath_hal_txprocdesc(ah, ds, &ds->ds_txstat); |
4097 | if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) |
4098 | ath_printtxbuf(bf, status == HAL_OK); |
4099 | if (status == HAL_EINPROGRESS) { |
4100 | ATH_TXQ_UNLOCK(txq); |
4101 | break; |
4102 | } |
4103 | ATH_TXQ_REMOVE_HEAD(txq, bf_list); |
4104 | ATH_TXQ_UNLOCK(txq); |
4105 | |
4106 | ni = bf->bf_node; |
4107 | if (ni != NULL) { |
4108 | an = ATH_NODE(ni); |
4109 | if (ds->ds_txstat.ts_status == 0) { |
4110 | u_int8_t txant = ds->ds_txstat.ts_antenna; |
4111 | sc->sc_stats.ast_ant_tx[txant]++; |
4112 | sc->sc_ant_tx[txant]++; |
4113 | if (ds->ds_txstat.ts_rate & HAL_TXSTAT_ALTRATE) |
4114 | sc->sc_stats.ast_tx_altrate++; |
4115 | sc->sc_stats.ast_tx_rssi = |
4116 | ds->ds_txstat.ts_rssi; |
4117 | ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, |
4118 | ds->ds_txstat.ts_rssi); |
4119 | pri = M_WME_GETAC(bf->bf_m); |
4120 | if (pri >= WME_AC_VO) |
4121 | ic->ic_wme.wme_hipri_traffic++; |
4122 | ni->ni_inact = ni->ni_inact_reload; |
4123 | } else { |
4124 | if (ds->ds_txstat.ts_status & HAL_TXERR_XRETRY) |
4125 | sc->sc_stats.ast_tx_xretries++; |
4126 | if (ds->ds_txstat.ts_status & HAL_TXERR_FIFO) |
4127 | sc->sc_stats.ast_tx_fifoerr++; |
4128 | if (ds->ds_txstat.ts_status & HAL_TXERR_FILT) |
4129 | sc->sc_stats.ast_tx_filtered++; |
4130 | } |
4131 | sr = ds->ds_txstat.ts_shortretry; |
4132 | lr = ds->ds_txstat.ts_longretry; |
4133 | sc->sc_stats.ast_tx_shortretry += sr; |
4134 | sc->sc_stats.ast_tx_longretry += lr; |
4135 | /* |
4136 | * Hand the descriptor to the rate control algorithm. |
4137 | */ |
4138 | if ((ds->ds_txstat.ts_status & HAL_TXERR_FILT) == 0 && |
4139 | (bf->bf_flags & HAL_TXDESC_NOACK) == 0) { |
4140 | /* |
4141 | * If frame was ack'd update the last rx time |
4142 | * used to workaround phantom bmiss interrupts. |
4143 | */ |
4144 | if (ds->ds_txstat.ts_status == 0) |
4145 | nacked++; |
4146 | ath_rate_tx_complete(sc, an, ds, ds0); |
4147 | } |
4148 | /* |
4149 | * Reclaim reference to node. |
4150 | * |
4151 | * NB: the node may be reclaimed here if, for example |
4152 | * this is a DEAUTH message that was sent and the |
4153 | * node was timed out due to inactivity. |
4154 | */ |
4155 | ieee80211_free_node(ni); |
4156 | } |
4157 | bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0, |
4158 | bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); |
4159 | bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); |
4160 | m_freem(bf->bf_m); |
4161 | bf->bf_m = NULL; |
4162 | bf->bf_node = NULL; |
4163 | |
4164 | ATH_TXBUF_LOCK(sc); |
4165 | STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); |
4166 | sc->sc_if.if_flags &= ~IFF_OACTIVE; |
4167 | ATH_TXBUF_UNLOCK(sc); |
4168 | } |
4169 | return nacked; |
4170 | } |
4171 | |
4172 | static inline int |
4173 | txqactive(struct ath_hal *ah, int qnum) |
4174 | { |
4175 | u_int32_t txqs = 1<<qnum; |
4176 | ath_hal_gettxintrtxqs(ah, &txqs); |
4177 | return (txqs & (1<<qnum)); |
4178 | } |
4179 | |
4180 | /* |
4181 | * Deferred processing of transmit interrupt; special-cased |
4182 | * for a single hardware transmit queue (e.g. 5210 and 5211). |
4183 | */ |
4184 | static void |
4185 | ath_tx_proc_q0(void *arg, int npending) |
4186 | { |
4187 | struct ath_softc *sc = arg; |
4188 | struct ifnet *ifp = &sc->sc_if; |
4189 | |
4190 | if (txqactive(sc->sc_ah, 0) && ath_tx_processq(sc, &sc->sc_txq[0]) > 0){ |
4191 | sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); |
4192 | } |
4193 | if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum)) |
4194 | ath_tx_processq(sc, sc->sc_cabq); |
4195 | |
4196 | if (sc->sc_softled) |
4197 | ath_led_event(sc, ATH_LED_TX); |
4198 | |
4199 | ath_start(ifp); |
4200 | } |
4201 | |
4202 | /* |
4203 | * Deferred processing of transmit interrupt; special-cased |
4204 | * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). |
4205 | */ |
4206 | static void |
4207 | ath_tx_proc_q0123(void *arg, int npending) |
4208 | { |
4209 | struct ath_softc *sc = arg; |
4210 | struct ifnet *ifp = &sc->sc_if; |
4211 | int nacked; |
4212 | |
4213 | /* |
4214 | * Process each active queue. |
4215 | */ |
4216 | nacked = 0; |
4217 | if (txqactive(sc->sc_ah, 0)) |
4218 | nacked += ath_tx_processq(sc, &sc->sc_txq[0]); |
4219 | if (txqactive(sc->sc_ah, 1)) |
4220 | nacked += ath_tx_processq(sc, &sc->sc_txq[1]); |
4221 | if (txqactive(sc->sc_ah, 2)) |
4222 | nacked += ath_tx_processq(sc, &sc->sc_txq[2]); |
4223 | if (txqactive(sc->sc_ah, 3)) |
4224 | nacked += ath_tx_processq(sc, &sc->sc_txq[3]); |
4225 | if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum)) |
4226 | ath_tx_processq(sc, sc->sc_cabq); |
4227 | if (nacked) { |
4228 | sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); |
4229 | } |
4230 | |
4231 | if (sc->sc_softled) |
4232 | ath_led_event(sc, ATH_LED_TX); |
4233 | |
4234 | ath_start(ifp); |
4235 | } |
4236 | |
4237 | /* |
4238 | * Deferred processing of transmit interrupt. |
4239 | */ |
4240 | static void |
4241 | ath_tx_proc(void *arg, int npending) |
4242 | { |
4243 | struct ath_softc *sc = arg; |
4244 | struct ifnet *ifp = &sc->sc_if; |
4245 | int i, nacked; |
4246 | |
4247 | /* |
4248 | * Process each active queue. |
4249 | */ |
4250 | nacked = 0; |
4251 | for (i = 0; i < HAL_NUM_TX_QUEUES; i++) |
4252 | if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i)) |
4253 | nacked += ath_tx_processq(sc, &sc->sc_txq[i]); |
4254 | if (nacked) { |
4255 | sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); |
4256 | } |
4257 | |
4258 | if (sc->sc_softled) |
4259 | ath_led_event(sc, ATH_LED_TX); |
4260 | |
4261 | ath_start(ifp); |
4262 | } |
4263 | |
4264 | static void |
4265 | ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) |
4266 | { |
4267 | struct ath_hal *ah = sc->sc_ah; |
4268 | struct ieee80211_node *ni; |
4269 | struct ath_buf *bf; |
4270 | struct ath_desc *ds; |
4271 | |
4272 | /* |
4273 | * NB: this assumes output has been stopped and |
4274 | * we do not need to block ath_tx_tasklet |
4275 | */ |
4276 | for (;;) { |
4277 | ATH_TXQ_LOCK(txq); |
4278 | bf = STAILQ_FIRST(&txq->axq_q); |
4279 | if (bf == NULL) { |
4280 | txq->axq_link = NULL; |
4281 | ATH_TXQ_UNLOCK(txq); |
4282 | break; |
4283 | } |
4284 | ATH_TXQ_REMOVE_HEAD(txq, bf_list); |
4285 | ATH_TXQ_UNLOCK(txq); |
4286 | ds = &bf->bf_desc[bf->bf_nseg - 1]; |
4287 | if (sc->sc_debug & ATH_DEBUG_RESET) |
4288 | ath_printtxbuf(bf, |
4289 | ath_hal_txprocdesc(ah, bf->bf_desc, |
4290 | &ds->ds_txstat) == HAL_OK); |
4291 | bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); |
4292 | m_freem(bf->bf_m); |
4293 | bf->bf_m = NULL; |
4294 | ni = bf->bf_node; |
4295 | bf->bf_node = NULL; |
4296 | if (ni != NULL) { |
4297 | /* |
4298 | * Reclaim node reference. |
4299 | */ |
4300 | ieee80211_free_node(ni); |
4301 | } |
4302 | ATH_TXBUF_LOCK(sc); |
4303 | STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); |
4304 | sc->sc_if.if_flags &= ~IFF_OACTIVE; |
4305 | ATH_TXBUF_UNLOCK(sc); |
4306 | } |
4307 | } |
4308 | |
4309 | static void |
4310 | ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) |
4311 | { |
4312 | struct ath_hal *ah = sc->sc_ah; |
4313 | |
4314 | (void) ath_hal_stoptxdma(ah, txq->axq_qnum); |
4315 | DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n" , |
4316 | __func__, txq->axq_qnum, |
4317 | (void *)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum), |
4318 | txq->axq_link); |
4319 | } |
4320 | |
4321 | /* |
4322 | * Drain the transmit queues and reclaim resources. |
4323 | */ |
4324 | static void |
4325 | ath_draintxq(struct ath_softc *sc) |
4326 | { |
4327 | struct ath_hal *ah = sc->sc_ah; |
4328 | int i; |
4329 | |
4330 | /* XXX return value */ |
4331 | if (device_is_active(sc->sc_dev)) { |
4332 | /* don't touch the hardware if marked invalid */ |
4333 | (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); |
4334 | DPRINTF(sc, ATH_DEBUG_RESET, |
4335 | "%s: beacon queue %p\n" , __func__, |
4336 | (void *)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq)); |
4337 | for (i = 0; i < HAL_NUM_TX_QUEUES; i++) |
4338 | if (ATH_TXQ_SETUP(sc, i)) |
4339 | ath_tx_stopdma(sc, &sc->sc_txq[i]); |
4340 | } |
4341 | for (i = 0; i < HAL_NUM_TX_QUEUES; i++) |
4342 | if (ATH_TXQ_SETUP(sc, i)) |
4343 | ath_tx_draintxq(sc, &sc->sc_txq[i]); |
4344 | } |
4345 | |
4346 | /* |
4347 | * Disable the receive h/w in preparation for a reset. |
4348 | */ |
4349 | static void |
4350 | ath_stoprecv(struct ath_softc *sc) |
4351 | { |
4352 | #define PA2DESC(_sc, _pa) \ |
4353 | ((struct ath_desc *)((char *)(_sc)->sc_rxdma.dd_desc + \ |
4354 | ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) |
4355 | struct ath_hal *ah = sc->sc_ah; |
4356 | |
4357 | ath_hal_stoppcurecv(ah); /* disable PCU */ |
4358 | ath_hal_setrxfilter(ah, 0); /* clear recv filter */ |
4359 | ath_hal_stopdmarecv(ah); /* disable DMA engine */ |
4360 | DELAY(3000); /* 3ms is long enough for 1 frame */ |
4361 | if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) { |
4362 | struct ath_buf *bf; |
4363 | |
4364 | printf("%s: rx queue %p, link %p\n" , __func__, |
4365 | (void *)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink); |
4366 | STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { |
4367 | struct ath_desc *ds = bf->bf_desc; |
4368 | HAL_STATUS status = ath_hal_rxprocdesc(ah, ds, |
4369 | bf->bf_daddr, PA2DESC(sc, ds->ds_link), |
4370 | &ds->ds_rxstat); |
4371 | if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL)) |
4372 | ath_printrxbuf(bf, status == HAL_OK); |
4373 | } |
4374 | } |
4375 | sc->sc_rxlink = NULL; /* just in case */ |
4376 | #undef PA2DESC |
4377 | } |
4378 | |
4379 | /* |
4380 | * Enable the receive h/w following a reset. |
4381 | */ |
4382 | static int |
4383 | ath_startrecv(struct ath_softc *sc) |
4384 | { |
4385 | struct ath_hal *ah = sc->sc_ah; |
4386 | struct ath_buf *bf; |
4387 | |
4388 | sc->sc_rxlink = NULL; |
4389 | STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { |
4390 | int error = ath_rxbuf_init(sc, bf); |
4391 | if (error != 0) { |
4392 | DPRINTF(sc, ATH_DEBUG_RECV, |
4393 | "%s: ath_rxbuf_init failed %d\n" , |
4394 | __func__, error); |
4395 | return error; |
4396 | } |
4397 | } |
4398 | |
4399 | bf = STAILQ_FIRST(&sc->sc_rxbuf); |
4400 | ath_hal_putrxbuf(ah, bf->bf_daddr); |
4401 | ath_hal_rxena(ah); /* enable recv descriptors */ |
4402 | ath_mode_init(sc); /* set filters, etc. */ |
4403 | ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */ |
4404 | return 0; |
4405 | } |
4406 | |
4407 | /* |
4408 | * Update internal state after a channel change. |
4409 | */ |
4410 | static void |
4411 | ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) |
4412 | { |
4413 | struct ieee80211com *ic = &sc->sc_ic; |
4414 | enum ieee80211_phymode mode; |
4415 | u_int16_t flags; |
4416 | |
4417 | /* |
4418 | * Change channels and update the h/w rate map |
4419 | * if we're switching; e.g. 11a to 11b/g. |
4420 | */ |
4421 | mode = ieee80211_chan2mode(ic, chan); |
4422 | if (mode != sc->sc_curmode) |
4423 | ath_setcurmode(sc, mode); |
4424 | /* |
4425 | * Update BPF state. NB: ethereal et. al. don't handle |
4426 | * merged flags well so pick a unique mode for their use. |
4427 | */ |
4428 | if (IEEE80211_IS_CHAN_A(chan)) |
4429 | flags = IEEE80211_CHAN_A; |
4430 | /* XXX 11g schizophrenia */ |
4431 | else if (IEEE80211_IS_CHAN_G(chan) || |
4432 | IEEE80211_IS_CHAN_PUREG(chan)) |
4433 | flags = IEEE80211_CHAN_G; |
4434 | else |
4435 | flags = IEEE80211_CHAN_B; |
4436 | if (IEEE80211_IS_CHAN_T(chan)) |
4437 | flags |= IEEE80211_CHAN_TURBO; |
4438 | sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq = |
4439 | htole16(chan->ic_freq); |
4440 | sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags = |
4441 | htole16(flags); |
4442 | } |
4443 | |
4444 | #if 0 |
4445 | /* |
4446 | * Poll for a channel clear indication; this is required |
4447 | * for channels requiring DFS and not previously visited |
4448 | * and/or with a recent radar detection. |
4449 | */ |
4450 | static void |
4451 | ath_dfswait(void *arg) |
4452 | { |
4453 | struct ath_softc *sc = arg; |
4454 | struct ath_hal *ah = sc->sc_ah; |
4455 | HAL_CHANNEL hchan; |
4456 | |
4457 | ath_hal_radar_wait(ah, &hchan); |
4458 | if (hchan.privFlags & CHANNEL_INTERFERENCE) { |
4459 | if_printf(&sc->sc_if, |
4460 | "channel %u/0x%x/0x%x has interference\n" , |
4461 | hchan.channel, hchan.channelFlags, hchan.privFlags); |
4462 | return; |
4463 | } |
4464 | if ((hchan.privFlags & CHANNEL_DFS) == 0) { |
4465 | /* XXX should not happen */ |
4466 | return; |
4467 | } |
4468 | if (hchan.privFlags & CHANNEL_DFS_CLEAR) { |
4469 | sc->sc_curchan.privFlags |= CHANNEL_DFS_CLEAR; |
4470 | sc->sc_if.if_flags &= ~IFF_OACTIVE; |
4471 | if_printf(&sc->sc_if, |
4472 | "channel %u/0x%x/0x%x marked clear\n" , |
4473 | hchan.channel, hchan.channelFlags, hchan.privFlags); |
4474 | } else |
4475 | callout_reset(&sc->sc_dfs_ch, 2 * hz, ath_dfswait, sc); |
4476 | } |
4477 | #endif |
4478 | |
4479 | /* |
4480 | * Set/change channels. If the channel is really being changed, |
4481 | * it's done by reseting the chip. To accomplish this we must |
4482 | * first cleanup any pending DMA, then restart stuff after a la |
4483 | * ath_init. |
4484 | */ |
4485 | static int |
4486 | ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) |
4487 | { |
4488 | struct ath_hal *ah = sc->sc_ah; |
4489 | struct ieee80211com *ic = &sc->sc_ic; |
4490 | HAL_CHANNEL hchan; |
4491 | |
4492 | /* |
4493 | * Convert to a HAL channel description with |
4494 | * the flags constrained to reflect the current |
4495 | * operating mode. |
4496 | */ |
4497 | hchan.channel = chan->ic_freq; |
4498 | hchan.channelFlags = ath_chan2flags(ic, chan); |
4499 | |
4500 | DPRINTF(sc, ATH_DEBUG_RESET, |
4501 | "%s: %u (%u MHz, hal flags 0x%x) -> %u (%u MHz, hal flags 0x%x)\n" , |
4502 | __func__, |
4503 | ath_hal_mhz2ieee(ah, sc->sc_curchan.channel, |
4504 | sc->sc_curchan.channelFlags), |
4505 | sc->sc_curchan.channel, sc->sc_curchan.channelFlags, |
4506 | ath_hal_mhz2ieee(ah, hchan.channel, hchan.channelFlags), |
4507 | hchan.channel, hchan.channelFlags); |
4508 | if (hchan.channel != sc->sc_curchan.channel || |
4509 | hchan.channelFlags != sc->sc_curchan.channelFlags) { |
4510 | HAL_STATUS status; |
4511 | |
4512 | /* |
4513 | * To switch channels clear any pending DMA operations; |
4514 | * wait long enough for the RX fifo to drain, reset the |
4515 | * hardware at the new frequency, and then re-enable |
4516 | * the relevant bits of the h/w. |
4517 | */ |
4518 | ath_hal_intrset(ah, 0); /* disable interrupts */ |
4519 | ath_draintxq(sc); /* clear pending tx frames */ |
4520 | ath_stoprecv(sc); /* turn off frame recv */ |
4521 | if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE, &status)) { |
4522 | if_printf(ic->ic_ifp, "%s: unable to reset " |
4523 | "channel %u (%u MHz, flags 0x%x hal flags 0x%x)\n" , |
4524 | __func__, ieee80211_chan2ieee(ic, chan), |
4525 | chan->ic_freq, chan->ic_flags, hchan.channelFlags); |
4526 | return EIO; |
4527 | } |
4528 | sc->sc_curchan = hchan; |
4529 | ath_update_txpow(sc); /* update tx power state */ |
4530 | ath_restore_diversity(sc); |
4531 | sc->sc_calinterval = 1; |
4532 | sc->sc_caltries = 0; |
4533 | |
4534 | /* |
4535 | * Re-enable rx framework. |
4536 | */ |
4537 | if (ath_startrecv(sc) != 0) { |
4538 | if_printf(&sc->sc_if, |
4539 | "%s: unable to restart recv logic\n" , __func__); |
4540 | return EIO; |
4541 | } |
4542 | |
4543 | /* |
4544 | * Change channels and update the h/w rate map |
4545 | * if we're switching; e.g. 11a to 11b/g. |
4546 | */ |
4547 | ic->ic_ibss_chan = chan; |
4548 | ath_chan_change(sc, chan); |
4549 | |
4550 | #if 0 |
4551 | /* |
4552 | * Handle DFS required waiting period to determine |
4553 | * if channel is clear of radar traffic. |
4554 | */ |
4555 | if (ic->ic_opmode == IEEE80211_M_HOSTAP) { |
4556 | #define DFS_AND_NOT_CLEAR(_c) \ |
4557 | (((_c)->privFlags & (CHANNEL_DFS | CHANNEL_DFS_CLEAR)) == CHANNEL_DFS) |
4558 | if (DFS_AND_NOT_CLEAR(&sc->sc_curchan)) { |
4559 | if_printf(&sc->sc_if, |
4560 | "wait for DFS clear channel signal\n" ); |
4561 | /* XXX stop sndq */ |
4562 | sc->sc_if.if_flags |= IFF_OACTIVE; |
4563 | callout_reset(&sc->sc_dfs_ch, |
4564 | 2 * hz, ath_dfswait, sc); |
4565 | } else |
4566 | callout_stop(&sc->sc_dfs_ch); |
4567 | #undef DFS_NOT_CLEAR |
4568 | } |
4569 | #endif |
4570 | |
4571 | /* |
4572 | * Re-enable interrupts. |
4573 | */ |
4574 | ath_hal_intrset(ah, sc->sc_imask); |
4575 | } |
4576 | return 0; |
4577 | } |
4578 | |
4579 | static void |
4580 | ath_next_scan(void *arg) |
4581 | { |
4582 | struct ath_softc *sc = arg; |
4583 | struct ieee80211com *ic = &sc->sc_ic; |
4584 | int s; |
4585 | |
4586 | /* don't call ath_start w/o network interrupts blocked */ |
4587 | s = splnet(); |
4588 | |
4589 | if (ic->ic_state == IEEE80211_S_SCAN) |
4590 | ieee80211_next_scan(ic); |
4591 | splx(s); |
4592 | } |
4593 | |
4594 | /* |
4595 | * Periodically recalibrate the PHY to account |
4596 | * for temperature/environment changes. |
4597 | */ |
4598 | static void |
4599 | ath_calibrate(void *arg) |
4600 | { |
4601 | struct ath_softc *sc = arg; |
4602 | struct ath_hal *ah = sc->sc_ah; |
4603 | HAL_BOOL iqCalDone; |
4604 | int s; |
4605 | |
4606 | sc->sc_stats.ast_per_cal++; |
4607 | |
4608 | s = splnet(); |
4609 | |
4610 | if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { |
4611 | /* |
4612 | * Rfgain is out of bounds, reset the chip |
4613 | * to load new gain values. |
4614 | */ |
4615 | DPRINTF(sc, ATH_DEBUG_CALIBRATE, |
4616 | "%s: rfgain change\n" , __func__); |
4617 | sc->sc_stats.ast_per_rfgain++; |
4618 | ath_reset(&sc->sc_if); |
4619 | } |
4620 | if (!ath_hal_calibrate(ah, &sc->sc_curchan, &iqCalDone)) { |
4621 | DPRINTF(sc, ATH_DEBUG_ANY, |
4622 | "%s: calibration of channel %u failed\n" , |
4623 | __func__, sc->sc_curchan.channel); |
4624 | sc->sc_stats.ast_per_calfail++; |
4625 | } |
4626 | /* |
4627 | * Calibrate noise floor data again in case of change. |
4628 | */ |
4629 | ath_hal_process_noisefloor(ah); |
4630 | /* |
4631 | * Poll more frequently when the IQ calibration is in |
4632 | * progress to speedup loading the final settings. |
4633 | * We temper this aggressive polling with an exponential |
4634 | * back off after 4 tries up to ath_calinterval. |
4635 | */ |
4636 | if (iqCalDone || sc->sc_calinterval >= ath_calinterval) { |
4637 | sc->sc_caltries = 0; |
4638 | sc->sc_calinterval = ath_calinterval; |
4639 | } else if (sc->sc_caltries > 4) { |
4640 | sc->sc_caltries = 0; |
4641 | sc->sc_calinterval <<= 1; |
4642 | if (sc->sc_calinterval > ath_calinterval) |
4643 | sc->sc_calinterval = ath_calinterval; |
4644 | } |
4645 | KASSERTMSG(0 < sc->sc_calinterval && |
4646 | sc->sc_calinterval <= ath_calinterval, |
4647 | "bad calibration interval %u" , sc->sc_calinterval); |
4648 | |
4649 | DPRINTF(sc, ATH_DEBUG_CALIBRATE, |
4650 | "%s: next +%u (%siqCalDone tries %u)\n" , __func__, |
4651 | sc->sc_calinterval, iqCalDone ? "" : "!" , sc->sc_caltries); |
4652 | sc->sc_caltries++; |
4653 | callout_reset(&sc->sc_cal_ch, sc->sc_calinterval * hz, |
4654 | ath_calibrate, sc); |
4655 | splx(s); |
4656 | } |
4657 | |
4658 | static int |
4659 | ath_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg) |
4660 | { |
4661 | struct ifnet *ifp = ic->ic_ifp; |
4662 | struct ath_softc *sc = ifp->if_softc; |
4663 | struct ath_hal *ah = sc->sc_ah; |
4664 | struct ieee80211_node *ni; |
4665 | int i, error; |
4666 | const u_int8_t *bssid; |
4667 | u_int32_t rfilt; |
4668 | static const HAL_LED_STATE leds[] = { |
4669 | HAL_LED_INIT, /* IEEE80211_S_INIT */ |
4670 | HAL_LED_SCAN, /* IEEE80211_S_SCAN */ |
4671 | HAL_LED_AUTH, /* IEEE80211_S_AUTH */ |
4672 | HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ |
4673 | HAL_LED_RUN, /* IEEE80211_S_RUN */ |
4674 | }; |
4675 | |
4676 | DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n" , __func__, |
4677 | ieee80211_state_name[ic->ic_state], |
4678 | ieee80211_state_name[nstate]); |
4679 | |
4680 | callout_stop(&sc->sc_scan_ch); |
4681 | callout_stop(&sc->sc_cal_ch); |
4682 | #if 0 |
4683 | callout_stop(&sc->sc_dfs_ch); |
4684 | #endif |
4685 | ath_hal_setledstate(ah, leds[nstate]); /* set LED */ |
4686 | |
4687 | if (nstate == IEEE80211_S_INIT) { |
4688 | sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); |
4689 | /* |
4690 | * NB: disable interrupts so we don't rx frames. |
4691 | */ |
4692 | ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL); |
4693 | /* |
4694 | * Notify the rate control algorithm. |
4695 | */ |
4696 | ath_rate_newstate(sc, nstate); |
4697 | goto done; |
4698 | } |
4699 | ni = ic->ic_bss; |
4700 | error = ath_chan_set(sc, ic->ic_curchan); |
4701 | if (error != 0) |
4702 | goto bad; |
4703 | rfilt = ath_calcrxfilter(sc, nstate); |
4704 | if (nstate == IEEE80211_S_SCAN) |
4705 | bssid = ifp->if_broadcastaddr; |
4706 | else |
4707 | bssid = ni->ni_bssid; |
4708 | ath_hal_setrxfilter(ah, rfilt); |
4709 | DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s\n" , |
4710 | __func__, rfilt, ether_sprintf(bssid)); |
4711 | |
4712 | if (nstate == IEEE80211_S_RUN && ic->ic_opmode == IEEE80211_M_STA) |
4713 | ath_hal_setassocid(ah, bssid, ni->ni_associd); |
4714 | else |
4715 | ath_hal_setassocid(ah, bssid, 0); |
4716 | if (ic->ic_flags & IEEE80211_F_PRIVACY) { |
4717 | for (i = 0; i < IEEE80211_WEP_NKID; i++) |
4718 | if (ath_hal_keyisvalid(ah, i)) |
4719 | ath_hal_keysetmac(ah, i, bssid); |
4720 | } |
4721 | |
4722 | /* |
4723 | * Notify the rate control algorithm so rates |
4724 | * are setup should ath_beacon_alloc be called. |
4725 | */ |
4726 | ath_rate_newstate(sc, nstate); |
4727 | |
4728 | if (ic->ic_opmode == IEEE80211_M_MONITOR) { |
4729 | /* nothing to do */; |
4730 | } else if (nstate == IEEE80211_S_RUN) { |
4731 | DPRINTF(sc, ATH_DEBUG_STATE, |
4732 | "%s(RUN): ic_flags=0x%08x iv=%d bssid=%s " |
4733 | "capinfo=0x%04x chan=%d\n" |
4734 | , __func__ |
4735 | , ic->ic_flags |
4736 | , ni->ni_intval |
4737 | , ether_sprintf(ni->ni_bssid) |
4738 | , ni->ni_capinfo |
4739 | , ieee80211_chan2ieee(ic, ic->ic_curchan)); |
4740 | |
4741 | switch (ic->ic_opmode) { |
4742 | case IEEE80211_M_HOSTAP: |
4743 | case IEEE80211_M_IBSS: |
4744 | /* |
4745 | * Allocate and setup the beacon frame. |
4746 | * |
4747 | * Stop any previous beacon DMA. This may be |
4748 | * necessary, for example, when an ibss merge |
4749 | * causes reconfiguration; there will be a state |
4750 | * transition from RUN->RUN that means we may |
4751 | * be called with beacon transmission active. |
4752 | */ |
4753 | ath_hal_stoptxdma(ah, sc->sc_bhalq); |
4754 | ath_beacon_free(sc); |
4755 | error = ath_beacon_alloc(sc, ni); |
4756 | if (error != 0) |
4757 | goto bad; |
4758 | /* |
4759 | * If joining an adhoc network defer beacon timer |
4760 | * configuration to the next beacon frame so we |
4761 | * have a current TSF to use. Otherwise we're |
4762 | * starting an ibss/bss so there's no need to delay. |
4763 | */ |
4764 | if (ic->ic_opmode == IEEE80211_M_IBSS && |
4765 | ic->ic_bss->ni_tstamp.tsf != 0) |
4766 | sc->sc_syncbeacon = 1; |
4767 | else |
4768 | ath_beacon_config(sc); |
4769 | break; |
4770 | case IEEE80211_M_STA: |
4771 | /* |
4772 | * Allocate a key cache slot to the station. |
4773 | */ |
4774 | if ((ic->ic_flags & IEEE80211_F_PRIVACY) == 0 && |
4775 | sc->sc_hasclrkey && |
4776 | ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE) |
4777 | ath_setup_stationkey(ni); |
4778 | /* |
4779 | * Defer beacon timer configuration to the next |
4780 | * beacon frame so we have a current TSF to use |
4781 | * (any TSF collected when scanning is likely old). |
4782 | */ |
4783 | sc->sc_syncbeacon = 1; |
4784 | break; |
4785 | default: |
4786 | break; |
4787 | } |
4788 | /* |
4789 | * Let the hal process statistics collected during a |
4790 | * scan so it can provide calibrated noise floor data. |
4791 | */ |
4792 | ath_hal_process_noisefloor(ah); |
4793 | /* |
4794 | * Reset rssi stats; maybe not the best place... |
4795 | */ |
4796 | sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; |
4797 | sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; |
4798 | sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; |
4799 | } else { |
4800 | ath_hal_intrset(ah, |
4801 | sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); |
4802 | sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); |
4803 | } |
4804 | done: |
4805 | /* |
4806 | * Invoke the parent method to complete the work. |
4807 | */ |
4808 | error = sc->sc_newstate(ic, nstate, arg); |
4809 | /* |
4810 | * Finally, start any timers. |
4811 | */ |
4812 | if (nstate == IEEE80211_S_RUN) { |
4813 | /* start periodic recalibration timer */ |
4814 | callout_reset(&sc->sc_cal_ch, sc->sc_calinterval * hz, |
4815 | ath_calibrate, sc); |
4816 | } else if (nstate == IEEE80211_S_SCAN) { |
4817 | /* start ap/neighbor scan timer */ |
4818 | callout_reset(&sc->sc_scan_ch, (ath_dwelltime * hz) / 1000, |
4819 | ath_next_scan, sc); |
4820 | } |
4821 | bad: |
4822 | return error; |
4823 | } |
4824 | |
4825 | /* |
4826 | * Allocate a key cache slot to the station so we can |
4827 | * setup a mapping from key index to node. The key cache |
4828 | * slot is needed for managing antenna state and for |
4829 | * compression when stations do not use crypto. We do |
4830 | * it uniliaterally here; if crypto is employed this slot |
4831 | * will be reassigned. |
4832 | */ |
4833 | static void |
4834 | ath_setup_stationkey(struct ieee80211_node *ni) |
4835 | { |
4836 | struct ieee80211com *ic = ni->ni_ic; |
4837 | struct ath_softc *sc = ic->ic_ifp->if_softc; |
4838 | ieee80211_keyix keyix, rxkeyix; |
4839 | |
4840 | if (!ath_key_alloc(ic, &ni->ni_ucastkey, &keyix, &rxkeyix)) { |
4841 | /* |
4842 | * Key cache is full; we'll fall back to doing |
4843 | * the more expensive lookup in software. Note |
4844 | * this also means no h/w compression. |
4845 | */ |
4846 | /* XXX msg+statistic */ |
4847 | } else { |
4848 | /* XXX locking? */ |
4849 | ni->ni_ucastkey.wk_keyix = keyix; |
4850 | ni->ni_ucastkey.wk_rxkeyix = rxkeyix; |
4851 | /* NB: this will create a pass-thru key entry */ |
4852 | ath_keyset(sc, &ni->ni_ucastkey, ni->ni_macaddr, ic->ic_bss); |
4853 | } |
4854 | } |
4855 | |
4856 | /* |
4857 | * Setup driver-specific state for a newly associated node. |
4858 | * Note that we're called also on a re-associate, the isnew |
4859 | * param tells us if this is the first time or not. |
4860 | */ |
4861 | static void |
4862 | ath_newassoc(struct ieee80211_node *ni, int isnew) |
4863 | { |
4864 | struct ieee80211com *ic = ni->ni_ic; |
4865 | struct ath_softc *sc = ic->ic_ifp->if_softc; |
4866 | |
4867 | ath_rate_newassoc(sc, ATH_NODE(ni), isnew); |
4868 | if (isnew && |
4869 | (ic->ic_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey) { |
4870 | KASSERTMSG(ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE, |
4871 | "new assoc with a unicast key already setup (keyix %u)" , |
4872 | ni->ni_ucastkey.wk_keyix); |
4873 | ath_setup_stationkey(ni); |
4874 | } |
4875 | } |
4876 | |
4877 | static int |
4878 | ath_getchannels(struct ath_softc *sc, u_int cc, |
4879 | HAL_BOOL outdoor, HAL_BOOL xchanmode) |
4880 | { |
4881 | #define COMPAT (CHANNEL_ALL_NOTURBO|CHANNEL_PASSIVE) |
4882 | struct ieee80211com *ic = &sc->sc_ic; |
4883 | struct ifnet *ifp = &sc->sc_if; |
4884 | struct ath_hal *ah = sc->sc_ah; |
4885 | HAL_CHANNEL *chans; |
4886 | int i, ix, nchan; |
4887 | |
4888 | chans = malloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL), |
4889 | M_TEMP, M_NOWAIT); |
4890 | if (chans == NULL) { |
4891 | if_printf(ifp, "unable to allocate channel table\n" ); |
4892 | return ENOMEM; |
4893 | } |
4894 | if (!ath_hal_init_channels(ah, chans, IEEE80211_CHAN_MAX, &nchan, |
4895 | NULL, 0, NULL, |
4896 | cc, HAL_MODE_ALL, outdoor, xchanmode)) { |
4897 | u_int32_t rd; |
4898 | |
4899 | (void)ath_hal_getregdomain(ah, &rd); |
4900 | if_printf(ifp, "unable to collect channel list from hal; " |
4901 | "regdomain likely %u country code %u\n" , rd, cc); |
4902 | free(chans, M_TEMP); |
4903 | return EINVAL; |
4904 | } |
4905 | |
4906 | /* |
4907 | * Convert HAL channels to ieee80211 ones and insert |
4908 | * them in the table according to their channel number. |
4909 | */ |
4910 | for (i = 0; i < nchan; i++) { |
4911 | HAL_CHANNEL *c = &chans[i]; |
4912 | u_int16_t flags; |
4913 | |
4914 | ix = ath_hal_mhz2ieee(ah, c->channel, c->channelFlags); |
4915 | if (ix > IEEE80211_CHAN_MAX) { |
4916 | if_printf(ifp, "bad hal channel %d (%u/%x) ignored\n" , |
4917 | ix, c->channel, c->channelFlags); |
4918 | continue; |
4919 | } |
4920 | if (ix < 0) { |
4921 | /* XXX can't handle stuff <2400 right now */ |
4922 | if (bootverbose) |
4923 | if_printf(ifp, "hal channel %d (%u/%x) " |
4924 | "cannot be handled; ignored\n" , |
4925 | ix, c->channel, c->channelFlags); |
4926 | continue; |
4927 | } |
4928 | /* |
4929 | * Calculate net80211 flags; most are compatible |
4930 | * but some need massaging. Note the static turbo |
4931 | * conversion can be removed once net80211 is updated |
4932 | * to understand static vs. dynamic turbo. |
4933 | */ |
4934 | flags = c->channelFlags & COMPAT; |
4935 | if (c->channelFlags & CHANNEL_STURBO) |
4936 | flags |= IEEE80211_CHAN_TURBO; |
4937 | if (ic->ic_channels[ix].ic_freq == 0) { |
4938 | ic->ic_channels[ix].ic_freq = c->channel; |
4939 | ic->ic_channels[ix].ic_flags = flags; |
4940 | } else { |
4941 | /* channels overlap; e.g. 11g and 11b */ |
4942 | ic->ic_channels[ix].ic_flags |= flags; |
4943 | } |
4944 | } |
4945 | free(chans, M_TEMP); |
4946 | return 0; |
4947 | #undef COMPAT |
4948 | } |
4949 | |
4950 | static void |
4951 | ath_led_done(void *arg) |
4952 | { |
4953 | struct ath_softc *sc = arg; |
4954 | |
4955 | sc->sc_blinking = 0; |
4956 | } |
4957 | |
4958 | /* |
4959 | * Turn the LED off: flip the pin and then set a timer so no |
4960 | * update will happen for the specified duration. |
4961 | */ |
4962 | static void |
4963 | ath_led_off(void *arg) |
4964 | { |
4965 | struct ath_softc *sc = arg; |
4966 | |
4967 | ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon); |
4968 | callout_reset(&sc->sc_ledtimer, sc->sc_ledoff, ath_led_done, sc); |
4969 | } |
4970 | |
4971 | /* |
4972 | * Blink the LED according to the specified on/off times. |
4973 | */ |
4974 | static void |
4975 | ath_led_blink(struct ath_softc *sc, int on, int off) |
4976 | { |
4977 | DPRINTF(sc, ATH_DEBUG_LED, "%s: on %u off %u\n" , __func__, on, off); |
4978 | ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, sc->sc_ledon); |
4979 | sc->sc_blinking = 1; |
4980 | sc->sc_ledoff = off; |
4981 | callout_reset(&sc->sc_ledtimer, on, ath_led_off, sc); |
4982 | } |
4983 | |
4984 | static void |
4985 | ath_led_event(struct ath_softc *sc, int event) |
4986 | { |
4987 | |
4988 | sc->sc_ledevent = ticks; /* time of last event */ |
4989 | if (sc->sc_blinking) /* don't interrupt active blink */ |
4990 | return; |
4991 | switch (event) { |
4992 | case ATH_LED_POLL: |
4993 | ath_led_blink(sc, sc->sc_hwmap[0].ledon, |
4994 | sc->sc_hwmap[0].ledoff); |
4995 | break; |
4996 | case ATH_LED_TX: |
4997 | ath_led_blink(sc, sc->sc_hwmap[sc->sc_txrate].ledon, |
4998 | sc->sc_hwmap[sc->sc_txrate].ledoff); |
4999 | break; |
5000 | case ATH_LED_RX: |
5001 | ath_led_blink(sc, sc->sc_hwmap[sc->sc_rxrate].ledon, |
5002 | sc->sc_hwmap[sc->sc_rxrate].ledoff); |
5003 | break; |
5004 | } |
5005 | } |
5006 | |
5007 | static void |
5008 | ath_update_txpow(struct ath_softc *sc) |
5009 | { |
5010 | #define COMPAT (CHANNEL_ALL_NOTURBO|CHANNEL_PASSIVE) |
5011 | struct ieee80211com *ic = &sc->sc_ic; |
5012 | struct ath_hal *ah = sc->sc_ah; |
5013 | u_int32_t txpow; |
5014 | |
5015 | if (sc->sc_curtxpow != ic->ic_txpowlimit) { |
5016 | ath_hal_settxpowlimit(ah, ic->ic_txpowlimit); |
5017 | /* read back in case value is clamped */ |
5018 | (void)ath_hal_gettxpowlimit(ah, &txpow); |
5019 | ic->ic_txpowlimit = sc->sc_curtxpow = txpow; |
5020 | } |
5021 | /* |
5022 | * Fetch max tx power level for status requests. |
5023 | */ |
5024 | (void)ath_hal_getmaxtxpow(sc->sc_ah, &txpow); |
5025 | ic->ic_bss->ni_txpower = txpow; |
5026 | } |
5027 | |
5028 | static void |
5029 | rate_setup(struct ath_softc *sc, |
5030 | const HAL_RATE_TABLE *rt, struct ieee80211_rateset *rs) |
5031 | { |
5032 | int i, maxrates; |
5033 | |
5034 | if (rt->rateCount > IEEE80211_RATE_MAXSIZE) { |
5035 | DPRINTF(sc, ATH_DEBUG_ANY, |
5036 | "%s: rate table too small (%u > %u)\n" , |
5037 | __func__, rt->rateCount, IEEE80211_RATE_MAXSIZE); |
5038 | maxrates = IEEE80211_RATE_MAXSIZE; |
5039 | } else |
5040 | maxrates = rt->rateCount; |
5041 | for (i = 0; i < maxrates; i++) |
5042 | rs->rs_rates[i] = rt->info[i].dot11Rate; |
5043 | rs->rs_nrates = maxrates; |
5044 | } |
5045 | |
5046 | static int |
5047 | ath_rate_setup(struct ath_softc *sc, u_int mode) |
5048 | { |
5049 | struct ath_hal *ah = sc->sc_ah; |
5050 | struct ieee80211com *ic = &sc->sc_ic; |
5051 | const HAL_RATE_TABLE *rt; |
5052 | |
5053 | switch (mode) { |
5054 | case IEEE80211_MODE_11A: |
5055 | rt = ath_hal_getratetable(ah, HAL_MODE_11A); |
5056 | break; |
5057 | case IEEE80211_MODE_11B: |
5058 | rt = ath_hal_getratetable(ah, HAL_MODE_11B); |
5059 | break; |
5060 | case IEEE80211_MODE_11G: |
5061 | rt = ath_hal_getratetable(ah, HAL_MODE_11G); |
5062 | break; |
5063 | case IEEE80211_MODE_TURBO_A: |
5064 | /* XXX until static/dynamic turbo is fixed */ |
5065 | rt = ath_hal_getratetable(ah, HAL_MODE_TURBO); |
5066 | break; |
5067 | case IEEE80211_MODE_TURBO_G: |
5068 | rt = ath_hal_getratetable(ah, HAL_MODE_108G); |
5069 | break; |
5070 | default: |
5071 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n" , |
5072 | __func__, mode); |
5073 | return 0; |
5074 | } |
5075 | sc->sc_rates[mode] = rt; |
5076 | if (rt != NULL) { |
5077 | rate_setup(sc, rt, &ic->ic_sup_rates[mode]); |
5078 | return 1; |
5079 | } else |
5080 | return 0; |
5081 | } |
5082 | |
5083 | static void |
5084 | ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) |
5085 | { |
5086 | #define N(a) (sizeof(a)/sizeof(a[0])) |
5087 | /* NB: on/off times from the Atheros NDIS driver, w/ permission */ |
5088 | static const struct { |
5089 | u_int rate; /* tx/rx 802.11 rate */ |
5090 | u_int16_t timeOn; /* LED on time (ms) */ |
5091 | u_int16_t timeOff; /* LED off time (ms) */ |
5092 | } blinkrates[] = { |
5093 | { 108, 40, 10 }, |
5094 | { 96, 44, 11 }, |
5095 | { 72, 50, 13 }, |
5096 | { 48, 57, 14 }, |
5097 | { 36, 67, 16 }, |
5098 | { 24, 80, 20 }, |
5099 | { 22, 100, 25 }, |
5100 | { 18, 133, 34 }, |
5101 | { 12, 160, 40 }, |
5102 | { 10, 200, 50 }, |
5103 | { 6, 240, 58 }, |
5104 | { 4, 267, 66 }, |
5105 | { 2, 400, 100 }, |
5106 | { 0, 500, 130 }, |
5107 | }; |
5108 | const HAL_RATE_TABLE *rt; |
5109 | int i, j; |
5110 | |
5111 | memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); |
5112 | rt = sc->sc_rates[mode]; |
5113 | KASSERTMSG(rt != NULL, "no h/w rate set for phy mode %u" , mode); |
5114 | for (i = 0; i < rt->rateCount; i++) |
5115 | sc->sc_rixmap[rt->info[i].dot11Rate & IEEE80211_RATE_VAL] = i; |
5116 | memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap)); |
5117 | for (i = 0; i < 32; i++) { |
5118 | u_int8_t ix = rt->rateCodeToIndex[i]; |
5119 | if (ix == 0xff) { |
5120 | sc->sc_hwmap[i].ledon = (500 * hz) / 1000; |
5121 | sc->sc_hwmap[i].ledoff = (130 * hz) / 1000; |
5122 | continue; |
5123 | } |
5124 | sc->sc_hwmap[i].ieeerate = |
5125 | rt->info[ix].dot11Rate & IEEE80211_RATE_VAL; |
5126 | sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD; |
5127 | if (rt->info[ix].shortPreamble || |
5128 | rt->info[ix].phy == IEEE80211_T_OFDM) |
5129 | sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE; |
5130 | /* NB: receive frames include FCS */ |
5131 | sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags | |
5132 | IEEE80211_RADIOTAP_F_FCS; |
5133 | /* setup blink rate table to avoid per-packet lookup */ |
5134 | for (j = 0; j < N(blinkrates)-1; j++) |
5135 | if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate) |
5136 | break; |
5137 | /* NB: this uses the last entry if the rate isn't found */ |
5138 | /* XXX beware of overlow */ |
5139 | sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000; |
5140 | sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000; |
5141 | } |
5142 | sc->sc_currates = rt; |
5143 | sc->sc_curmode = mode; |
5144 | /* |
5145 | * All protection frames are transmited at 2Mb/s for |
5146 | * 11g, otherwise at 1Mb/s. |
5147 | */ |
5148 | if (mode == IEEE80211_MODE_11G) |
5149 | sc->sc_protrix = ath_tx_findrix(rt, 2*2); |
5150 | else |
5151 | sc->sc_protrix = ath_tx_findrix(rt, 2*1); |
5152 | /* rate index used to send management frames */ |
5153 | sc->sc_minrateix = 0; |
5154 | /* |
5155 | * Setup multicast rate state. |
5156 | */ |
5157 | /* XXX layering violation */ |
5158 | sc->sc_mcastrix = ath_tx_findrix(rt, sc->sc_ic.ic_mcast_rate); |
5159 | sc->sc_mcastrate = sc->sc_ic.ic_mcast_rate; |
5160 | /* NB: caller is responsible for reseting rate control state */ |
5161 | #undef N |
5162 | } |
5163 | |
5164 | #ifdef AR_DEBUG |
5165 | static void |
5166 | ath_printrxbuf(struct ath_buf *bf, int done) |
5167 | { |
5168 | struct ath_desc *ds; |
5169 | int i; |
5170 | |
5171 | for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) { |
5172 | printf("R%d (%p %" PRIx64 |
5173 | ") %08x %08x %08x %08x %08x %08x %02x %02x %c\n" , i, ds, |
5174 | (uint64_t)bf->bf_daddr + sizeof (struct ath_desc) * i, |
5175 | ds->ds_link, ds->ds_data, |
5176 | ds->ds_ctl0, ds->ds_ctl1, |
5177 | ds->ds_hw[0], ds->ds_hw[1], |
5178 | ds->ds_rxstat.rs_status, ds->ds_rxstat.rs_keyix, |
5179 | !done ? ' ' : (ds->ds_rxstat.rs_status == 0) ? '*' : '!'); |
5180 | } |
5181 | } |
5182 | |
5183 | static void |
5184 | ath_printtxbuf(struct ath_buf *bf, int done) |
5185 | { |
5186 | struct ath_desc *ds; |
5187 | int i; |
5188 | |
5189 | for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) { |
5190 | printf("T%d (%p %" PRIx64 |
5191 | ") %08x %08x %08x %08x %08x %08x %08x %08x %c\n" , |
5192 | i, ds, |
5193 | (uint64_t)bf->bf_daddr + sizeof (struct ath_desc) * i, |
5194 | ds->ds_link, ds->ds_data, |
5195 | ds->ds_ctl0, ds->ds_ctl1, |
5196 | ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3], |
5197 | !done ? ' ' : (ds->ds_txstat.ts_status == 0) ? '*' : '!'); |
5198 | } |
5199 | } |
5200 | #endif /* AR_DEBUG */ |
5201 | |
5202 | static void |
5203 | ath_watchdog(struct ifnet *ifp) |
5204 | { |
5205 | struct ath_softc *sc = ifp->if_softc; |
5206 | struct ieee80211com *ic = &sc->sc_ic; |
5207 | struct ath_txq *axq; |
5208 | int i; |
5209 | |
5210 | ifp->if_timer = 0; |
5211 | if ((ifp->if_flags & IFF_RUNNING) == 0 || |
5212 | !device_is_active(sc->sc_dev)) |
5213 | return; |
5214 | for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { |
5215 | if (!ATH_TXQ_SETUP(sc, i)) |
5216 | continue; |
5217 | axq = &sc->sc_txq[i]; |
5218 | ATH_TXQ_LOCK(axq); |
5219 | if (axq->axq_timer == 0) |
5220 | ; |
5221 | else if (--axq->axq_timer == 0) { |
5222 | ATH_TXQ_UNLOCK(axq); |
5223 | if_printf(ifp, "device timeout (txq %d, " |
5224 | "txintrperiod %d)\n" , i, sc->sc_txintrperiod); |
5225 | if (sc->sc_txintrperiod > 1) |
5226 | sc->sc_txintrperiod--; |
5227 | ath_reset(ifp); |
5228 | ifp->if_oerrors++; |
5229 | sc->sc_stats.ast_watchdog++; |
5230 | break; |
5231 | } else |
5232 | ifp->if_timer = 1; |
5233 | ATH_TXQ_UNLOCK(axq); |
5234 | } |
5235 | ieee80211_watchdog(ic); |
5236 | } |
5237 | |
5238 | /* |
5239 | * Diagnostic interface to the HAL. This is used by various |
5240 | * tools to do things like retrieve register contents for |
5241 | * debugging. The mechanism is intentionally opaque so that |
5242 | * it can change frequently w/o concern for compatiblity. |
5243 | */ |
5244 | static int |
5245 | ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad) |
5246 | { |
5247 | struct ath_hal *ah = sc->sc_ah; |
5248 | u_int id = ad->ad_id & ATH_DIAG_ID; |
5249 | void *indata = NULL; |
5250 | void *outdata = NULL; |
5251 | u_int32_t insize = ad->ad_in_size; |
5252 | u_int32_t outsize = ad->ad_out_size; |
5253 | int error = 0; |
5254 | |
5255 | if (ad->ad_id & ATH_DIAG_IN) { |
5256 | /* |
5257 | * Copy in data. |
5258 | */ |
5259 | indata = malloc(insize, M_TEMP, M_NOWAIT); |
5260 | if (indata == NULL) { |
5261 | error = ENOMEM; |
5262 | goto bad; |
5263 | } |
5264 | error = copyin(ad->ad_in_data, indata, insize); |
5265 | if (error) |
5266 | goto bad; |
5267 | } |
5268 | if (ad->ad_id & ATH_DIAG_DYN) { |
5269 | /* |
5270 | * Allocate a buffer for the results (otherwise the HAL |
5271 | * returns a pointer to a buffer where we can read the |
5272 | * results). Note that we depend on the HAL leaving this |
5273 | * pointer for us to use below in reclaiming the buffer; |
5274 | * may want to be more defensive. |
5275 | */ |
5276 | outdata = malloc(outsize, M_TEMP, M_NOWAIT); |
5277 | if (outdata == NULL) { |
5278 | error = ENOMEM; |
5279 | goto bad; |
5280 | } |
5281 | } |
5282 | if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) { |
5283 | if (outsize < ad->ad_out_size) |
5284 | ad->ad_out_size = outsize; |
5285 | if (outdata != NULL) |
5286 | error = copyout(outdata, ad->ad_out_data, |
5287 | ad->ad_out_size); |
5288 | } else { |
5289 | error = EINVAL; |
5290 | } |
5291 | bad: |
5292 | if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL) |
5293 | free(indata, M_TEMP); |
5294 | if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL) |
5295 | free(outdata, M_TEMP); |
5296 | return error; |
5297 | } |
5298 | |
5299 | static int |
5300 | ath_ioctl(struct ifnet *ifp, u_long cmd, void *data) |
5301 | { |
5302 | #define IS_RUNNING(ifp) \ |
5303 | ((ifp->if_flags & IFF_UP) && (ifp->if_flags & IFF_RUNNING)) |
5304 | struct ath_softc *sc = ifp->if_softc; |
5305 | struct ieee80211com *ic = &sc->sc_ic; |
5306 | struct ifreq *ifr = (struct ifreq *)data; |
5307 | int error = 0, s; |
5308 | |
5309 | s = splnet(); |
5310 | switch (cmd) { |
5311 | case SIOCSIFFLAGS: |
5312 | if ((error = ifioctl_common(ifp, cmd, data)) != 0) |
5313 | break; |
5314 | switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) { |
5315 | case IFF_UP|IFF_RUNNING: |
5316 | /* |
5317 | * To avoid rescanning another access point, |
5318 | * do not call ath_init() here. Instead, |
5319 | * only reflect promisc mode settings. |
5320 | */ |
5321 | ath_mode_init(sc); |
5322 | break; |
5323 | case IFF_UP: |
5324 | /* |
5325 | * Beware of being called during attach/detach |
5326 | * to reset promiscuous mode. In that case we |
5327 | * will still be marked UP but not RUNNING. |
5328 | * However trying to re-init the interface |
5329 | * is the wrong thing to do as we've already |
5330 | * torn down much of our state. There's |
5331 | * probably a better way to deal with this. |
5332 | */ |
5333 | error = ath_init(sc); |
5334 | break; |
5335 | case IFF_RUNNING: |
5336 | ath_stop_locked(ifp, 1); |
5337 | break; |
5338 | case 0: |
5339 | break; |
5340 | } |
5341 | break; |
5342 | case SIOCADDMULTI: |
5343 | case SIOCDELMULTI: |
5344 | if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) { |
5345 | if (ifp->if_flags & IFF_RUNNING) |
5346 | ath_mode_init(sc); |
5347 | error = 0; |
5348 | } |
5349 | break; |
5350 | case SIOCGATHSTATS: |
5351 | /* NB: embed these numbers to get a consistent view */ |
5352 | sc->sc_stats.ast_tx_packets = ifp->if_opackets; |
5353 | sc->sc_stats.ast_rx_packets = ifp->if_ipackets; |
5354 | sc->sc_stats.ast_rx_rssi = ieee80211_getrssi(ic); |
5355 | splx(s); |
5356 | /* |
5357 | * NB: Drop the softc lock in case of a page fault; |
5358 | * we'll accept any potential inconsisentcy in the |
5359 | * statistics. The alternative is to copy the data |
5360 | * to a local structure. |
5361 | */ |
5362 | return copyout(&sc->sc_stats, |
5363 | ifr->ifr_data, sizeof (sc->sc_stats)); |
5364 | case SIOCGATHDIAG: |
5365 | error = ath_ioctl_diag(sc, (struct ath_diag *) ifr); |
5366 | break; |
5367 | default: |
5368 | error = ieee80211_ioctl(ic, cmd, data); |
5369 | if (error != ENETRESET) |
5370 | ; |
5371 | else if (IS_RUNNING(ifp) && |
5372 | ic->ic_roaming != IEEE80211_ROAMING_MANUAL) |
5373 | error = ath_init(sc); |
5374 | else |
5375 | error = 0; |
5376 | break; |
5377 | } |
5378 | splx(s); |
5379 | return error; |
5380 | #undef IS_RUNNING |
5381 | } |
5382 | |
5383 | static void |
5384 | ath_bpfattach(struct ath_softc *sc) |
5385 | { |
5386 | struct ifnet *ifp = &sc->sc_if; |
5387 | |
5388 | bpf_attach2(ifp, DLT_IEEE802_11_RADIO, |
5389 | sizeof(struct ieee80211_frame) + sizeof(sc->sc_tx_th), |
5390 | &sc->sc_drvbpf); |
5391 | |
5392 | /* |
5393 | * Initialize constant fields. |
5394 | * XXX make header lengths a multiple of 32-bits so subsequent |
5395 | * headers are properly aligned; this is a kludge to keep |
5396 | * certain applications happy. |
5397 | * |
5398 | * NB: the channel is setup each time we transition to the |
5399 | * RUN state to avoid filling it in for each frame. |
5400 | */ |
5401 | sc->sc_tx_th_len = roundup(sizeof(sc->sc_tx_th), sizeof(u_int32_t)); |
5402 | sc->sc_tx_th.wt_ihdr.it_len = htole16(sc->sc_tx_th_len); |
5403 | sc->sc_tx_th.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT); |
5404 | |
5405 | sc->sc_rx_th_len = roundup(sizeof(sc->sc_rx_th), sizeof(u_int32_t)); |
5406 | sc->sc_rx_th.wr_ihdr.it_len = htole16(sc->sc_rx_th_len); |
5407 | sc->sc_rx_th.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT); |
5408 | } |
5409 | |
5410 | /* |
5411 | * Announce various information on device/driver attach. |
5412 | */ |
5413 | static void |
5414 | ath_announce(struct ath_softc *sc) |
5415 | { |
5416 | #define HAL_MODE_DUALBAND (HAL_MODE_11A|HAL_MODE_11B) |
5417 | struct ifnet *ifp = &sc->sc_if; |
5418 | struct ath_hal *ah = sc->sc_ah; |
5419 | u_int modes, cc; |
5420 | |
5421 | if_printf(ifp, "mac %d.%d phy %d.%d" , |
5422 | ah->ah_macVersion, ah->ah_macRev, |
5423 | ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf); |
5424 | /* |
5425 | * Print radio revision(s). We check the wireless modes |
5426 | * to avoid falsely printing revs for inoperable parts. |
5427 | * Dual-band radio revs are returned in the 5 GHz rev number. |
5428 | */ |
5429 | ath_hal_getcountrycode(ah, &cc); |
5430 | modes = ath_hal_getwirelessmodes(ah, cc); |
5431 | if ((modes & HAL_MODE_DUALBAND) == HAL_MODE_DUALBAND) { |
5432 | if (ah->ah_analog5GhzRev && ah->ah_analog2GhzRev) |
5433 | printf(" 5 GHz radio %d.%d 2 GHz radio %d.%d" , |
5434 | ah->ah_analog5GhzRev >> 4, |
5435 | ah->ah_analog5GhzRev & 0xf, |
5436 | ah->ah_analog2GhzRev >> 4, |
5437 | ah->ah_analog2GhzRev & 0xf); |
5438 | else |
5439 | printf(" radio %d.%d" , ah->ah_analog5GhzRev >> 4, |
5440 | ah->ah_analog5GhzRev & 0xf); |
5441 | } else |
5442 | printf(" radio %d.%d" , ah->ah_analog5GhzRev >> 4, |
5443 | ah->ah_analog5GhzRev & 0xf); |
5444 | printf("\n" ); |
5445 | if (bootverbose) { |
5446 | int i; |
5447 | for (i = 0; i <= WME_AC_VO; i++) { |
5448 | struct ath_txq *txq = sc->sc_ac2q[i]; |
5449 | if_printf(ifp, "Use hw queue %u for %s traffic\n" , |
5450 | txq->axq_qnum, ieee80211_wme_acnames[i]); |
5451 | } |
5452 | if_printf(ifp, "Use hw queue %u for CAB traffic\n" , |
5453 | sc->sc_cabq->axq_qnum); |
5454 | if_printf(ifp, "Use hw queue %u for beacons\n" , sc->sc_bhalq); |
5455 | } |
5456 | if (ath_rxbuf != ATH_RXBUF) |
5457 | if_printf(ifp, "using %u rx buffers\n" , ath_rxbuf); |
5458 | if (ath_txbuf != ATH_TXBUF) |
5459 | if_printf(ifp, "using %u tx buffers\n" , ath_txbuf); |
5460 | #undef HAL_MODE_DUALBAND |
5461 | } |
5462 | |