/*	$NetBSD: if_ipw.c,v 1.8 2004/09/14 00:38:37 lukem Exp $	*/

/*-
 * Copyright (c) 2004
 *	Damien Bergamini <damien.bergamini@free.fr>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_ipw.c,v 1.8 2004/09/14 00:38:37 lukem Exp $");

/*-
 * Intel(R) PRO/Wireless 2100 MiniPCI driver
 * http://www.intel.com/products/mobiletechnology/prowireless.htm
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/conf.h>

#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>

#include <dev/pci/if_ipwreg.h>
#include <dev/pci/if_ipwvar.h>

static int	ipw_match(struct device *, struct cfdata *, void *);
static void	ipw_attach(struct device *, struct device *, void *);
static int	ipw_detach(struct device *, int);
static int	ipw_media_change(struct ifnet *);
static int	ipw_newstate(struct ieee80211com *, enum ieee80211_state, int);
static void	ipw_command_intr(struct ipw_softc *, struct ipw_soft_buf *);
static void	ipw_newstate_intr(struct ipw_softc *, struct ipw_soft_buf *);
static void	ipw_data_intr(struct ipw_softc *, struct ipw_status *,
    struct ipw_soft_bd *, struct ipw_soft_buf *);
static void	ipw_notification_intr(struct ipw_softc *,
    struct ipw_soft_buf *);
static void	ipw_rx_intr(struct ipw_softc *);
static void	ipw_release_sbd(struct ipw_softc *, struct ipw_soft_bd *);
static void	ipw_tx_intr(struct ipw_softc *);
static int	ipw_intr(void *);
static int	ipw_cmd(struct ipw_softc *, u_int32_t, void *, u_int32_t);
static int	ipw_tx_start(struct ifnet *, struct mbuf *,
    struct ieee80211_node *);
static void	ipw_start(struct ifnet *);
static void	ipw_watchdog(struct ifnet *);
static int	ipw_get_table1(struct ipw_softc *, u_int32_t *);
static int	ipw_get_radio(struct ipw_softc *, int *);
static int	ipw_ioctl(struct ifnet *, u_long, caddr_t);
static u_int32_t ipw_read_table1(struct ipw_softc *, u_int32_t);
static void	ipw_write_table1(struct ipw_softc *, u_int32_t, u_int32_t);
static int	ipw_read_table2(struct ipw_softc *, u_int32_t, void *,
    u_int32_t *);
static int	ipw_tx_init(struct ipw_softc *);
static void	ipw_tx_stop(struct ipw_softc *);
static int	ipw_rx_init(struct ipw_softc *);
static void	ipw_rx_stop(struct ipw_softc *);
static void	ipw_reset(struct ipw_softc *);
static int	ipw_clock_sync(struct ipw_softc *);
static int	ipw_load_ucode(struct ipw_softc *, u_char *, int);
static int	ipw_load_firmware(struct ipw_softc *, u_char *, int);
static int	ipw_firmware_init(struct ipw_softc *, u_char *);
static int	ipw_config(struct ipw_softc *);
static int	ipw_init(struct ifnet *);
static void	ipw_stop(struct ifnet *, int);
static void	ipw_read_mem_1(struct ipw_softc *, bus_size_t, u_int8_t *,
    bus_size_t);
static void	ipw_write_mem_1(struct ipw_softc *, bus_size_t, u_int8_t *,
    bus_size_t);
static void	ipw_zero_mem_4(struct ipw_softc *, bus_size_t, bus_size_t);

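/*
 * Register space beyond the directly mapped CSR window is reached through
 * an indirect address/data pair: write the target address to
 * IPW_CSR_INDIRECT_ADDR, then read the value back from
 * IPW_CSR_INDIRECT_DATA.  The helpers below wrap that two-step access for
 * 8-, 16- and 32-bit reads.
 */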
static inline u_int8_t MEM_READ_1(struct ipw_softc *sc, u_int32_t addr)
{
	CSR_WRITE_4(sc, IPW_CSR_INDIRECT_ADDR, addr);
	return CSR_READ_1(sc, IPW_CSR_INDIRECT_DATA);
}

static inline u_int16_t MEM_READ_2(struct ipw_softc *sc, u_int32_t addr)
{
	CSR_WRITE_4(sc, IPW_CSR_INDIRECT_ADDR, addr);
	return CSR_READ_2(sc, IPW_CSR_INDIRECT_DATA);
}

static inline u_int32_t MEM_READ_4(struct ipw_softc *sc, u_int32_t addr)
{
	CSR_WRITE_4(sc, IPW_CSR_INDIRECT_ADDR, addr);
	return CSR_READ_4(sc, IPW_CSR_INDIRECT_DATA);
}

#ifdef IPW_DEBUG
#define DPRINTF(x)	if (ipw_debug > 0) printf x
#define DPRINTFN(n, x)	if (ipw_debug >= (n)) printf x
int ipw_debug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n, x)
#endif

CFATTACH_DECL(ipw, sizeof (struct ipw_softc), ipw_match, ipw_attach,
    ipw_detach, NULL);

static int
ipw_match(struct device *parent, struct cfdata *match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR (pa->pa_id) == PCI_VENDOR_INTEL &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_PRO_WL_2100)
		return 1;

	return 0;
}

/* Base Address Register */
#define IPW_PCI_BAR0	0x10

static void
ipw_attach(struct device *parent, struct device *self, void *aux)
{
	struct ipw_softc *sc = (struct ipw_softc *)self;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct ieee80211_rateset *rs;
	struct pci_attach_args *pa = aux;
	const char *intrstr;
	char devinfo[256];
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_addr_t base;
	pci_intr_handle_t ih;
	u_int32_t data;
	int i, revision, error;

	sc->sc_pct = pa->pa_pc;

	pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo, sizeof devinfo);
	revision = PCI_REVISION(pa->pa_class);
	aprint_normal(": %s (rev. 0x%02x)\n", devinfo, revision);

	/* enable bus-mastering */
	data = pci_conf_read(sc->sc_pct, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	data |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(sc->sc_pct, pa->pa_tag, PCI_COMMAND_STATUS_REG, data);

	/* map the register window */
	error = pci_mapreg_map(pa, IPW_PCI_BAR0, PCI_MAPREG_TYPE_MEM |
	    PCI_MAPREG_MEM_TYPE_32BIT, 0, &memt, &memh, &base, &sc->sc_sz);
	if (error != 0) {
		aprint_error("%s: could not map memory space\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	sc->sc_st = memt;
	sc->sc_sh = memh;
	sc->sc_dmat = pa->pa_dmat;

	/* disable interrupts */
	CSR_WRITE_4(sc, IPW_CSR_INTR_MASK, 0);

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error("%s: could not map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	intrstr = pci_intr_string(sc->sc_pct, ih);
	sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, ipw_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: could not establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	ic->ic_phytype = IEEE80211_T_DS;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_state = IEEE80211_S_INIT;

	/* set device capabilities */
	ic->ic_caps = IEEE80211_C_IBSS | IEEE80211_C_MONITOR |
	    IEEE80211_C_PMGT | IEEE80211_C_TXPMGT | IEEE80211_C_WEP;

	/* set supported 802.11b rates */
	rs = &ic->ic_sup_rates[IEEE80211_MODE_11B];
	rs->rs_nrates = 4;
	rs->rs_rates[0] = 2;	/* 1Mbps */
	rs->rs_rates[1] = 4;	/* 2Mbps */
	rs->rs_rates[2] = 11;	/* 5.5Mbps */
	rs->rs_rates[3] = 22;	/* 11Mbps */

	/* set supported 802.11b channels (1 through 14) */
	for (i = 1; i <= 14; i++) {
		ic->ic_channels[i].ic_freq =
		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_B);
		ic->ic_channels[i].ic_flags = IEEE80211_CHAN_B;
	}

	ic->ic_ibss_chan = &ic->ic_channels[0];

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = ipw_init;
	ifp->if_stop = ipw_stop;
	ifp->if_ioctl = ipw_ioctl;
	ifp->if_start = ipw_start;
	ifp->if_watchdog = ipw_watchdog;
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	if_attach(ifp);
	ieee80211_ifattach(ifp);
	/* override state transition machine */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = ipw_newstate;

	ieee80211_media_init(ifp, ipw_media_change, ieee80211_media_status);

#if NBPFILTER > 0
	bpfattach2(ifp, DLT_IEEE802_11_RADIO,
	    sizeof (struct ieee80211_frame) + 64, &sc->sc_drvbpf);

	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(IPW_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof sc->sc_txtapu;
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(IPW_TX_RADIOTAP_PRESENT);
#endif
}

static int
ipw_detach(struct device* self, int flags)
{
	struct ipw_softc *sc = (struct ipw_softc *)self;
	struct ifnet *ifp = &sc->sc_ic.ic_if;

	ipw_reset(sc);

#if NBPFILTER > 0
	bpfdetach(ifp);
#endif
	ieee80211_ifdetach(ifp);
	if_detach(ifp);

	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_ih);
		sc->sc_ih = NULL;
	}

	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);

	return 0;
}

static int
ipw_media_change(struct ifnet *ifp)
{
	int error;

	error = ieee80211_media_change(ifp);
	if (error != ENETRESET)
		return error;

	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING))
		ipw_init(ifp);

	return 0;
}

static int
ipw_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
{
	struct ifnet *ifp = &ic->ic_if;
	struct ipw_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni = ic->ic_bss;
	u_int32_t val, len;

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_RUN:
		len = IEEE80211_NWID_LEN;
		ipw_read_table2(sc, IPW_INFO_CURRENT_SSID, ni->ni_essid, &len);
		ni->ni_esslen = len;

		val = ipw_read_table1(sc, IPW_INFO_CURRENT_CHANNEL);
		ni->ni_chan = &ic->ic_channels[val];

		DELAY(100); /* firmware needs a short delay here */

		len = IEEE80211_ADDR_LEN;
		ipw_read_table2(sc, IPW_INFO_CURRENT_BSSID, ni->ni_bssid, &len);
		break;

	case IEEE80211_S_SCAN:
	case IEEE80211_S_AUTH:
	case IEEE80211_S_ASSOC:
		break;
	}

	ic->ic_state = nstate;
	return 0;
}

static void
ipw_command_intr(struct ipw_softc *sc, struct ipw_soft_buf *sbuf)
{
	struct ipw_cmd *cmd;

	bus_dmamap_sync(sc->sc_dmat, sbuf->map, 0, sizeof (struct ipw_cmd),
	    BUS_DMASYNC_POSTREAD);

	cmd = mtod(sbuf->m, struct ipw_cmd *);

	DPRINTFN(2, ("RX!CMD!%u!%u!%u!%u!%u\n",
	    le32toh(cmd->type), le32toh(cmd->subtype), le32toh(cmd->seq),
	    le32toh(cmd->len), le32toh(cmd->status)));

	/*
	 * Wake up processes waiting for command ack. In the case of the
	 * IPW_CMD_DISABLE command, wake up the process only when the adapter
	 * enters the IPW_STATE_DISABLED state. This is notified in
	 * ipw_newstate_intr().
	 */
	if (le32toh(cmd->type) != IPW_CMD_DISABLE)
		wakeup(sc->cmd);
}

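/*
 * The firmware reports link state changes asynchronously.  Map those
 * notifications onto the net80211 state machine: ASSOCIATED -> RUN,
 * SCANNING -> SCAN, ASSOCIATION_LOST -> INIT.  A DISABLED notification
 * also wakes up a process sleeping on an IPW_CMD_DISABLE command (see
 * ipw_command_intr() above).
 */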
static void
ipw_newstate_intr(struct ipw_softc *sc, struct ipw_soft_buf *sbuf)
{
	struct ieee80211com *ic = &sc->sc_ic;
	u_int32_t state;

	bus_dmamap_sync(sc->sc_dmat, sbuf->map, 0, sizeof state,
	    BUS_DMASYNC_POSTREAD);

	state = le32toh(*mtod(sbuf->m, u_int32_t *));

	DPRINTFN(2, ("RX!NEWSTATE!%u\n", state));

	switch (state) {
	case IPW_STATE_ASSOCIATED:
		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
		break;

	case IPW_STATE_SCANNING:
		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
		break;

	case IPW_STATE_ASSOCIATION_LOST:
		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
		break;

	case IPW_STATE_DISABLED:
		wakeup(sc->cmd);
		break;

	case IPW_STATE_RADIO_DISABLED:
		/* XXX should turn the interface down */
		break;
	}
}

static void
ipw_data_intr(struct ipw_softc *sc, struct ipw_status *status,
    struct ipw_soft_bd *sbd, struct ipw_soft_buf *sbuf)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct mbuf *m;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	int error;

	DPRINTFN(5, ("RX!DATA!%u!%u\n", le32toh(status->len), status->rssi));

	bus_dmamap_sync(sc->sc_dmat, sbuf->map, 0, le32toh(status->len),
	    BUS_DMASYNC_POSTREAD);

	bus_dmamap_unload(sc->sc_dmat, sbuf->map);

	/* Finalize mbuf */
	m = sbuf->m;
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len = le32toh(status->len);

#if NBPFILTER > 0
	if (sc->sc_drvbpf != NULL) {
		struct ipw_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		tap->wr_antsignal = status->rssi;
		tap->wr_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
		tap->wr_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
	}
#endif

	wh = mtod(m, struct ieee80211_frame *);

	ni = ieee80211_find_rxnode(ic, wh);

	/* Send it up to the upper layer */
	ieee80211_input(ifp, m, ni, status->rssi, 0/*rstamp*/);

	ieee80211_release_node(ic, ni);

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		aprint_error("%s: could not allocate rx mbuf\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	MCLGET(m, M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		aprint_error("%s: could not allocate rx mbuf cluster\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	error = bus_dmamap_load(sc->sc_dmat, sbuf->map, mtod(m, void *),
	    MCLBYTES, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error("%s: could not map rxbuf dma memory\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return;
	}

	sbuf->m = m;
	sbd->bd->physaddr = htole32(sbuf->map->dm_segs[0].ds_addr);
}

static void
ipw_notification_intr(struct ipw_softc *sc, struct ipw_soft_buf *sbuf)
{
	DPRINTFN(2, ("RX!NOTIFICATION\n"));
}

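/*
 * Process the rx ring.  The firmware advances IPW_CSR_RX_READ_INDEX as it
 * fills buffer descriptors; walk from the descriptor following the last
 * one we handled (rxcur) up to that index, dispatch each status code,
 * then hand the processed slots back by updating IPW_CSR_RX_WRITE_INDEX.
 */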
static void
ipw_rx_intr(struct ipw_softc *sc)
{
	struct ipw_status *status;
	struct ipw_soft_bd *sbd;
	struct ipw_soft_buf *sbuf;
	u_int32_t r, i;

	r = CSR_READ_4(sc, IPW_CSR_RX_READ_INDEX);

	for (i = (sc->rxcur + 1) % IPW_NRBD; i != r; i = (i + 1) % IPW_NRBD) {

		bus_dmamap_sync(sc->sc_dmat, sc->rbd_map,
		    i * sizeof (struct ipw_bd), sizeof (struct ipw_bd),
		    BUS_DMASYNC_POSTREAD);

		bus_dmamap_sync(sc->sc_dmat, sc->status_map,
		    i * sizeof (struct ipw_status), sizeof (struct ipw_status),
		    BUS_DMASYNC_POSTREAD);

		status = &sc->status_list[i];
		sbd = &sc->srbd_list[i];
		sbuf = sbd->priv;

		switch (le16toh(status->code) & 0xf) {
		case IPW_STATUS_CODE_COMMAND:
			ipw_command_intr(sc, sbuf);
			break;

		case IPW_STATUS_CODE_NEWSTATE:
			ipw_newstate_intr(sc, sbuf);
			break;

		case IPW_STATUS_CODE_DATA_802_3:
		case IPW_STATUS_CODE_DATA_802_11:
			ipw_data_intr(sc, status, sbd, sbuf);
			break;

		case IPW_STATUS_CODE_NOTIFICATION:
			ipw_notification_intr(sc, sbuf);
			break;

		default:
			aprint_debug("%s: unknown status code %u\n",
			    sc->sc_dev.dv_xname, le16toh(status->code));
		}
		sbd->bd->flags = 0;

		bus_dmamap_sync(sc->sc_dmat, sc->rbd_map,
		    i * sizeof (struct ipw_bd), sizeof (struct ipw_bd),
		    BUS_DMASYNC_PREWRITE);
	}

	/* Tell the firmware what we have processed */
	sc->rxcur = (r == 0) ? IPW_NRBD - 1 : r - 1;
	CSR_WRITE_4(sc, IPW_CSR_RX_WRITE_INDEX, sc->rxcur);
}

static void
ipw_release_sbd(struct ipw_softc *sc, struct ipw_soft_bd *sbd)
{
	struct ieee80211com *ic;
	struct ipw_soft_hdr *shdr;
	struct ipw_soft_buf *sbuf;

	switch (sbd->type) {
	case IPW_SBD_TYPE_COMMAND:
		bus_dmamap_unload(sc->sc_dmat, sc->cmd_map);
		break;

	case IPW_SBD_TYPE_HEADER:
		shdr = sbd->priv;
		bus_dmamap_unload(sc->sc_dmat, shdr->map);
		TAILQ_INSERT_TAIL(&sc->sc_free_shdr, shdr, next);
		break;

	case IPW_SBD_TYPE_DATA:
		ic = &sc->sc_ic;
		sbuf = sbd->priv;
		bus_dmamap_unload(sc->sc_dmat, sbuf->map);
		m_freem(sbuf->m);
		if (sbuf->ni != NULL)
			ieee80211_release_node(ic, sbuf->ni);
		/* kill watchdog timer */
		sc->sc_tx_timer = 0;
		TAILQ_INSERT_TAIL(&sc->sc_free_sbuf, sbuf, next);
		break;
	}
	sbd->type = IPW_SBD_TYPE_NOASSOC;
}

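/*
 * The firmware advances IPW_CSR_TX_READ_INDEX as it consumes tx buffer
 * descriptors; release the resources attached to every descriptor between
 * the last one we reclaimed (txold) and that index, then let the send
 * queue run again.
 */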
static void
ipw_tx_intr(struct ipw_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	u_int32_t r, i;

	r = CSR_READ_4(sc, IPW_CSR_TX_READ_INDEX);

	for (i = (sc->txold + 1) % IPW_NTBD; i != r; i = (i + 1) % IPW_NTBD)
		ipw_release_sbd(sc, &sc->stbd_list[i]);

	/* Remember what the firmware has processed */
	sc->txold = (r == 0) ? IPW_NTBD - 1 : r - 1;

	/* Call start() since some buffer descriptors have been released */
	ifp->if_flags &= ~IFF_OACTIVE;
	(*ifp->if_start)(ifp);
}

static int
ipw_intr(void *arg)
{
	struct ipw_softc *sc = arg;
	u_int32_t r;

	if ((r = CSR_READ_4(sc, IPW_CSR_INTR)) == 0)
		return 0;

	/* Disable interrupts */
	CSR_WRITE_4(sc, IPW_CSR_INTR_MASK, 0);

	DPRINTFN(8, ("INTR!0x%08x\n", r));

	if (r & IPW_INTR_RX_TRANSFER)
		ipw_rx_intr(sc);

	if (r & IPW_INTR_TX_TRANSFER)
		ipw_tx_intr(sc);

	if (r & IPW_INTR_FW_INIT_DONE) {
		if (!(r & (IPW_INTR_FATAL_ERROR | IPW_INTR_PARITY_ERROR)))
			wakeup(sc);
	}

	/* Acknowledge interrupts */
	CSR_WRITE_4(sc, IPW_CSR_INTR, r);

	/* Re-enable interrupts */
	CSR_WRITE_4(sc, IPW_CSR_INTR_MASK, IPW_INTR_MASK);

	return 1;
}

static int
ipw_cmd(struct ipw_softc *sc, u_int32_t type, void *data, u_int32_t len)
{
	struct ipw_soft_bd *sbd;
	int error;

	sbd = &sc->stbd_list[sc->txcur];

	error = bus_dmamap_load(sc->sc_dmat, sc->cmd_map, sc->cmd,
	    sizeof (struct ipw_cmd), NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error("%s: could not map cmd dma memory\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	sc->cmd->type = htole32(type);
	sc->cmd->subtype = htole32(0);
	sc->cmd->len = htole32(len);
	sc->cmd->seq = htole32(0);
	if (data != NULL)
		bcopy(data, sc->cmd->data, len);

	sbd->type = IPW_SBD_TYPE_COMMAND;
	sbd->bd->physaddr = htole32(sc->cmd_map->dm_segs[0].ds_addr);
	sbd->bd->len = htole32(sizeof (struct ipw_cmd));
	sbd->bd->nfrag = 1;
	sbd->bd->flags = IPW_BD_FLAG_TX_FRAME_COMMAND |
	    IPW_BD_FLAG_TX_LAST_FRAGMENT;

	bus_dmamap_sync(sc->sc_dmat, sc->cmd_map, 0, sizeof (struct ipw_cmd),
	    BUS_DMASYNC_PREWRITE);

	bus_dmamap_sync(sc->sc_dmat, sc->tbd_map,
	    sc->txcur * sizeof (struct ipw_bd), sizeof (struct ipw_bd),
	    BUS_DMASYNC_PREWRITE);

	sc->txcur = (sc->txcur + 1) % IPW_NTBD;
	CSR_WRITE_4(sc, IPW_CSR_TX_WRITE_INDEX, sc->txcur);

	DPRINTFN(2, ("TX!CMD!%u!%u!%u!%u\n", type, 0, 0, len));

	/* Wait at most two seconds for command to complete */
	return tsleep(sc->cmd, 0, "ipwcmd", 2 * hz);
}

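/*
 * Transmit one 802.11 frame.  The frame is split into an ipw_hdr carried
 * by its own buffer descriptor followed by one descriptor per DMA segment
 * of the payload mbuf; only the first descriptor carries the fragment
 * count and the last one is flagged IPW_BD_FLAG_TX_LAST_FRAGMENT.
 */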
static int
ipw_tx_start(struct ifnet *ifp, struct mbuf *m, struct ieee80211_node *ni)
{
	struct ipw_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	struct ipw_soft_bd *sbd;
	struct ipw_soft_hdr *shdr;
	struct ipw_soft_buf *sbuf;
	int error, i;

	if (ic->ic_flags & IEEE80211_F_PRIVACY) {
		m = ieee80211_wep_crypt(ifp, m, 1);
		if (m == NULL)
			return ENOBUFS;
	}

#if NBPFILTER > 0
	if (sc->sc_drvbpf != NULL) {
		struct ipw_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
	}
#endif

	wh = mtod(m, struct ieee80211_frame *);

	shdr = TAILQ_FIRST(&sc->sc_free_shdr);
	sbuf = TAILQ_FIRST(&sc->sc_free_sbuf);

	shdr->hdr.type = htole32(IPW_HDR_TYPE_SEND);
	shdr->hdr.subtype = htole32(0);
	shdr->hdr.encrypted = (wh->i_fc[1] & IEEE80211_FC1_WEP) ? 1 : 0;
	shdr->hdr.encrypt = 0;
	shdr->hdr.keyidx = 0;
	shdr->hdr.keysz = 0;
	shdr->hdr.fragmentsz = htole16(0);
	IEEE80211_ADDR_COPY(shdr->hdr.src_addr, wh->i_addr2);
	if (ic->ic_opmode == IEEE80211_M_STA)
		IEEE80211_ADDR_COPY(shdr->hdr.dst_addr, wh->i_addr3);
	else
		IEEE80211_ADDR_COPY(shdr->hdr.dst_addr, wh->i_addr1);

	/* trim IEEE802.11 header */
	m_adj(m, sizeof (struct ieee80211_frame));

	/*
	 * We need to map the mbuf first to know how many buffer descriptors
	 * are needed for this transfer.
	 */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, sbuf->map, m, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		m_freem(m);
		return error;
	}

	error = bus_dmamap_load(sc->sc_dmat, shdr->map, &shdr->hdr,
	    sizeof (struct ipw_hdr), NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error("%s: could not map hdr (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		bus_dmamap_unload(sc->sc_dmat, sbuf->map);
		m_freem(m);
		return error;
	}

	TAILQ_REMOVE(&sc->sc_free_sbuf, sbuf, next);
	TAILQ_REMOVE(&sc->sc_free_shdr, shdr, next);

	sbd = &sc->stbd_list[sc->txcur];
	sbd->type = IPW_SBD_TYPE_HEADER;
	sbd->priv = shdr;
	sbd->bd->physaddr = htole32(shdr->map->dm_segs[0].ds_addr);
	sbd->bd->len = htole32(sizeof (struct ipw_hdr));
	sbd->bd->nfrag = 1 + sbuf->map->dm_nsegs;
	sbd->bd->flags = IPW_BD_FLAG_TX_FRAME_802_3 |
	    IPW_BD_FLAG_TX_NOT_LAST_FRAGMENT;

	DPRINTFN(5, ("TX!HDR!%u!%u!%u!%u\n", shdr->hdr.type, shdr->hdr.subtype,
	    shdr->hdr.encrypted, shdr->hdr.encrypt));
	DPRINTFN(5, ("!%s", ether_sprintf(shdr->hdr.src_addr)));
	DPRINTFN(5, ("!%s\n", ether_sprintf(shdr->hdr.dst_addr)));

	bus_dmamap_sync(sc->sc_dmat, sc->tbd_map,
	    sc->txcur * sizeof (struct ipw_bd),
	    sizeof (struct ipw_bd), BUS_DMASYNC_PREWRITE);

	sc->txcur = (sc->txcur + 1) % IPW_NTBD;

	sbuf->m = m;
	sbuf->ni = ni;

	for (i = 0; i < sbuf->map->dm_nsegs; i++) {
		sbd = &sc->stbd_list[sc->txcur];
		sbd->bd->physaddr = htole32(sbuf->map->dm_segs[i].ds_addr);
		sbd->bd->len = htole32(sbuf->map->dm_segs[i].ds_len);
		sbd->bd->nfrag = 0; /* used only in first bd */
		sbd->bd->flags = IPW_BD_FLAG_TX_FRAME_802_3;
		if (i == sbuf->map->dm_nsegs - 1) {
			sbd->type = IPW_SBD_TYPE_DATA;
			sbd->priv = sbuf;
			sbd->bd->flags |= IPW_BD_FLAG_TX_LAST_FRAGMENT;
		} else {
			sbd->type = IPW_SBD_TYPE_NOASSOC;
			sbd->bd->flags |= IPW_BD_FLAG_TX_NOT_LAST_FRAGMENT;
		}

		DPRINTFN(5, ("TX!FRAG!%d!%ld\n", i,
		    sbuf->map->dm_segs[i].ds_len));

		bus_dmamap_sync(sc->sc_dmat, sc->tbd_map,
		    sc->txcur * sizeof (struct ipw_bd),
		    sizeof (struct ipw_bd), BUS_DMASYNC_PREWRITE);

		sc->txcur = (sc->txcur + 1) % IPW_NTBD;
	}

	bus_dmamap_sync(sc->sc_dmat, shdr->map, 0, sizeof (struct ipw_hdr),
	    BUS_DMASYNC_PREWRITE);

	bus_dmamap_sync(sc->sc_dmat, sbuf->map, 0, MCLBYTES,
	    BUS_DMASYNC_PREWRITE);

	/* Inform firmware about this new packet */
	CSR_WRITE_4(sc, IPW_CSR_TX_WRITE_INDEX, sc->txcur);

	return 0;
}

static void
ipw_start(struct ifnet *ifp)
{
	struct ipw_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct mbuf *m;
	struct ieee80211_node *ni;

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m);
#endif

		m = ieee80211_encap(ifp, m, &ni);
		if (m == NULL)
			continue;

#if NBPFILTER > 0
		if (ic->ic_rawbpf != NULL)
			bpf_mtap(ic->ic_rawbpf, m);
#endif

		if (ipw_tx_start(ifp, m, ni) != 0) {
			if (ni != NULL)
				ieee80211_release_node(ic, ni);
			break;
		}

		/* start watchdog timer */
		sc->sc_tx_timer = 5;
		ifp->if_timer = 1;
	}
}

static void
ipw_watchdog(struct ifnet *ifp)
{
	struct ipw_softc *sc = ifp->if_softc;

	ifp->if_timer = 0;

	if (sc->sc_tx_timer > 0) {
		if (--sc->sc_tx_timer == 0) {
			aprint_error("%s: device timeout\n",
			    sc->sc_dev.dv_xname);
#ifdef notyet
			ipw_init(ifp);
#endif
			return;
		}
		ifp->if_timer = 1;
	}

	ieee80211_watchdog(ifp);
}

static int
ipw_get_table1(struct ipw_softc *sc, u_int32_t *tbl)
{
	u_int32_t addr, size, i;

	if (!(sc->flags & IPW_FLAG_FW_INITED))
		return ENOTTY;

	CSR_WRITE_4(sc, IPW_CSR_AUTOINC_ADDR, sc->table1_base);

	size = CSR_READ_4(sc, IPW_CSR_AUTOINC_DATA);
	if (suword(tbl, size) != 0)
		return EFAULT;

	for (i = 1, ++tbl; i < size; i++, tbl++) {
		addr = CSR_READ_4(sc, IPW_CSR_AUTOINC_DATA);
		if (suword(tbl, MEM_READ_4(sc, addr)) != 0)
			return EFAULT;
	}
	return 0;
}

static int
ipw_get_radio(struct ipw_softc *sc, int *ret)
{
	u_int32_t addr;

	if (!(sc->flags & IPW_FLAG_FW_INITED))
		return ENOTTY;

	addr = ipw_read_table1(sc, IPW_INFO_EEPROM_ADDRESS);
	if ((MEM_READ_4(sc, addr + 32) >> 24) & 1) {
		suword(ret, -1);
		return 0;
	}

	if (CSR_READ_4(sc, IPW_CSR_IO) & IPW_IO_RADIO_DISABLED)
		suword(ret, 0);
	else
		suword(ret, 1);

	return 0;
}

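/*
 * Beside the standard interface ioctls, the driver exposes a few private
 * ones (SIOCGTABLE1, SIOCGRADIO, SIOCSLOADFW, SIOCSKILLFW), presumably
 * driven from a userland control/firmware-load utility.  Their arguments
 * are user pointers passed in ifr_data, which is why the helpers above
 * use suword() and ipw_firmware_init() uses copyin().
 */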
static int
ipw_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ipw_softc *sc = ifp->if_softc;
	struct ifreq *ifr;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING))
				ipw_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				ipw_stop(ifp, 1);
		}
		break;

	case SIOCGTABLE1:
		ifr = (struct ifreq *)data;
		error = ipw_get_table1(sc, (u_int32_t *)ifr->ifr_data);
		break;

	case SIOCGRADIO:
		ifr = (struct ifreq *)data;
		error = ipw_get_radio(sc, (int *)ifr->ifr_data);
		break;

	case SIOCSLOADFW:
		/* only super-user can do that! */
		if ((error = suser(curproc->p_ucred, &curproc->p_acflag)) != 0)
			break;

		ifr = (struct ifreq *)data;
		error = ipw_firmware_init(sc, (u_char *)ifr->ifr_data);
		break;

	case SIOCSKILLFW:
		/* only super-user can do that! */
		if ((error = suser(curproc->p_ucred, &curproc->p_acflag)) != 0)
			break;

		ipw_reset(sc);
		break;

	default:
		error = ieee80211_ioctl(ifp, cmd, data);
		if (error != ENETRESET)
			break;

		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			ipw_init(ifp);
		error = 0;
	}

	splx(s);
	return error;
}

static u_int32_t
ipw_read_table1(struct ipw_softc *sc, u_int32_t off)
{
	return MEM_READ_4(sc, MEM_READ_4(sc, sc->table1_base + off));
}

static void
ipw_write_table1(struct ipw_softc *sc, u_int32_t off, u_int32_t info)
{
	MEM_WRITE_4(sc, MEM_READ_4(sc, sc->table1_base + off), info);
}

static int
ipw_read_table2(struct ipw_softc *sc, u_int32_t off, void *buf, u_int32_t *len)
{
	u_int32_t addr, info;
	u_int16_t count, size;
	u_int32_t total;

	/* addr[4] + count[2] + size[2] */
	addr = MEM_READ_4(sc, sc->table2_base + off);
	info = MEM_READ_4(sc, sc->table2_base + off + 4);

	count = info >> 16;
	size = info & 0xffff;
	total = count * size;

	if (total > *len) {
		*len = total;
		return EINVAL;
	}

	*len = total;
	ipw_read_mem_1(sc, addr, buf, total);

	return 0;
}

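/*
 * Allocate everything needed on the transmit side: the DMA-able ring of
 * IPW_NTBD buffer descriptors, a shadow array of software descriptors,
 * one DMA-able command buffer, and pools of IPW_NDATA DMA-able headers
 * and payload maps.
 */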
static int
ipw_tx_init(struct ipw_softc *sc)
{
	char *errmsg;
	struct ipw_bd *bd;
	struct ipw_soft_bd *sbd;
	struct ipw_soft_hdr *shdr;
	struct ipw_soft_buf *sbuf;
	int error, i, nsegs;

	/* Allocate transmission buffer descriptors */
	error = bus_dmamap_create(sc->sc_dmat, IPW_TBD_SZ, 1, IPW_TBD_SZ, 0,
	    BUS_DMA_NOWAIT, &sc->tbd_map);
	if (error != 0) {
		errmsg = "could not create tbd dma map";
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, IPW_TBD_SZ, PAGE_SIZE, 0,
	    &sc->tbd_seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		errmsg = "could not allocate tbd dma memory";
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->tbd_seg, nsegs, IPW_TBD_SZ,
	    (caddr_t *)&sc->tbd_list, BUS_DMA_NOWAIT);
	if (error != 0) {
		errmsg = "could not map tbd dma memory";
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->tbd_map, sc->tbd_list,
	    IPW_TBD_SZ, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		errmsg = "could not load tbd dma memory";
		goto fail;
	}

	sc->stbd_list = malloc(IPW_NTBD * sizeof (struct ipw_soft_bd),
	    M_DEVBUF, M_NOWAIT);
	if (sc->stbd_list == NULL) {
		errmsg = "could not allocate soft tbd";
		error = ENOMEM;
		goto fail;
	}
	sbd = sc->stbd_list;
	bd = sc->tbd_list;
	for (i = 0; i < IPW_NTBD; i++, sbd++, bd++) {
		sbd->type = IPW_SBD_TYPE_NOASSOC;
		sbd->bd = bd;
	}

	CSR_WRITE_4(sc, IPW_CSR_TX_BD_BASE, sc->tbd_map->dm_segs[0].ds_addr);
	CSR_WRITE_4(sc, IPW_CSR_TX_BD_SIZE, IPW_NTBD);
	CSR_WRITE_4(sc, IPW_CSR_TX_READ_INDEX, 0);
	CSR_WRITE_4(sc, IPW_CSR_TX_WRITE_INDEX, 0);
	sc->txold = IPW_NTBD - 1; /* latest bd index ack'ed by firmware */
	sc->txcur = 0; /* bd index to write to */

	/* Allocate a DMA-able command */
	error = bus_dmamap_create(sc->sc_dmat, sizeof (struct ipw_cmd), 1,
	    sizeof (struct ipw_cmd), 0, BUS_DMA_NOWAIT, &sc->cmd_map);
	if (error != 0) {
		errmsg = "could not create cmd dma map";
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, sizeof (struct ipw_cmd),
	    PAGE_SIZE, 0, &sc->cmd_seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		errmsg = "could not allocate cmd dma memory";
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->cmd_seg, nsegs,
	    sizeof (struct ipw_cmd), (caddr_t *)&sc->cmd, BUS_DMA_NOWAIT);
	if (error != 0) {
		errmsg = "could not map cmd dma memory";
		goto fail;
	}

	/* Allocate a pool of DMA-able headers */
	sc->shdr_list = malloc(IPW_NDATA * sizeof (struct ipw_soft_hdr),
	    M_DEVBUF, M_NOWAIT);
	if (sc->shdr_list == NULL) {
		errmsg = "could not allocate soft hdr";
		error = ENOMEM;
		goto fail;
	}
	TAILQ_INIT(&sc->sc_free_shdr);
	for (i = 0, shdr = sc->shdr_list; i < IPW_NDATA; i++, shdr++) {
		error = bus_dmamap_create(sc->sc_dmat,
		    sizeof (struct ipw_soft_hdr), 1,
		    sizeof (struct ipw_soft_hdr), 0, BUS_DMA_NOWAIT,
		    &shdr->map);
		if (error != 0) {
			errmsg = "could not create hdr dma map";
			goto fail;
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_shdr, shdr, next);
	}

	/* Allocate a pool of DMA-able buffers */
	sc->tx_sbuf_list = malloc(IPW_NDATA * sizeof (struct ipw_soft_buf),
	    M_DEVBUF, M_NOWAIT);
	if (sc->tx_sbuf_list == NULL) {
		errmsg = "could not allocate soft txbuf";
		error = ENOMEM;
		goto fail;
	}
	TAILQ_INIT(&sc->sc_free_sbuf);
	for (i = 0, sbuf = sc->tx_sbuf_list; i < IPW_NDATA; i++, sbuf++) {
		error = bus_dmamap_create(sc->sc_dmat, IPW_NDATA * MCLBYTES,
		    IPW_NDATA, MCLBYTES, 0, BUS_DMA_NOWAIT, &sbuf->map);
		if (error != 0) {
			errmsg = "could not create txbuf dma map";
			goto fail;
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_sbuf, sbuf, next);
	}

	return 0;

fail:	aprint_error("%s: %s\n", sc->sc_dev.dv_xname, errmsg);
	ipw_tx_stop(sc);

	return error;
}

static void
ipw_tx_stop(struct ipw_softc *sc)
{
	struct ipw_soft_hdr *shdr;
	struct ipw_soft_buf *sbuf;
	int i;

	if (sc->tbd_map != NULL) {
		if (sc->tbd_list != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->tbd_map);
			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->tbd_list,
			    IPW_TBD_SZ);
			bus_dmamem_free(sc->sc_dmat, &sc->tbd_seg, 1);
			sc->tbd_list = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, sc->tbd_map);
		sc->tbd_map = NULL;
	}

	if (sc->stbd_list != NULL) {
		for (i = 0; i < IPW_NTBD; i++)
			ipw_release_sbd(sc, &sc->stbd_list[i]);
		free(sc->stbd_list, M_DEVBUF);
		sc->stbd_list = NULL;
	}

	if (sc->cmd_map != NULL) {
		if (sc->cmd != NULL) {
			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->cmd,
			    sizeof (struct ipw_cmd));
			bus_dmamem_free(sc->sc_dmat, &sc->cmd_seg, 1);
			sc->cmd = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, sc->cmd_map);
		sc->cmd_map = NULL;
	}

	if (sc->shdr_list != NULL) {
		TAILQ_FOREACH(shdr, &sc->sc_free_shdr, next)
			bus_dmamap_destroy(sc->sc_dmat, shdr->map);
		free(sc->shdr_list, M_DEVBUF);
		sc->shdr_list = NULL;
	}

	if (sc->tx_sbuf_list != NULL) {
		TAILQ_FOREACH(sbuf, &sc->sc_free_sbuf, next)
			bus_dmamap_destroy(sc->sc_dmat, sbuf->map);
		free(sc->tx_sbuf_list, M_DEVBUF);
		sc->tx_sbuf_list = NULL;
	}
}

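/*
 * Allocate the receive side: a ring of IPW_NRBD buffer descriptors, the
 * parallel array of ipw_status entries the firmware fills in, and one
 * cluster mbuf per descriptor, each loaded into its own DMA map.
 */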
static int
ipw_rx_init(struct ipw_softc *sc)
{
	char *errmsg;
	struct ipw_bd *bd;
	struct ipw_soft_bd *sbd;
	struct ipw_soft_buf *sbuf;
	int error, i, nsegs;

	/* Allocate reception buffer descriptors */
	error = bus_dmamap_create(sc->sc_dmat, IPW_RBD_SZ, 1, IPW_RBD_SZ, 0,
	    BUS_DMA_NOWAIT, &sc->rbd_map);
	if (error != 0) {
		errmsg = "could not create rbd dma map";
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, IPW_RBD_SZ, PAGE_SIZE, 0,
	    &sc->rbd_seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		errmsg = "could not allocate rbd dma memory";
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->rbd_seg, nsegs, IPW_RBD_SZ,
	    (caddr_t *)&sc->rbd_list, BUS_DMA_NOWAIT);
	if (error != 0) {
		errmsg = "could not map rbd dma memory";
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->rbd_map, sc->rbd_list,
	    IPW_RBD_SZ, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		errmsg = "could not load rbd dma memory";
		goto fail;
	}

	sc->srbd_list = malloc(IPW_NRBD * sizeof (struct ipw_soft_bd),
	    M_DEVBUF, M_NOWAIT);
	if (sc->srbd_list == NULL) {
		errmsg = "could not allocate soft rbd";
		error = ENOMEM;
		goto fail;
	}
	sbd = sc->srbd_list;
	bd = sc->rbd_list;
	for (i = 0; i < IPW_NRBD; i++, sbd++, bd++) {
		sbd->type = IPW_SBD_TYPE_NOASSOC;
		sbd->bd = bd;
	}

	CSR_WRITE_4(sc, IPW_CSR_RX_BD_BASE, sc->rbd_map->dm_segs[0].ds_addr);
	CSR_WRITE_4(sc, IPW_CSR_RX_BD_SIZE, IPW_NRBD);
	CSR_WRITE_4(sc, IPW_CSR_RX_READ_INDEX, 0);
	CSR_WRITE_4(sc, IPW_CSR_RX_WRITE_INDEX, IPW_NRBD - 1);
	sc->rxcur = IPW_NRBD - 1; /* latest bd index I've read */

	/* Allocate status descriptors */
	error = bus_dmamap_create(sc->sc_dmat, IPW_STATUS_SZ, 1, IPW_STATUS_SZ,
	    0, BUS_DMA_NOWAIT, &sc->status_map);
	if (error != 0) {
		errmsg = "could not create status dma map";
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, IPW_STATUS_SZ, PAGE_SIZE, 0,
	    &sc->status_seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		errmsg = "could not allocate status dma memory";
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->status_seg, nsegs,
	    IPW_STATUS_SZ, (caddr_t *)&sc->status_list, BUS_DMA_NOWAIT);
	if (error != 0) {
		errmsg = "could not map status dma memory";
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->status_map, sc->status_list,
	    IPW_STATUS_SZ, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		errmsg = "could not load status dma memory";
		goto fail;
	}

	CSR_WRITE_4(sc, IPW_CSR_RX_STATUS_BASE,
	    sc->status_map->dm_segs[0].ds_addr);

	sc->rx_sbuf_list = malloc(IPW_NRBD * sizeof (struct ipw_soft_buf),
	    M_DEVBUF, M_NOWAIT);
	if (sc->rx_sbuf_list == NULL) {
		errmsg = "could not allocate soft rxbuf";
		error = ENOMEM;
		goto fail;
	}

	sbuf = sc->rx_sbuf_list;
	sbd = sc->srbd_list;
	for (i = 0; i < IPW_NRBD; i++, sbuf++, sbd++) {

		MGETHDR(sbuf->m, M_DONTWAIT, MT_DATA);
		if (sbuf->m == NULL) {
			errmsg = "could not allocate rx mbuf";
			error = ENOMEM;
			goto fail;
		}
		MCLGET(sbuf->m, M_DONTWAIT);
		if (!(sbuf->m->m_flags & M_EXT)) {
			m_freem(sbuf->m);
			errmsg = "could not allocate rx mbuf cluster";
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_create(sc->sc_dmat, IPW_NRBD * MCLBYTES,
		    IPW_NRBD, MCLBYTES, 0, BUS_DMA_NOWAIT, &sbuf->map);
		if (error != 0) {
			m_freem(sbuf->m);
			errmsg = "could not create rxbuf dma map";
			goto fail;
		}
		error = bus_dmamap_load(sc->sc_dmat, sbuf->map,
		    mtod(sbuf->m, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			bus_dmamap_destroy(sc->sc_dmat, sbuf->map);
			m_freem(sbuf->m);
			errmsg = "could not map rxbuf dma memory";
			goto fail;
		}
		sbd->type = IPW_SBD_TYPE_DATA;
		sbd->priv = sbuf;
		sbd->bd->physaddr = htole32(sbuf->map->dm_segs[0].ds_addr);
		sbd->bd->len = htole32(MCLBYTES);
	}

	return 0;

fail:	aprint_error("%s: %s\n", sc->sc_dev.dv_xname, errmsg);
	ipw_rx_stop(sc);

	return error;
}

static void
ipw_rx_stop(struct ipw_softc *sc)
{
	struct ipw_soft_bd *sbd;
	struct ipw_soft_buf *sbuf;
	int i;

	if (sc->rbd_map != NULL) {
		if (sc->rbd_list != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->rbd_map);
			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->rbd_list,
			    IPW_RBD_SZ);
			bus_dmamem_free(sc->sc_dmat, &sc->rbd_seg, 1);
			sc->rbd_list = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, sc->rbd_map);
		sc->rbd_map = NULL;
	}

	if (sc->status_map != NULL) {
		if (sc->status_list != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->status_map);
			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->status_list,
			    IPW_STATUS_SZ);
			bus_dmamem_free(sc->sc_dmat, &sc->status_seg, 1);
			sc->status_list = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, sc->status_map);
		sc->status_map = NULL;
	}

	if (sc->srbd_list != NULL) {
		for (i = 0, sbd = sc->srbd_list; i < IPW_NRBD; i++, sbd++) {
			if (sbd->type == IPW_SBD_TYPE_NOASSOC)
				continue;

			sbuf = sbd->priv;
			bus_dmamap_unload(sc->sc_dmat, sbuf->map);
			bus_dmamap_destroy(sc->sc_dmat, sbuf->map);
			m_freem(sbuf->m);
		}
		free(sc->srbd_list, M_DEVBUF);
		sc->srbd_list = NULL;
	}

	if (sc->rx_sbuf_list != NULL) {
		free(sc->rx_sbuf_list, M_DEVBUF);
		sc->rx_sbuf_list = NULL;
	}
}

static void
ipw_reset(struct ipw_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int ntries;

	ipw_stop(ifp, 1);

	if (sc->flags & IPW_FLAG_FW_INITED) {
		ipw_cmd(sc, IPW_CMD_DISABLE_PHY, NULL, 0);
		ipw_cmd(sc, IPW_CMD_PREPARE_POWER_DOWN, NULL, 0);

		sc->flags &= ~IPW_FLAG_FW_INITED;
	}

	/* Disable interrupts */
	CSR_WRITE_4(sc, IPW_CSR_INTR_MASK, 0);

	CSR_WRITE_4(sc, IPW_CSR_RST, IPW_RST_STOP_MASTER);
	for (ntries = 0; ntries < 5; ntries++) {
		if (CSR_READ_4(sc, IPW_CSR_RST) & IPW_RST_MASTER_DISABLED)
			break;
		DELAY(10);
	}

	CSR_WRITE_4(sc, IPW_CSR_RST, IPW_RST_SW_RESET);

	ipw_rx_stop(sc);
	ipw_tx_stop(sc);

	ifp->if_flags &= ~IFF_UP;
}

static int
ipw_clock_sync(struct ipw_softc *sc)
{
	int ntries;
	u_int32_t r;

	CSR_WRITE_4(sc, IPW_CSR_RST, IPW_RST_SW_RESET);
	for (ntries = 0; ntries < 1000; ntries++) {
		if (CSR_READ_4(sc, IPW_CSR_RST) & IPW_RST_PRINCETON_RESET)
			break;
		DELAY(10);
	}
	if (ntries == 1000)
		return EIO;

	CSR_WRITE_4(sc, IPW_CSR_CTL, IPW_CTL_INIT_DONE);
	for (ntries = 0; ntries < 1000; ntries++) {
		if ((r = CSR_READ_4(sc, IPW_CSR_CTL)) & IPW_CTL_CLOCK_READY)
			break;
		DELAY(200);
	}
	if (ntries == 1000)
		return EIO;

	CSR_WRITE_4(sc, IPW_CSR_CTL, r | IPW_CTL_ALLOW_STANDBY);

	return 0;
}

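/*
 * Upload the microcode image.  The register writes below put the device
 * into its bootstrap state, the image is then pushed through the window
 * at 0x210010, and bit 0 of 0x210000 is polled until the microcode
 * reports it is running.  The exact sequence is opaque magic as far as
 * this driver is concerned; it simply reproduces what the firmware
 * expects.
 */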
static int
ipw_load_ucode(struct ipw_softc *sc, u_char *uc, int size)
{
	int ntries;

	MEM_WRITE_2(sc, 0x220000, 0x0703);
	MEM_WRITE_2(sc, 0x220000, 0x0707);

	MEM_WRITE_1(sc, 0x210014, 0x72);
	MEM_WRITE_1(sc, 0x210014, 0x72);

	MEM_WRITE_1(sc, 0x210000, 0x40);
	MEM_WRITE_1(sc, 0x210000, 0x00);
	MEM_WRITE_1(sc, 0x210000, 0x40);

	MEM_WRITE_MULTI_1(sc, 0x210010, uc, size);

	MEM_WRITE_1(sc, 0x210000, 0x00);
	MEM_WRITE_1(sc, 0x210000, 0x00);
	MEM_WRITE_1(sc, 0x210000, 0x80);

	MEM_WRITE_2(sc, 0x220000, 0x0703);
	MEM_WRITE_2(sc, 0x220000, 0x0707);

	MEM_WRITE_1(sc, 0x210014, 0x72);
	MEM_WRITE_1(sc, 0x210014, 0x72);

	MEM_WRITE_1(sc, 0x210000, 0x00);
	MEM_WRITE_1(sc, 0x210000, 0x80);

	for (ntries = 0; ntries < 10; ntries++) {
		if (MEM_READ_1(sc, 0x210000) & 1)
			break;
		DELAY(10);
	}
	if (ntries == 10)
		return EIO;

	return 0;
}

/* set of macros to handle unaligned little endian data in firmware image */
#define GETLE32(p) ((p)[0] | (p)[1] << 8 | (p)[2] << 16 | (p)[3] << 24)
#define GETLE16(p) ((p)[0] | (p)[1] << 8)
static int
ipw_load_firmware(struct ipw_softc *sc, u_char *fw, int size)
{
	u_char *p, *end;
	u_int32_t addr;
	u_int16_t len;

	p = fw;
	end = fw + size;
	while (p < end) {
		if (p + 6 > end)
			return EINVAL;

		addr = GETLE32(p);
		p += 4;
		len = GETLE16(p);
		p += 2;

		if (p + len > end)
			return EINVAL;

		ipw_write_mem_1(sc, addr, p, len);
		p += len;
	}
	return 0;
}

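/*
 * Load firmware and microcode into the adapter and bring it up.  The
 * image handed in from userland starts with a struct ipw_fw_hdr giving
 * the sizes of the two parts, followed by fw_size bytes of firmware and
 * uc_size bytes of microcode.  Once both are loaded, the rx and tx rings
 * are set up and we sleep until the firmware raises IPW_INTR_FW_INIT_DONE
 * (see ipw_intr()).
 */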
aprint_error("%s: could not initialize rx queue\n", 1615 sc->sc_dev.dv_xname); 1616 goto fail3; 1617 } 1618 1619 if ((error = ipw_tx_init(sc)) != 0) { 1620 aprint_error("%s: could not initialize tx queue\n", 1621 sc->sc_dev.dv_xname); 1622 goto fail3; 1623 } 1624 1625 CSR_WRITE_4(sc, IPW_CSR_IO, IPW_IO_GPIO1_ENABLE | IPW_IO_GPIO3_MASK | 1626 IPW_IO_LED_OFF); 1627 1628 /* Enable interrupts */ 1629 CSR_WRITE_4(sc, IPW_CSR_INTR_MASK, IPW_INTR_MASK); 1630 1631 /* Let's go! */ 1632 CSR_WRITE_4(sc, IPW_CSR_RST, 0); 1633 1634 /* Wait at most 5 seconds for firmware initialization to complete */ 1635 if ((error = tsleep(sc, 0, "ipwinit", 5 * hz)) != 0) { 1636 aprint_error("%s: timeout waiting for firmware initialization " 1637 "to complete\n", sc->sc_dev.dv_xname); 1638 goto fail3; 1639 } 1640 1641 /* Firmware initialization completed */ 1642 sc->flags |= IPW_FLAG_FW_INITED; 1643 1644 free(uc, M_DEVBUF); 1645 free(fw, M_DEVBUF); 1646 1647 r = CSR_READ_4(sc, IPW_CSR_IO); 1648 CSR_WRITE_4(sc, IPW_CSR_IO, r | IPW_IO_GPIO1_MASK | IPW_IO_GPIO3_MASK); 1649 1650 /* Retrieve information tables base addresses */ 1651 sc->table1_base = CSR_READ_4(sc, IPW_CSR_TABLE1_BASE); 1652 sc->table2_base = CSR_READ_4(sc, IPW_CSR_TABLE2_BASE); 1653 1654 ipw_write_table1(sc, IPW_INFO_LOCK, 0); 1655 1656 /* Retrieve adapter MAC address */ 1657 len = IEEE80211_ADDR_LEN; 1658 ipw_read_table2(sc, IPW_INFO_ADAPTER_MAC, ic->ic_myaddr, &len); 1659 1660 IEEE80211_ADDR_COPY(LLADDR(ifp->if_sadl), ic->ic_myaddr); 1661 1662 return 0; 1663 1664 fail3: free(uc, M_DEVBUF); 1665 fail2: free(fw, M_DEVBUF); 1666 fail1: ipw_reset(sc); 1667 1668 return error; 1669 } 1670 1671 static int 1672 ipw_config(struct ipw_softc *sc) 1673 { 1674 struct ieee80211com *ic = &sc->sc_ic; 1675 struct ifnet *ifp = &ic->ic_if; 1676 struct ipw_security security; 1677 struct ieee80211_wepkey *k; 1678 struct ipw_wep_key wepkey; 1679 struct ipw_scan_options options; 1680 struct ipw_configuration config; 1681 u_int32_t data; 1682 int error, i; 1683 1684 switch (ic->ic_opmode) { 1685 case IEEE80211_M_STA: 1686 case IEEE80211_M_HOSTAP: 1687 data = htole32(IPW_MODE_BSS); 1688 break; 1689 1690 case IEEE80211_M_IBSS: 1691 case IEEE80211_M_AHDEMO: 1692 data = htole32(IPW_MODE_IBSS); 1693 break; 1694 1695 case IEEE80211_M_MONITOR: 1696 data = htole32(IPW_MODE_MONITOR); 1697 break; 1698 } 1699 DPRINTF(("Setting adapter mode to %u\n", data)); 1700 error = ipw_cmd(sc, IPW_CMD_SET_MODE, &data, sizeof data); 1701 if (error != 0) 1702 return error; 1703 1704 if (ic->ic_opmode == IEEE80211_M_IBSS || 1705 ic->ic_opmode == IEEE80211_M_MONITOR) { 1706 data = htole32(ieee80211_chan2ieee(ic, ic->ic_ibss_chan)); 1707 DPRINTF(("Setting adapter channel to %u\n", data)); 1708 error = ipw_cmd(sc, IPW_CMD_SET_CHANNEL, &data, sizeof data); 1709 if (error != 0) 1710 return error; 1711 } 1712 1713 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 1714 DPRINTF(("Enabling adapter\n")); 1715 return ipw_cmd(sc, IPW_CMD_ENABLE, NULL, 0); 1716 } 1717 1718 DPRINTF(("Setting adapter MAC to %s\n", ether_sprintf(ic->ic_myaddr))); 1719 IEEE80211_ADDR_COPY(LLADDR(ifp->if_sadl), ic->ic_myaddr); 1720 error = ipw_cmd(sc, IPW_CMD_SET_MAC_ADDRESS, ic->ic_myaddr, 1721 IEEE80211_ADDR_LEN); 1722 if (error != 0) 1723 return error; 1724 1725 config.flags = htole32(IPW_CFG_BSS_MASK | IPW_CFG_IBSS_MASK | 1726 IPW_CFG_PREAMBLE_LEN | IPW_CFG_802_1x_ENABLE); 1727 if (ic->ic_opmode == IEEE80211_M_IBSS) 1728 config.flags |= htole32(IPW_CFG_IBSS_AUTO_START); 1729 if (ifp->if_flags & IFF_PROMISC) 1730 config.flags |= 
static int
ipw_config(struct ipw_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct ipw_security security;
	struct ieee80211_wepkey *k;
	struct ipw_wep_key wepkey;
	struct ipw_scan_options options;
	struct ipw_configuration config;
	u_int32_t data;
	int error, i;

	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
	case IEEE80211_M_HOSTAP:
		data = htole32(IPW_MODE_BSS);
		break;

	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
		data = htole32(IPW_MODE_IBSS);
		break;

	case IEEE80211_M_MONITOR:
		data = htole32(IPW_MODE_MONITOR);
		break;
	}
	DPRINTF(("Setting adapter mode to %u\n", data));
	error = ipw_cmd(sc, IPW_CMD_SET_MODE, &data, sizeof data);
	if (error != 0)
		return error;

	if (ic->ic_opmode == IEEE80211_M_IBSS ||
	    ic->ic_opmode == IEEE80211_M_MONITOR) {
		data = htole32(ieee80211_chan2ieee(ic, ic->ic_ibss_chan));
		DPRINTF(("Setting adapter channel to %u\n", data));
		error = ipw_cmd(sc, IPW_CMD_SET_CHANNEL, &data, sizeof data);
		if (error != 0)
			return error;
	}

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		DPRINTF(("Enabling adapter\n"));
		return ipw_cmd(sc, IPW_CMD_ENABLE, NULL, 0);
	}

	DPRINTF(("Setting adapter MAC to %s\n", ether_sprintf(ic->ic_myaddr)));
	IEEE80211_ADDR_COPY(LLADDR(ifp->if_sadl), ic->ic_myaddr);
	error = ipw_cmd(sc, IPW_CMD_SET_MAC_ADDRESS, ic->ic_myaddr,
	    IEEE80211_ADDR_LEN);
	if (error != 0)
		return error;

	config.flags = htole32(IPW_CFG_BSS_MASK | IPW_CFG_IBSS_MASK |
	    IPW_CFG_PREAMBLE_LEN | IPW_CFG_802_1x_ENABLE);
	if (ic->ic_opmode == IEEE80211_M_IBSS)
		config.flags |= htole32(IPW_CFG_IBSS_AUTO_START);
	if (ifp->if_flags & IFF_PROMISC)
		config.flags |= htole32(IPW_CFG_PROMISCUOUS);
	config.channels = htole32(0x3fff); /* channels 1-14 */
	config.ibss_chan = htole32(0x7ff);
	DPRINTF(("Setting adapter configuration 0x%08x\n", config.flags));
	error = ipw_cmd(sc, IPW_CMD_SET_CONFIGURATION, &config, sizeof config);
	if (error != 0)
		return error;

	data = htole32(0x3); /* 1, 2 */
	DPRINTF(("Setting adapter basic tx rates to 0x%x\n", data));
	error = ipw_cmd(sc, IPW_CMD_SET_BASIC_TX_RATES, &data, sizeof data);
	if (error != 0)
		return error;

	data = htole32(0xf); /* 1, 2, 5.5, 11 */
	DPRINTF(("Setting adapter tx rates to 0x%x\n", data));
	error = ipw_cmd(sc, IPW_CMD_SET_TX_RATES, &data, sizeof data);
	if (error != 0)
		return error;

	data = htole32(IPW_POWER_MODE_CAM);
	DPRINTF(("Setting adapter power mode to %u\n", data));
	error = ipw_cmd(sc, IPW_CMD_SET_POWER_MODE, &data, sizeof data);
	if (error != 0)
		return error;

	if (ic->ic_opmode == IEEE80211_M_IBSS) {
		data = htole32(ic->ic_txpower);
		DPRINTF(("Setting adapter tx power index to %u\n", data));
		error = ipw_cmd(sc, IPW_CMD_SET_TX_POWER_INDEX, &data,
		    sizeof data);
		if (error != 0)
			return error;
	}

	data = htole32(ic->ic_rtsthreshold);
	DPRINTF(("Setting adapter RTS threshold to %u\n", data));
	error = ipw_cmd(sc, IPW_CMD_SET_RTS_THRESHOLD, &data, sizeof data);
	if (error != 0)
		return error;

	data = htole32(ic->ic_fragthreshold);
	DPRINTF(("Setting adapter frag threshold to %u\n", data));
	error = ipw_cmd(sc, IPW_CMD_SET_FRAG_THRESHOLD, &data, sizeof data);
	if (error != 0)
		return error;

#ifdef IPW_DEBUG
	if (ipw_debug > 0) {
		printf("Setting adapter ESSID to ");
		ieee80211_print_essid(ic->ic_des_essid, ic->ic_des_esslen);
		printf("\n");
	}
#endif
	error = ipw_cmd(sc, IPW_CMD_SET_ESSID, ic->ic_des_essid,
	    ic->ic_des_esslen);
	if (error != 0)
		return error;

	/* no mandatory BSSID */
	error = ipw_cmd(sc, IPW_CMD_SET_MANDATORY_BSSID, NULL, 0);
	if (error != 0)
		return error;

	if (ic->ic_flags & IEEE80211_F_DESBSSID) {
		DPRINTF(("Setting adapter desired BSSID to %s\n",
		    ether_sprintf(ic->ic_des_bssid)));
		error = ipw_cmd(sc, IPW_CMD_SET_DESIRED_BSSID,
		    ic->ic_des_bssid, IEEE80211_ADDR_LEN);
		if (error != 0)
			return error;
	}

	security.authmode = IPW_AUTH_OPEN;
	security.ciphers = htole32(IPW_CIPHER_NONE);
	security.version = htole16(0);
	security.replay_counters_number = 0;
	security.unicast_using_group = 0;
	DPRINTF(("Setting adapter authmode to %u\n", security.authmode));
	error = ipw_cmd(sc, IPW_CMD_SET_SECURITY_INFORMATION, &security,
	    sizeof security);
	if (error != 0)
		return error;

	if (ic->ic_flags & IEEE80211_F_PRIVACY) {
		k = ic->ic_nw_keys;
		for (i = 0; i < IEEE80211_WEP_NKID; i++, k++) {
			if (k->wk_len == 0)
				continue;

			wepkey.idx = i;
			wepkey.len = k->wk_len;
			bzero(wepkey.key, sizeof wepkey.key);
			bcopy(k->wk_key, wepkey.key, k->wk_len);
			DPRINTF(("Setting wep key index %d len %d\n",
			    wepkey.idx, wepkey.len));
			error = ipw_cmd(sc, IPW_CMD_SET_WEP_KEY, &wepkey,
			    sizeof wepkey);
			if (error != 0)
				return error;
		}

		data = htole32(ic->ic_wep_txkey);
		DPRINTF(("Setting adapter tx key index to %u\n", data));
		error = ipw_cmd(sc, IPW_CMD_SET_WEP_KEY_INDEX, &data,
		    sizeof data);
		if (error != 0)
			return error;
	}

	data = htole32((sc->sc_ic.ic_flags & IEEE80211_F_PRIVACY) ? 0x8 : 0);
	DPRINTF(("Setting adapter wep flags to 0x%x\n", data));
	error = ipw_cmd(sc, IPW_CMD_SET_WEP_FLAGS, &data, sizeof data);
	if (error != 0)
		return error;

	if (ic->ic_opmode == IEEE80211_M_IBSS ||
	    ic->ic_opmode == IEEE80211_M_HOSTAP) {
		data = htole32(ic->ic_lintval);
		DPRINTF(("Setting adapter beacon interval to %u\n", data));
		error = ipw_cmd(sc, IPW_CMD_SET_BEACON_INTERVAL, &data,
		    sizeof data);
		if (error != 0)
			return error;
	}

	options.flags = htole32(0);
	options.channels = htole32(0x3fff); /* scan channels 1-14 */
	error = ipw_cmd(sc, IPW_CMD_SET_SCAN_OPTIONS, &options, sizeof options);
	if (error != 0)
		return error;

	/* finally, enable adapter (start scanning for an access point) */
	DPRINTF(("Enabling adapter\n"));
	error = ipw_cmd(sc, IPW_CMD_ENABLE, NULL, 0);
	if (error != 0)
		return error;

	return 0;
}

static int
ipw_init(struct ifnet *ifp)
{
	struct ipw_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	/* exit immediately if firmware has not been ioctl'd */
	if (!(sc->flags & IPW_FLAG_FW_INITED)) {
		ifp->if_flags &= ~IFF_UP;
		return EIO;
	}

	ipw_stop(ifp, 0);

	if (ipw_config(sc) != 0) {
		aprint_error("%s: device configuration failed\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_flags |= IFF_RUNNING;

	ic->ic_bss->ni_chan = ic->ic_channels;

	return 0;

fail:	ipw_stop(ifp, 0);

	return EIO;
}

static void
ipw_stop(struct ifnet *ifp, int disable)
{
	struct ipw_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	if (ifp->if_flags & IFF_RUNNING) {
		DPRINTF(("Disabling adapter\n"));
		ipw_cmd(sc, IPW_CMD_DISABLE, NULL, 0);
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
}

static void
ipw_read_mem_1(struct ipw_softc *sc, bus_size_t offset, u_int8_t *datap,
    bus_size_t count)
{
	for (; count > 0; offset++, datap++, count--) {
		CSR_WRITE_4(sc, IPW_CSR_INDIRECT_ADDR, offset & ~3);
		*datap = CSR_READ_1(sc, IPW_CSR_INDIRECT_DATA + (offset & 3));
	}
}

static void
ipw_write_mem_1(struct ipw_softc *sc, bus_size_t offset, u_int8_t *datap,
    bus_size_t count)
{
	for (; count > 0; offset++, datap++, count--) {
		CSR_WRITE_4(sc, IPW_CSR_INDIRECT_ADDR, offset & ~3);
		CSR_WRITE_1(sc, IPW_CSR_INDIRECT_DATA + (offset & 3), *datap);
	}
}

static void
ipw_zero_mem_4(struct ipw_softc *sc, bus_size_t offset, bus_size_t count)
{
	CSR_WRITE_4(sc, IPW_CSR_AUTOINC_ADDR, offset);
	while (count-- > 0)
		CSR_WRITE_4(sc, IPW_CSR_AUTOINC_DATA, 0);
}