author	Linus Torvalds <torvalds@linux-foundation.org>	2023-02-21 18:24:12 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-02-21 18:24:12 -0800
commit	5b7c4cabbb65f5c469464da6c5f614cbd7f730f2 (patch)
tree	cc5c2d0a898769fd59549594fedb3ee6f84e59a0 /drivers/scsi/aacraid/dpcsup.c
Merge tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:

"Core:
 - Add dedicated kmem_cache for typical/small skb->head, avoid having to access struct page at kfree time, and improve memory use.
 - Introduce sysctl to set default RPS configuration for new netdevs.
 - Define Netlink protocol specification format which can be used to describe messages used by each family and auto-generate parsers. Add tools for generating kernel data structures and uAPI headers.
 - Expose all net/core sysctls inside netns.
 - Remove 4s sleep in netpoll if carrier is instantly detected on boot.
 - Add configurable limit of MDB entries per port, and port-vlan.
 - Continue populating drop reasons throughout the stack.
 - Retire a handful of legacy Qdiscs and classifiers.

Protocols:
 - Support IPv4 big TCP (TSO frames larger than 64kB).
 - Add IP_LOCAL_PORT_RANGE socket option, to control local port range on socket by socket basis (a usage sketch follows the commit list below).
 - Track and report in procfs number of MPTCP sockets used.
 - Support mixing IPv4 and IPv6 flows in the in-kernel MPTCP path manager.
 - IPv6: don't check net.ipv6.route.max_size and rely on garbage collection to free memory (similarly to IPv4).
 - Support Penultimate Segment Pop (PSP) flavor in SRv6 (RFC8986).
 - ICMP: add per-rate limit counters.
 - Add support for user scanning requests in ieee802154.
 - Remove static WEP support.
 - Support minimal Wi-Fi 7 Extremely High Throughput (EHT) rate reporting.
 - WiFi 7 EHT channel puncturing support (client & AP).

BPF:
 - Add an rbtree data structure following the "next-gen data structure" precedent set by the recently added linked list, that is, by using kfunc + kptr instead of adding a new BPF map type.
 - Expose XDP hints via kfuncs with initial support for RX hash and timestamp metadata.
 - Add BPF_F_NO_TUNNEL_KEY extension to bpf_skb_set_tunnel_key to better support decap on GRE tunnel devices not operating in collect metadata mode.
 - Improve x86 JIT's codegen for PROBE_MEM runtime error checks.
 - Remove the need for trace_printk_lock for bpf_trace_printk and bpf_trace_vprintk helpers.
 - Extend libbpf's bpf_tracing.h support for tracing arguments of kprobes/uprobes and syscall as a special case.
 - Significantly reduce the search time for module symbols by livepatch and BPF.
 - Enable cpumasks to be used as kptrs, which is useful for tracing programs tracking which tasks end up running on which CPUs in different time intervals.
 - Add support for BPF trampoline on s390x and riscv64.
 - Add capability to export the XDP features supported by the NIC.
 - Add __bpf_kfunc tag for marking kernel functions as kfuncs.
 - Add cgroup.memory=nobpf kernel parameter option to disable BPF memory accounting for container environments.

Netfilter:
 - Remove the CLUSTERIP target. It has been marked as obsolete for years, and we still have WARN splats wrt races of the out-of-band /proc interface installed by this target.
 - Add 'destroy' commands to nf_tables. They are identical to the existing 'delete' commands, but do not return an error if the referenced object (set, chain, rule...) did not exist.

Driver API:
 - Improve cpumask_local_spread() locality to help NICs set the right IRQ affinity on AMD platforms.
 - Separate C22 and C45 MDIO bus transactions more clearly.
 - Introduce new DCB table to control DSCP rewrite on egress.
 - Support configuration of Physical Layer Collision Avoidance (PLCA) Reconciliation Sublayer (RS) (802.3cg-2019). Modern version of shared medium Ethernet.
 - Support for MAC Merge layer (IEEE 802.3-2018 clause 99), allowing preemption of low priority frames by high priority frames.
 - Add support for controlling MACSec offload using netlink SET.
 - Rework devlink instance refcounts to allow registration and de-registration under the instance lock. Split the code into multiple files, drop some of the unnecessarily granular locks and factor out common parts of netlink operation handling.
 - Add TX frame aggregation parameters (for USB drivers).
 - Add a new attr TCA_EXT_WARN_MSG to report TC (offload) warning messages with notifications for debug.
 - Allow offloading of UDP NEW connections via act_ct.
 - Add support for per action HW stats in TC.
 - Support hardware miss to TC action (continue processing in SW from a specific point in the action chain).
 - Warn if the old Wireless Extension user space interface is used with modern cfg80211/mac80211 drivers. Do not support Wireless Extensions for Wi-Fi 7 devices at all. Everyone should switch to using the nl80211 interface instead.
 - Improve the CAN bit timing configuration. Use extack to return error messages directly to user space, update the SJW handling, including the definition of a new default value that will benefit CAN-FD controllers, by increasing their oscillator tolerance.

New hardware / drivers:
 - Ethernet:
   - nVidia BlueField-3 support (control traffic driver)
   - Ethernet support for imx93 SoCs
   - Motorcomm yt8531 gigabit Ethernet PHY
   - onsemi NCN26000 10BASE-T1S PHY (with support for PLCA)
   - Microchip LAN8841 PHY (incl. cable diagnostics and PTP)
   - Amlogic gxl MDIO mux
 - WiFi:
   - RealTek RTL8188EU (rtl8xxxu)
   - Qualcomm Wi-Fi 7 devices (ath12k)
 - CAN:
   - Renesas R-Car V4H

Drivers:
 - Bluetooth:
   - Set Per Platform Antenna Gain (PPAG) for Intel controllers.
 - Ethernet NICs:
   - Intel (1G, igc):
     - support TSN / Qbv / packet scheduling features of i226 model
   - Intel (100G, ice):
     - use GNSS subsystem instead of TTY
     - multi-buffer XDP support
     - extend support for GPIO pins to E823 devices
   - nVidia/Mellanox:
     - update the shared buffer configuration on PFC commands
     - implement PTP adjphase function for HW offset control
     - TC support for Geneve and GRE with VF tunnel offload
     - more efficient crypto key management method
     - multi-port eswitch support
   - Netronome/Corigine:
     - add DCB IEEE support
     - support IPsec offloading for NFP3800
   - Freescale/NXP (enetc):
     - support XDP_REDIRECT for XDP non-linear buffers
     - improve reconfig, avoid link flap and waiting for idle
     - support MAC Merge layer
   - Other NICs:
     - sfc/ef100: add basic devlink support for ef100
     - ionic: rx_push mode operation (writing descriptors via MMIO)
     - bnxt: use the auxiliary bus abstraction for RDMA
     - r8169: disable ASPM and reset bus in case of tx timeout
     - cpsw: support QSGMII mode for J721e CPSW9G
     - cpts: support pulse-per-second output
     - ngbe: add an mdio bus driver
     - usbnet: optimize usbnet_bh() by avoiding unnecessary queuing
     - r8152: handle devices with FW with NCM support
     - amd-xgbe: support 10Mbps, 2.5GbE speeds and rx-adaptation
     - virtio-net: support multi buffer XDP
     - virtio/vsock: replace virtio_vsock_pkt with sk_buff
     - tsnep: XDP support
 - Ethernet high-speed switches:
   - nVidia/Mellanox (mlxsw):
     - add support for latency TLV (in FW control messages)
   - Microchip (sparx5):
     - separate explicit and implicit traffic forwarding rules, make the implicit rules always active
     - add support for egress DSCP rewrite
     - IS0 VCAP support (Ingress Classification)
     - IS2 VCAP filters (protos, L3 addrs, L4 ports, flags, ToS etc.)
     - ES2 VCAP support (Egress Access Control)
     - support for Per-Stream Filtering and Policing (802.1Q, 8.6.5.1)
 - Ethernet embedded switches:
   - Marvell (mv88e6xxx):
     - add MAB (port auth) offload support
     - enable PTP receive for mv88e6390
   - NXP (ocelot):
     - support MAC Merge layer
     - support for the vsc7512 internal copper phys
   - Microchip:
     - lan9303: convert to PHYLINK
     - lan966x: support TC flower filter statistics
     - lan937x: PTP support for KSZ9563/KSZ8563 and LAN937x
     - lan937x: support Credit Based Shaper configuration
     - ksz9477: support Energy Efficient Ethernet
   - other:
     - qca8k: convert to regmap read/write API, use bulk operations
     - rswitch: Improve TX timestamp accuracy
 - Intel WiFi (iwlwifi):
   - EHT (Wi-Fi 7) rate reporting
   - STEP equalizer support: transfer some STEP (connection to radio on platforms with integrated wifi) related parameters from the BIOS to the firmware.
 - Qualcomm 802.11ax WiFi (ath11k):
   - IPQ5018 support
   - Fine Timing Measurement (FTM) responder role support
   - channel 177 support
 - MediaTek WiFi (mt76):
   - per-PHY LED support
   - mt7996: EHT (Wi-Fi 7) support
   - Wireless Ethernet Dispatch (WED) reset support
   - switch to using page pool allocator
 - RealTek WiFi (rtw89):
   - support new version of Bluetooth co-existence
 - Mobile:
   - rmnet: support TX aggregation"

* tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1872 commits)
  page_pool: add a comment explaining the fragment counter usage
  net: ethtool: fix __ethtool_dev_mm_supported() implementation
  ethtool: pse-pd: Fix double word in comments
  xsk: add linux/vmalloc.h to xsk.c
  sefltests: netdevsim: wait for devlink instance after netns removal
  selftest: fib_tests: Always cleanup before exit
  net/mlx5e: Align IPsec ASO result memory to be as required by hardware
  net/mlx5e: TC, Set CT miss to the specific ct action instance
  net/mlx5e: Rename CHAIN_TO_REG to MAPPED_OBJ_TO_REG
  net/mlx5: Refactor tc miss handling to a single function
  net/mlx5: Kconfig: Make tc offload depend on tc skb extension
  net/sched: flower: Support hardware miss to tc action
  net/sched: flower: Move filter handle initialization earlier
  net/sched: cls_api: Support hardware miss to tc action
  net/sched: Rename user cookie and act cookie
  sfc: fix builds without CONFIG_RTC_LIB
  sfc: clean up some inconsistent indentings
  net/mlx4_en: Introduce flexible array to silence overflow warning
  net: lan966x: Fix possible deadlock inside PTP
  net/ulp: Remove redundant ->clone() test in inet_clone_ulp().
  ...
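The IP_LOCAL_PORT_RANGE option above can be exercised from userspace. Below is a minimal sketch, assuming the uAPI as described in this pull (a u32 holding the low port in its lower 16 bits and the high port in its upper 16 bits); the fallback define of 51 matches the v6.3 uapi header, but verify it against your linux/in.h.

	#include <stdio.h>
	#include <stdint.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <netinet/in.h>

	#ifndef IP_LOCAL_PORT_RANGE
	#define IP_LOCAL_PORT_RANGE 51	/* value in the v6.3 uapi linux/in.h */
	#endif

	int main(void)
	{
		/* Restrict this socket's ephemeral ports to 40000-40100. */
		uint32_t range = 40000u | (40100u << 16);	/* low | high << 16 */
		int fd = socket(AF_INET, SOCK_STREAM, 0);

		if (fd < 0 || setsockopt(fd, IPPROTO_IP, IP_LOCAL_PORT_RANGE,
					 &range, sizeof(range)) < 0) {
			perror("IP_LOCAL_PORT_RANGE");
			return 1;
		}
		/* connect()/bind() will now pick a source port in that range. */
		close(fd);
		return 0;
	}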
Diffstat (limited to 'drivers/scsi/aacraid/dpcsup.c')
-rw-r--r--	drivers/scsi/aacraid/dpcsup.c	456
1 file changed, 456 insertions, 0 deletions
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
new file mode 100644
index 000000000..fbe334c59
--- /dev/null
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Adaptec AAC series RAID controller driver
+ * (c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
+ *
+ * Module Name:
+ * dpcsup.c
+ *
+ * Abstract: All DPC processing routines for the cyclone board occur here.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/blkdev.h>
+
+#include "aacraid.h"
+
+/**
+ * aac_response_normal - Handle command replies
+ * @q: Queue to read from
+ *
+ * This DPC routine is run when the adapter interrupts us to let us
+ * know there is a response on our normal priority queue. We pull off
+ * all the QEs there are and wake up all the waiters before exiting.
+ * We take a spinlock on the queue before operating on it.
+ */
+
+unsigned int aac_response_normal(struct aac_queue * q)
+{
+ struct aac_dev * dev = q->dev;
+ struct aac_entry *entry;
+ struct hw_fib * hwfib;
+ struct fib * fib;
+ int consumed = 0;
+ unsigned long flags, mflags;
+
+ spin_lock_irqsave(q->lock, flags);
+ /*
+ * Keep pulling response QEs off the response queue and waking
+ * up the waiters until there are no more QEs. We then return
+ * to the system. If no response was requested we just
+ * deallocate the Fib here and continue.
+ */
+ while(aac_consumer_get(dev, q, &entry))
+ {
+ int fast;
+ u32 index = le32_to_cpu(entry->addr);
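+ /*
+ * The completion entry packs a fast-response flag into bit 0
+ * and the index into dev->fibs into the bits above the low two.
+ */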
+ fast = index & 0x01;
+ fib = &dev->fibs[index >> 2];
+ hwfib = fib->hw_fib_va;
+
+ aac_consumer_free(dev, q, HostNormRespQueue);
+ /*
+ * Remove this fib from the Outstanding I/O queue.
+ * But only if it has not already been timed out.
+ *
+ * If the fib has been timed out already, then just
+ * continue. The caller has already been notified that
+ * the fib timed out.
+ */
+ atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);
+
+ if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
+ spin_unlock_irqrestore(q->lock, flags);
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ spin_lock_irqsave(q->lock, flags);
+ continue;
+ }
+ spin_unlock_irqrestore(q->lock, flags);
+
+ if (fast) {
+ /*
+ * Doctor the fib
+ */
+ *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
+ hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
+ fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
+ }
+
+ FIB_COUNTER_INCREMENT(aac_config.FibRecved);
+
+ if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
+ {
+ __le32 *pstatus = (__le32 *)hwfib->data;
+ if (*pstatus & cpu_to_le32(0xffff0000))
+ *pstatus = cpu_to_le32(ST_OK);
+ }
+ if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
+ {
+ if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected)) {
+ FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
+ } else {
+ FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
+ }
+ /*
+ * NOTE: we cannot touch the fib after this
+ * call, because it may have been deallocated.
+ */
+ fib->callback(fib->callback_data, fib);
+ } else {
+ unsigned long flagv;
+ spin_lock_irqsave(&fib->event_lock, flagv);
+ if (!fib->done) {
+ fib->done = 1;
+ complete(&fib->event_wait);
+ }
+ spin_unlock_irqrestore(&fib->event_lock, flagv);
+
+ spin_lock_irqsave(&dev->manage_lock, mflags);
+ dev->management_fib_count--;
+ spin_unlock_irqrestore(&dev->manage_lock, mflags);
+
+ FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
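+ /*
+ * done == 2 means the waiter has already given up on this
+ * fib, so no one else will reclaim it: complete and free
+ * it here.
+ */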
+ if (fib->done == 2) {
+ spin_lock_irqsave(&fib->event_lock, flagv);
+ fib->done = 0;
+ spin_unlock_irqrestore(&fib->event_lock, flagv);
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ }
+ }
+ consumed++;
+ spin_lock_irqsave(q->lock, flags);
+ }
+
+ if (consumed > aac_config.peak_fibs)
+ aac_config.peak_fibs = consumed;
+ if (consumed == 0)
+ aac_config.zero_fibs++;
+
+ spin_unlock_irqrestore(q->lock, flags);
+ return 0;
+}
+
+
+/**
+ * aac_command_normal - handle commands
+ * @q: queue to process
+ *
+ * This DPC routine is queued when the adapter interrupts us to
+ * let us know there is a command on our normal priority queue. We
+ * pull off all the QEs there are and wake up all the waiters before
+ * exiting. We take a spinlock on the queue before operating on it.
+ */
+
+unsigned int aac_command_normal(struct aac_queue *q)
+{
+ struct aac_dev * dev = q->dev;
+ struct aac_entry *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->lock, flags);
+
+ /*
+ * Keep pulling command QEs off the command queue and waking
+ * up the waiters until there are no more QEs. We then return
+ * to the system.
+ */
+ while(aac_consumer_get(dev, q, &entry))
+ {
+ struct fib fibctx;
+ struct hw_fib * hw_fib;
+ u32 index;
+ struct fib *fib = &fibctx;
+
+ index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
+ hw_fib = &dev->aif_base_va[index];
+
+ /*
+ * Allocate a FIB at all costs. For non-queued work we can
+ * just use the stack. We need a fib object in order to
+ * manage the linked lists.
+ */
+ if (dev->aif_thread)
+ if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
+ fib = &fibctx;
+
+ memset(fib, 0, sizeof(struct fib));
+ INIT_LIST_HEAD(&fib->fiblink);
+ fib->type = FSAFS_NTC_FIB_CONTEXT;
+ fib->size = sizeof(struct fib);
+ fib->hw_fib_va = hw_fib;
+ fib->data = hw_fib->data;
+ fib->dev = dev;
+
+
+ if (dev->aif_thread && fib != &fibctx) {
+ list_add_tail(&fib->fiblink, &q->cmdq);
+ aac_consumer_free(dev, q, HostNormCmdQueue);
+ wake_up_interruptible(&q->cmdready);
+ } else {
+ aac_consumer_free(dev, q, HostNormCmdQueue);
+ spin_unlock_irqrestore(q->lock, flags);
+ /*
+ * Set the status of this FIB
+ */
+ *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
+ aac_fib_adapter_complete(fib, sizeof(u32));
+ spin_lock_irqsave(q->lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(q->lock, flags);
+ return 0;
+}
+
+/*
+ *
+ * aac_aif_callback
+ * @context: the context set in the fib - here it is the AIF request fib itself
+ * @fibptr: pointer to the completed fib
+ *
+ * Handles the AIFs - new method (SRC)
+ *
+ */
+
+static void aac_aif_callback(void *context, struct fib * fibptr)
+{
+ struct fib *fibctx;
+ struct aac_dev *dev;
+ struct aac_aifcmd *cmd;
+
+ fibctx = (struct fib *)context;
+ BUG_ON(fibptr == NULL);
+ dev = fibptr->dev;
+
+ if ((fibptr->hw_fib_va->header.XferState &
+ cpu_to_le32(NoMoreAifDataAvailable)) ||
+ dev->sa_firmware) {
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ return;
+ }
+
+ aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va);
+
+ aac_fib_init(fibctx);
+ cmd = (struct aac_aifcmd *) fib_data(fibctx);
+ cmd->command = cpu_to_le32(AifReqEvent);
+
+ aac_fib_send(AifRequest,
+ fibctx,
+ sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
+ FsaNormal,
+ 0, 1,
+ (fib_callback)aac_aif_callback, fibctx);
+}
+
+
+/*
+ * aac_intr_normal - Handle command replies
+ * @dev: Device
+ * @index: completion reference (fib index, or event type for SA firmware AIFs)
+ * @isAif: 0 = command response, 1 = AIF (legacy method), 2 = AIF (new SRC method)
+ * @isFastResponse: non-zero if this is a fast response
+ * @aif_fib: hardware fib carrying the AIF data, or NULL
+ *
+ * This DPC routine is run when the adapter interrupts us to let us
+ * know there is a response on our normal priority queue. We pull off
+ * all the QEs there are and wake up all the waiters before exiting.
+ */
+unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
+ int isFastResponse, struct hw_fib *aif_fib)
+{
+ unsigned long mflags;
+ dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
+ if (isAif == 1) { /* AIF - common */
+ struct hw_fib * hw_fib;
+ struct fib * fib;
+ struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
+ unsigned long flags;
+
+ /*
+ * Allocate a FIB. For non queued stuff we can just use
+ * the stack so we are happy. We need a fib object in order to
+ * manage the linked lists.
+ */
+ if ((!dev->aif_thread)
+ || (!(fib = kzalloc(sizeof(struct fib),GFP_ATOMIC))))
+ return 1;
+ if (!(hw_fib = kzalloc(sizeof(struct hw_fib),GFP_ATOMIC))) {
+ kfree (fib);
+ return 1;
+ }
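+ /*
+ * Pick the AIF source: with SA firmware only an event type is
+ * passed in @index; otherwise copy the hardware fib either from
+ * the caller or from adapter memory at offset @index.
+ */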
+ if (dev->sa_firmware) {
+ fib->hbacmd_size = index; /* store event type */
+ } else if (aif_fib != NULL) {
+ memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
+ } else {
+ memcpy(hw_fib, (struct hw_fib *)
+ (((uintptr_t)(dev->regs.sa)) + index),
+ sizeof(struct hw_fib));
+ }
+ INIT_LIST_HEAD(&fib->fiblink);
+ fib->type = FSAFS_NTC_FIB_CONTEXT;
+ fib->size = sizeof(struct fib);
+ fib->hw_fib_va = hw_fib;
+ fib->data = hw_fib->data;
+ fib->dev = dev;
+
+ spin_lock_irqsave(q->lock, flags);
+ list_add_tail(&fib->fiblink, &q->cmdq);
+ wake_up_interruptible(&q->cmdready);
+ spin_unlock_irqrestore(q->lock, flags);
+ return 1;
+ } else if (isAif == 2) { /* AIF - new (SRC) */
+ struct fib *fibctx;
+ struct aac_aifcmd *cmd;
+
+ fibctx = aac_fib_alloc(dev);
+ if (!fibctx)
+ return 1;
+ aac_fib_init(fibctx);
+
+ cmd = (struct aac_aifcmd *) fib_data(fibctx);
+ cmd->command = cpu_to_le32(AifReqEvent);
+
+ return aac_fib_send(AifRequest,
+ fibctx,
+ sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
+ FsaNormal,
+ 0, 1,
+ (fib_callback)aac_aif_callback, fibctx);
+ } else {
+ struct fib *fib = &dev->fibs[index];
+ int start_callback = 0;
+
+ /*
+ * Remove this fib from the Outstanding I/O queue.
+ * But only if it has not already been timed out.
+ *
+ * If the fib has been timed out already, then just
+ * continue. The caller has already been notified that
+ * the fib timed out.
+ */
+ atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);
+
+ if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ return 0;
+ }
+
+ FIB_COUNTER_INCREMENT(aac_config.FibRecved);
+
+ if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
+
+ if (isFastResponse)
+ fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
+
+ if (fib->callback) {
+ start_callback = 1;
+ } else {
+ unsigned long flagv;
+ int completed = 0;
+
+ dprintk((KERN_INFO "event_wait up\n"));
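+ /*
+ * done == 2: the waiter already gave up, so mark the
+ * fib done and complete it below instead of waking a
+ * waiter that is no longer there.
+ */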
+ spin_lock_irqsave(&fib->event_lock, flagv);
+ if (fib->done == 2) {
+ fib->done = 1;
+ completed = 1;
+ } else {
+ fib->done = 1;
+ complete(&fib->event_wait);
+ }
+ spin_unlock_irqrestore(&fib->event_lock, flagv);
+
+ spin_lock_irqsave(&dev->manage_lock, mflags);
+ dev->management_fib_count--;
+ spin_unlock_irqrestore(&dev->manage_lock,
+ mflags);
+
+ FIB_COUNTER_INCREMENT(aac_config.NativeRecved);
+ if (completed)
+ aac_fib_complete(fib);
+ }
+ } else {
+ struct hw_fib *hwfib = fib->hw_fib_va;
+
+ if (isFastResponse) {
+ /* Doctor the fib */
+ *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
+ hwfib->header.XferState |=
+ cpu_to_le32(AdapterProcessed);
+ fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
+ }
+
+ if (hwfib->header.Command ==
+ cpu_to_le16(NuFileSystem)) {
+ __le32 *pstatus = (__le32 *)hwfib->data;
+
+ if (*pstatus & cpu_to_le32(0xffff0000))
+ *pstatus = cpu_to_le32(ST_OK);
+ }
+ if (hwfib->header.XferState &
+ cpu_to_le32(NoResponseExpected | Async)) {
+ if (hwfib->header.XferState & cpu_to_le32(
+ NoResponseExpected)) {
+ FIB_COUNTER_INCREMENT(
+ aac_config.NoResponseRecved);
+ } else {
+ FIB_COUNTER_INCREMENT(
+ aac_config.AsyncRecved);
+ }
+ start_callback = 1;
+ } else {
+ unsigned long flagv;
+ int completed = 0;
+
+ dprintk((KERN_INFO "event_wait up\n"));
+ spin_lock_irqsave(&fib->event_lock, flagv);
+ if (fib->done == 2) {
+ fib->done = 1;
+ completed = 1;
+ } else {
+ fib->done = 1;
+ complete(&fib->event_wait);
+ }
+ spin_unlock_irqrestore(&fib->event_lock, flagv);
+
+ spin_lock_irqsave(&dev->manage_lock, mflags);
+ dev->management_fib_count--;
+ spin_unlock_irqrestore(&dev->manage_lock,
+ mflags);
+
+ FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
+ if (completed)
+ aac_fib_complete(fib);
+ }
+ }
+
+
+ if (start_callback) {
+ /*
+ * NOTE: we cannot touch the fib after this
+ * call, because it may have been deallocated.
+ */
+ if (likely(fib->callback && fib->callback_data)) {
+ fib->callback(fib->callback_data, fib);
+ } else {
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ }
+
+ }
+ return 0;
+ }
+}
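The handlers above all follow one locking discipline: pop entries with the queue lock held, then drop the lock around completion work that may free the entry or take other locks, and reacquire it before the next pop. A generic sketch of that pattern, with hypothetical names (my_queue, my_entry, drain_queue, complete_entry) not taken from the driver:

	#include <linux/spinlock.h>
	#include <linux/list.h>

	struct my_entry {
		struct list_head link;
	};

	struct my_queue {
		spinlock_t lock;
		struct list_head entries;
	};

	static void drain_queue(struct my_queue *q,
				void (*complete_entry)(struct my_entry *))
	{
		struct my_entry *e;
		unsigned long flags;

		spin_lock_irqsave(&q->lock, flags);
		while (!list_empty(&q->entries)) {
			e = list_first_entry(&q->entries, struct my_entry, link);
			list_del(&e->link);
			/* Drop the lock: the completion may free the entry. */
			spin_unlock_irqrestore(&q->lock, flags);
			complete_entry(e);
			spin_lock_irqsave(&q->lock, flags);
		}
		spin_unlock_irqrestore(&q->lock, flags);
	}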