[SCSI] bfa: fix comments for c files

This patch addresses the comments from Randy Dunlap (Randy.Dunlap@oracle.com)
regarding comment blocks that begin with "/**". The bfa driver comments
do not follow the kernel-doc convention, so we replace all
/** with /* and all **/ with */.

Signed-off-by: Jing Huang <huangj@brocade.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Authored by Jing Huang on 2010-10-18 17:17:23 -07:00; committed by James Bottomley
parent acdc79a60c
commit 5fbe25c7a6
18 changed files with 943 additions and 1003 deletions
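For context, kernel-doc treats any comment that opens with "/**" as structured documentation and expects a specific layout (function name, parameter descriptions, return value, and so on); plain narrative comments should open with "/*". A minimal, hypothetical illustration of the two forms (example_fn and its parameter are made up, not taken from the bfa sources):

/**
 * example_fn() - one-line summary, as kernel-doc expects after "/**"
 * @arg: description of the parameter
 *
 * Optional longer description in kernel-doc form.
 */

/*
 * Ordinary comment block -- the form this patch converts the bfa comments to,
 * since they carry no kernel-doc markup.
 */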


@ -21,11 +21,11 @@
BFA_TRC_FILE(HAL, CORE);
/**
/*
* BFA IOC FC related definitions
*/
/**
/*
* IOC local definitions
*/
#define BFA_IOCFC_TOV 5000 /* msecs */
@ -54,7 +54,7 @@ enum {
#define DEF_CFG_NUM_SBOOT_TGTS 16
#define DEF_CFG_NUM_SBOOT_LUNS 16
/**
/*
* forward declaration for IOC FC functions
*/
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
@ -63,7 +63,7 @@ static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
/**
/*
* BFA Interrupt handling functions
*/
static void
@ -86,7 +86,7 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid)
waitq = bfa_reqq(bfa, qid);
list_for_each_safe(qe, qen, waitq) {
/**
/*
* Callback only as long as there is room in request queue
*/
if (bfa_reqq_full(bfa, qid))
@ -104,7 +104,7 @@ bfa_msix_all(struct bfa_s *bfa, int vec)
bfa_intx(bfa);
}
/**
/*
* hal_intr_api
*/
bfa_boolean_t
@ -117,7 +117,7 @@ bfa_intx(struct bfa_s *bfa)
if (!intr)
return BFA_FALSE;
/**
/*
* RME completion queue interrupt
*/
qintr = intr & __HFN_INT_RME_MASK;
@ -131,7 +131,7 @@ bfa_intx(struct bfa_s *bfa)
if (!intr)
return BFA_TRUE;
/**
/*
* CPE completion queue interrupt
*/
qintr = intr & __HFN_INT_CPE_MASK;
@ -211,7 +211,7 @@ bfa_msix_reqq(struct bfa_s *bfa, int qid)
bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
/**
/*
* Resume any pending requests in the corresponding reqq.
*/
waitq = bfa_reqq(bfa, qid);
@ -259,14 +259,14 @@ bfa_msix_rspq(struct bfa_s *bfa, int qid)
}
}
/**
/*
* update CI
*/
bfa_rspq_ci(bfa, qid) = pi;
writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
mmiowb();
/**
/*
* Resume any pending requests in the corresponding reqq.
*/
waitq = bfa_reqq(bfa, qid);
@ -289,7 +289,7 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
if (intr) {
if (intr & __HFN_INT_LL_HALT) {
/**
/*
* If LL_HALT bit is set then FW Init Halt LL Port
* Register needs to be cleared as well so Interrupt
* Status Register will be cleared.
@ -300,7 +300,7 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
}
if (intr & __HFN_INT_ERR_PSS) {
/**
/*
* ERR_PSS bit needs to be cleared as well in case
* interrups are shared so driver's interrupt handler is
* still called eventhough it is already masked out.
@ -323,11 +323,11 @@ bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
bfa_isrs[mc] = isr_func;
}
/**
/*
* BFA IOC FC related functions
*/
/**
/*
* hal_ioc_pvt BFA IOC private functions
*/
@ -366,7 +366,7 @@ bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
BFA_CACHELINE_SZ);
}
/**
/*
* Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
*/
static void
@ -384,14 +384,14 @@ bfa_iocfc_send_cfg(void *bfa_arg)
bfa_iocfc_reset_queues(bfa);
/**
/*
* initialize IOC configuration info
*/
cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
cfg_info->num_cqs = cfg->fwcfg.num_cqs;
bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
/**
/*
* dma map REQ and RSP circular queues and shadow pointers
*/
for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
@ -410,7 +410,7 @@ bfa_iocfc_send_cfg(void *bfa_arg)
cpu_to_be16(cfg->drvcfg.num_rspq_elems);
}
/**
/*
* Enable interrupt coalescing if it is driver init path
* and not ioc disable/enable path.
*/
@ -419,7 +419,7 @@ bfa_iocfc_send_cfg(void *bfa_arg)
iocfc->cfgdone = BFA_FALSE;
/**
/*
* dma map IOC configuration itself
*/
bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
@ -442,7 +442,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
iocfc->cfg = *cfg;
/**
/*
* Initialize chip specific handlers.
*/
if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
@ -559,7 +559,7 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
}
}
/**
/*
* Start BFA submodules.
*/
static void
@ -573,7 +573,7 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
hal_mods[i]->start(bfa);
}
/**
/*
* Disable BFA submodules.
*/
static void
@ -623,7 +623,7 @@ bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
complete(&bfad->disable_comp);
}
/**
/*
* Update BFA configuration from firmware configuration.
*/
static void
@ -642,7 +642,7 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
iocfc->cfgdone = BFA_TRUE;
/**
/*
* Configuration is complete - initialize/start submodules
*/
bfa_fcport_init(bfa);
@ -665,7 +665,7 @@ bfa_iocfc_reset_queues(struct bfa_s *bfa)
}
}
/**
/*
* IOC enable request is complete
*/
static void
@ -684,7 +684,7 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
bfa_iocfc_send_cfg(bfa);
}
/**
/*
* IOC disable request is complete
*/
static void
@ -705,7 +705,7 @@ bfa_iocfc_disable_cbfn(void *bfa_arg)
}
}
/**
/*
* Notify sub-modules of hardware failure.
*/
static void
@ -723,7 +723,7 @@ bfa_iocfc_hbfail_cbfn(void *bfa_arg)
bfa);
}
/**
/*
* Actions on chip-reset completion.
*/
static void
@ -735,11 +735,11 @@ bfa_iocfc_reset_cbfn(void *bfa_arg)
bfa_isr_enable(bfa);
}
/**
/*
* hal_ioc_public
*/
/**
/*
* Query IOC memory requirement information.
*/
void
@ -754,7 +754,7 @@ bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
*km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
}
/**
/*
* Query IOC memory requirement information.
*/
void
@ -772,7 +772,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
ioc->trcmod = bfa->trcmod;
bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);
/**
/*
* Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
*/
if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
@ -790,7 +790,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}
/**
/*
* Query IOC memory requirement information.
*/
void
@ -799,7 +799,7 @@ bfa_iocfc_detach(struct bfa_s *bfa)
bfa_ioc_detach(&bfa->ioc);
}
/**
/*
* Query IOC memory requirement information.
*/
void
@ -809,7 +809,7 @@ bfa_iocfc_init(struct bfa_s *bfa)
bfa_ioc_enable(&bfa->ioc);
}
/**
/*
* IOC start called from bfa_start(). Called to start IOC operations
* at driver instantiation for this instance.
*/
@ -820,7 +820,7 @@ bfa_iocfc_start(struct bfa_s *bfa)
bfa_iocfc_start_submod(bfa);
}
/**
/*
* IOC stop called from bfa_stop(). Called only when driver is unloaded
* for this instance.
*/
@ -924,7 +924,7 @@ bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
}
/**
/*
* Enable IOC after it is disabled.
*/
void
@ -953,7 +953,7 @@ bfa_iocfc_is_operational(struct bfa_s *bfa)
return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
}
/**
/*
* Return boot target port wwns -- read from boot information in flash.
*/
void
@ -998,11 +998,11 @@ bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
return cfgrsp->pbc_cfg.nvports;
}
/**
/*
* hal_api
*/
/**
/*
* Use this function query the memory requirement of the BFA library.
* This function needs to be called before bfa_attach() to get the
* memory required of the BFA layer for a given driver configuration.
@ -1055,7 +1055,7 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
}
/**
/*
* Use this function to do attach the driver instance with the BFA
* library. This function will not trigger any HW initialization
* process (which will be done in bfa_init() call)
@ -1092,7 +1092,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
bfa_assert((cfg != NULL) && (meminfo != NULL));
/**
/*
* initialize all memory pointers for iterative allocation
*/
for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
@ -1109,7 +1109,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
bfa_com_port_attach(bfa, meminfo);
}
/**
/*
* Use this function to delete a BFA IOC. IOC should be stopped (by
* calling bfa_stop()) before this function call.
*
@ -1146,7 +1146,7 @@ bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog)
bfa->plog = plog;
}
/**
/*
* Initialize IOC.
*
* This function will return immediately, when the IOC initialization is
@ -1169,7 +1169,7 @@ bfa_init(struct bfa_s *bfa)
bfa_iocfc_init(bfa);
}
/**
/*
* Use this function initiate the IOC configuration setup. This function
* will return immediately.
*
@ -1183,7 +1183,7 @@ bfa_start(struct bfa_s *bfa)
bfa_iocfc_start(bfa);
}
/**
/*
* Use this function quiese the IOC. This function will return immediately,
* when the IOC is actually stopped, the bfad->comp will be set.
*
@ -1243,7 +1243,7 @@ bfa_attach_fcs(struct bfa_s *bfa)
bfa->fcs = BFA_TRUE;
}
/**
/*
* Periodic timer heart beat from driver
*/
void
@ -1252,7 +1252,7 @@ bfa_timer_tick(struct bfa_s *bfa)
bfa_timer_beat(&bfa->timer_mod);
}
/**
/*
* Return the list of PCI vendor/device id lists supported by this
* BFA instance.
*/
@ -1270,7 +1270,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
*pciids = __pciids;
}
/**
/*
* Use this function query the default struct bfa_iocfc_cfg_s value (compiled
* into BFA layer). The OS driver can then turn back and overwrite entries that
* have been configured by the user.
@ -1328,7 +1328,7 @@ bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr)
bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
}
/**
/*
* Retrieve firmware trace information on IOC failure.
*/
bfa_status_t
@ -1337,7 +1337,7 @@ bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen)
return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen);
}
/**
/*
* Clear the saved firmware trace information of an IOC.
*/
void
@ -1346,7 +1346,7 @@ bfa_debug_fwsave_clear(struct bfa_s *bfa)
bfa_ioc_debug_fwsave_clear(&bfa->ioc);
}
/**
/*
* Fetch firmware trace data.
*
* @param[in] bfa BFA instance
@ -1362,7 +1362,7 @@ bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen)
return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen);
}
/**
/*
* Dump firmware memory.
*
* @param[in] bfa BFA instance
@ -1378,7 +1378,7 @@ bfa_debug_fwcore(struct bfa_s *bfa, void *buf, u32 *offset, int *buflen)
{
return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen);
}
/**
/*
* Reset hw semaphore & usage cnt regs and initialize.
*/
void
@ -1388,7 +1388,7 @@ bfa_chip_reset(struct bfa_s *bfa)
bfa_ioc_pll_init(&bfa->ioc);
}
/**
/*
* Fetch firmware statistics data.
*
* @param[in] bfa BFA instance


@ -17,7 +17,7 @@
#include "bfa_modules.h"
/**
/*
* BFA module list terminated by NULL
*/
struct bfa_module_s *hal_mods[] = {
@ -31,7 +31,7 @@ struct bfa_module_s *hal_mods[] = {
NULL
};
/**
/*
* Message handlers for various modules.
*/
bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
@ -70,7 +70,7 @@ bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
};
/**
/*
* Message handlers for mailbox command classes
*/
bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {


@ -150,7 +150,7 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
fchs->s_id = (s_id);
fchs->ox_id = cpu_to_be16(ox_id);
/**
/*
* @todo no need to set ox_id for request
* no need to set rx_id for response
*/

File diff suppressed because it is too large


@ -15,7 +15,7 @@
* General Public License for more details.
*/
/**
/*
* bfa_fcs.c BFA FCS main
*/
@ -25,7 +25,7 @@
BFA_TRC_FILE(FCS, FCS);
/**
/*
* FCS sub-modules
*/
struct bfa_fcs_mod_s {
@ -43,7 +43,7 @@ static struct bfa_fcs_mod_s fcs_modules[] = {
bfa_fcs_fabric_modexit },
};
/**
/*
* fcs_api BFA FCS API
*/
@ -58,11 +58,11 @@ bfa_fcs_exit_comp(void *fcs_cbarg)
/**
/*
* fcs_api BFA FCS API
*/
/**
/*
* fcs attach -- called once to initialize data structures at driver attach time
*/
void
@ -86,7 +86,7 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
}
}
/**
/*
* fcs initialization, called once after bfa initialization is complete
*/
void
@ -110,7 +110,7 @@ bfa_fcs_init(struct bfa_fcs_s *fcs)
}
}
/**
/*
* Start FCS operations.
*/
void
@ -119,7 +119,7 @@ bfa_fcs_start(struct bfa_fcs_s *fcs)
bfa_fcs_fabric_modstart(fcs);
}
/**
/*
* brief
* FCS driver details initialization.
*
@ -138,7 +138,7 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
bfa_fcs_fabric_psymb_init(&fcs->fabric);
}
/**
/*
* brief
* FCS FDMI Driver Parameter Initialization
*
@ -154,7 +154,7 @@ bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable)
fcs->fdmi_enabled = fdmi_enable;
}
/**
/*
* brief
* FCS instance cleanup and exit.
*
@ -196,7 +196,7 @@ bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs)
bfa_wc_down(&fcs->wc);
}
/**
/*
* Fabric module implementation.
*/
@ -232,11 +232,11 @@ static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rspfchs);
/**
/*
* fcs_fabric_sm fabric state machine functions
*/
/**
/*
* Fabric state machine events
*/
enum bfa_fcs_fabric_event {
@ -286,7 +286,7 @@ static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
enum bfa_fcs_fabric_event event);
static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
enum bfa_fcs_fabric_event event);
/**
/*
* Beginning state before fabric creation.
*/
static void
@ -312,7 +312,7 @@ bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
}
}
/**
/*
* Beginning state before fabric creation.
*/
static void
@ -345,7 +345,7 @@ bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
}
}
/**
/*
* Link is down, awaiting LINK UP event from port. This is also the
* first state at fabric creation.
*/
@ -375,7 +375,7 @@ bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
}
}
/**
/*
* FLOGI is in progress, awaiting FLOGI reply.
*/
static void
@ -468,7 +468,7 @@ bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
}
}
/**
/*
* Authentication is in progress, awaiting authentication results.
*/
static void
@ -508,7 +508,7 @@ bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
}
}
/**
/*
* Authentication failed
*/
static void
@ -534,7 +534,7 @@ bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
}
}
/**
/*
* Port is in loopback mode.
*/
static void
@ -560,7 +560,7 @@ bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
}
}
/**
/*
* There is no attached fabric - private loop or NPort-to-NPort topology.
*/
static void
@ -593,7 +593,7 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
}
}
/**
/*
* Fabric is online - normal operating state.
*/
static void
@ -628,7 +628,7 @@ bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
}
}
/**
/*
* Exchanging virtual fabric parameters.
*/
static void
@ -652,7 +652,7 @@ bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
}
}
/**
/*
* EVFP exchange complete and VFT tagging is enabled.
*/
static void
@ -663,7 +663,7 @@ bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
bfa_trc(fabric->fcs, event);
}
/**
/*
* Port is isolated after EVFP exchange due to VF_ID mismatch (N and F).
*/
static void
@ -684,7 +684,7 @@ bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
fabric->event_arg.swp_vfid);
}
/**
/*
* Fabric is being deleted, awaiting vport delete completions.
*/
static void
@ -714,7 +714,7 @@ bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
/**
/*
* fcs_fabric_private fabric private functions
*/
@ -728,7 +728,7 @@ bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric)
port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc);
}
/**
/*
* Port Symbolic Name Creation for base port.
*/
void
@ -789,7 +789,7 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
}
/**
/*
* bfa lps login completion callback
*/
void
@ -867,7 +867,7 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
bfa_trc(fabric->fcs, fabric->is_npiv);
bfa_trc(fabric->fcs, fabric->is_auth);
}
/**
/*
* Allocate and send FLOGI.
*/
static void
@ -897,7 +897,7 @@ bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric)
bfa_fcs_fabric_set_opertype(fabric);
fabric->stats.fabric_onlines++;
/**
/*
* notify online event to base and then virtual ports
*/
bfa_fcs_lport_online(&fabric->bport);
@ -917,7 +917,7 @@ bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric)
bfa_trc(fabric->fcs, fabric->fabric_name);
fabric->stats.fabric_offlines++;
/**
/*
* notify offline event first to vports and then base port.
*/
list_for_each_safe(qe, qen, &fabric->vport_q) {
@ -939,7 +939,7 @@ bfa_fcs_fabric_delay(void *cbarg)
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED);
}
/**
/*
* Delete all vports and wait for vport delete completions.
*/
static void
@ -965,11 +965,11 @@ bfa_fcs_fabric_delete_comp(void *cbarg)
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP);
}
/**
/*
* fcs_fabric_public fabric public functions
*/
/**
/*
* Attach time initialization.
*/
void
@ -980,7 +980,7 @@ bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
fabric = &fcs->fabric;
memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
/**
/*
* Initialize base fabric.
*/
fabric->fcs = fcs;
@ -989,7 +989,7 @@ bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
fabric->lps = bfa_lps_alloc(fcs->bfa);
bfa_assert(fabric->lps);
/**
/*
* Initialize fabric delete completion handler. Fabric deletion is
* complete when the last vport delete is complete.
*/
@ -1007,7 +1007,7 @@ bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
bfa_trc(fcs, 0);
}
/**
/*
* Module cleanup
*/
void
@ -1017,7 +1017,7 @@ bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
bfa_trc(fcs, 0);
/**
/*
* Cleanup base fabric.
*/
fabric = &fcs->fabric;
@ -1025,7 +1025,7 @@ bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE);
}
/**
/*
* Fabric module start -- kick starts FCS actions
*/
void
@ -1038,7 +1038,7 @@ bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs)
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START);
}
/**
/*
* Suspend fabric activity as part of driver suspend.
*/
void
@ -1064,7 +1064,7 @@ bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric)
return fabric->oper_type;
}
/**
/*
* Link up notification from BFA physical port module.
*/
void
@ -1074,7 +1074,7 @@ bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric)
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP);
}
/**
/*
* Link down notification from BFA physical port module.
*/
void
@ -1084,7 +1084,7 @@ bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric)
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN);
}
/**
/*
* A child vport is being created in the fabric.
*
* Call from vport module at vport creation. A list of base port and vports
@ -1099,7 +1099,7 @@ void
bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
struct bfa_fcs_vport_s *vport)
{
/**
/*
* - add vport to fabric's vport_q
*/
bfa_trc(fabric->fcs, fabric->vf_id);
@ -1109,7 +1109,7 @@ bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
bfa_wc_up(&fabric->wc);
}
/**
/*
* A child vport is being deleted from fabric.
*
* Vport is being deleted.
@ -1123,7 +1123,7 @@ bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
bfa_wc_down(&fabric->wc);
}
/**
/*
* Base port is deleted.
*/
void
@ -1133,7 +1133,7 @@ bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric)
}
/**
/*
* Check if fabric is online.
*
* param[in] fabric - Fabric instance. This can be a base fabric or vf.
@ -1146,7 +1146,7 @@ bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric)
return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online);
}
/**
/*
* brief
*
*/
@ -1158,7 +1158,7 @@ bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, struct bfa_fcs_s *fcs,
return BFA_STATUS_OK;
}
/**
/*
* Lookup for a vport withing a fabric given its pwwn
*/
struct bfa_fcs_vport_s *
@ -1176,7 +1176,7 @@ bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn)
return NULL;
}
/**
/*
* In a given fabric, return the number of lports.
*
* param[in] fabric - Fabric instance. This can be a base fabric or vf.
@ -1214,7 +1214,7 @@ bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric)
return oui;
}
/**
/*
* Unsolicited frame receive handling.
*/
void
@ -1230,7 +1230,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
bfa_trc(fabric->fcs, len);
bfa_trc(fabric->fcs, pid);
/**
/*
* Look for our own FLOGI frames being looped back. This means an
* external loopback cable is in place. Our own FLOGI frames are
* sometimes looped back when switch port gets temporarily bypassed.
@ -1242,7 +1242,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
return;
}
/**
/*
* FLOGI/EVFP exchanges should be consumed by base fabric.
*/
if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) {
@ -1252,7 +1252,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
}
if (fabric->bport.pid == pid) {
/**
/*
* All authentication frames should be routed to auth
*/
bfa_trc(fabric->fcs, els_cmd->els_code);
@ -1266,7 +1266,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
return;
}
/**
/*
* look for a matching local port ID
*/
list_for_each(qe, &fabric->vport_q) {
@ -1280,7 +1280,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
}
/**
/*
* Unsolicited frames to be processed by fabric.
*/
static void
@ -1304,7 +1304,7 @@ bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
}
}
/**
/*
* Process incoming FLOGI
*/
static void
@ -1351,7 +1351,7 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
struct fchs_s fchs;
fcxp = bfa_fcs_fcxp_alloc(fabric->fcs);
/**
/*
* Do not expect this failure -- expect remote node to retry
*/
if (!fcxp)
@ -1370,7 +1370,7 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
FC_MAX_PDUSZ, 0);
}
/**
/*
* Flogi Acc completion callback.
*/
static void
@ -1417,7 +1417,7 @@ bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
}
}
/**
/*
* Returns FCS vf structure for a given vf_id.
*
* param[in] vf_id - VF_ID
@ -1435,7 +1435,7 @@ bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
return NULL;
}
/**
/*
* BFA FCS PPORT ( physical port)
*/
static void
@ -1465,11 +1465,11 @@ bfa_fcs_port_attach(struct bfa_fcs_s *fcs)
bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs);
}
/**
/*
* BFA FCS UF ( Unsolicited Frames)
*/
/**
/*
* BFA callback for unsolicited frame receive handler.
*
* @param[in] cbarg callback arg for receive handler
@ -1486,7 +1486,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
struct fc_vft_s *vft;
struct bfa_fcs_fabric_s *fabric;
/**
/*
* check for VFT header
*/
if (fchs->routing == FC_RTG_EXT_HDR &&
@ -1498,7 +1498,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
else
fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id);
/**
/*
* drop frame if vfid is unknown
*/
if (!fabric) {
@ -1508,7 +1508,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
return;
}
/**
/*
* skip vft header
*/
fchs = (struct fchs_s *) (vft + 1);


@ -15,7 +15,7 @@
* General Public License for more details.
*/
/**
/*
* fcpim.c - FCP initiator mode i-t nexus state machine
*/
@ -38,7 +38,7 @@ static void bfa_fcs_itnim_prli_response(void *fcsarg,
bfa_status_t req_status, u32 rsp_len,
u32 resid_len, struct fchs_s *rsp_fchs);
/**
/*
* fcs_itnim_sm FCS itnim state machine events
*/
@ -84,7 +84,7 @@ static struct bfa_sm_table_s itnim_sm_table[] = {
{BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR},
};
/**
/*
* fcs_itnim_sm FCS itnim state machine
*/
@ -494,11 +494,11 @@ bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim)
/**
/*
* itnim_public FCS ITNIM public interfaces
*/
/**
/*
* Called by rport when a new rport is created.
*
* @param[in] rport - remote port.
@ -554,7 +554,7 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
return itnim;
}
/**
/*
* Called by rport to delete the instance of FCPIM.
*
* @param[in] rport - remote port.
@ -566,7 +566,7 @@ bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim)
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE);
}
/**
/*
* Notification from rport that PLOGI is complete to initiate FC-4 session.
*/
void
@ -586,7 +586,7 @@ bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim)
}
}
/**
/*
* Called by rport to handle a remote device offline.
*/
void
@ -596,7 +596,7 @@ bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim)
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE);
}
/**
/*
* Called by rport when remote port is known to be an initiator from
* PRLI received.
*/
@ -608,7 +608,7 @@ bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim)
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
}
/**
/*
* Called by rport to check if the itnim is online.
*/
bfa_status_t
@ -625,7 +625,7 @@ bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim)
}
}
/**
/*
* BFA completion callback for bfa_itnim_online().
*/
void
@ -637,7 +637,7 @@ bfa_cb_itnim_online(void *cbarg)
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE);
}
/**
/*
* BFA completion callback for bfa_itnim_offline().
*/
void
@ -649,7 +649,7 @@ bfa_cb_itnim_offline(void *cb_arg)
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE);
}
/**
/*
* Mark the beginning of PATH TOV handling. IO completion callbacks
* are still pending.
*/
@ -661,7 +661,7 @@ bfa_cb_itnim_tov_begin(void *cb_arg)
bfa_trc(itnim->fcs, itnim->rport->pwwn);
}
/**
/*
* Mark the end of PATH TOV handling. All pending IOs are already cleaned up.
*/
void
@ -674,7 +674,7 @@ bfa_cb_itnim_tov(void *cb_arg)
itnim_drv->state = ITNIM_STATE_TIMEOUT;
}
/**
/*
* BFA notification to FCS/driver for second level error recovery.
*
* Atleast one I/O request has timedout and target is unresponsive to

File diff suppressed because it is too large


@ -15,7 +15,7 @@
* General Public License for more details.
*/
/**
/*
* rport.c Remote port implementation.
*/
@ -75,7 +75,7 @@ static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport,
static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
struct fchs_s *rx_fchs, u16 len);
static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport);
/**
/*
* fcs_rport_sm FCS rport state machine events
*/
@ -172,7 +172,7 @@ static struct bfa_sm_table_s rport_sm_table[] = {
{BFA_SM(bfa_fcs_rport_sm_nsdisc_sent), BFA_RPORT_NSDISC},
};
/**
/*
* Beginning state.
*/
static void
@ -210,7 +210,7 @@ bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
}
}
/**
/*
* PLOGI is being sent.
*/
static void
@ -262,7 +262,7 @@ bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
}
}
/**
/*
* PLOGI is being sent.
*/
static void
@ -287,7 +287,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_PLOGI_RCVD:
case RPSM_EVENT_SCN:
/**
/*
* Ignore, SCN is possibly online notification.
*/
break;
@ -309,7 +309,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_HCB_OFFLINE:
/**
/*
* Ignore BFA callback, on a PLOGI receive we call bfa offline.
*/
break;
@ -319,7 +319,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
}
}
/**
/*
* PLOGI is sent.
*/
static void
@ -380,7 +380,7 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
}
}
/**
/*
* PLOGI is sent.
*/
static void
@ -475,7 +475,7 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
}
}
/**
/*
* PLOGI is complete. Awaiting BFA rport online callback. FC-4s
* are offline.
*/
@ -519,7 +519,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_SCN:
/**
/*
* @todo
* Ignore SCN - PLOGI just completed, FC-4 login should detect
* device failures.
@ -531,7 +531,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
}
}
/**
/*
* Rport is ONLINE. FC-4s active.
*/
static void
@ -580,7 +580,7 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
}
}
/**
/*
* An SCN event is received in ONLINE state. NS query is being sent
* prior to ADISC authentication with rport. FC-4s are paused.
*/
@ -604,7 +604,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_SCN:
/**
/*
* ignore SCN, wait for response to query itself
*/
break;
@ -638,7 +638,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
}
}
/**
/*
* An SCN event is received in ONLINE state. NS query is sent to rport.
* FC-4s are paused.
*/
@ -697,7 +697,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
}
}
/**
/*
* An SCN event is received in ONLINE state. ADISC is being sent for
* authenticating with rport. FC-4s are paused.
*/
@ -748,7 +748,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
}
}
/**
/*
* An SCN event is received in ONLINE state. ADISC is to rport.
* FC-4s are paused.
*/
@ -765,7 +765,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
break;
case RPSM_EVENT_PLOGI_RCVD:
/**
/*
* Too complex to cleanup FC-4 & rport and then acc to PLOGI.
* At least go offline when a PLOGI is received.
*/
@ -787,7 +787,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
break;
case RPSM_EVENT_SCN:
/**
/*
* already processing RSCN
*/
break;
@ -810,7 +810,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
}
}
/**
/*
* Rport has sent LOGO. Awaiting FC-4 offline completion callback.
*/
static void
@ -841,7 +841,7 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
}
}
/**
/*
* LOGO needs to be sent to rport. Awaiting FC-4 offline completion
* callback.
*/
@ -864,7 +864,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
}
}
/**
/*
* Rport is going offline. Awaiting FC-4 offline completion callback.
*/
static void
@ -886,7 +886,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
case RPSM_EVENT_ADDRESS_CHANGE:
/**
/*
* rport is already going offline.
* SCN - ignore and wait till transitioning to offline state
*/
@ -901,7 +901,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
}
}
/**
/*
* Rport is offline. FC-4s are offline. Awaiting BFA rport offline
* callback.
*/
@ -945,7 +945,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_SCN:
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
/**
/*
* Ignore, already offline.
*/
break;
@ -955,7 +955,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
}
}
/**
/*
* Rport is offline. FC-4s are offline. Awaiting BFA rport offline
* callback to send LOGO accept.
*/
@ -1009,7 +1009,7 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
/**
/*
* Ignore - already processing a LOGO.
*/
break;
@ -1019,7 +1019,7 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
}
}
/**
/*
* Rport is being deleted. FC-4s are offline.
* Awaiting BFA rport offline
* callback to send LOGO.
@ -1048,7 +1048,7 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
}
}
/**
/*
* Rport is being deleted. FC-4s are offline. LOGO is being sent.
*/
static void
@ -1082,7 +1082,7 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
}
}
/**
/*
* Rport is offline. FC-4s are offline. BFA rport is offline.
* Timer active to delete stale rport.
*/
@ -1142,7 +1142,7 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
}
}
/**
/*
* Rport address has changed. Nameserver discovery request is being sent.
*/
static void
@ -1199,7 +1199,7 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
}
}
/**
/*
* Nameserver discovery failed. Waiting for timeout to retry.
*/
static void
@ -1263,7 +1263,7 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
}
}
/**
/*
* Rport address has changed. Nameserver discovery request is sent.
*/
static void
@ -1329,13 +1329,13 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
bfa_fcs_rport_send_prlo_acc(rport);
break;
case RPSM_EVENT_SCN:
/**
/*
* ignore, wait for NS query response
*/
break;
case RPSM_EVENT_LOGO_RCVD:
/**
/*
* Not logged-in yet. Accept LOGO.
*/
bfa_fcs_rport_send_logo_acc(rport);
@ -1354,7 +1354,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
/**
/*
* fcs_rport_private FCS RPORT provate functions
*/
@ -1415,7 +1415,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
plogi_rsp = (struct fc_logi_s *) BFA_FCXP_RSP_PLD(fcxp);
/**
/*
* Check for failure first.
*/
if (plogi_rsp->els_cmd.els_code != FC_ELS_ACC) {
@ -1436,7 +1436,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
return;
}
/**
/*
* PLOGI is complete. Make sure this device is not one of the known
* device with a new FC port address.
*/
@ -1468,7 +1468,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
}
}
/**
/*
* Normal login path -- no evil twins.
*/
rport->stats.plogi_accs++;
@ -1722,7 +1722,7 @@ bfa_fcs_rport_gpnid_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
}
}
/**
/*
* Called to send a logout to the rport.
*/
static void
@ -1759,7 +1759,7 @@ bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
}
/**
/*
* Send ACC for a LOGO received.
*/
static void
@ -1788,7 +1788,7 @@ bfa_fcs_rport_send_logo_acc(void *rport_cbarg)
FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
}
/**
/*
* brief
* This routine will be called by bfa_timer on timer timeouts.
*
@ -1961,7 +1961,7 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
struct bfa_fcs_rport_s *rport;
struct bfad_rport_s *rport_drv;
/**
/*
* allocate rport
*/
if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv)
@ -1979,7 +1979,7 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
rport->pid = rpid;
rport->pwwn = pwwn;
/**
/*
* allocate BFA rport
*/
rport->bfa_rport = bfa_rport_create(port->fcs->bfa, rport);
@ -1989,7 +1989,7 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
return NULL;
}
/**
/*
* allocate FC-4s
*/
bfa_assert(bfa_fcs_lport_is_initiator(port));
@ -2021,7 +2021,7 @@ bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
{
struct bfa_fcs_lport_s *port = rport->port;
/**
/*
* - delete FC-4s
* - delete BFA rport
* - remove from queue of rports
@ -2093,7 +2093,7 @@ bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
}
}
/**
/*
* Update rport parameters from PLOGI or PLOGI accept.
*/
static void
@ -2101,14 +2101,14 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
{
bfa_fcs_lport_t *port = rport->port;
/**
/*
* - port name
* - node name
*/
rport->pwwn = plogi->port_name;
rport->nwwn = plogi->node_name;
/**
/*
* - class of service
*/
rport->fc_cos = 0;
@ -2118,7 +2118,7 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
if (plogi->class2.class_valid)
rport->fc_cos |= FC_CLASS_2;
/**
/*
* - CISC
* - MAX receive frame size
*/
@ -2127,7 +2127,7 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
bfa_trc(port->fcs, port->fabric->bb_credit);
/**
/*
* Direct Attach P2P mode :
* This is to handle a bug (233476) in IBM targets in Direct Attach
* Mode. Basically, in FLOGI Accept the target would have
@ -2148,7 +2148,7 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
}
/**
/*
* Called to handle LOGO received from an existing remote port.
*/
static void
@ -2164,11 +2164,11 @@ bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs)
/**
/*
* fcs_rport_public FCS rport public interfaces
*/
/**
/*
* Called by bport/vport to create a remote port instance for a discovered
* remote device.
*
@ -2191,7 +2191,7 @@ bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, u32 rpid)
return rport;
}
/**
/*
* Called to create a rport for which only the wwn is known.
*
* @param[in] port - base port
@ -2211,7 +2211,7 @@ bfa_fcs_rport_create_by_wwn(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC);
return rport;
}
/**
/*
* Called by bport in private loop topology to indicate that a
* rport has been discovered and plogi has been completed.
*
@ -2233,7 +2233,7 @@ bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *fchs,
bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP);
}
/**
/*
* Called by bport/vport to handle PLOGI received from a new remote port.
* If an existing rport does a plogi, it will be handled separately.
*/
@ -2272,7 +2272,7 @@ wwn_compare(wwn_t wwn1, wwn_t wwn2)
return 0;
}
/**
/*
* Called by bport/vport to handle PLOGI received from an existing
* remote port.
*/
@ -2280,7 +2280,7 @@ void
bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
struct fc_logi_s *plogi)
{
/**
/*
* @todo Handle P2P and initiator-initiator.
*/
@ -2289,7 +2289,7 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
rport->reply_oxid = rx_fchs->ox_id;
bfa_trc(rport->fcs, rport->reply_oxid);
/**
/*
* In Switched fabric topology,
* PLOGI to each other. If our pwwn is smaller, ignore it,
* if it is not a well known address.
@ -2307,7 +2307,7 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
}
/**
/*
* Called by bport/vport to delete a remote port instance.
*
* Rport delete is called under the following conditions:
@ -2321,7 +2321,7 @@ bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport)
bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
}
/**
/*
* Called by bport/vport to when a target goes offline.
*
*/
@ -2331,7 +2331,7 @@ bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport)
bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
}
/**
/*
* Called by bport in n2n when a target (attached port) becomes online.
*
*/
@ -2340,7 +2340,7 @@ bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport)
{
bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
}
/**
/*
* Called by bport/vport to notify SCN for the remote port
*/
void
@ -2350,7 +2350,7 @@ bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport)
bfa_sm_send_event(rport, RPSM_EVENT_SCN);
}
/**
/*
* Called by fcpim to notify that the ITN cleanup is done.
*/
void
@ -2359,7 +2359,7 @@ bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport)
bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
}
/**
/*
* Called by fcptm to notify that the ITN cleanup is done.
*/
void
@ -2368,7 +2368,7 @@ bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport)
bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
}
/**
/*
* brief
* This routine BFA callback for bfa_rport_online() call.
*
@ -2391,7 +2391,7 @@ bfa_cb_rport_online(void *cbarg)
bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE);
}
/**
/*
* brief
* This routine BFA callback for bfa_rport_offline() call.
*
@ -2413,7 +2413,7 @@ bfa_cb_rport_offline(void *cbarg)
bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE);
}
/**
/*
* brief
* This routine is a static BFA callback when there is a QoS flow_id
* change notification
@ -2437,7 +2437,7 @@ bfa_cb_rport_qos_scn_flowid(void *cbarg,
bfa_trc(rport->fcs, rport->pwwn);
}
/**
/*
* brief
* This routine is a static BFA callback when there is a QoS priority
* change notification
@ -2461,7 +2461,7 @@ bfa_cb_rport_qos_scn_prio(void *cbarg,
bfa_trc(rport->fcs, rport->pwwn);
}
/**
/*
* Called to process any unsolicted frames from this remote port
*/
void
@ -2470,7 +2470,7 @@ bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport)
bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
}
/**
/*
* Called to process any unsolicted frames from this remote port
*/
void
@ -2577,7 +2577,7 @@ bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
FC_MAX_PDUSZ, 0);
}
/**
/*
* Return state of rport.
*/
int
@ -2586,7 +2586,7 @@ bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport)
return bfa_sm_to_state(rport_sm_table, rport->sm);
}
/**
/*
* brief
* Called by the Driver to set rport delete/ageout timeout
*
@ -2613,15 +2613,15 @@ bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id)
/**
/*
* Remote port implementation.
*/
/**
/*
* fcs_rport_api FCS rport API.
*/
/**
/*
* Direct API to add a target by port wwn. This interface is used, for
* example, by bios when target pwwn is known from boot lun configuration.
*/
@ -2634,7 +2634,7 @@ bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
return BFA_STATUS_OK;
}
/**
/*
* Direct API to remove a target and its associated resources. This
* interface is used, for example, by driver to remove target
* ports from the target list for a VM.
@ -2663,7 +2663,7 @@ bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport_in)
}
/**
/*
* Remote device status for display/debug.
*/
void
@ -2704,7 +2704,7 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
}
}
/**
/*
* Per remote device statistics.
*/
void
@ -2767,7 +2767,7 @@ bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport, bfa_port_speed_t speed)
/**
/*
* Remote port features (RPF) implementation.
*/
@ -2786,7 +2786,7 @@ static void bfa_fcs_rpf_rpsc2_response(void *fcsarg,
static void bfa_fcs_rpf_timeout(void *arg);
/**
/*
* fcs_rport_ftrs_sm FCS rport state machine events
*/
@ -2981,7 +2981,7 @@ bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
bfa_sm_fault(rport->fcs, event);
}
}
/**
/*
* Called when Rport is created.
*/
void
@ -2995,7 +2995,7 @@ bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport)
bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit);
}
/**
/*
* Called when Rport becomes online
*/
void
@ -3010,7 +3010,7 @@ bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport)
bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE);
}
/**
/*
* Called when Rport becomes offline
*/
void


@ -102,7 +102,7 @@ bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
*num_vecs = __HFN_NUMINTS;
}
/**
/*
* No special setup required for crossbow -- vector assignments are implicit.
*/
void
@ -129,7 +129,7 @@ bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
bfa->msix.handler[i] = bfa_msix_lpu_err;
}
/**
/*
* Crossbow -- dummy, interrupts are masked
*/
void
@ -142,7 +142,7 @@ bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
{
}
/**
/*
* No special enable/disable -- vector assignments are implicit.
*/
void


@ -39,7 +39,7 @@ bfa_hwct_msix_lpu_err_set(struct bfa_s *bfa, bfa_boolean_t msix, int vec)
writel(0, kva + __ct_msix_err_vec_reg[fn]);
}
/**
/*
* Dummy interrupt handler for handling spurious interrupt during chip-reinit.
*/
static void
@ -110,7 +110,7 @@ bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
*num_vecs = BFA_MSIX_CT_MAX;
}
/**
/*
* Setup MSI-X vector for catapult
*/
void
@ -156,7 +156,7 @@ bfa_hwct_msix_uninstall(struct bfa_s *bfa)
bfa->msix.handler[i] = bfa_hwct_msix_dummy;
}
/**
/*
* Enable MSI-X vectors
*/
void


@ -23,7 +23,7 @@
BFA_TRC_FILE(CNA, IOC);
/**
/*
* IOC local definitions
*/
#define BFA_IOC_TOV 3000 /* msecs */
@ -49,7 +49,7 @@ BFA_TRC_FILE(CNA, IOC);
BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
/**
/*
* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
*/
@ -101,11 +101,11 @@ static void bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
/**
/*
* hal_ioc_sm
*/
/**
/*
* IOC state machine definitions/declarations
*/
enum ioc_event {
@ -144,7 +144,7 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
/**
/*
* IOCPF state machine definitions/declarations
*/
@ -174,7 +174,7 @@ static void bfa_iocpf_stop(struct bfa_ioc_s *ioc);
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
/**
/*
* IOCPF state machine events
*/
enum iocpf_event {
@ -191,7 +191,7 @@ enum iocpf_event {
IOCPF_E_TIMEOUT = 11, /* f/w response timeout */
};
/**
/*
* IOCPF states
*/
enum bfa_iocpf_state {
@ -232,11 +232,11 @@ static struct bfa_sm_table_s iocpf_sm_table[] = {
{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
/**
/*
* IOC State Machine
*/
/**
/*
* Beginning state. IOC uninit state.
*/
@ -245,7 +245,7 @@ bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}
/**
/*
* IOC is in uninit state.
*/
static void
@ -262,7 +262,7 @@ bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
bfa_sm_fault(ioc, event);
}
}
/**
/*
* Reset entry actions -- initialize state machine
*/
static void
@ -271,7 +271,7 @@ bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
/**
/*
* IOC is in reset state.
*/
static void
@ -304,7 +304,7 @@ bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
bfa_iocpf_enable(ioc);
}
/**
/*
* Host IOC function is being enabled, awaiting response from firmware.
* Semaphore is acquired.
*/
@ -352,7 +352,7 @@ bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
bfa_ioc_send_getattr(ioc);
}
/**
/*
* IOC configuration in progress. Timer is active.
*/
static void
@ -447,7 +447,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n");
}
/**
/*
* IOC is being disabled
*/
static void
@ -474,7 +474,7 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
}
}
/**
/*
* IOC disable completion entry.
*/
static void
@ -514,7 +514,7 @@ bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}
/**
/*
* Hardware initialization failed.
*/
static void
@ -528,7 +528,7 @@ bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
break;
case IOC_E_FAILED:
/**
/*
* Initialization failure during iocpf init retry.
*/
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
@ -556,7 +556,7 @@ bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
struct bfa_ioc_hbfail_notify_s *notify;
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
/**
/*
* Notify driver and common modules registered for notification.
*/
ioc->cbfn->hbfail_cbfn(ioc->bfa);
@ -569,7 +569,7 @@ bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
"Heart Beat of IOC has failed\n");
}
/**
/*
* IOC failure.
*/
static void
@ -580,7 +580,7 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
switch (event) {
case IOC_E_FAILED:
/**
/*
* Initialization failure during iocpf recovery.
* !!! Fall through !!!
*/
@ -608,12 +608,12 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
/**
/*
* IOCPF State Machine
*/
/**
/*
* Reset entry actions -- initialize state machine
*/
static void
@ -623,7 +623,7 @@ bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
iocpf->auto_recover = bfa_auto_recover;
}
/**
/*
* Beginning state. IOC is in reset state.
*/
static void
@ -646,7 +646,7 @@ bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
}
}
/**
/*
* Semaphore should be acquired for version check.
*/
static void
@ -655,7 +655,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
bfa_ioc_hw_sem_get(iocpf->ioc);
}
/**
/*
* Awaiting h/w semaphore to continue with version check.
*/
static void
@ -692,7 +692,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
}
}
/**
/*
* Notify enable completion callback.
*/
static void
@ -708,7 +708,7 @@ bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
bfa_iocpf_timer_start(iocpf->ioc);
}
/**
/*
* Awaiting firmware version match.
*/
static void
@ -739,7 +739,7 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
}
}
/**
/*
* Request for semaphore.
*/
static void
@ -748,7 +748,7 @@ bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
bfa_ioc_hw_sem_get(iocpf->ioc);
}
/**
/*
* Awaiting semaphore for h/w initialzation.
*/
static void
@ -782,7 +782,7 @@ bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
bfa_ioc_reset(iocpf->ioc, BFA_FALSE);
}
/**
/*
* Hardware is being initialized. Interrupts are enabled.
* Holding hardware semaphore lock.
*/
@ -839,7 +839,7 @@ bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
bfa_ioc_send_enable(iocpf->ioc);
}
/**
/*
* Host IOC function is being enabled, awaiting response from firmware.
* Semaphore is acquired.
*/
@ -943,7 +943,7 @@ bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
bfa_ioc_send_disable(iocpf->ioc);
}
/**
/*
* IOC is being disabled
*/
static void
@ -979,7 +979,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
}
}
/**
/*
* IOC disable completion entry.
*/
static void
@ -1017,7 +1017,7 @@ bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
bfa_iocpf_timer_start(iocpf->ioc);
}
/**
/*
* Hardware initialization failed.
*/
static void
@ -1052,18 +1052,18 @@ bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
/**
/*
* Mark IOC as failed in hardware and stop firmware.
*/
bfa_ioc_lpu_stop(iocpf->ioc);
writel(BFI_IOC_FAIL, iocpf->ioc->ioc_regs.ioc_fwstate);
/**
/*
* Notify other functions on HB failure.
*/
bfa_ioc_notify_hbfail(iocpf->ioc);
/**
/*
* Flush any queued up mailbox requests.
*/
bfa_ioc_mbox_hbfail(iocpf->ioc);
@ -1072,7 +1072,7 @@ bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
bfa_iocpf_recovery_timer_start(iocpf->ioc);
}
/**
/*
* IOC is in failed state.
*/
static void
@ -1100,7 +1100,7 @@ bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
/**
/*
* hal_ioc_pvt BFA IOC private functions
*/
@ -1112,7 +1112,7 @@ bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
ioc->cbfn->disable_cbfn(ioc->bfa);
/**
/*
* Notify common modules registered for notification.
*/
list_for_each(qe, &ioc->hb_notify_q) {
@ -1154,7 +1154,7 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
u32 r32;
/**
/*
* First read to the semaphore register will return 0, subsequent reads
* will return 1. Semaphore is released by writing 1 to the register
*/
@ -1179,7 +1179,7 @@ bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
bfa_sem_timer_stop(ioc);
}
/**
/*
* Initialize LPU local memory (aka secondary memory / SRAM)
*/
static void
@ -1199,7 +1199,7 @@ bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
/**
/*
* wait for memory initialization to be complete
*/
i = 0;
@ -1208,7 +1208,7 @@ bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
i++;
} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
/**
/*
* If memory initialization is not successful, IOC timeout will catch
* such failures.
*/
@ -1224,7 +1224,7 @@ bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
u32 pss_ctl;
/**
/*
* Take processor out of reset.
*/
pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
@ -1238,7 +1238,7 @@ bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
u32 pss_ctl;
/**
/*
* Put processors in reset.
*/
pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
@ -1247,7 +1247,7 @@ bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/**
/*
* Get driver and firmware versions.
*/
void
@ -1270,7 +1270,7 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
}
}
/**
/*
* Returns TRUE if same.
*/
bfa_boolean_t
@ -1295,7 +1295,7 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
return BFA_TRUE;
}
/**
/*
* Return true if current running version is valid. Firmware signature and
* execution context (driver/bios) must match.
*/
@ -1304,7 +1304,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
/**
/*
* If bios/efi boot (flash based) -- return true
*/
if (bfa_ioc_is_bios_optrom(ioc))
@ -1329,7 +1329,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}
/**
/*
* Conditionally flush any pending message from firmware at start.
*/
static void
@ -1361,7 +1361,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
boot_type = BFI_BOOT_TYPE_NORMAL;
boot_env = BFI_BOOT_LOADER_OS;
/**
/*
* Flash based firmware boot BIOS env.
*/
if (bfa_ioc_is_bios_optrom(ioc)) {
@ -1369,7 +1369,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
boot_env = BFI_BOOT_LOADER_BIOS;
}
/**
/*
* Flash based firmware boot UEFI env.
*/
if (bfa_ioc_is_uefi(ioc)) {
@ -1377,7 +1377,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
boot_env = BFI_BOOT_LOADER_UEFI;
}
/**
/*
* check if firmware is valid
*/
fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
@ -1388,7 +1388,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
return;
}
/**
/*
* If hardware initialization is in progress (initialized by other IOC),
* just wait for an initialization completion interrupt.
*/
@ -1397,7 +1397,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
return;
}
/**
/*
* If IOC function is disabled and firmware version is same,
* just re-enable IOC.
*
@ -1408,7 +1408,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
if (ioc_fwstate == BFI_IOC_DISABLED ||
(!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
/**
/*
* When using MSI-X any pending firmware ready event should
* be flushed. Otherwise MSI-X interrupts are not delivered.
*/
@ -1418,7 +1418,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
return;
}
/**
/*
* Initialize the h/w for any other states.
*/
bfa_ioc_boot(ioc, boot_type, boot_env);
@ -1529,7 +1529,7 @@ bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
}
/**
/*
* Initiate a full firmware download.
*/
static void
@ -1542,7 +1542,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
u32 chunkno = 0;
u32 i;
/**
/*
* Initialize LMEM first before code download
*/
bfa_ioc_lmem_init(ioc);
@ -1563,7 +1563,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
}
/**
/*
* write smem
*/
bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
@ -1571,7 +1571,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
loff += sizeof(u32);
/**
/*
* handle page offset wrap around
*/
loff = PSS_SMEM_PGOFF(loff);
@ -1598,7 +1598,7 @@ bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
bfa_ioc_hwinit(ioc, force);
}
/**
/*
* Update BFA configuration from firmware configuration.
*/
static void
@ -1613,7 +1613,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
/**
/*
* Attach time initialization of mbox logic.
*/
static void
@ -1629,7 +1629,7 @@ bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
}
}
/**
/*
* Mbox poll timer -- restarts any pending mailbox requests.
*/
static void
@ -1639,27 +1639,27 @@ bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
struct bfa_mbox_cmd_s *cmd;
u32 stat;
/**
/*
* If no command pending, do nothing
*/
if (list_empty(&mod->cmd_q))
return;
/**
/*
* If previous command is not yet fetched by firmware, do nothing
*/
stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
if (stat)
return;
/**
/*
* Enqueue command to firmware.
*/
bfa_q_deq(&mod->cmd_q, &cmd);
bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
/**
/*
* Cleanup any pending requests.
*/
static void
@ -1672,7 +1672,7 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
bfa_q_deq(&mod->cmd_q, &cmd);
}
/**
/*
* Read data from SMEM to host through PCI memmap
*
* @param[in] ioc memory for IOC
@ -1710,7 +1710,7 @@ bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
buf[i] = be32_to_cpu(r32);
loff += sizeof(u32);
/**
/*
* handle page offset wrap around
*/
loff = PSS_SMEM_PGOFF(loff);
@ -1729,7 +1729,7 @@ bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
return BFA_STATUS_OK;
}
/**
/*
* Clear SMEM data from host through PCI memmap
*
* @param[in] ioc memory for IOC
@ -1764,7 +1764,7 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
loff += sizeof(u32);
/**
/*
* handle page offset wrap around
*/
loff = PSS_SMEM_PGOFF(loff);
@ -1783,7 +1783,7 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
return BFA_STATUS_OK;
}
/**
/*
* hal iocpf to ioc interface
*/
static void
@ -1808,7 +1808,7 @@ static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
/**
/*
* Provide enable completion callback.
*/
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
@ -1819,7 +1819,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
/**
/*
* hal_ioc_public
*/
@ -1843,7 +1843,7 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
return BFA_STATUS_OK;
}
/**
/*
* Interface used by diag module to do firmware boot with memory test
* as the entry vector.
*/
@ -1857,7 +1857,7 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
return;
/**
/*
* Initialize IOC state of all functions on a chip reset.
*/
rb = ioc->pcidev.pci_bar_kva;
@ -1872,14 +1872,14 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
bfa_ioc_msgflush(ioc);
bfa_ioc_download_fw(ioc, boot_type, boot_env);
/**
/*
* Enable interrupts just before starting LPU
*/
ioc->cbfn->reset_cbfn(ioc->bfa);
bfa_ioc_lpu_start(ioc);
}
/**
/*
* Enable/disable IOC failure auto recovery.
*/
void
@ -1913,7 +1913,7 @@ bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
u32 r32;
int i;
/**
/*
* read the MBOX msg
*/
for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
@ -1923,7 +1923,7 @@ bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
msgp[i] = cpu_to_be32(r32);
}
/**
/*
* turn off mailbox interrupt by clearing mailbox status
*/
writel(1, ioc->ioc_regs.lpu_mbox_cmd);
@ -1966,7 +1966,7 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
}
}
/**
/*
* IOC attach time initialization and setup.
*
* @param[in] ioc memory for IOC
@ -1991,7 +1991,7 @@ bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
bfa_fsm_send_event(ioc, IOC_E_RESET);
}
/**
/*
* Driver detach time IOC cleanup.
*/
void
@ -2000,7 +2000,7 @@ bfa_ioc_detach(struct bfa_ioc_s *ioc)
bfa_fsm_send_event(ioc, IOC_E_DETACH);
}
/**
/*
* Setup IOC PCI properties.
*
* @param[in] pcidev PCI device information for this IOC
@ -2014,7 +2014,7 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
ioc->cna = ioc->ctdev && !ioc->fcmode;
/**
/*
* Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
*/
if (ioc->ctdev)
@ -2026,7 +2026,7 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
bfa_ioc_reg_init(ioc);
}
/**
/*
* Initialize IOC dma memory
*
* @param[in] dm_kva kernel virtual address of IOC dma memory
@ -2035,7 +2035,7 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
/**
/*
* dma memory for firmware attribute
*/
ioc->attr_dma.kva = dm_kva;
@ -2043,7 +2043,7 @@ bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
}
/**
/*
* Return size of dma memory required.
*/
u32
@ -2068,7 +2068,7 @@ bfa_ioc_disable(struct bfa_ioc_s *ioc)
bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
/**
/*
* Returns memory required for saving firmware trace in case of crash.
* Driver must call this interface to allocate memory required for
* automatic saving of firmware trace. Driver should call
@ -2081,7 +2081,7 @@ bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}
/**
/*
* Initialize memory for saving firmware trace. Driver must initialize
* trace memory before call bfa_ioc_enable().
*/
@ -2104,7 +2104,7 @@ bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
return PSS_SMEM_PGOFF(fmaddr);
}
/**
/*
* Register mailbox message handler functions
*
* @param[in] ioc IOC instance
@ -2120,7 +2120,7 @@ bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
mod->mbhdlr[mc].cbfn = mcfuncs[mc];
}
/**
/*
* Register mailbox message handler function, to be called by common modules
*/
void
@ -2133,7 +2133,7 @@ bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
mod->mbhdlr[mc].cbarg = cbarg;
}
/**
/*
* Queue a mailbox command request to firmware. Waits if mailbox is busy.
* Responsibility of caller to serialize
*
@ -2146,7 +2146,7 @@ bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
u32 stat;
/**
/*
* If a previous command is pending, queue new command
*/
if (!list_empty(&mod->cmd_q)) {
@ -2154,7 +2154,7 @@ bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
return;
}
/**
/*
* If mailbox is busy, queue command for poll timer
*/
stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
@ -2163,13 +2163,13 @@ bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
return;
}
/**
/*
* mailbox is free -- queue command to firmware
*/
bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
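
The three comments in this hunk describe a single-slot mailbox discipline: a new command goes straight to the hardware only when nothing is already queued and the command/status register reads back as idle; otherwise it is parked on a software queue that the poll timer drains later. The sketch below models just that decision in plain C; the struct layouts, QUEUE_DEPTH and the simulated cmd_stat_reg are assumptions for illustration, not the driver's real register layout.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MBOX_MSG_SZ 32
#define QUEUE_DEPTH 8

struct mbox_cmd {
	uint8_t msg[MBOX_MSG_SZ];
};

struct mbox_mod {
	struct mbox_cmd cmd_q[QUEUE_DEPTH];   /* pending commands (FIFO)        */
	int             head, tail, count;
	uint32_t        cmd_stat_reg;         /* 0 = mailbox idle, 1 = busy     */
};

/* Hand one command to the (simulated) hardware mailbox. */
static void mbox_send(struct mbox_mod *mod, const struct mbox_cmd *cmd)
{
	printf("send: %s\n", (const char *)cmd->msg);
	mod->cmd_stat_reg = 1;                /* firmware clears this later     */
}

/* Queue a command; send immediately only if nothing is ahead of it. */
static void mbox_queue(struct mbox_mod *mod, const struct mbox_cmd *cmd)
{
	/* If a previous command is pending, queue new command. */
	if (mod->count) {
		mod->cmd_q[mod->tail] = *cmd;
		mod->tail = (mod->tail + 1) % QUEUE_DEPTH;
		mod->count++;
		return;
	}

	/* If mailbox is busy, queue command for the poll timer. */
	if (mod->cmd_stat_reg) {
		mod->cmd_q[mod->tail] = *cmd;
		mod->tail = (mod->tail + 1) % QUEUE_DEPTH;
		mod->count++;
		return;
	}

	/* Mailbox is free -- send command to firmware. */
	mbox_send(mod, cmd);
}

/* Poll timer body: retire one queued command once the mailbox is idle. */
static void mbox_poll(struct mbox_mod *mod)
{
	if (!mod->count || mod->cmd_stat_reg)
		return;
	mbox_send(mod, &mod->cmd_q[mod->head]);
	mod->head = (mod->head + 1) % QUEUE_DEPTH;
	mod->count--;
}

int main(void)
{
	struct mbox_mod mod = { .head = 0, .tail = 0, .count = 0, .cmd_stat_reg = 0 };
	struct mbox_cmd a, b;

	strcpy((char *)a.msg, "enable");
	strcpy((char *)b.msg, "get-attr");

	mbox_queue(&mod, &a);                 /* mailbox idle: sent at once     */
	mbox_queue(&mod, &b);                 /* mailbox busy: parked on queue  */
	mod.cmd_stat_reg = 0;                 /* pretend firmware consumed it   */
	mbox_poll(&mod);                      /* poll timer drains the queue    */
	return 0;
}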
/**
/*
* Handle mailbox interrupts
*/
void
@ -2181,7 +2181,7 @@ bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
bfa_ioc_msgget(ioc, &m);
/**
/*
* Treat IOC message class as special.
*/
mc = m.mh.msg_class;
@ -2209,7 +2209,7 @@ bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
ioc->port_id = bfa_ioc_pcifn(ioc);
}
/**
/*
* return true if IOC is disabled
*/
bfa_boolean_t
@ -2219,7 +2219,7 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}
/**
/*
* return true if IOC firmware is different.
*/
bfa_boolean_t
@ -2238,7 +2238,7 @@ bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
((__sm) == BFI_IOC_FAIL) || \
((__sm) == BFI_IOC_CFG_DISABLED))
/**
/*
* Check if adapter is disabled -- both IOCs should be in a disabled
* state.
*/
@ -2264,7 +2264,7 @@ bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
return BFA_TRUE;
}
/**
/*
* Add to IOC heartbeat failure notification queue. To be used by common
* modules such as cee, port, diag.
*/
@ -2391,7 +2391,7 @@ bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
ioc_attr = ioc->attr;
/**
/*
* model name
*/
snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
@ -2455,7 +2455,7 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
/**
/*
* hal_wwn_public
*/
wwn_t
@ -2521,7 +2521,7 @@ bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
}
/**
/*
* Retrieve saved firmware trace from a prior IOC failure.
*/
bfa_status_t
@ -2541,7 +2541,7 @@ bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
return BFA_STATUS_OK;
}
/**
/*
* Clear saved firmware trace
*/
void
@ -2550,7 +2550,7 @@ bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
ioc->dbg_fwsave_once = BFA_TRUE;
}
/**
/*
* Retrieve saved firmware trace from a prior IOC failure.
*/
bfa_status_t
@ -2590,7 +2590,7 @@ bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
bfa_ioc_send_fwsync(ioc);
/**
/*
* After sending a fw sync mbox command wait for it to
* take effect. We will not wait for a response because
* 1. fw_sync mbox cmd doesn't have a response.
@ -2605,7 +2605,7 @@ bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
fwsync_iter--;
}
/**
/*
* Dump firmware smem
*/
bfa_status_t
@ -2625,7 +2625,7 @@ bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
loff = *offset;
dlen = *buflen;
/**
/*
* First smem read, sync smem before proceeding
* No need to sync before reading every chunk.
*/
@ -2652,7 +2652,7 @@ bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
return status;
}
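
bfa_ioc_debug_fwcore() is a cursor-style dump interface: the caller makes repeated calls, each time passing in the offset where the previous call stopped, and the one-time sync mentioned in the comment is keyed off offset zero. The following is a hedged sketch of that calling convention only; TOTAL_SZ, CHUNK_SZ, the fwcore array and the do_sync() hook are made-up placeholders, not the driver's interface.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOTAL_SZ  256u    /* size of the (simulated) firmware core */
#define CHUNK_SZ   64u    /* how much one call may copy            */

static uint8_t fwcore[TOTAL_SZ];      /* stands in for firmware smem */

static void do_sync(void)             /* one-time sync before the first read */
{
	printf("sync smem once\n");
}

/*
 * Copy up to *buflen bytes starting at *offset into buf.
 * On return *buflen holds the bytes copied and *offset the next cursor.
 * Returns 0 on success, -1 when the cursor is already past the end.
 */
static int debug_fwcore(void *buf, uint32_t *offset, uint32_t *buflen)
{
	uint32_t loff = *offset;
	uint32_t dlen = *buflen;

	if (loff >= TOTAL_SZ)
		return -1;

	/* First read: sync before proceeding; later chunks skip this. */
	if (loff == 0)
		do_sync();

	if (dlen > TOTAL_SZ - loff)
		dlen = TOTAL_SZ - loff;

	memcpy(buf, &fwcore[loff], dlen);

	*offset = loff + dlen;
	*buflen = dlen;
	return 0;
}

int main(void)
{
	uint8_t  chunk[CHUNK_SZ];
	uint32_t off = 0;

	for (;;) {
		uint32_t len = sizeof(chunk);
		if (debug_fwcore(chunk, &off, &len) != 0)
			break;
		printf("copied %u bytes, next offset %u\n", len, off);
	}
	return 0;
}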
/**
/*
* Firmware statistics
*/
bfa_status_t
@ -2697,7 +2697,7 @@ bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
return status;
}
/**
/*
* Save firmware trace if configured.
*/
static void
@ -2711,7 +2711,7 @@ bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
}
}
/**
/*
* Firmware failure detected. Start recovery actions.
*/
static void
@ -2733,7 +2733,7 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
return;
}
/**
/*
* hal_iocpf_pvt BFA IOC PF private functions
*/
@ -2790,7 +2790,7 @@ bfa_iocpf_sem_timeout(void *ioc_arg)
bfa_ioc_hw_sem_get(ioc);
}
/**
/*
* bfa timer function
*/
void
@ -2835,7 +2835,7 @@ bfa_timer_beat(struct bfa_timer_mod_s *mod)
}
}
/**
/*
* Should be called with lock protection
*/
void
@ -2853,7 +2853,7 @@ bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
list_add_tail(&timer->qe, &mod->timer_q);
}
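
bfa_timer_beat()/bfa_timer_begin() are a software timer list of the simplest kind: armed timers sit on a queue, a periodic beat walks the queue and ages each entry, and expired entries have their callback invoked; as the comments note, callers are expected to hold the module lock. Here is a compact single-threaded sketch (so the lock is omitted) with assumed structure and field names, not the driver's own types.

#include <stdio.h>
#include <stdlib.h>

struct swtimer {
	struct swtimer *next;
	int             timeout;              /* beats left until expiry */
	void          (*cbfn)(void *arg);
	void           *arg;
};

struct timer_mod {
	struct swtimer *head;                 /* active timers */
};

/* Arm a timer; in the driver the caller holds the module lock. */
static void timer_begin(struct timer_mod *mod, struct swtimer *t,
			void (*cbfn)(void *), void *arg, int timeout)
{
	t->cbfn = cbfn;
	t->arg = arg;
	t->timeout = timeout;
	t->next = mod->head;
	mod->head = t;
}

/* Periodic beat: age every timer, fire and unlink the expired ones. */
static void timer_beat(struct timer_mod *mod)
{
	struct swtimer **pp = &mod->head;

	while (*pp) {
		struct swtimer *t = *pp;

		if (--t->timeout > 0) {
			pp = &t->next;
			continue;
		}
		*pp = t->next;                    /* unlink before the callback */
		t->cbfn(t->arg);
	}
}

static void say(void *arg)
{
	printf("timer '%s' expired\n", (const char *)arg);
}

int main(void)
{
	struct timer_mod mod = { .head = NULL };
	struct swtimer hb, io;
	int beat;

	timer_begin(&mod, &hb, say, "heartbeat", 2);
	timer_begin(&mod, &io, say, "io-timeout", 3);

	for (beat = 0; beat < 3; beat++)
		timer_beat(&mod);
	return 0;
}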
/**
/*
* Should be called with lock protection
*/
void


@ -34,7 +34,7 @@ static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
struct bfa_ioc_hwif_s hwif_cb;
/**
/*
* Called from bfa_ioc_attach() to map asic specific calls.
*/
void
@ -52,7 +52,7 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
ioc->ioc_hwif = &hwif_cb;
}
/**
/*
* Return true if firmware of current driver matches the running firmware.
*/
static bfa_boolean_t
@ -66,7 +66,7 @@ bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc)
{
}
/**
/*
* Notify other functions on HB failure.
*/
static void
@ -76,7 +76,7 @@ bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc)
readl(ioc->ioc_regs.err_set);
}
/**
/*
* Host to LPU mailbox message addresses
*/
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
@ -84,7 +84,7 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }
};
/**
/*
* Host <-> LPU mailbox command/status registers
*/
static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
@ -113,7 +113,7 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
}
/**
/*
* Host <-> LPU mailbox command/status registers
*/
ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn;
@ -133,7 +133,7 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
/**
/*
* sram memory access
*/
ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
@ -145,14 +145,14 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
/**
/*
* Initialize IOC to port mapping.
*/
static void
bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
{
/**
/*
* For crossbow, port id is same as pci function.
*/
ioc->port_id = bfa_ioc_pcifn(ioc);
@ -160,7 +160,7 @@ bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
bfa_trc(ioc, ioc->port_id);
}
/**
/*
* Set interrupt mode for a function: INTX or MSIX
*/
static void
@ -168,7 +168,7 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
}
/**
/*
* Cleanup hw semaphore and usecnt registers
*/
static void



@ -34,7 +34,7 @@ static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
struct bfa_ioc_hwif_s hwif_ct;
/**
/*
* Called from bfa_ioc_attach() to map asic specific calls.
*/
void
@ -52,7 +52,7 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
ioc->ioc_hwif = &hwif_ct;
}
/**
/*
* Return true if firmware of current driver matches the running firmware.
*/
static bfa_boolean_t
@ -62,13 +62,13 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
u32 usecnt;
struct bfi_ioc_image_hdr_s fwhdr;
/**
/*
* Firmware match check is relevant only for CNA.
*/
if (!ioc->cna)
return BFA_TRUE;
/**
/*
* If bios boot (flash based) -- do not increment usage count
*/
if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
@ -78,7 +78,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
/**
/*
* If usage count is 0, always return TRUE.
*/
if (usecnt == 0) {
@ -91,12 +91,12 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
bfa_trc(ioc, ioc_fwstate);
/**
/*
* Use count cannot be non-zero and chip in uninitialized state.
*/
bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);
/**
/*
* Check if another driver with a different firmware is active
*/
bfa_ioc_fwver_get(ioc, &fwhdr);
@ -106,7 +106,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
return BFA_FALSE;
}
/**
/*
* Same firmware version. Increment the reference count.
*/
usecnt++;
@ -121,20 +121,20 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
{
u32 usecnt;
/**
/*
* Firmware lock is relevant only for CNA.
*/
if (!ioc->cna)
return;
/**
/*
* If bios boot (flash based) -- do not decrement usage count
*/
if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
BFA_IOC_FWIMG_MINSZ)
return;
/**
/*
* decrement usage count
*/
bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
@ -148,7 +148,7 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}
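
The PCI functions of one adapter share a single firmware image on this ASIC, so firmware_lock/unlock keep a usage count behind a hardware semaphore: the first function up claims the image and sets the count to one, later functions join only if their driver carries the same firmware version, and unlock simply drops a reference. Below is a user-space sketch of that protocol under stated assumptions: a pthread mutex stands in for the hardware semaphore, plain strings for the firmware header compare, and the names are invented for the example.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* Shared state that the hardware semaphore would normally protect. */
static pthread_mutex_t usage_sem = PTHREAD_MUTEX_INITIALIZER;
static unsigned int    usecnt;
static char            running_fw[16];

/* Returns 1 if this function may use the already-loaded firmware. */
static int firmware_lock(const char *drv_fw)
{
	int ok = 1;

	pthread_mutex_lock(&usage_sem);

	if (usecnt == 0) {
		/* First function up: claim the image for this driver version. */
		strncpy(running_fw, drv_fw, sizeof(running_fw) - 1);
		usecnt = 1;
	} else if (strcmp(running_fw, drv_fw) != 0) {
		/* Another driver with a different firmware is active. */
		ok = 0;
	} else {
		/* Same firmware version: just add a reference. */
		usecnt++;
	}

	pthread_mutex_unlock(&usage_sem);
	return ok;
}

static void firmware_unlock(void)
{
	pthread_mutex_lock(&usage_sem);
	if (usecnt > 0)
		usecnt--;                         /* drop this function's reference */
	pthread_mutex_unlock(&usage_sem);
}

int main(void)
{
	printf("fn0 lock: %d\n", firmware_lock("version-A"));  /* claims, cnt=1 */
	printf("fn1 lock: %d\n", firmware_lock("version-A"));  /* joins,  cnt=2 */
	printf("old drv : %d\n", firmware_lock("version-B"));  /* mismatch: 0   */
	firmware_unlock();
	firmware_unlock();
	return 0;
}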
/**
/*
* Notify other functions on HB failure.
*/
static void
@ -164,7 +164,7 @@ bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc)
}
}
/**
/*
* Host to LPU mailbox message addresses
*/
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
@ -174,7 +174,7 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};
/**
/*
* Host <-> LPU mailbox command/status registers - port 0
*/
static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
@ -184,7 +184,7 @@ static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
{ HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
};
/**
/*
* Host <-> LPU mailbox command/status registers - port 1
*/
static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
@ -236,7 +236,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
/**
/*
* sram memory access
*/
ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
@ -248,7 +248,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
/**
/*
* Initialize IOC to port mapping.
*/
@ -259,7 +259,7 @@ bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
void __iomem *rb = ioc->pcidev.pci_bar_kva;
u32 r32;
/**
/*
* For catapult, base port id on personality register and IOC type
*/
r32 = readl(rb + FNC_PERS_REG);
@ -270,7 +270,7 @@ bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
bfa_trc(ioc, ioc->port_id);
}
/**
/*
* Set interrupt mode for a function: INTX or MSIX
*/
static void
@ -285,7 +285,7 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
__F0_INTX_STATUS;
/**
/*
* If already in desired mode, do not change anything
*/
if (!msix && mode)
@ -303,7 +303,7 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
writel(r32, rb + FNC_PERS_REG);
}
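
The isr_mode_set path above is a read-modify-write of a per-function field inside a shared personality register, with an early return when the function is already in the requested INTX/MSIX mode. In the sketch that follows the field width, bit positions and mode encodings are invented; only the shape of the update mirrors the driver.

#include <stdint.h>
#include <stdio.h>

#define FN_FIELD_BITS  4u                         /* assumed field width  */
#define FN_SHIFT(fn)   ((fn) * FN_FIELD_BITS)
#define FN_MASK        0xFu
#define MODE_INTX      0x1u                       /* assumed encodings    */
#define MODE_MSIX      0x0u

static uint32_t fnc_pers_reg;                     /* simulated register   */

static void isr_mode_set(unsigned int fn, int msix)
{
	uint32_t r32  = fnc_pers_reg;                 /* readl() equivalent   */
	uint32_t mode = (r32 >> FN_SHIFT(fn)) & FN_MASK;

	/* If already in the desired mode, do not change anything. */
	if ((msix && mode == MODE_MSIX) || (!msix && mode == MODE_INTX))
		return;

	r32 &= ~(FN_MASK << FN_SHIFT(fn));            /* clear this fn's field */
	r32 |= (msix ? MODE_MSIX : MODE_INTX) << FN_SHIFT(fn);

	fnc_pers_reg = r32;                           /* writel() equivalent  */
}

int main(void)
{
	fnc_pers_reg = (MODE_INTX << FN_SHIFT(0)) | (MODE_INTX << FN_SHIFT(1));

	isr_mode_set(1, 1);                           /* switch fn1 to MSIX   */
	isr_mode_set(0, 0);                           /* fn0 already INTX     */
	printf("FNC_PERS = 0x%08x\n", fnc_pers_reg);
	return 0;
}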
/**
/*
* Cleanup hw semaphore and usecnt registers
*/
static void


@ -46,7 +46,7 @@ bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats)
}
}
/**
/*
* bfa_port_enable_isr()
*
*
@ -63,7 +63,7 @@ bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status)
port->endis_cbfn(port->endis_cbarg, status);
}
/**
/*
* bfa_port_disable_isr()
*
*
@ -80,7 +80,7 @@ bfa_port_disable_isr(struct bfa_port_s *port, bfa_status_t status)
port->endis_cbfn(port->endis_cbarg, status);
}
/**
/*
* bfa_port_get_stats_isr()
*
*
@ -112,7 +112,7 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
}
}
/**
/*
* bfa_port_clear_stats_isr()
*
*
@ -129,7 +129,7 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
port->stats_status = status;
port->stats_busy = BFA_FALSE;
/**
/*
* re-initialize time stamp for stats reset
*/
bfa_os_gettimeofday(&tv);
@ -141,7 +141,7 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
}
}
/**
/*
* bfa_port_isr()
*
*
@ -189,7 +189,7 @@ bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
}
}
/**
/*
* bfa_port_meminfo()
*
*
@ -203,7 +203,7 @@ bfa_port_meminfo(void)
return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ);
}
/**
/*
* bfa_port_mem_claim()
*
*
@ -220,7 +220,7 @@ bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa)
port->stats_dma.pa = dma_pa;
}
/**
/*
* bfa_port_enable()
*
* Send the Port enable request to the f/w
@ -264,7 +264,7 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
return BFA_STATUS_OK;
}
/**
/*
* bfa_port_disable()
*
* Send the Port disable request to the f/w
@ -308,7 +308,7 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
return BFA_STATUS_OK;
}
/**
/*
* bfa_port_get_stats()
*
* Send the request to the f/w to fetch Port statistics.
@ -348,7 +348,7 @@ bfa_port_get_stats(struct bfa_port_s *port, union bfa_port_stats_u *stats,
return BFA_STATUS_OK;
}
/**
/*
* bfa_port_clear_stats()
*
*
@ -385,7 +385,7 @@ bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
return BFA_STATUS_OK;
}
/**
/*
* bfa_port_hbfail()
*
*
@ -415,7 +415,7 @@ bfa_port_hbfail(void *arg)
}
}
/**
/*
* bfa_port_attach()
*
*
@ -449,7 +449,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port);
bfa_ioc_hbfail_register(port->ioc, &port->hbfail);
/**
/*
* initialize time stamp for stats reset
*/
bfa_os_gettimeofday(&tv);
@ -458,7 +458,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
bfa_trc(port, 0);
}
/**
/*
* bfa_port_detach()
*
*

File diff suppressed because it is too large


@ -15,7 +15,7 @@
* General Public License for more details.
*/
/**
/*
* bfad.c Linux driver PCI interface module.
*/
#include <linux/module.h>
@ -151,7 +151,7 @@ bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);
/**
/*
* Beginning state for the driver instance, awaiting the pci_probe event
*/
static void
@ -181,7 +181,7 @@ bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event)
}
}
/**
/*
* Driver Instance is created, awaiting event INIT to initialize the bfad
*/
static void
@ -364,7 +364,7 @@ bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
}
}
/**
/*
* BFA callbacks
*/
void
@ -376,7 +376,7 @@ bfad_hcb_comp(void *arg, bfa_status_t status)
complete(&fcomp->comp);
}
/**
/*
* bfa_init callback
*/
void
@ -401,7 +401,7 @@ bfa_cb_init(void *drv, bfa_status_t init_status)
complete(&bfad->comp);
}
/**
/*
* BFA_FCS callbacks
*/
struct bfad_port_s *
@ -457,7 +457,7 @@ bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
}
}
/**
/*
* FCS RPORT alloc callback, after successful PLOGI by FCS
*/
bfa_status_t
@ -478,7 +478,7 @@ bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport,
return rc;
}
/**
/*
* FCS PBC VPORT Create
*/
void
@ -663,7 +663,7 @@ bfad_hal_mem_alloc(struct bfad_s *bfad)
return rc;
}
/**
/*
* Create a vport under a vf.
*/
bfa_status_t
@ -1140,7 +1140,7 @@ bfad_worker(void *ptr)
return 0;
}
/**
/*
* BFA driver interrupt functions
*/
irqreturn_t
@ -1199,7 +1199,7 @@ bfad_msix(int irq, void *dev_id)
return IRQ_HANDLED;
}
/**
/*
* Initialize the MSIX entry table.
*/
static void
@ -1252,7 +1252,7 @@ bfad_install_msix_handler(struct bfad_s *bfad)
return 0;
}
/**
/*
* Setup MSIX based interrupt.
*/
int
@ -1333,7 +1333,7 @@ bfad_remove_intr(struct bfad_s *bfad)
}
}
/**
/*
* PCI probe entry.
*/
int
@ -1419,7 +1419,7 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
return error;
}
/**
/*
* PCI remove entry.
*/
void
@ -1500,7 +1500,7 @@ static struct pci_driver bfad_pci_driver = {
.remove = __devexit_p(bfad_pci_remove),
};
/**
/*
* Driver module init.
*/
static int __init
@ -1540,7 +1540,7 @@ bfad_init(void)
return error;
}
/**
/*
* Driver module exit.
*/
static void __exit


@ -15,14 +15,14 @@
* General Public License for more details.
*/
/**
/*
* bfa_attr.c Linux driver configuration interface module.
*/
#include "bfad_drv.h"
#include "bfad_im.h"
/**
/*
* FC transport template entry, get SCSI target port ID.
*/
void
@ -48,7 +48,7 @@ bfad_im_get_starget_port_id(struct scsi_target *starget)
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
/**
/*
* FC transport template entry, get SCSI target nwwn.
*/
void
@ -74,7 +74,7 @@ bfad_im_get_starget_node_name(struct scsi_target *starget)
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
/**
/*
* FC transport template entry, get SCSI target pwwn.
*/
void
@ -100,7 +100,7 @@ bfad_im_get_starget_port_name(struct scsi_target *starget)
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
/**
/*
* FC transport template entry, get SCSI host port ID.
*/
void
@ -114,7 +114,7 @@ bfad_im_get_host_port_id(struct Scsi_Host *shost)
bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
}
/**
/*
* FC transport template entry, get SCSI host port type.
*/
static void
@ -146,7 +146,7 @@ bfad_im_get_host_port_type(struct Scsi_Host *shost)
}
}
/**
/*
* FC transport template entry, get SCSI host port state.
*/
static void
@ -183,7 +183,7 @@ bfad_im_get_host_port_state(struct Scsi_Host *shost)
}
}
/**
/*
* FC transport template entry, get SCSI host active fc4s.
*/
static void
@ -202,7 +202,7 @@ bfad_im_get_host_active_fc4s(struct Scsi_Host *shost)
fc_host_active_fc4s(shost)[7] = 1;
}
/**
/*
* FC transport template entry, get SCSI host link speed.
*/
static void
@ -236,7 +236,7 @@ bfad_im_get_host_speed(struct Scsi_Host *shost)
}
}
/**
/*
* FC transport template entry, get SCSI host port type.
*/
static void
@ -253,7 +253,7 @@ bfad_im_get_host_fabric_name(struct Scsi_Host *shost)
}
/**
/*
* FC transport template entry, get BFAD statistics.
*/
static struct fc_host_statistics *
@ -304,7 +304,7 @@ bfad_im_get_stats(struct Scsi_Host *shost)
return hstats;
}
/**
/*
* FC transport template entry, reset BFAD statistics.
*/
static void
@ -331,7 +331,7 @@ bfad_im_reset_stats(struct Scsi_Host *shost)
return;
}
/**
/*
* FC transport template entry, get rport loss timeout.
*/
static void
@ -347,7 +347,7 @@ bfad_im_get_rport_loss_tmo(struct fc_rport *rport)
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
/**
/*
* FC transport template entry, set rport loss timeout.
*/
static void
@ -633,7 +633,7 @@ struct fc_function_template bfad_im_vport_fc_function_template = {
.set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
};
/**
/*
* Scsi_Host_attrs SCSI host attributes
*/
static ssize_t


@ -15,7 +15,7 @@
* General Public License for more details.
*/
/**
/*
* bfad_im.c Linux driver IM module.
*/
@ -164,10 +164,10 @@ bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
wake_up(wq);
}
/**
/*
* Scsi_Host_template SCSI host template
*/
/**
/*
* Scsi_Host template entry, returns BFAD PCI info.
*/
static const char *
@ -196,7 +196,7 @@ bfad_im_info(struct Scsi_Host *shost)
return bfa_buf;
}
/**
/*
* Scsi_Host template entry, aborts the specified SCSI command.
*
* Returns: SUCCESS or FAILED.
@ -280,7 +280,7 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
return rc;
}
/**
/*
* Scsi_Host template entry, resets a LUN and abort its all commands.
*
* Returns: SUCCESS or FAILED.
@ -319,7 +319,7 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
goto out;
}
/**
/*
* Set host_scribble to NULL to avoid aborting a task command
* if happens.
*/
@ -346,7 +346,7 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
return rc;
}
/**
/*
* Scsi_Host template entry, resets the bus and abort all commands.
*/
static int
@ -396,7 +396,7 @@ bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
return SUCCESS;
}
/**
/*
* Scsi_Host template entry slave_destroy.
*/
static void
@ -406,11 +406,11 @@ bfad_im_slave_destroy(struct scsi_device *sdev)
return;
}
/**
/*
* BFA FCS itnim callbacks
*/
/**
/*
* BFA FCS itnim alloc callback, after successful PRLI
* Context: Interrupt
*/
@ -433,7 +433,7 @@ bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
bfad->bfad_flags |= BFAD_RPORT_ONLINE;
}
/**
/*
* BFA FCS itnim free callback.
* Context: Interrupt. bfad_lock is held
*/
@ -471,7 +471,7 @@ bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
queue_work(im->drv_workq, &itnim_drv->itnim_work);
}
/**
/*
* BFA FCS itnim online callback.
* Context: Interrupt. bfad_lock is held
*/
@ -492,7 +492,7 @@ bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv)
queue_work(im->drv_workq, &itnim_drv->itnim_work);
}
/**
/*
* BFA FCS itnim offline callback.
* Context: Interrupt. bfad_lock is held
*/
@ -519,7 +519,7 @@ bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
queue_work(im->drv_workq, &itnim_drv->itnim_work);
}
/**
/*
* Allocate a Scsi_Host for a port.
*/
int
@ -751,7 +751,7 @@ bfad_os_thread_workq(struct bfad_s *bfad)
return BFA_STATUS_OK;
}
/**
/*
* Scsi_Host template entry.
*
* Description:
@ -896,7 +896,7 @@ bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id)
return NULL;
}
/**
/*
* Scsi_Host template entry slave_alloc
*/
static int
@ -973,7 +973,7 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
sprintf(fc_host_symbolic_name(host), "%s", symname);
fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa);
fc_host_maxframe_size(host) = fcport->cfg.maxfrsize;
fc_host_maxframe_size(host) = fcport->cfg.maxfrsize;
}
static void
@ -1016,7 +1016,7 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
return;
}
/**
/*
* Work queue handler using FC transport service
* Context: kernel
*/
@ -1116,7 +1116,7 @@ bfad_im_itnim_work_handler(struct work_struct *work)
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
/**
/*
* Scsi_Host template entry, queue a SCSI command to the BFAD.
*/
static int