de400d6b78
Current IRQ statistics support does not show detail counts for I/O
interrupts which are processed internally only. The result is a
summation count which is way off, such as this one:

           CPU0       CPU1       CPU2
I/O:       1331        710        442
[...]
QAI:         15         16         16  [I/O] QDIO Adapter Interrupt
QDI:          1          0          0  [I/O] QDIO Interrupt
DAS:        706        645        381  [I/O] DASD
C15:         26         10          0  [I/O] 3215
C70:          0          0          0  [I/O] 3270
TAP:          0          0          0  [I/O] Tape
VMR:          0          0          0  [I/O] Unit Record Devices
LCS:          0          0          0  [I/O] LCS
CLW:          0          0          0  [I/O] CLAW
CTC:          0          0          0  [I/O] CTC
APB:          0          0          0  [I/O] AP Bus

Fix this by moving I/O interrupt accounting into the common I/O layer.

Signed-off-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
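A rough sketch of how a driver would hook into this accounting, assuming an
int_class member in struct ccw_driver and the IOINT_DAS class constant (both
are assumptions about this tree, not taken from the patch itself): the driver
picks its interruption class once, and dev_fsm_event() in the header below
then counts each interrupt of an online device under that class rather than
under the generic I/O summary line.

/* Hypothetical sketch, not from this patch: a CCW device driver choosing
 * the statistics class its I/O interrupts are accounted under.  The
 * int_class member and IOINT_DAS are assumed names; with them set, the
 * common I/O layer (dev_fsm_event() below) increments the "DAS" counter
 * instead of the summary I/O line.  A real driver also fills in its device
 * id table and probe/remove callbacks, omitted here for brevity. */
#include <linux/module.h>
#include <asm/ccwdev.h>
#include <asm/irq.h>

static struct ccw_driver example_dasd_driver = {
	.driver = {
		.name  = "example-dasd",
		.owner = THIS_MODULE,
	},
	.int_class = IOINT_DAS,	/* shows up on the "DAS" line above */
};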
155 lines
4.7 KiB
C
#ifndef S390_DEVICE_H
#define S390_DEVICE_H

#include <asm/ccwdev.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/notifier.h>
#include <linux/kernel_stat.h>
#include "io_sch.h"

/*
 * states of the device statemachine
 */
enum dev_state {
	DEV_STATE_NOT_OPER,
	DEV_STATE_SENSE_PGID,
	DEV_STATE_SENSE_ID,
	DEV_STATE_OFFLINE,
	DEV_STATE_VERIFY,
	DEV_STATE_ONLINE,
	DEV_STATE_W4SENSE,
	DEV_STATE_DISBAND_PGID,
	DEV_STATE_BOXED,
	/* states to wait for i/o completion before doing something */
	DEV_STATE_TIMEOUT_KILL,
	DEV_STATE_QUIESCE,
	/* special states for devices gone not operational */
	DEV_STATE_DISCONNECTED,
	DEV_STATE_DISCONNECTED_SENSE_ID,
	DEV_STATE_CMFCHANGE,
	DEV_STATE_CMFUPDATE,
	DEV_STATE_STEAL_LOCK,
	/* last element! */
	NR_DEV_STATES
};

/*
 * asynchronous events of the device statemachine
 */
enum dev_event {
	DEV_EVENT_NOTOPER,
	DEV_EVENT_INTERRUPT,
	DEV_EVENT_TIMEOUT,
	DEV_EVENT_VERIFY,
	/* last element! */
	NR_DEV_EVENTS
};

struct ccw_device;

/*
 * action called through jumptable
 */
typedef void (fsm_func_t)(struct ccw_device *, enum dev_event);
extern fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS];

static inline void
dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
{
	int state = cdev->private->state;

	if (dev_event == DEV_EVENT_INTERRUPT) {
		if (state == DEV_STATE_ONLINE)
			kstat_cpu(smp_processor_id()).
				irqs[cdev->private->int_class]++;
		else if (state != DEV_STATE_CMFCHANGE &&
			 state != DEV_STATE_CMFUPDATE)
			kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
	}
	dev_jumptable[state][dev_event](cdev, dev_event);
}

/*
 * Delivers 1 if the device state is final.
 */
static inline int
dev_fsm_final_state(struct ccw_device *cdev)
{
	return (cdev->private->state == DEV_STATE_NOT_OPER ||
		cdev->private->state == DEV_STATE_OFFLINE ||
		cdev->private->state == DEV_STATE_ONLINE ||
		cdev->private->state == DEV_STATE_BOXED);
}

extern wait_queue_head_t ccw_device_init_wq;
extern atomic_t ccw_device_init_count;
int __init io_subchannel_init(void);

void io_subchannel_recog_done(struct ccw_device *cdev);
void io_subchannel_init_config(struct subchannel *sch);

int ccw_device_cancel_halt_clear(struct ccw_device *);

int ccw_device_is_orphan(struct ccw_device *);

void ccw_device_recognition(struct ccw_device *);
int ccw_device_online(struct ccw_device *);
int ccw_device_offline(struct ccw_device *);
void ccw_device_update_sense_data(struct ccw_device *);
int ccw_device_test_sense_data(struct ccw_device *);
void ccw_device_schedule_sch_unregister(struct ccw_device *);
int ccw_purge_blacklisted(void);
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);

/* Function prototypes for device status and basic sense stuff. */
void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *);
int ccw_device_do_sense(struct ccw_device *, struct irb *);

/* Function prototype for internal request handling. */
int lpm_adjust(int lpm, int mask);
void ccw_request_start(struct ccw_device *);
int ccw_request_cancel(struct ccw_device *cdev);
void ccw_request_handler(struct ccw_device *cdev);
void ccw_request_timeout(struct ccw_device *cdev);
void ccw_request_notoper(struct ccw_device *cdev);

/* Function prototypes for sense id stuff. */
void ccw_device_sense_id_start(struct ccw_device *);
void ccw_device_sense_id_done(struct ccw_device *, int);

/* Function prototypes for path grouping stuff. */
void ccw_device_verify_start(struct ccw_device *);
void ccw_device_verify_done(struct ccw_device *, int);

void ccw_device_disband_start(struct ccw_device *);
void ccw_device_disband_done(struct ccw_device *, int);

void ccw_device_stlck_start(struct ccw_device *, void *, void *, void *);
void ccw_device_stlck_done(struct ccw_device *, void *, int);

int ccw_device_call_handler(struct ccw_device *);

int ccw_device_stlck(struct ccw_device *);

/* Helper function for machine check handling. */
void ccw_device_trigger_reprobe(struct ccw_device *);
void ccw_device_kill_io(struct ccw_device *);
int ccw_device_notify(struct ccw_device *, int);
void ccw_device_set_disconnected(struct ccw_device *cdev);
void ccw_device_set_notoper(struct ccw_device *cdev);

/* qdio needs this. */
void ccw_device_set_timeout(struct ccw_device *, int);
extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);

/* Channel measurement facility related */
void retry_set_schib(struct ccw_device *cdev);
void cmf_retry_copy_block(struct ccw_device *);
int cmf_reenable(struct ccw_device *);
int ccw_set_cmf(struct ccw_device *cdev, int enable);
extern struct device_attribute dev_attr_cmb_enable;
#endif