@@ -72,6 +72,7 @@
 #include <linux/moduleparam.h>
 #include <linux/libata.h>
 #include <linux/hdreg.h>
+#include <linux/reboot.h>
 #include <linux/stringify.h>
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -92,7 +93,6 @@ static unsigned int ipr_max_speed = 1;
 static int ipr_testmode = 0;
 static unsigned int ipr_fastfail = 0;
 static unsigned int ipr_transop_timeout = 0;
-static unsigned int ipr_enable_cache = 1;
 static unsigned int ipr_debug = 0;
 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
 static unsigned int ipr_dual_ioa_raid = 1;
@@ -175,8 +175,6 @@ module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
-module_param_named(enable_cache, ipr_enable_cache, int, 0);
-MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
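The two removed lines above retire the `enable_cache` module parameter together with its modinfo description. For reference, `module_param_named()` binds a sysfs-visible parameter name to a differently named static variable, and `MODULE_PARM_DESC()` supplies the text shown by modinfo. A minimal sketch of that pattern, using invented names rather than anything from ipr:

/* Hypothetical module parameter -- illustrates the pattern only. */
#include <linux/module.h>
#include <linux/moduleparam.h>

static int example_flag = 1;

/* Appears as /sys/module/<module>/parameters/flag; S_IRUGO | S_IWUSR makes
 * it world-readable but writable only by root. */
module_param_named(flag, example_flag, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flag, "Enable the example behavior (default: 1)");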
@@ -3097,105 +3095,6 @@ static struct bin_attribute ipr_trace_attr = {
 };
 #endif
 
-static const struct {
-	enum ipr_cache_state state;
-	char *name;
-} cache_state [] = {
-	{ CACHE_NONE, "none" },
-	{ CACHE_DISABLED, "disabled" },
-	{ CACHE_ENABLED, "enabled" }
-};
-
-/**
- * ipr_show_write_caching - Show the write caching attribute
- * @dev:	device struct
- * @buf:	buffer
- *
- * Return value:
- *	number of bytes printed to buffer
- **/
-static ssize_t ipr_show_write_caching(struct device *dev,
-				      struct device_attribute *attr, char *buf)
-{
-	struct Scsi_Host *shost = class_to_shost(dev);
-	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
-	unsigned long lock_flags = 0;
-	int i, len = 0;
-
-	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
-		if (cache_state[i].state == ioa_cfg->cache_state) {
-			len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
-			break;
-		}
-	}
-	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-	return len;
-}
-
-
-/**
- * ipr_store_write_caching - Enable/disable adapter write cache
- * @dev:	device struct
- * @buf:	buffer
- * @count:	buffer size
- *
- * This function will enable/disable adapter write cache.
- *
- * Return value:
- * 	count on success / other on failure
- **/
-static ssize_t ipr_store_write_caching(struct device *dev,
-				       struct device_attribute *attr,
-				       const char *buf, size_t count)
-{
-	struct Scsi_Host *shost = class_to_shost(dev);
-	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
-	unsigned long lock_flags = 0;
-	enum ipr_cache_state new_state = CACHE_INVALID;
-	int i;
-
-	if (!capable(CAP_SYS_ADMIN))
-		return -EACCES;
-	if (ioa_cfg->cache_state == CACHE_NONE)
-		return -EINVAL;
-
-	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
-		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
-			new_state = cache_state[i].state;
-			break;
-		}
-	}
-
-	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
-		return -EINVAL;
-
-	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-	if (ioa_cfg->cache_state == new_state) {
-		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-		return count;
-	}
-
-	ioa_cfg->cache_state = new_state;
-	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
-		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
-	if (!ioa_cfg->in_reset_reload)
-		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
-	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
-
-	return count;
-}
-
-static struct device_attribute ipr_ioa_cache_attr = {
-	.attr = {
-		.name =	"write_cache",
-		.mode =	S_IRUGO | S_IWUSR,
-	},
-	.show = ipr_show_write_caching,
-	.store = ipr_store_write_caching
-};
-
 /**
  * ipr_show_fw_version - Show the firmware version
  * @dev:	class device struct
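For context on what is being deleted: `write_cache` was a hand-rolled sysfs attribute, i.e. a `struct device_attribute` with a show routine that formats into the PAGE_SIZE buffer sysfs provides and a store routine that returns the number of bytes it consumed. A stripped-down sketch of that show/store pattern, with invented names (the real attribute additionally took the host lock and kicked off an adapter reset):

/* Minimal device_attribute pair; "example" identifiers are hypothetical. */
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/stat.h>

static int example_setting = 1;

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* sysfs supplies a PAGE_SIZE buffer; return the bytes written */
	return snprintf(buf, PAGE_SIZE, "%d\n", example_setting);
}

static ssize_t example_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	example_setting = simple_strtoul(buf, NULL, 10) ? 1 : 0;
	return count;	/* report the whole write as consumed */
}

static struct device_attribute example_attr = {
	.attr = { .name = "example", .mode = S_IRUGO | S_IWUSR },
	.show = example_show,
	.store = example_store,
};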
@@ -3797,7 +3696,6 @@ static struct device_attribute *ipr_ioa_attrs[] = {
 	&ipr_ioa_state_attr,
 	&ipr_ioa_reset_attr,
 	&ipr_update_fw_attr,
-	&ipr_ioa_cache_attr,
 	NULL,
 };
 
@@ -6292,36 +6190,6 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
 	return IPR_RC_JOB_CONTINUE;
 }
 
-/**
- * ipr_setup_write_cache - Disable write cache if needed
- * @ipr_cmd:	ipr command struct
- *
- * This function sets up adapters write cache to desired setting
- *
- * Return value:
- * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
- **/
-static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
-{
-	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-
-	ipr_cmd->job_step = ipr_set_supported_devs;
-	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
-				    struct ipr_resource_entry, queue);
-
-	if (ioa_cfg->cache_state != CACHE_DISABLED)
-		return IPR_RC_JOB_CONTINUE;
-
-	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
-	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
-	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
-	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
-
-	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
-
-	return IPR_RC_JOB_RETURN;
-}
-
 /**
  * ipr_get_mode_page - Locate specified mode page
  * @mode_pages:	mode page buffer
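The deleted helper had two duties: queue `ipr_set_supported_devs` as the next job step and, if the cache was configured off, issue a shutdown-prepare first. With the cache knob gone, only the job-step bookkeeping survives, open-coded at the two former call sites in the hunks that follow. The `list_entry(ptr, type, member)` call there is the standard container_of-style conversion from an embedded `list_head` back to its containing structure; a tiny illustration with an invented type:

/* Hypothetical resource type; only the embedded list_head mirrors ipr. */
#include <linux/list.h>

struct example_res {
	struct list_head queue;	/* links this entry into a resource list */
	int handle;
};

static struct example_res *example_first_res(struct list_head *used_q)
{
	/* used_q->next points at the first entry's embedded list_head;
	 * list_entry() recovers the enclosing struct example_res. */
	return list_entry(used_q->next, struct example_res, queue);
}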
@@ -6522,7 +6390,9 @@ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
 			      length);
 
-	ipr_cmd->job_step = ipr_setup_write_cache;
+	ipr_cmd->job_step = ipr_set_supported_devs;
+	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
+				    struct ipr_resource_entry, queue);
 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
 
 	LEAVE;
@@ -6590,10 +6460,13 @@ static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
  **/
 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
 {
+	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
 
 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
-		ipr_cmd->job_step = ipr_setup_write_cache;
+		ipr_cmd->job_step = ipr_set_supported_devs;
+		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
+					    struct ipr_resource_entry, queue);
 		return IPR_RC_JOB_CONTINUE;
 	}
 
@@ -6944,13 +6817,9 @@ static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
 {
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
 
 	ENTER;
 
-	if (!ipr_inquiry_page_supported(page0, 1))
-		ioa_cfg->cache_state = CACHE_NONE;
-
 	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
 
 	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
@@ -8209,10 +8078,6 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
 	init_waitqueue_head(&ioa_cfg->reset_wait_q);
 	init_waitqueue_head(&ioa_cfg->msi_wait_q);
 	ioa_cfg->sdt_state = INACTIVE;
-	if (ipr_enable_cache)
-		ioa_cfg->cache_state = CACHE_ENABLED;
-	else
-		ioa_cfg->cache_state = CACHE_DISABLED;
 
 	ipr_initialize_bus_attr(ioa_cfg);
 	ioa_cfg->max_devs_supported = ipr_max_devs;
@@ -8841,6 +8706,61 @@ static struct pci_driver ipr_driver = {
 	.err_handler = &ipr_err_handler,
 };
 
+/**
+ * ipr_halt_done - Shutdown prepare completion
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
+{
+	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+}
+
+/**
+ * ipr_halt - Issue shutdown prepare to all adapters
+ *
+ * Return value:
+ * 	NOTIFY_OK on success / NOTIFY_DONE on failure
+ **/
+static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
+{
+	struct ipr_cmnd *ipr_cmd;
+	struct ipr_ioa_cfg *ioa_cfg;
+	unsigned long flags = 0;
+
+	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
+		return NOTIFY_DONE;
+
+	spin_lock(&ipr_driver_lock);
+
+	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
+		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+		if (!ioa_cfg->allow_cmds) {
+			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+			continue;
+		}
+
+		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
+		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
+
+		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+	}
+	spin_unlock(&ipr_driver_lock);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block ipr_notifier = {
+	ipr_halt, NULL, 0
+};
+
 /**
  * ipr_init - Module entry point
  *
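The block added above hooks the driver into the kernel's reboot notifier chain so that every operational adapter receives a shutdown-prepare before the system restarts, halts, or powers off. Note that `ipr_notifier` is initialized positionally: the first three members of `struct notifier_block` are `notifier_call`, `next`, and `priority`. A self-contained sketch of the notifier mechanism itself, with hypothetical names and the per-device quiescing elided:

/* Minimal reboot-notifier module -- illustrates the API, not ipr itself. */
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int example_halt(struct notifier_block *nb, unsigned long event,
			void *unused)
{
	/* Runs late in shutdown with event set to SYS_RESTART, SYS_HALT
	 * or SYS_POWER_OFF; quiesce hardware here. */
	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	/* ... flush caches / prepare the device for power loss ... */
	return NOTIFY_OK;
}

static struct notifier_block example_notifier = {
	.notifier_call = example_halt,
};

static int __init example_init(void)
{
	register_reboot_notifier(&example_notifier);
	return 0;
}

static void __exit example_exit(void)
{
	unregister_reboot_notifier(&example_notifier);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");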
@@ -8852,6 +8772,7 @@ static int __init ipr_init(void)
 	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
 		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
 
+	register_reboot_notifier(&ipr_notifier);
 	return pci_register_driver(&ipr_driver);
 }
 
@@ -8865,6 +8786,7 @@ static int __init ipr_init(void)
  **/
static void __exit ipr_exit(void)
 {
+	unregister_reboot_notifier(&ipr_notifier);
 	pci_unregister_driver(&ipr_driver);
 }
 