@@ -56,6 +56,8 @@ static int ipmi_init_msghandler(void);
56
56
static void smi_recv_tasklet (unsigned long );
57
57
static void handle_new_recv_msgs (ipmi_smi_t intf );
58
58
static void need_waiter (ipmi_smi_t intf );
59
+ static int handle_one_recv_msg (ipmi_smi_t intf ,
60
+ struct ipmi_smi_msg * msg );
59
61
60
62
static int initialized ;
61
63
@@ -324,6 +326,9 @@ struct ipmi_smi {
324
326
325
327
struct kref refcount ;
326
328
329
+ /* Set when the interface is being unregistered. */
330
+ bool in_shutdown ;
331
+
327
332
/* Used for a list of interfaces. */
328
333
struct list_head link ;
329
334
@@ -382,6 +387,11 @@ struct ipmi_smi {
382
387
atomic_t watchdog_pretimeouts_to_deliver ;
383
388
struct tasklet_struct recv_tasklet ;
384
389
390
+ spinlock_t xmit_msgs_lock ;
391
+ struct list_head xmit_msgs ;
392
+ struct ipmi_smi_msg * curr_msg ;
393
+ struct list_head hp_xmit_msgs ;
394
+
385
395
/*
386
396
* The list of command receivers that are registered for commands
387
397
* on this interface.
@@ -1488,7 +1498,25 @@ static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1488
1498
/*
 * Hand a message to the lower layer, or queue it if a transmission is
 * already in flight.
 *
 * If no message is currently outstanding (intf->curr_msg == NULL) the
 * message is passed straight to handlers->sender().  Otherwise it is
 * appended to the high-priority queue (priority > 0) or the normal
 * queue; smi_recv_tasklet() starts it once the current message is done.
 *
 * xmit_msgs_lock is only taken when not in run-to-completion mode,
 * matching the locking convention used elsewhere in this file.
 */
static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers,
		     struct ipmi_smi_msg *smi_msg, int priority)
{
	int run_to_completion = intf->run_to_completion;
	unsigned long flags = 0; /* keep us warning-free. */

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	if (intf->curr_msg) {
		/* A message is already in flight; queue this one behind it. */
		if (priority > 0)
			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
		else
			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
		smi_msg = NULL; /* Consumed by the queue; don't send below. */
	} else {
		intf->curr_msg = smi_msg;
	}
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	/* Deliver to the lower layer outside the lock. */
	if (smi_msg)
		handlers->sender(intf->send_info, smi_msg, 0);
}
1493
1521
1494
1522
/*
@@ -1515,7 +1543,6 @@ static int i_ipmi_request(ipmi_user_t user,
1515
1543
struct ipmi_smi_msg * smi_msg ;
1516
1544
struct ipmi_recv_msg * recv_msg ;
1517
1545
unsigned long flags ;
1518
- struct ipmi_smi_handlers * handlers ;
1519
1546
1520
1547
1521
1548
if (supplied_recv )
@@ -1538,8 +1565,7 @@ static int i_ipmi_request(ipmi_user_t user,
1538
1565
}
1539
1566
1540
1567
rcu_read_lock ();
1541
- handlers = intf -> handlers ;
1542
- if (!handlers ) {
1568
+ if (intf -> in_shutdown ) {
1543
1569
rv = - ENODEV ;
1544
1570
goto out_err ;
1545
1571
}
@@ -1874,7 +1900,7 @@ static int i_ipmi_request(ipmi_user_t user,
1874
1900
}
1875
1901
#endif
1876
1902
1877
- smi_send (intf , handlers , smi_msg , priority );
1903
+ smi_send (intf , intf -> handlers , smi_msg , priority );
1878
1904
rcu_read_unlock ();
1879
1905
1880
1906
return 0 ;
@@ -2810,6 +2836,9 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2810
2836
smi_recv_tasklet ,
2811
2837
(unsigned long ) intf );
2812
2838
atomic_set (& intf -> watchdog_pretimeouts_to_deliver , 0 );
2839
+ spin_lock_init (& intf -> xmit_msgs_lock );
2840
+ INIT_LIST_HEAD (& intf -> xmit_msgs );
2841
+ INIT_LIST_HEAD (& intf -> hp_xmit_msgs );
2813
2842
spin_lock_init (& intf -> events_lock );
2814
2843
atomic_set (& intf -> event_waiters , 0 );
2815
2844
intf -> ticks_to_req_ev = IPMI_REQUEST_EV_TIME ;
@@ -2909,12 +2938,50 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2909
2938
}
2910
2939
EXPORT_SYMBOL (ipmi_register_smi );
2911
2940
2941
+ static void deliver_smi_err_response (ipmi_smi_t intf ,
2942
+ struct ipmi_smi_msg * msg ,
2943
+ unsigned char err )
2944
+ {
2945
+ msg -> rsp [0 ] = msg -> data [0 ] | 4 ;
2946
+ msg -> rsp [1 ] = msg -> data [1 ];
2947
+ msg -> rsp [2 ] = err ;
2948
+ msg -> rsp_size = 3 ;
2949
+ /* It's an error, so it will never requeue, no need to check return. */
2950
+ handle_one_recv_msg (intf , msg );
2951
+ }
2952
+
2912
2953
static void cleanup_smi_msgs (ipmi_smi_t intf )
2913
2954
{
2914
2955
int i ;
2915
2956
struct seq_table * ent ;
2957
+ struct ipmi_smi_msg * msg ;
2958
+ struct list_head * entry ;
2959
+ struct list_head tmplist ;
2960
+
2961
+ /* Clear out our transmit queues and hold the messages. */
2962
+ INIT_LIST_HEAD (& tmplist );
2963
+ list_splice_tail (& intf -> hp_xmit_msgs , & tmplist );
2964
+ list_splice_tail (& intf -> xmit_msgs , & tmplist );
2965
+
2966
+ /* Current message first, to preserve order */
2967
+ while (intf -> curr_msg && !list_empty (& intf -> waiting_rcv_msgs )) {
2968
+ /* Wait for the message to clear out. */
2969
+ schedule_timeout (1 );
2970
+ }
2916
2971
2917
2972
/* No need for locks, the interface is down. */
2973
+
2974
+ /*
2975
+ * Return errors for all pending messages in queue and in the
2976
+ * tables waiting for remote responses.
2977
+ */
2978
+ while (!list_empty (& tmplist )) {
2979
+ entry = tmplist .next ;
2980
+ list_del (entry );
2981
+ msg = list_entry (entry , struct ipmi_smi_msg , link );
2982
+ deliver_smi_err_response (intf , msg , IPMI_ERR_UNSPECIFIED );
2983
+ }
2984
+
2918
2985
for (i = 0 ; i < IPMI_IPMB_NUM_SEQ ; i ++ ) {
2919
2986
ent = & (intf -> seq_table [i ]);
2920
2987
if (!ent -> inuse )
@@ -2926,20 +2993,33 @@ static void cleanup_smi_msgs(ipmi_smi_t intf)
2926
2993
int ipmi_unregister_smi (ipmi_smi_t intf )
2927
2994
{
2928
2995
struct ipmi_smi_watcher * w ;
2929
- int intf_num = intf -> intf_num ;
2996
+ int intf_num = intf -> intf_num ;
2997
+ ipmi_user_t user ;
2930
2998
2931
2999
ipmi_bmc_unregister (intf );
2932
3000
2933
3001
mutex_lock (& smi_watchers_mutex );
2934
3002
mutex_lock (& ipmi_interfaces_mutex );
2935
3003
intf -> intf_num = -1 ;
2936
- intf -> handlers = NULL ;
3004
+ intf -> in_shutdown = true ;
2937
3005
list_del_rcu (& intf -> link );
2938
3006
mutex_unlock (& ipmi_interfaces_mutex );
2939
3007
synchronize_rcu ();
2940
3008
2941
3009
cleanup_smi_msgs (intf );
2942
3010
3011
+ /* Clean up the effects of users on the lower-level software. */
3012
+ mutex_lock (& ipmi_interfaces_mutex );
3013
+ rcu_read_lock ();
3014
+ list_for_each_entry_rcu (user , & intf -> users , link ) {
3015
+ module_put (intf -> handlers -> owner );
3016
+ if (intf -> handlers -> dec_usecount )
3017
+ intf -> handlers -> dec_usecount (intf -> send_info );
3018
+ }
3019
+ rcu_read_unlock ();
3020
+ intf -> handlers = NULL ;
3021
+ mutex_unlock (& ipmi_interfaces_mutex );
3022
+
2943
3023
remove_proc_entries (intf );
2944
3024
2945
3025
/*
@@ -3029,7 +3109,6 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
3029
3109
ipmi_user_t user = NULL ;
3030
3110
struct ipmi_ipmb_addr * ipmb_addr ;
3031
3111
struct ipmi_recv_msg * recv_msg ;
3032
- struct ipmi_smi_handlers * handlers ;
3033
3112
3034
3113
if (msg -> rsp_size < 10 ) {
3035
3114
/* Message not big enough, just ignore it. */
@@ -3083,9 +3162,8 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
3083
3162
}
3084
3163
#endif
3085
3164
rcu_read_lock ();
3086
- handlers = intf -> handlers ;
3087
- if (handlers ) {
3088
- smi_send (intf , handlers , msg , 0 );
3165
+ if (!intf -> in_shutdown ) {
3166
+ smi_send (intf , intf -> handlers , msg , 0 );
3089
3167
/*
3090
3168
* We used the message, so return the value
3091
3169
* that causes it to not be freed or
@@ -3756,25 +3834,24 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
3756
3834
while (!list_empty (& intf -> waiting_rcv_msgs )) {
3757
3835
smi_msg = list_entry (intf -> waiting_rcv_msgs .next ,
3758
3836
struct ipmi_smi_msg , link );
3759
- list_del (& smi_msg -> link );
3760
3837
if (!run_to_completion )
3761
3838
spin_unlock_irqrestore (& intf -> waiting_rcv_msgs_lock ,
3762
3839
flags );
3763
3840
rv = handle_one_recv_msg (intf , smi_msg );
3764
3841
if (!run_to_completion )
3765
3842
spin_lock_irqsave (& intf -> waiting_rcv_msgs_lock , flags );
3766
- if (rv == 0 ) {
3767
- /* Message handled */
3768
- ipmi_free_smi_msg (smi_msg );
3769
- } else if (rv < 0 ) {
3770
- /* Fatal error on the message, del but don't free. */
3771
- } else {
3843
+ if (rv > 0 ) {
3772
3844
/*
3773
3845
* To preserve message order, quit if we
3774
3846
* can't handle a message.
3775
3847
*/
3776
- list_add (& smi_msg -> link , & intf -> waiting_rcv_msgs );
3777
3848
break ;
3849
+ } else {
3850
+ list_del (& smi_msg -> link );
3851
+ if (rv == 0 )
3852
+ /* Message handled */
3853
+ ipmi_free_smi_msg (smi_msg );
3854
+ /* If rv < 0, fatal error, del but don't free. */
3778
3855
}
3779
3856
}
3780
3857
if (!run_to_completion )
@@ -3799,21 +3876,58 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
3799
3876
3800
3877
static void smi_recv_tasklet (unsigned long val )
3801
3878
{
3802
- handle_new_recv_msgs ((ipmi_smi_t ) val );
3879
+ unsigned long flags = 0 ; /* keep us warning-free. */
3880
+ ipmi_smi_t intf = (ipmi_smi_t ) val ;
3881
+ int run_to_completion = intf -> run_to_completion ;
3882
+ struct ipmi_smi_msg * newmsg = NULL ;
3883
+
3884
+ /*
3885
+ * Start the next message if available.
3886
+ *
3887
+ * Do this here, not in the actual receiver, because we may deadlock
3888
+ * because the lower layer is allowed to hold locks while calling
3889
+ * message delivery.
3890
+ */
3891
+ if (!run_to_completion )
3892
+ spin_lock_irqsave (& intf -> xmit_msgs_lock , flags );
3893
+ if (intf -> curr_msg == NULL && !intf -> in_shutdown ) {
3894
+ struct list_head * entry = NULL ;
3895
+
3896
+ /* Pick the high priority queue first. */
3897
+ if (!list_empty (& intf -> hp_xmit_msgs ))
3898
+ entry = intf -> hp_xmit_msgs .next ;
3899
+ else if (!list_empty (& intf -> xmit_msgs ))
3900
+ entry = intf -> xmit_msgs .next ;
3901
+
3902
+ if (entry ) {
3903
+ list_del (entry );
3904
+ newmsg = list_entry (entry , struct ipmi_smi_msg , link );
3905
+ intf -> curr_msg = newmsg ;
3906
+ }
3907
+ }
3908
+ if (!run_to_completion )
3909
+ spin_unlock_irqrestore (& intf -> xmit_msgs_lock , flags );
3910
+ if (newmsg )
3911
+ intf -> handlers -> sender (intf -> send_info , newmsg , 0 );
3912
+
3913
+ handle_new_recv_msgs (intf );
3803
3914
}
3804
3915
3805
3916
/* Handle a new message from the lower layer. */
3806
3917
void ipmi_smi_msg_received (ipmi_smi_t intf ,
3807
3918
struct ipmi_smi_msg * msg )
3808
3919
{
3809
3920
unsigned long flags = 0 ; /* keep us warning-free. */
3810
- int run_to_completion ;
3811
-
3921
+ int run_to_completion = intf -> run_to_completion ;
3812
3922
3813
3923
if ((msg -> data_size >= 2 )
3814
3924
&& (msg -> data [0 ] == (IPMI_NETFN_APP_REQUEST << 2 ))
3815
3925
&& (msg -> data [1 ] == IPMI_SEND_MSG_CMD )
3816
3926
&& (msg -> user_data == NULL )) {
3927
+
3928
+ if (intf -> in_shutdown )
3929
+ goto free_msg ;
3930
+
3817
3931
/*
3818
3932
* This is the local response to a command send, start
3819
3933
* the timer for these. The user_data will not be
@@ -3849,29 +3963,40 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
3849
3963
/* The message was sent, start the timer. */
3850
3964
intf_start_seq_timer (intf , msg -> msgid );
3851
3965
3966
+ free_msg :
3852
3967
ipmi_free_smi_msg (msg );
3853
- goto out ;
3968
+ } else {
3969
+ /*
3970
+ * To preserve message order, we keep a queue and deliver from
3971
+ * a tasklet.
3972
+ */
3973
+ if (!run_to_completion )
3974
+ spin_lock_irqsave (& intf -> waiting_rcv_msgs_lock , flags );
3975
+ list_add_tail (& msg -> link , & intf -> waiting_rcv_msgs );
3976
+ if (!run_to_completion )
3977
+ spin_unlock_irqrestore (& intf -> waiting_rcv_msgs_lock ,
3978
+ flags );
3854
3979
}
3855
3980
3856
- /*
3857
- * To preserve message order, if the list is not empty, we
3858
- * tack this message onto the end of the list.
3859
- */
3860
- run_to_completion = intf -> run_to_completion ;
3861
3981
if (!run_to_completion )
3862
- spin_lock_irqsave (& intf -> waiting_rcv_msgs_lock , flags );
3863
- list_add_tail (& msg -> link , & intf -> waiting_rcv_msgs );
3982
+ spin_lock_irqsave (& intf -> xmit_msgs_lock , flags );
3983
+ if (msg == intf -> curr_msg )
3984
+ intf -> curr_msg = NULL ;
3864
3985
if (!run_to_completion )
3865
- spin_unlock_irqrestore (& intf -> waiting_rcv_msgs_lock , flags );
3986
+ spin_unlock_irqrestore (& intf -> xmit_msgs_lock , flags );
3866
3987
3867
- tasklet_schedule (& intf -> recv_tasklet );
3868
- out :
3869
- return ;
3988
+ if (run_to_completion )
3989
+ smi_recv_tasklet ((unsigned long ) intf );
3990
+ else
3991
+ tasklet_schedule (& intf -> recv_tasklet );
3870
3992
}
3871
3993
EXPORT_SYMBOL (ipmi_smi_msg_received );
3872
3994
3873
3995
/*
 * Called by the lower layer on a watchdog pretimeout: flag it for
 * delivery and kick the receive tasklet.  Ignored while the interface
 * is being unregistered.
 */
void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
{
	if (!intf->in_shutdown) {
		atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
		tasklet_schedule(&intf->recv_tasklet);
	}
}
@@ -3913,7 +4038,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3913
4038
struct ipmi_recv_msg * msg ;
3914
4039
struct ipmi_smi_handlers * handlers ;
3915
4040
3916
- if (intf -> intf_num == -1 )
4041
+ if (intf -> in_shutdown )
3917
4042
return ;
3918
4043
3919
4044
if (!ent -> inuse )
@@ -4040,15 +4165,12 @@ static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
4040
4165
4041
4166
/*
 * Ask the lower layer to fetch pending events.  Suppressed both in
 * maintenance mode and while the interface is shutting down.
 */
static void ipmi_request_event(ipmi_smi_t intf)
{
	/* No event requests when in maintenance mode or shutting down. */
	if (intf->maintenance_mode_enable || intf->in_shutdown)
		return;

	intf->handlers->request_events(intf->send_info);
}
4053
4175
4054
4176
static struct timer_list ipmi_timer ;
0 commit comments