Skip to content

Commit 7ea0ed2

Browse files
committed
ipmi: Make the message handler easier to use for SMI interfaces
The message handler expected the SMI interface to keep a queue of messages, but that was kind of silly; the queue would be easier to manage in the message handler itself. As part of that, fix the message cleanup to make sure no messages are outstanding when an SMI interface is unregistered. This makes it easier for an SMI interface to unregister: it just has to call ipmi_unregister_smi() first, and all processing from the message handler will be cleaned up. Signed-off-by: Corey Minyard <[email protected]>
1 parent 7f4a1c8 commit 7ea0ed2

File tree

1 file changed

+162
-40
lines changed

1 file changed

+162
-40
lines changed

drivers/char/ipmi/ipmi_msghandler.c

+162-40
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,8 @@ static int ipmi_init_msghandler(void);
5656
static void smi_recv_tasklet(unsigned long);
5757
static void handle_new_recv_msgs(ipmi_smi_t intf);
5858
static void need_waiter(ipmi_smi_t intf);
59+
static int handle_one_recv_msg(ipmi_smi_t intf,
60+
struct ipmi_smi_msg *msg);
5961

6062
static int initialized;
6163

@@ -324,6 +326,9 @@ struct ipmi_smi {
324326

325327
struct kref refcount;
326328

329+
/* Set when the interface is being unregistered. */
330+
bool in_shutdown;
331+
327332
/* Used for a list of interfaces. */
328333
struct list_head link;
329334

@@ -382,6 +387,11 @@ struct ipmi_smi {
382387
atomic_t watchdog_pretimeouts_to_deliver;
383388
struct tasklet_struct recv_tasklet;
384389

390+
spinlock_t xmit_msgs_lock;
391+
struct list_head xmit_msgs;
392+
struct ipmi_smi_msg *curr_msg;
393+
struct list_head hp_xmit_msgs;
394+
385395
/*
386396
* The list of command receivers that are registered for commands
387397
* on this interface.
@@ -1488,7 +1498,25 @@ static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
14881498
static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers,
14891499
struct ipmi_smi_msg *smi_msg, int priority)
14901500
{
1491-
handlers->sender(intf->send_info, smi_msg, 0);
1501+
int run_to_completion = intf->run_to_completion;
1502+
unsigned long flags;
1503+
1504+
if (!run_to_completion)
1505+
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
1506+
if (intf->curr_msg) {
1507+
if (priority > 0)
1508+
list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
1509+
else
1510+
list_add_tail(&smi_msg->link, &intf->xmit_msgs);
1511+
smi_msg = NULL;
1512+
} else {
1513+
intf->curr_msg = smi_msg;
1514+
}
1515+
if (!run_to_completion)
1516+
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
1517+
1518+
if (smi_msg)
1519+
handlers->sender(intf->send_info, smi_msg, 0);
14921520
}
14931521

14941522
/*
@@ -1515,7 +1543,6 @@ static int i_ipmi_request(ipmi_user_t user,
15151543
struct ipmi_smi_msg *smi_msg;
15161544
struct ipmi_recv_msg *recv_msg;
15171545
unsigned long flags;
1518-
struct ipmi_smi_handlers *handlers;
15191546

15201547

15211548
if (supplied_recv)
@@ -1538,8 +1565,7 @@ static int i_ipmi_request(ipmi_user_t user,
15381565
}
15391566

15401567
rcu_read_lock();
1541-
handlers = intf->handlers;
1542-
if (!handlers) {
1568+
if (intf->in_shutdown) {
15431569
rv = -ENODEV;
15441570
goto out_err;
15451571
}
@@ -1874,7 +1900,7 @@ static int i_ipmi_request(ipmi_user_t user,
18741900
}
18751901
#endif
18761902

1877-
smi_send(intf, handlers, smi_msg, priority);
1903+
smi_send(intf, intf->handlers, smi_msg, priority);
18781904
rcu_read_unlock();
18791905

18801906
return 0;
@@ -2810,6 +2836,9 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
28102836
smi_recv_tasklet,
28112837
(unsigned long) intf);
28122838
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
2839+
spin_lock_init(&intf->xmit_msgs_lock);
2840+
INIT_LIST_HEAD(&intf->xmit_msgs);
2841+
INIT_LIST_HEAD(&intf->hp_xmit_msgs);
28132842
spin_lock_init(&intf->events_lock);
28142843
atomic_set(&intf->event_waiters, 0);
28152844
intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
@@ -2909,12 +2938,50 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
29092938
}
29102939
EXPORT_SYMBOL(ipmi_register_smi);
29112940

2941+
static void deliver_smi_err_response(ipmi_smi_t intf,
2942+
struct ipmi_smi_msg *msg,
2943+
unsigned char err)
2944+
{
2945+
msg->rsp[0] = msg->data[0] | 4;
2946+
msg->rsp[1] = msg->data[1];
2947+
msg->rsp[2] = err;
2948+
msg->rsp_size = 3;
2949+
/* It's an error, so it will never requeue, no need to check return. */
2950+
handle_one_recv_msg(intf, msg);
2951+
}
2952+
29122953
static void cleanup_smi_msgs(ipmi_smi_t intf)
29132954
{
29142955
int i;
29152956
struct seq_table *ent;
2957+
struct ipmi_smi_msg *msg;
2958+
struct list_head *entry;
2959+
struct list_head tmplist;
2960+
2961+
/* Clear out our transmit queues and hold the messages. */
2962+
INIT_LIST_HEAD(&tmplist);
2963+
list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
2964+
list_splice_tail(&intf->xmit_msgs, &tmplist);
2965+
2966+
/* Current message first, to preserve order */
2967+
while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
2968+
/* Wait for the message to clear out. */
2969+
schedule_timeout(1);
2970+
}
29162971

29172972
/* No need for locks, the interface is down. */
2973+
2974+
/*
2975+
* Return errors for all pending messages in queue and in the
2976+
* tables waiting for remote responses.
2977+
*/
2978+
while (!list_empty(&tmplist)) {
2979+
entry = tmplist.next;
2980+
list_del(entry);
2981+
msg = list_entry(entry, struct ipmi_smi_msg, link);
2982+
deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
2983+
}
2984+
29182985
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
29192986
ent = &(intf->seq_table[i]);
29202987
if (!ent->inuse)
@@ -2926,20 +2993,33 @@ static void cleanup_smi_msgs(ipmi_smi_t intf)
29262993
int ipmi_unregister_smi(ipmi_smi_t intf)
29272994
{
29282995
struct ipmi_smi_watcher *w;
2929-
int intf_num = intf->intf_num;
2996+
int intf_num = intf->intf_num;
2997+
ipmi_user_t user;
29302998

29312999
ipmi_bmc_unregister(intf);
29323000

29333001
mutex_lock(&smi_watchers_mutex);
29343002
mutex_lock(&ipmi_interfaces_mutex);
29353003
intf->intf_num = -1;
2936-
intf->handlers = NULL;
3004+
intf->in_shutdown = true;
29373005
list_del_rcu(&intf->link);
29383006
mutex_unlock(&ipmi_interfaces_mutex);
29393007
synchronize_rcu();
29403008

29413009
cleanup_smi_msgs(intf);
29423010

3011+
/* Clean up the effects of users on the lower-level software. */
3012+
mutex_lock(&ipmi_interfaces_mutex);
3013+
rcu_read_lock();
3014+
list_for_each_entry_rcu(user, &intf->users, link) {
3015+
module_put(intf->handlers->owner);
3016+
if (intf->handlers->dec_usecount)
3017+
intf->handlers->dec_usecount(intf->send_info);
3018+
}
3019+
rcu_read_unlock();
3020+
intf->handlers = NULL;
3021+
mutex_unlock(&ipmi_interfaces_mutex);
3022+
29433023
remove_proc_entries(intf);
29443024

29453025
/*
@@ -3029,7 +3109,6 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
30293109
ipmi_user_t user = NULL;
30303110
struct ipmi_ipmb_addr *ipmb_addr;
30313111
struct ipmi_recv_msg *recv_msg;
3032-
struct ipmi_smi_handlers *handlers;
30333112

30343113
if (msg->rsp_size < 10) {
30353114
/* Message not big enough, just ignore it. */
@@ -3083,9 +3162,8 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
30833162
}
30843163
#endif
30853164
rcu_read_lock();
3086-
handlers = intf->handlers;
3087-
if (handlers) {
3088-
smi_send(intf, handlers, msg, 0);
3165+
if (!intf->in_shutdown) {
3166+
smi_send(intf, intf->handlers, msg, 0);
30893167
/*
30903168
* We used the message, so return the value
30913169
* that causes it to not be freed or
@@ -3756,25 +3834,24 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
37563834
while (!list_empty(&intf->waiting_rcv_msgs)) {
37573835
smi_msg = list_entry(intf->waiting_rcv_msgs.next,
37583836
struct ipmi_smi_msg, link);
3759-
list_del(&smi_msg->link);
37603837
if (!run_to_completion)
37613838
spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
37623839
flags);
37633840
rv = handle_one_recv_msg(intf, smi_msg);
37643841
if (!run_to_completion)
37653842
spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
3766-
if (rv == 0) {
3767-
/* Message handled */
3768-
ipmi_free_smi_msg(smi_msg);
3769-
} else if (rv < 0) {
3770-
/* Fatal error on the message, del but don't free. */
3771-
} else {
3843+
if (rv > 0) {
37723844
/*
37733845
* To preserve message order, quit if we
37743846
* can't handle a message.
37753847
*/
3776-
list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
37773848
break;
3849+
} else {
3850+
list_del(&smi_msg->link);
3851+
if (rv == 0)
3852+
/* Message handled */
3853+
ipmi_free_smi_msg(smi_msg);
3854+
/* If rv < 0, fatal error, del but don't free. */
37783855
}
37793856
}
37803857
if (!run_to_completion)
@@ -3799,21 +3876,58 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
37993876

38003877
static void smi_recv_tasklet(unsigned long val)
38013878
{
3802-
handle_new_recv_msgs((ipmi_smi_t) val);
3879+
unsigned long flags = 0; /* keep us warning-free. */
3880+
ipmi_smi_t intf = (ipmi_smi_t) val;
3881+
int run_to_completion = intf->run_to_completion;
3882+
struct ipmi_smi_msg *newmsg = NULL;
3883+
3884+
/*
3885+
* Start the next message if available.
3886+
*
3887+
* Do this here, not in the actual receiver, because we may deadlock
3888+
* because the lower layer is allowed to hold locks while calling
3889+
* message delivery.
3890+
*/
3891+
if (!run_to_completion)
3892+
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
3893+
if (intf->curr_msg == NULL && !intf->in_shutdown) {
3894+
struct list_head *entry = NULL;
3895+
3896+
/* Pick the high priority queue first. */
3897+
if (!list_empty(&intf->hp_xmit_msgs))
3898+
entry = intf->hp_xmit_msgs.next;
3899+
else if (!list_empty(&intf->xmit_msgs))
3900+
entry = intf->xmit_msgs.next;
3901+
3902+
if (entry) {
3903+
list_del(entry);
3904+
newmsg = list_entry(entry, struct ipmi_smi_msg, link);
3905+
intf->curr_msg = newmsg;
3906+
}
3907+
}
3908+
if (!run_to_completion)
3909+
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
3910+
if (newmsg)
3911+
intf->handlers->sender(intf->send_info, newmsg, 0);
3912+
3913+
handle_new_recv_msgs(intf);
38033914
}
38043915

38053916
/* Handle a new message from the lower layer. */
38063917
void ipmi_smi_msg_received(ipmi_smi_t intf,
38073918
struct ipmi_smi_msg *msg)
38083919
{
38093920
unsigned long flags = 0; /* keep us warning-free. */
3810-
int run_to_completion;
3811-
3921+
int run_to_completion = intf->run_to_completion;
38123922

38133923
if ((msg->data_size >= 2)
38143924
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
38153925
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
38163926
&& (msg->user_data == NULL)) {
3927+
3928+
if (intf->in_shutdown)
3929+
goto free_msg;
3930+
38173931
/*
38183932
* This is the local response to a command send, start
38193933
* the timer for these. The user_data will not be
@@ -3849,29 +3963,40 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
38493963
/* The message was sent, start the timer. */
38503964
intf_start_seq_timer(intf, msg->msgid);
38513965

3966+
free_msg:
38523967
ipmi_free_smi_msg(msg);
3853-
goto out;
3968+
} else {
3969+
/*
3970+
* To preserve message order, we keep a queue and deliver from
3971+
* a tasklet.
3972+
*/
3973+
if (!run_to_completion)
3974+
spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
3975+
list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
3976+
if (!run_to_completion)
3977+
spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
3978+
flags);
38543979
}
38553980

3856-
/*
3857-
* To preserve message order, if the list is not empty, we
3858-
* tack this message onto the end of the list.
3859-
*/
3860-
run_to_completion = intf->run_to_completion;
38613981
if (!run_to_completion)
3862-
spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
3863-
list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
3982+
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
3983+
if (msg == intf->curr_msg)
3984+
intf->curr_msg = NULL;
38643985
if (!run_to_completion)
3865-
spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
3986+
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
38663987

3867-
tasklet_schedule(&intf->recv_tasklet);
3868-
out:
3869-
return;
3988+
if (run_to_completion)
3989+
smi_recv_tasklet((unsigned long) intf);
3990+
else
3991+
tasklet_schedule(&intf->recv_tasklet);
38703992
}
38713993
EXPORT_SYMBOL(ipmi_smi_msg_received);
38723994

38733995
void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
38743996
{
3997+
if (intf->in_shutdown)
3998+
return;
3999+
38754000
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
38764001
tasklet_schedule(&intf->recv_tasklet);
38774002
}
@@ -3913,7 +4038,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
39134038
struct ipmi_recv_msg *msg;
39144039
struct ipmi_smi_handlers *handlers;
39154040

3916-
if (intf->intf_num == -1)
4041+
if (intf->in_shutdown)
39174042
return;
39184043

39194044
if (!ent->inuse)
@@ -4040,15 +4165,12 @@ static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
40404165

40414166
static void ipmi_request_event(ipmi_smi_t intf)
40424167
{
4043-
struct ipmi_smi_handlers *handlers;
4044-
40454168
/* No event requests when in maintenance mode. */
40464169
if (intf->maintenance_mode_enable)
40474170
return;
40484171

4049-
handlers = intf->handlers;
4050-
if (handlers)
4051-
handlers->request_events(intf->send_info);
4172+
if (!intf->in_shutdown)
4173+
intf->handlers->request_events(intf->send_info);
40524174
}
40534175

40544176
static struct timer_list ipmi_timer;

0 commit comments

Comments
 (0)