@@ -804,7 +804,6 @@ struct hci_conn_params {
804
804
extern struct list_head hci_dev_list ;
805
805
extern struct list_head hci_cb_list ;
806
806
extern rwlock_t hci_dev_list_lock ;
807
- extern struct mutex hci_cb_list_lock ;
808
807
809
808
#define hci_dev_set_flag (hdev , nr ) set_bit((nr), (hdev)->dev_flags)
810
809
#define hci_dev_clear_flag (hdev , nr ) clear_bit((nr), (hdev)->dev_flags)
@@ -2017,68 +2016,103 @@ struct hci_cb {
2017
2016
2018
2017
char * name ;
2019
2018
2019
+ bool (* match ) (struct hci_conn * conn );
2020
2020
void (* connect_cfm ) (struct hci_conn * conn , __u8 status );
2021
2021
void (* disconn_cfm ) (struct hci_conn * conn , __u8 status );
2022
2022
void (* security_cfm ) (struct hci_conn * conn , __u8 status ,
2023
- __u8 encrypt );
2023
+ __u8 encrypt );
2024
2024
void (* key_change_cfm ) (struct hci_conn * conn , __u8 status );
2025
2025
void (* role_switch_cfm ) (struct hci_conn * conn , __u8 status , __u8 role );
2026
2026
};
2027
2027
2028
+ static inline void hci_cb_lookup (struct hci_conn * conn , struct list_head * list )
2029
+ {
2030
+ struct hci_cb * cb , * cpy ;
2031
+
2032
+ rcu_read_lock ();
2033
+ list_for_each_entry_rcu (cb , & hci_cb_list , list ) {
2034
+ if (cb -> match && cb -> match (conn )) {
2035
+ cpy = kmalloc (sizeof (* cpy ), GFP_ATOMIC );
2036
+ if (!cpy )
2037
+ break ;
2038
+
2039
+ * cpy = * cb ;
2040
+ INIT_LIST_HEAD (& cpy -> list );
2041
+ list_add_rcu (& cpy -> list , list );
2042
+ }
2043
+ }
2044
+ rcu_read_unlock ();
2045
+ }
2046
+
2028
2047
/* Notify all interested callback sets that a connection attempt finished.
 *
 * Builds a private snapshot of matching hci_cb entries (so no list lock is
 * held while invoking callbacks, which may sleep), invokes ->connect_cfm()
 * on each, frees the snapshots, then fires the per-connection hook.
 */
static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
{
	struct hci_cb *entry, *next;
	LIST_HEAD(matched);

	hci_cb_lookup(conn, &matched);

	list_for_each_entry_safe(entry, next, &matched, list) {
		if (entry->connect_cfm)
			entry->connect_cfm(conn, status);
		kfree(entry);
	}

	if (conn->connect_cfm_cb)
		conn->connect_cfm_cb(conn, status);
}
2042
2064
2043
2065
/* Notify all interested callback sets that a connection was torn down.
 *
 * Same lock-free snapshot pattern as hci_connect_cfm(): copy the matching
 * hci_cb entries, call ->disconn_cfm() on each copy, release the copies,
 * then run the per-connection hook.
 */
static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
	struct hci_cb *entry, *next;
	LIST_HEAD(matched);

	hci_cb_lookup(conn, &matched);

	list_for_each_entry_safe(entry, next, &matched, list) {
		if (entry->disconn_cfm)
			entry->disconn_cfm(conn, reason);
		kfree(entry);
	}

	if (conn->disconn_cfm_cb)
		conn->disconn_cfm_cb(conn, reason);
}
2057
2082
2058
- static inline void hci_auth_cfm (struct hci_conn * conn , __u8 status )
2083
+ static inline void hci_security_cfm (struct hci_conn * conn , __u8 status ,
2084
+ __u8 encrypt )
2059
2085
{
2060
- struct hci_cb * cb ;
2061
- __u8 encrypt ;
2062
-
2063
- if (test_bit (HCI_CONN_ENCRYPT_PEND , & conn -> flags ))
2064
- return ;
2086
+ struct list_head list ;
2087
+ struct hci_cb * cb , * tmp ;
2065
2088
2066
- encrypt = test_bit (HCI_CONN_ENCRYPT , & conn -> flags ) ? 0x01 : 0x00 ;
2089
+ INIT_LIST_HEAD (& list );
2090
+ hci_cb_lookup (conn , & list );
2067
2091
2068
- mutex_lock (& hci_cb_list_lock );
2069
- list_for_each_entry (cb , & hci_cb_list , list ) {
2092
+ list_for_each_entry_safe (cb , tmp , & list , list ) {
2070
2093
if (cb -> security_cfm )
2071
2094
cb -> security_cfm (conn , status , encrypt );
2095
+ kfree (cb );
2072
2096
}
2073
- mutex_unlock (& hci_cb_list_lock );
2074
2097
2075
2098
if (conn -> security_cfm_cb )
2076
2099
conn -> security_cfm_cb (conn , status );
2077
2100
}
2078
2101
2102
+ static inline void hci_auth_cfm (struct hci_conn * conn , __u8 status )
2103
+ {
2104
+ __u8 encrypt ;
2105
+
2106
+ if (test_bit (HCI_CONN_ENCRYPT_PEND , & conn -> flags ))
2107
+ return ;
2108
+
2109
+ encrypt = test_bit (HCI_CONN_ENCRYPT , & conn -> flags ) ? 0x01 : 0x00 ;
2110
+
2111
+ hci_security_cfm (conn , status , encrypt );
2112
+ }
2113
+
2079
2114
static inline void hci_encrypt_cfm (struct hci_conn * conn , __u8 status )
2080
2115
{
2081
- struct hci_cb * cb ;
2082
2116
__u8 encrypt ;
2083
2117
2084
2118
if (conn -> state == BT_CONFIG ) {
@@ -2105,40 +2139,38 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
2105
2139
conn -> sec_level = conn -> pending_sec_level ;
2106
2140
}
2107
2141
2108
- mutex_lock (& hci_cb_list_lock );
2109
- list_for_each_entry (cb , & hci_cb_list , list ) {
2110
- if (cb -> security_cfm )
2111
- cb -> security_cfm (conn , status , encrypt );
2112
- }
2113
- mutex_unlock (& hci_cb_list_lock );
2114
-
2115
- if (conn -> security_cfm_cb )
2116
- conn -> security_cfm_cb (conn , status );
2142
+ hci_security_cfm (conn , status , encrypt );
2117
2143
}
2118
2144
2119
2145
/* Inform matching callback sets that the link key for @conn changed.
 *
 * Uses the snapshot pattern: copies of the matching hci_cb entries are
 * collected, ->key_change_cfm() is invoked on each, and the copies are
 * freed.  There is no per-connection hook for this event.
 */
static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
	struct hci_cb *entry, *next;
	LIST_HEAD(matched);

	hci_cb_lookup(conn, &matched);

	list_for_each_entry_safe(entry, next, &matched, list) {
		if (entry->key_change_cfm)
			entry->key_change_cfm(conn, status);
		kfree(entry);
	}
}
2130
2159
2131
2160
/* Inform matching callback sets that a role switch completed on @conn.
 *
 * Snapshot pattern again: matching hci_cb entries are copied, each copy's
 * ->role_switch_cfm() is called with the result and the new role, and the
 * copies are freed.  No per-connection hook exists for this event.
 */
static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
				       __u8 role)
{
	struct hci_cb *entry, *next;
	LIST_HEAD(matched);

	hci_cb_lookup(conn, &matched);

	list_for_each_entry_safe(entry, next, &matched, list) {
		if (entry->role_switch_cfm)
			entry->role_switch_cfm(conn, status, role);
		kfree(entry);
	}
}
2143
2175
2144
2176
static inline bool hci_bdaddr_is_rpa (bdaddr_t * bdaddr , u8 addr_type )
0 commit comments