@@ -1101,6 +1101,14 @@ struct ftrace_func_entry {
 	unsigned long ip;
 };
 
+struct ftrace_func_probe {
+	struct ftrace_probe_ops	*probe_ops;
+	struct ftrace_ops	ops;
+	struct trace_array	*tr;
+	struct list_head	list;
+	int			ref;
+};
+
 /*
  * We make these constant because no one should touch them,
  * but they are used as the default "empty hash", to avoid allocating
@@ -3054,7 +3062,7 @@ struct ftrace_iterator {
 	loff_t				func_pos;
 	struct ftrace_page		*pg;
 	struct dyn_ftrace		*func;
-	struct ftrace_probe_ops		*probe;
+	struct ftrace_func_probe	*probe;
 	struct ftrace_func_entry	*probe_entry;
 	struct trace_parser		parser;
 	struct ftrace_hash		*hash;
@@ -3088,7 +3096,7 @@ t_probe_next(struct seq_file *m, loff_t *pos)
 
 	if (!iter->probe) {
 		next = func_probes->next;
-		iter->probe = list_entry(next, struct ftrace_probe_ops, list);
+		iter->probe = list_entry(next, struct ftrace_func_probe, list);
 	}
 
 	if (iter->probe_entry)
@@ -3102,7 +3110,7 @@ t_probe_next(struct seq_file *m, loff_t *pos)
 		if (iter->probe->list.next == func_probes)
 			return NULL;
 		next = iter->probe->list.next;
-		iter->probe = list_entry(next, struct ftrace_probe_ops, list);
+		iter->probe = list_entry(next, struct ftrace_func_probe, list);
 		hash = iter->probe->ops.func_hash->filter_hash;
 		size = 1 << hash->size_bits;
 		iter->pidx = 0;
@@ -3166,19 +3174,23 @@ static void *t_probe_start(struct seq_file *m, loff_t *pos)
 static int
 t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
 {
-	struct ftrace_probe_ops *probe;
 	struct ftrace_func_entry *probe_entry;
+	struct ftrace_probe_ops *probe_ops;
+	struct ftrace_func_probe *probe;
 
 	probe = iter->probe;
 	probe_entry = iter->probe_entry;
 
 	if (WARN_ON_ONCE(!probe || !probe_entry))
 		return -EIO;
 
-	if (probe->print)
-		return probe->print(m, probe_entry->ip, probe, NULL);
+	probe_ops = probe->probe_ops;
+
+	if (probe_ops->print)
+		return probe_ops->print(m, probe_entry->ip, probe_ops, NULL);
 
-	seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip, (void *)probe->func);
+	seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
+		   (void *)probe_ops->func);
 
 	return 0;
 }
@@ -3791,17 +3803,18 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 				      struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
 	struct ftrace_probe_ops *probe_ops;
-	struct trace_array *tr = op->private;
+	struct ftrace_func_probe *probe;
 
-	probe_ops = container_of(op, struct ftrace_probe_ops, ops);
+	probe = container_of(op, struct ftrace_func_probe, ops);
+	probe_ops = probe->probe_ops;
 
 	/*
 	 * Disable preemption for these calls to prevent a RCU grace
 	 * period. This syncs the hash iteration and freeing of items
 	 * on the hash. rcu_read_lock is too dangerous here.
 	 */
 	preempt_disable_notrace();
-	probe_ops->func(ip, parent_ip, tr, probe_ops, NULL);
+	probe_ops->func(ip, parent_ip, probe->tr, probe_ops, NULL);
 	preempt_enable_notrace();
 }
 
@@ -3946,11 +3959,41 @@ void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
 	free_ftrace_hash(&mapper->hash);
 }
 
+static void release_probe(struct ftrace_func_probe *probe)
+{
+	struct ftrace_probe_ops *probe_ops;
+
+	mutex_lock(&ftrace_lock);
+
+	WARN_ON(probe->ref <= 0);
+
+	/* Subtract the ref that was used to protect this instance */
+	probe->ref--;
+
+	if (!probe->ref) {
+		probe_ops = probe->probe_ops;
+		list_del(&probe->list);
+		kfree(probe);
+	}
+	mutex_unlock(&ftrace_lock);
+}
+
+static void acquire_probe_locked(struct ftrace_func_probe *probe)
+{
+	/*
+	 * Add one ref to keep it from being freed when releasing the
+	 * ftrace_lock mutex.
+	 */
+	probe->ref++;
+}
+
 int
 register_ftrace_function_probe(char *glob, struct trace_array *tr,
-			       struct ftrace_probe_ops *ops, void *data)
+			       struct ftrace_probe_ops *probe_ops,
+			       void *data)
 {
 	struct ftrace_func_entry *entry;
+	struct ftrace_func_probe *probe;
 	struct ftrace_hash **orig_hash;
 	struct ftrace_hash *old_hash;
 	struct ftrace_hash *hash;
@@ -3966,16 +4009,33 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
 	if (WARN_ON(glob[0] == '!'))
 		return -EINVAL;
 
-	if (!(ops->ops.flags & FTRACE_OPS_FL_INITIALIZED)) {
-		ops->ops.func = function_trace_probe_call;
-		ftrace_ops_init(&ops->ops);
-		INIT_LIST_HEAD(&ops->list);
-		ops->ops.private = tr;
+
+	mutex_lock(&ftrace_lock);
+	/* Check if the probe_ops is already registered */
+	list_for_each_entry(probe, &tr->func_probes, list) {
+		if (probe->probe_ops == probe_ops)
+			break;
 	}
+	if (&probe->list == &tr->func_probes) {
+		probe = kzalloc(sizeof(*probe), GFP_KERNEL);
+		if (!probe) {
+			mutex_unlock(&ftrace_lock);
+			return -ENOMEM;
+		}
+		probe->probe_ops = probe_ops;
+		probe->ops.func = function_trace_probe_call;
+		probe->tr = tr;
+		ftrace_ops_init(&probe->ops);
+		list_add(&probe->list, &tr->func_probes);
+	}
+
+	acquire_probe_locked(probe);
 
-	mutex_lock(&ops->ops.func_hash->regex_lock);
+	mutex_unlock(&ftrace_lock);
+
+	mutex_lock(&probe->ops.func_hash->regex_lock);
 
-	orig_hash = &ops->ops.func_hash->filter_hash;
+	orig_hash = &probe->ops.func_hash->filter_hash;
 	old_hash = *orig_hash;
 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
@@ -3998,8 +4058,9 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
 	 * for each function we find. We call the callback
 	 * to give the caller an opportunity to do so.
 	 */
-	if (ops->init) {
-		ret = ops->init(ops, tr, entry->ip, data);
+	if (probe_ops->init) {
+		ret = probe_ops->init(probe_ops, tr,
+				      entry->ip, data);
 		if (ret < 0)
 			goto out;
 	}
@@ -4009,64 +4070,69 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
 
 	mutex_lock(&ftrace_lock);
 
-	ret = ftrace_hash_move_and_update_ops(&ops->ops, orig_hash,
-					      hash, 1);
+	if (!count) {
+		/* Nothing was added? */
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
+					      hash, 1);
 	if (ret < 0)
 		goto err_unlock;
 
-	if (list_empty(&ops->list))
-		list_add(&ops->list, &tr->func_probes);
+	/* One ref for each new function traced */
+	probe->ref += count;
 
-	if (!(ops->ops.flags & FTRACE_OPS_FL_ENABLED))
-		ret = ftrace_startup(&ops->ops, 0);
+	if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
+		ret = ftrace_startup(&probe->ops, 0);
 
  out_unlock:
 	mutex_unlock(&ftrace_lock);
 
 	if (!ret)
 		ret = count;
  out:
-	mutex_unlock(&ops->ops.func_hash->regex_lock);
+	mutex_unlock(&probe->ops.func_hash->regex_lock);
 	free_ftrace_hash(hash);
 
+	release_probe(probe);
+
 	return ret;
 
  err_unlock:
-	if (!ops->free)
+	if (!probe_ops->free || !count)
 		goto out_unlock;
 
 	/* Failed to do the move, need to call the free functions */
 	for (i = 0; i < size; i++) {
 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
 			if (ftrace_lookup_ip(old_hash, entry->ip))
 				continue;
-			ops->free(ops, tr, entry->ip, NULL);
+			probe_ops->free(probe_ops, tr, entry->ip, NULL);
 		}
 	}
 	goto out_unlock;
 }
 
 int
-unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
+unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
+				      struct ftrace_probe_ops *probe_ops)
 {
 	struct ftrace_ops_hash old_hash_ops;
 	struct ftrace_func_entry *entry;
+	struct ftrace_func_probe *probe;
 	struct ftrace_glob func_g;
 	struct ftrace_hash **orig_hash;
 	struct ftrace_hash *old_hash;
 	struct ftrace_hash *hash = NULL;
 	struct hlist_node *tmp;
 	struct hlist_head hhd;
-	struct trace_array *tr;
 	char str[KSYM_SYMBOL_LEN];
-	int i, ret;
+	int count = 0;
+	int i, ret = -ENODEV;
 	int size;
 
-	if (!(ops->ops.flags & FTRACE_OPS_FL_INITIALIZED))
-		return -EINVAL;
-
-	tr = ops->ops.private;
-
 	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
 		func_g.search = NULL;
 	else if (glob) {
@@ -4082,12 +4148,28 @@ unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
 		return -EINVAL;
 	}
 
-	mutex_lock(&ops->ops.func_hash->regex_lock);
+	mutex_lock(&ftrace_lock);
+	/* Check if the probe_ops is already registered */
+	list_for_each_entry(probe, &tr->func_probes, list) {
+		if (probe->probe_ops == probe_ops)
+			break;
+	}
+	if (&probe->list == &tr->func_probes)
+		goto err_unlock_ftrace;
+
+	ret = -EINVAL;
+	if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
+		goto err_unlock_ftrace;
+
+	acquire_probe_locked(probe);
 
-	orig_hash = &ops->ops.func_hash->filter_hash;
+	mutex_unlock(&ftrace_lock);
+
+	mutex_lock(&probe->ops.func_hash->regex_lock);
+
+	orig_hash = &probe->ops.func_hash->filter_hash;
 	old_hash = *orig_hash;
 
-	ret = -EINVAL;
 	if (ftrace_hash_empty(old_hash))
 		goto out_unlock;
@@ -4112,46 +4194,54 @@ unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
 				if (!ftrace_match(str, &func_g))
 					continue;
 			}
-
+			count++;
 			remove_hash_entry(hash, entry);
 			hlist_add_head(&entry->hlist, &hhd);
 		}
 	}
 
 	/* Nothing found? */
-	if (hlist_empty(&hhd)) {
+	if (!count) {
 		ret = -EINVAL;
 		goto out_unlock;
 	}
 
 	mutex_lock(&ftrace_lock);
 
-	if (ftrace_hash_empty(hash)) {
-		ftrace_shutdown(&ops->ops, 0);
-		list_del_init(&ops->list);
-	}
+	WARN_ON(probe->ref < count);
 
+	probe->ref -= count;
 
-	ret = ftrace_hash_move_and_update_ops(&ops->ops, orig_hash,
+	if (ftrace_hash_empty(hash))
+		ftrace_shutdown(&probe->ops, 0);
+
+	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
 					      hash, 1);
 
 	/* still need to update the function call sites */
 	if (ftrace_enabled && !ftrace_hash_empty(hash))
-		ftrace_run_modify_code(&ops->ops, FTRACE_UPDATE_CALLS,
+		ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
 				       &old_hash_ops);
 	synchronize_sched();
 
 	hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
 		hlist_del(&entry->hlist);
-		if (ops->free)
-			ops->free(ops, tr, entry->ip, NULL);
+		if (probe_ops->free)
+			probe_ops->free(probe_ops, tr, entry->ip, NULL);
 		kfree(entry);
 	}
 	mutex_unlock(&ftrace_lock);
 
 out_unlock:
-	mutex_unlock(&ops->ops.func_hash->regex_lock);
+	mutex_unlock(&probe->ops.func_hash->regex_lock);
 	free_ftrace_hash(hash);
+
+	release_probe(probe);
+
+	return ret;
+
+ err_unlock_ftrace:
+	mutex_unlock(&ftrace_lock);
 	return ret;
 }
 
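For context: the patch moves the embedded ftrace_ops out of struct ftrace_probe_ops into a per-trace_array wrapper, struct ftrace_func_probe, which is created on the first registration of a given probe_ops, reused by later registrations, and reference counted (one ref per traced function, plus a temporary ref held by acquire_probe_locked()/release_probe() while ftrace_lock is dropped). Below is a minimal caller sketch against the post-patch API shown in the diff; the probe name and empty callback body are hypothetical, only the function signatures come from the patch itself.

/* Hypothetical probe callback; function_trace_probe_call() invokes it with
 * preemption disabled and passes the trace_array taken from probe->tr. */
static void my_probe_func(unsigned long ip, unsigned long parent_ip,
			  struct trace_array *tr,
			  struct ftrace_probe_ops *ops, void *data)
{
	/* count or act on the hit here */
}

static struct ftrace_probe_ops my_probe_ops = {
	.func	= my_probe_func,
};

/* Attach the probe to every function matching the glob on this trace_array;
 * on success the return value is the number of functions hooked. */
ret = register_ftrace_function_probe("sched_*", tr, &my_probe_ops, NULL);

/* The unregister side now also takes the trace_array, since the
 * ftrace_func_probe wrapper lives on tr->func_probes; it drops one ref per
 * matched function and frees the wrapper once the count reaches zero. */
ret = unregister_ftrace_function_probe_func("sched_*", tr, &my_probe_ops);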