/*
 * Monitoring code for network dropped packet alerts
 *
 * Copyright (C) 2009 Neil Horman <nhorman@tuxdriver.com>
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/netlink.h>
#include <linux/net_dropmon.h>
#include <linux/percpu.h>
#include <linux/timer.h>
#include <linux/bitops.h>
#include <net/genetlink.h>

#include <trace/skb.h>

#include <asm/unaligned.h>

#define TRACE_ON	1
#define TRACE_OFF	0

static void send_dm_alert(struct work_struct *unused);


/*
 * Globals, our netlink socket pointer
 * and the work handle that will send up
 * netlink alerts
 */
struct sock *dm_sock;

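/*
 * Per-CPU drop monitor state:
 *   dm_alert_work - work item that multicasts the queued alert skb
 *   skb           - netlink alert message currently being filled in
 *   dm_hit_count  - number of further drop hits that will still be
 *                   recorded before the pending alert is full
 *   send_timer    - hysteresis timer that defers sending the alert
 */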
struct per_cpu_dm_data {
	struct work_struct dm_alert_work;
	struct sk_buff *skb;
	atomic_t dm_hit_count;
	struct timer_list send_timer;
};

static struct genl_family net_drop_monitor_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = 0,
	.name = "NET_DM",
	.version = 1,
	.maxattr = NET_DM_CMD_MAX,
};
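/*
 * For illustration only (not part of this module): a userspace listener
 * would typically resolve the "NET_DM" family through the generic
 * netlink controller, send NET_DM_CMD_START, and then read
 * NET_DM_CMD_ALERT multicasts.  A minimal sketch, assuming libnl-3 is
 * used on the userspace side (that library choice and the lack of
 * error handling are assumptions, not something this module mandates):
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	genl_connect(sk);
 *	int fam = genl_ctrl_resolve(sk, "NET_DM");
 *
 *	struct nl_msg *msg = nlmsg_alloc();
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
 *		    NET_DM_CMD_START, 1);
 *	nl_send_auto(sk, msg);			// start tracing drops
 *	nlmsg_free(msg);
 *
 *	nl_socket_add_membership(sk, NET_DM_GRP_ALERT);
 *	nl_recvmsgs_default(sk);		// alerts arrive as NET_DM_CMD_ALERT
 */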

static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);

static int dm_hit_limit = 64;
static int dm_delay = 1;

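/*
 * Build an empty alert message for this CPU.  The buffer sized below
 * leaves room for a struct net_dm_alert_msg followed by up to
 * dm_hit_limit struct net_dm_drop_point entries:
 *
 *	[ netlink/genetlink headers | net_dm_alert_msg | drop points ... ]
 *
 * Only the headers and the net_dm_alert_msg are reserved here; each
 * drop point is reserved later, as it is recorded in
 * trace_kfree_skb_hit().
 */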
static void reset_per_cpu_data(struct per_cpu_dm_data *data)
{
	size_t al;
	struct net_dm_alert_msg *msg;

	al = sizeof(struct net_dm_alert_msg);
	al += dm_hit_limit * sizeof(struct net_dm_drop_point);
	data->skb = genlmsg_new(al, GFP_KERNEL);
	genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family,
			0, NET_DM_CMD_ALERT);
	msg = __nla_reserve_nohdr(data->skb, sizeof(struct net_dm_alert_msg));
	memset(msg, 0, al);
	atomic_set(&data->dm_hit_count, dm_hit_limit);
}

static void send_dm_alert(struct work_struct *unused)
{
	struct sk_buff *skb;
	struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);

	/*
	 * Grab the skb we're about to send
	 */
	skb = data->skb;

	/*
	 * Replace it with a new one
	 */
	reset_per_cpu_data(data);

	/*
	 * Ship it!
	 */
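	/* genlmsg_multicast() consumes the skb, so no kfree_skb() here */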
	genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
}

/*
 * This is the timer function that delays sending an alert in case
 * more drops arrive during the hysteresis period. Note that it runs
 * from the timer interrupt, so we don't need to disable preemption
 * here.
 */
static void sched_send_work(unsigned long unused)
{
	struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);

	schedule_work(&data->dm_alert_work);
}

static void trace_kfree_skb_hit(struct sk_buff *skb, void *location)
{
	struct net_dm_alert_msg *msg;
	struct nlmsghdr *nlh;
	int i;
	struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);

	if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
		/*
		 * We're already at zero, discard this hit.
		 */
		goto out;
	}

	nlh = (struct nlmsghdr *)data->skb->data;
	msg = genlmsg_data(nlmsg_data(nlh));
	for (i = 0; i < msg->entries; i++) {
		if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
			msg->points[i].count++;
			goto out;
		}
	}

	/*
	 * We need to create a new entry
	 */
	__nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point));
	memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
	msg->points[msg->entries].count = 1;
	msg->entries++;

	if (!timer_pending(&data->send_timer)) {
		data->send_timer.expires = jiffies + dm_delay * HZ;
		add_timer_on(&data->send_timer, smp_processor_id());
	}

out:
	return;
}
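/*
 * Taken together with reset_per_cpu_data() and send_dm_alert(), the
 * code above rate-limits alerts per CPU.  With the default tunables
 * (dm_hit_limit = 64, dm_delay = 1): the first drop recorded after a
 * reset arms send_timer, drops over the following second are folded
 * into the same pending alert (bumping an existing location's count or
 * adding a new drop point), and once 64 hits have been counted any
 * further drops are discarded until the alert goes out and the per-CPU
 * state is reset.
 */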

static int set_all_monitor_traces(int state)
{
	int rc = 0;

	switch (state) {
	case TRACE_ON:
		rc |= register_trace_kfree_skb(trace_kfree_skb_hit);
		break;
	case TRACE_OFF:
		rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit);

		tracepoint_synchronize_unregister();
		break;
	default:
		rc = 1;
		break;
	}

	if (rc)
		return -EINPROGRESS;
	return rc;
}
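/*
 * How the probe registered above fires: in this kernel, kfree_skb()
 * (the free path used for dropped packets) calls the kfree_skb
 * tracepoint with the skb and its caller's return address, and that
 * address is what trace_kfree_skb_hit() records as the drop location.
 */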

static int net_dm_cmd_config(struct sk_buff *skb,
			struct genl_info *info)
{
	return -ENOTSUPP;
}

static int net_dm_cmd_trace(struct sk_buff *skb,
			struct genl_info *info)
{
	switch (info->genlhdr->cmd) {
	case NET_DM_CMD_START:
		return set_all_monitor_traces(TRACE_ON);
	case NET_DM_CMD_STOP:
		return set_all_monitor_traces(TRACE_OFF);
	}

	return -ENOTSUPP;
}

static struct genl_ops dropmon_ops[] = {
	{
		.cmd = NET_DM_CMD_CONFIG,
		.doit = net_dm_cmd_config,
	},
	{
		.cmd = NET_DM_CMD_START,
		.doit = net_dm_cmd_trace,
	},
	{
		.cmd = NET_DM_CMD_STOP,
		.doit = net_dm_cmd_trace,
	},
};

static int __init init_net_drop_monitor(void)
{
	int cpu;
	int rc, i, ret;
	struct per_cpu_dm_data *data;

	printk(KERN_INFO "Initializing network drop monitor service\n");

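	/*
	 * struct net_dm_drop_point stores the drop location in an
	 * 8-byte pc[] field, so bail out if a pointer would not fit.
	 */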
	if (sizeof(void *) > 8) {
		printk(KERN_ERR "Unable to store program counters on this arch, Drop monitor failed\n");
		return -ENOSPC;
	}

	if (genl_register_family(&net_drop_monitor_family) < 0) {
		printk(KERN_ERR "Could not create drop monitor netlink family\n");
		return -EFAULT;
	}

	rc = -EFAULT;

	for (i = 0; i < ARRAY_SIZE(dropmon_ops); i++) {
		ret = genl_register_ops(&net_drop_monitor_family,
					&dropmon_ops[i]);
		if (ret) {
			printk(KERN_CRIT "failed to register operation %d\n",
				dropmon_ops[i].cmd);
			goto out_unreg;
		}
	}

	rc = 0;

	for_each_present_cpu(cpu) {
		data = &per_cpu(dm_cpu_data, cpu);
		reset_per_cpu_data(data);
		INIT_WORK(&data->dm_alert_work, send_dm_alert);
		init_timer(&data->send_timer);
		data->send_timer.data = cpu;
		data->send_timer.function = sched_send_work;
	}

	goto out;

out_unreg:
	genl_unregister_family(&net_drop_monitor_family);
out:
	return rc;
}

late_initcall(init_net_drop_monitor);