-
Notifications
You must be signed in to change notification settings - Fork 56
Update to match db semconv #849
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
Changes from all commits
d730c00
9460684
6f4847d
c23b622
f5fe590
876ed59
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -48,7 +48,7 @@ enum large_buf_action : u8 { | |
| }; | ||
|
|
||
| enum { | ||
| k_dns_max_len = 516, | ||
| k_dns_max_len = 512, // must be a power of 2 | ||
| }; | ||
|
|
||
| #define MAX_SPAN_NAME_LEN 64 | ||
|
|
@@ -246,17 +246,16 @@ typedef struct mongo_go_client_req { | |
|
|
||
| typedef struct dns_req { | ||
| u8 flags; // Must be first we use it to tell what kind of packet we have on the ring buffer | ||
| u8 p_type; | ||
| u8 dns_q; | ||
| u8 _pad1[1]; | ||
| u8 _pad1[2]; | ||
| u32 len; | ||
| connection_info_t conn; | ||
| u16 id; | ||
| u8 _pad2[2]; | ||
| tp_info_t tp; | ||
| u64 ts; | ||
| // we need this to filter traces from unsolicited processes that share the executable | ||
| // with other instrumented processes | ||
| pid_info pid; | ||
| unsigned char buf[k_dns_max_len]; | ||
| u8 _pad3[4]; | ||
|
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I had to clamp_max the size for the BPF buffer, so I needed a power of 2. I've switched to 512 and added padding. |
||
| } dns_req_t; | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -97,6 +97,36 @@ static __always_inline u8 is_dns(connection_info_t *conn) { | |
| return is_dns_port(conn->s_port) || is_dns_port(conn->d_port); | ||
| } | ||
|
|
||
| static __always_inline void populate_dns_record(dns_req_t *req, | ||
| const pid_connection_info_t *p_conn, | ||
| const u16 orig_dport, | ||
| const u32 size, | ||
| const u8 qr, | ||
| const u16 id, | ||
| const conn_pid_t *conn_pid) { | ||
| __builtin_memcpy(&req->conn, &p_conn->conn, sizeof(connection_info_t)); | ||
|
|
||
| req->flags = EVENT_DNS_REQUEST; | ||
| req->len = size; | ||
| req->dns_q = qr; | ||
| req->id = bpf_ntohs(id); | ||
| req->tp.ts = bpf_ktime_get_ns(); | ||
grcevski marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| req->pid = conn_pid->p_info; | ||
|
|
||
| trace_key_t t_key = {0}; | ||
| trace_key_from_pid_tid_with_p_key(&t_key, &conn_pid->p_key, conn_pid->id); | ||
|
|
||
| const u8 found = find_trace_for_client_request_with_t_key( | ||
| p_conn, orig_dport, &t_key, conn_pid->id, &req->tp); | ||
|
|
||
| bpf_dbg_printk("handle_dns: looking up client trace info, found %d", found); | ||
| if (found) { | ||
| urand_bytes(req->tp.span_id, SPAN_ID_SIZE_BYTES); | ||
| } else { | ||
| init_new_trace(&req->tp); | ||
| } | ||
| } | ||
|
|
||
| static __always_inline u8 handle_dns(struct __sk_buff *skb, | ||
| connection_info_t *conn, | ||
| protocol_info_t *p_info) { | ||
|
|
@@ -158,30 +188,51 @@ static __always_inline u8 handle_dns(struct __sk_buff *skb, | |
| dns_req_t *req = bpf_ringbuf_reserve(&events, sizeof(dns_req_t), 0); | ||
|
|
||
| if (req) { | ||
| __builtin_memcpy(&req->conn, conn, sizeof(connection_info_t)); | ||
|
|
||
| req->flags = EVENT_DNS_REQUEST; | ||
| req->p_type = skb->pkt_type; | ||
| req->len = skb->len; | ||
| req->dns_q = qr; | ||
| req->id = bpf_ntohs(hdr.id); | ||
| req->ts = bpf_ktime_get_ns(); | ||
| req->tp.ts = bpf_ktime_get_ns(); | ||
| req->pid = conn_pid->p_info; | ||
|
|
||
| trace_key_t t_key = {0}; | ||
| trace_key_from_pid_tid_with_p_key(&t_key, &conn_pid->p_key, conn_pid->id); | ||
|
|
||
| const u8 found = find_trace_for_client_request_with_t_key( | ||
| &p_conn, orig_dport, &t_key, conn_pid->id, &req->tp); | ||
|
|
||
| bpf_dbg_printk("handle_dns: looking up client trace info, found %d", found); | ||
| if (found) { | ||
| urand_bytes(req->tp.span_id, SPAN_ID_SIZE_BYTES); | ||
| } else { | ||
| init_new_trace(&req->tp); | ||
| } | ||
| read_skb_bytes(skb, dns_off, req->buf, sizeof(req->buf)); | ||
| u32 len = skb->len - dns_off; | ||
| bpf_clamp_umax(len, 512); | ||
| populate_dns_record(req, &p_conn, orig_dport, len, qr, hdr.id, conn_pid); | ||
|
|
||
| read_skb_bytes(skb, dns_off, req->buf, len); | ||
|
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This was a nasty bug: we were reading up to the maximum size of req->buf, but read_skb_bytes failed to grab the last 16-byte chunk because we weren't reading exactly the number of bytes available. This left us with garbage at the end. |
||
| bpf_d_printk("sending dns trace"); | ||
| bpf_ringbuf_submit(req, get_flags()); | ||
| } | ||
|
|
||
| return 1; | ||
| } | ||
|
|
||
| return 0; | ||
| } | ||
|
|
||
| static __always_inline u8 handle_dns_buf(const unsigned char *buf, | ||
| const int size, | ||
| pid_connection_info_t *p_conn, | ||
| u16 orig_dport) { | ||
|
|
||
| if (size < sizeof(struct dnshdr)) { | ||
|
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I had to add an additional way to read the DNS traffic, since if OBI is configured with host networking it may not be able to read some internal pod traffic — in this case it wasn't able to see the Docker-internal DNS traffic between the Docker network and the service itself. I reproduced this in our test examples with that network setting. In this scenario the socket_filter doesn't see the traffic, but the kprobes do. If OBI is running in the network of the target process, we'll see the event twice: once from the sock_filter and another time from the kprobes. This isn't a problem because we deduplicate the DNS requests in user space and won't let them be emitted twice. |
||
| bpf_d_printk("dns packet too small"); | ||
| return 0; | ||
| } | ||
|
|
||
| struct dnshdr hdr; | ||
| bpf_probe_read_user(&hdr, sizeof(struct dnshdr), buf); | ||
|
|
||
| const u16 flags = bpf_ntohs(hdr.flags); | ||
| const u8 qr = dns_qr(flags); | ||
|
|
||
| bpf_d_printk("QR type: %d", qr); | ||
|
|
||
| if (qr == k_dns_qr_query || qr == k_dns_qr_resp) { | ||
| conn_pid_t *conn_pid = bpf_map_lookup_elem(&sock_pids, &p_conn->conn); | ||
| if (!conn_pid) { | ||
| bpf_d_printk("can't find connection info for dns call"); | ||
| return 0; | ||
| } | ||
|
|
||
| dns_req_t *req = bpf_ringbuf_reserve(&events, sizeof(dns_req_t), 0); | ||
| if (req) { | ||
| populate_dns_record(req, p_conn, orig_dport, size, qr, hdr.id, conn_pid); | ||
|
|
||
| bpf_probe_read(req->buf, sizeof(req->buf), buf); | ||
| bpf_d_printk("sending dns trace"); | ||
| bpf_ringbuf_submit(req, get_flags()); | ||
| } | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -5,6 +5,8 @@ package store | |
|
|
||
| import ( | ||
| "sync" | ||
|
|
||
| "github.com/hashicorp/golang-lru/v2/simplelru" | ||
| ) | ||
|
|
||
| type DNSEntry struct { | ||
|
|
@@ -18,26 +20,37 @@ type InMemory struct { | |
| access sync.RWMutex | ||
| // key: IP address, values: hostname | ||
| // TODO: address scenarios where different hostnames point to a same IP | ||
| entries map[string][]string | ||
| entries *simplelru.LRU[string, []string] | ||
| } | ||
|
|
||
| func NewInMemory() *InMemory { | ||
| return &InMemory{ | ||
| entries: map[string][]string{}, | ||
| func NewInMemory(cacheSize int) (*InMemory, error) { | ||
| cache, err := simplelru.NewLRU[string, []string](cacheSize, nil) | ||
|
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I changed this implementation to ensure this cache is memory-capped. |
||
| if err != nil { | ||
| return nil, err | ||
| } | ||
| return &InMemory{ | ||
| entries: cache, | ||
| }, nil | ||
| } | ||
|
|
||
| func (im *InMemory) Store(entry *DNSEntry) { | ||
| im.access.Lock() | ||
| defer im.access.Unlock() | ||
| for _, ip := range entry.IPs { | ||
| // TODO: store IPv4 also with its IPv6 representation | ||
| im.entries[ip] = []string{entry.HostName} | ||
| im.entries.Add(ip, []string{entry.HostName}) | ||
| } | ||
| } | ||
|
|
||
| func (im *InMemory) StorePair(ip, name string) { | ||
| im.access.Lock() | ||
| defer im.access.Unlock() | ||
| im.entries.Add(ip, []string{name}) | ||
| } | ||
|
|
||
| func (im *InMemory) GetHostnames(ip string) ([]string, error) { | ||
| im.access.RLock() | ||
| defer im.access.RUnlock() | ||
| return im.entries[ip], nil | ||
| r, _ := im.entries.Get(ip) | ||
grcevski marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| return r, nil | ||
| } | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I removed an unused field.