Remove clock_sync field from net message

As we freeze the format, let's remove this old experiment that tried to
make it easier to line up traces from different mounts.  It never worked
particularly well, and I think it could be argued that trying to merge
trace logs from different machines isn't a particularly meaningful thing
to do.  You care about how they interact, not what they were doing at
the same time with their independent resources.

Signed-off-by: Zach Brown <zab@versity.com>
commit 20ac2e35fa
parent 80ee2c6d57
Author: Zach Brown
Date: 2021-10-25 14:45:01 -07:00
5 changed files with 0 additions and 66 deletions


@@ -970,7 +970,6 @@ struct scoutfs_net_greeting {
  * response messages.
  */
 struct scoutfs_net_header {
-	__le64 clock_sync_id;
 	__le64 seq;
 	__le64 recv_seq;
 	__le64 id;


@@ -629,8 +629,6 @@ static void scoutfs_net_recv_worker(struct work_struct *work)
 			break;
 		}

-		trace_scoutfs_recv_clock_sync(nh.clock_sync_id);
-
 		data_len = le16_to_cpu(nh.data_len);
 		scoutfs_inc_counter(sb, net_recv_messages);
@@ -785,9 +783,6 @@ static void scoutfs_net_send_worker(struct work_struct *work)
 		trace_scoutfs_net_send_message(sb, &conn->sockname,
					       &conn->peername, &msend->nh);

-		msend->nh.clock_sync_id = scoutfs_clock_sync_id();
-		trace_scoutfs_send_clock_sync(msend->nh.clock_sync_id);
-
 		ret = sendmsg_full(conn->sock, &msend->nh, len);

 		spin_lock(&conn->lock);


@@ -1954,32 +1954,6 @@ TRACE_EVENT(scoutfs_quorum_loop,
		  __entry->timeout_sec, __entry->timeout_nsec)
 );

-/*
- * We can emit trace events to make it easier to synchronize the
- * monotonic clocks in trace logs between nodes. By looking at the send
- * and recv times of many messages flowing between nodes we can get
- * surprisingly good estimates of the clock offset between them.
- */
-DECLARE_EVENT_CLASS(scoutfs_clock_sync_class,
-	TP_PROTO(__le64 clock_sync_id),
-	TP_ARGS(clock_sync_id),
-	TP_STRUCT__entry(
-		__field(__u64, clock_sync_id)
-	),
-	TP_fast_assign(
-		__entry->clock_sync_id = le64_to_cpu(clock_sync_id);
-	),
-	TP_printk("clock_sync_id %016llx", __entry->clock_sync_id)
-);
-DEFINE_EVENT(scoutfs_clock_sync_class, scoutfs_send_clock_sync,
-	TP_PROTO(__le64 clock_sync_id),
-	TP_ARGS(clock_sync_id)
-);
-DEFINE_EVENT(scoutfs_clock_sync_class, scoutfs_recv_clock_sync,
-	TP_PROTO(__le64 clock_sync_id),
-	TP_ARGS(clock_sync_id)
-);
-
 TRACE_EVENT(scoutfs_trans_seq_last,
	TP_PROTO(struct super_block *sb, u64 rid, u64 trans_seq),
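
For reference, the removed comment describes the idea these events supported: matching the clock_sync_id of a message in one node's send trace against the other node's recv trace gives an apparent offset in each direction. One well-known way to turn that into an offset estimate is the NTP-style average, which cancels the network delay when it is roughly symmetric. Below is a minimal userspace sketch of that arithmetic with illustrative names and made-up timestamps; it is not scoutfs code, just the math the removed comment alludes to.

#include <stdio.h>

/* timestamps for one message id seen in both nodes' trace logs */
struct sync_sample {
	double a_send;	/* node A's clock when A sent to B */
	double b_recv;	/* node B's clock when B received it */
	double b_send;	/* node B's clock when B sent to A */
	double a_recv;	/* node A's clock when A received it */
};

/*
 * Each direction's apparent offset includes the network delay with
 * opposite sign, so averaging the two directions cancels the delay
 * if it is roughly symmetric.
 */
static double offset_of_b_vs_a(const struct sync_sample *s)
{
	double forward = s->b_recv - s->a_send;	/*  offset + delay */
	double reverse = s->a_recv - s->b_send;	/* -offset + delay */

	return (forward - reverse) / 2.0;
}

int main(void)
{
	/* made-up numbers: B runs 3.000s ahead, 10ms delay each way */
	struct sync_sample s = {
		.a_send = 100.000, .b_recv = 103.010,
		.b_send = 103.020, .a_recv = 100.030,
	};

	printf("estimated offset: %.3f\n", offset_of_b_vs_a(&s));
	return 0;
}

Averaging many such pairs, as the removed comment suggests, tightens the estimate further.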


@@ -20,7 +20,6 @@
 #include <linux/statfs.h>
 #include <linux/sched.h>
 #include <linux/debugfs.h>
-#include <linux/percpu.h>

 #include "super.h"
 #include "block.h"
@@ -52,37 +51,6 @@
 static struct dentry *scoutfs_debugfs_root;

-static DEFINE_PER_CPU(u64, clock_sync_ids) = 0;
-
-/*
- * Give the caller a unique clock sync id for a message they're about to
- * send. We make the ids reasonably globally unique by using randomly
- * initialized per-cpu 64bit counters.
- */
-__le64 scoutfs_clock_sync_id(void)
-{
-	u64 rnd = 0;
-	u64 ret;
-	u64 *id;
-
-retry:
-	preempt_disable();
-	id = this_cpu_ptr(&clock_sync_ids);
-	if (*id == 0) {
-		if (rnd == 0) {
-			preempt_enable();
-			get_random_bytes(&rnd, sizeof(rnd));
-			goto retry;
-		}
-		*id = rnd;
-	}
-	ret = ++(*id);
-	preempt_enable();
-
-	return cpu_to_le64(ret);
-}
-
 /* the statfs file fields can be small (and signed?) :/ */
 static __statfs_word saturate_truncated_word(u64 files)
 {
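
The removed helper kept id generation cheap by giving each CPU its own 64-bit counter, lazily seeded from get_random_bytes(), so no shared atomic or lock is needed and counters on different CPUs are very unlikely to collide. A rough userspace analogue of the same pattern, using a thread-local counter seeded from getrandom() (illustrative only, not scoutfs code):

#include <stdint.h>
#include <stdio.h>
#include <sys/random.h>

/* one counter per thread, lazily seeded so threads start far apart */
static _Thread_local uint64_t sync_id_counter;

static uint64_t next_sync_id(void)
{
	/* seed this thread's counter once from the kernel's RNG */
	while (sync_id_counter == 0) {
		uint64_t seed = 0;

		if (getrandom(&seed, sizeof(seed), 0) == sizeof(seed))
			sync_id_counter = seed;
	}

	/* bumping thread-local state needs no locks or atomics */
	return ++sync_id_counter;
}

int main(void)
{
	printf("%016llx\n%016llx\n",
	       (unsigned long long)next_sync_id(),
	       (unsigned long long)next_sync_id());
	return 0;
}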


@@ -160,6 +160,4 @@ int scoutfs_write_super(struct super_block *sb,
 /* to keep this out of the ioctl.h public interface definition */
 long scoutfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);

-__le64 scoutfs_clock_sync_id(void);
-
 #endif