# This is a BitKeeper patch.  What follows are the unified diffs for the
# set of deltas contained in the patch.  The rest of the patch, the part
# that BitKeeper cares about, is below these diffs.
# User:	peda
# Host:	sectra.se
# Root:	D:/Projekt/NTP/bk/ntp-dev-perf
#
#--- 1.13/ports/winnt/ntpd/nt_clockstuff.c	Wed Apr 16 15:18:16 2003
#+++ 1.14/ports/winnt/ntpd/nt_clockstuff.c	Thu Oct 16 09:46:24 2003
#@@ -299,6 +299,179 @@
# 	return 0;
# }
#
#+#define PERF_FILTER_SIZE (2048)
#+static LONGLONG perf_filter[PERF_FILTER_SIZE];
#+static int perf_oldest = -1;
#+static int perf_newest = -1;
#+static int perf_full = 0;
#+static LONGLONG perf_sum = 0;
#+static LONGLONG perf_offset = 0;
#+static LONGLONG perf_offset_index = -1;
#+static LONGLONG perf_offset_backup = 0;
#+static LONGLONG perf_offset_backup_index = -1;
#+static LONGLONG perf_count;
#+
#+static void
#+perf_reset(LONGLONG perf_count_now)
#+{
#+	perf_oldest = -1;
#+	perf_newest = 0;
#+	perf_full = 0;
#+	perf_count = perf_count_now;
#+	perf_sum = 0;
#+	perf_offset_index = perf_newest;
#+	perf_offset = 0;
#+	perf_offset_backup_index = -1;
#+}
#+
#+static void
#+perf_inc(int *perf_index)
#+{
#+	*perf_index = (*perf_index + 1) % PERF_FILTER_SIZE;
#+}
#+
#+static LONGLONG
#+perf_insert(LONGLONG diff)
#+{
#+	LONGLONG sum;
#+	LONGLONG prev_sum = perf_sum;
#+
#+	perf_inc(&perf_newest);
#+	if (perf_newest == perf_oldest) {
#+		perf_sum -= perf_filter[perf_oldest];
#+		perf_inc(&perf_oldest);
#+		perf_full = 1;
#+	}
#+	perf_count += diff;
#+	perf_filter[perf_newest] = diff;
#+	perf_sum += diff;
#+
#+	if (!perf_full) {
#+		int count;
#+
#+		if (perf_oldest == -1) {
#+			/* first call to perf_insert */
#+			perf_oldest = perf_newest;
#+			return perf_count;
#+		}
#+
#+		count = (perf_newest - perf_oldest + PERF_FILTER_SIZE)
#+			% PERF_FILTER_SIZE + 1;
#+
#+		sum = (perf_sum * PERF_FILTER_SIZE + count / 2)
#+			/ count;
#+		prev_sum = (prev_sum * PERF_FILTER_SIZE + (count - 1) / 2)
#+			/ (count - 1);
#+
#+		if (count == 2)
#+			/* Init perf_offset so that it later becomes zero
#+			 * and that this sample therefore is used as a base.
#+			 */
#+			perf_offset = sum - diff * PERF_FILTER_SIZE;
#+	}
#+	else
#+		sum = perf_sum;
#+
#+	perf_offset += diff * PERF_FILTER_SIZE - sum;
#+
#+	if (perf_offset <= 0) {
#+		/* new base offset */
#+		perf_offset_index = perf_newest;
#+		perf_offset = 0;
#+		perf_offset_backup_index = -1;
#+	}
#+	else {
#+		if (perf_offset <= perf_offset_backup
#+		    || perf_offset_backup_index == -1) {
#+			/* new backup offset */
#+			perf_offset_backup_index = perf_newest;
#+			perf_offset_backup = perf_offset;
#+		}
#+
#+		if (perf_offset_index == perf_newest) {
#+			/* base offset disappeared, use backup */
#+			if (perf_offset_backup_index == -1) {
#+				/* Aiee! No backup, use this sample!
#+				 */
#+				perf_offset_index = perf_newest;
#+				perf_offset = 0;
#+			}
#+			else {
#+				perf_offset_index = perf_offset_backup_index;
#+				perf_offset -= perf_offset_backup;
#+				perf_offset_backup_index = -1;
#+			}
#+		}
#+	}
#+
#+	return perf_count - perf_offset / PERF_FILTER_SIZE;
#+}
#+
#+static void
#+performance_filter(LARGE_INTEGER *perf_count_now, ULONGLONG TimerTime)
#+{
#+	LONGLONG time_diff;
#+	LONGLONG perf_diff;
#+	LONGLONG perf_diff_per_tick;
#+	int i, ticks;
#+	int diff_limit;
#+
#+	if (!every)
#+		return;
#+
#+	if (perf_newest == -1) {
#+		/* This is the 1st call */
#+		perf_reset(perf_count_now->QuadPart);
#+		return;
#+	}
#+
#+	/* Estimate how many ticks there have been
#+	 * since the last call
#+	 */
#+	time_diff = TimerTime - LastTimerTime;
#+	ticks = 0;
#+	while (time_diff > every / 2) {
#+		++ticks;
#+		time_diff -= every;
#+		if (ticks > 200) {
#+			/* Too many ticks: clock stepped forward?
#+			 * reset filter.
#+			 */
#+			msyslog(LOG_NOTICE,
#+				"missed too many ticks (> 200), "
#+				"diff %I64d, filter reset",
#+				TimerTime - LastTimerTime);
#+			perf_reset(perf_count_now->QuadPart);
#+			return;
#+		}
#+	}
#+
#+	diff_limit = every / 10;
#+	if (!ticks || time_diff > diff_limit || time_diff < -diff_limit) {
#+		/* No ticks: clock stepped back?
#+		 * Large drift: clock stepped forward?
#+		 * reset filter
#+		 */
#+		msyslog(LOG_NOTICE,
#+			"%d ticks, diff %I64d, filter reset",
#+			ticks, time_diff);
#+		perf_reset(perf_count_now->QuadPart);
#+		return;
#+	}
#+
#+	/* Split up the performance counter difference
#+	 * between these ticks
#+	 */
#+	perf_diff = perf_count_now->QuadPart - perf_count;
#+	perf_diff_per_tick = perf_diff / ticks;
#+
#+	i = ticks;
#+	while (--i > 0)
#+		perf_insert(perf_diff_per_tick);
#+
#+	perf_count_now->QuadPart
#+		= perf_insert(perf_diff - perf_diff_per_tick * (ticks - 1));
#+}
#+
# static void CALLBACK
# TimerApcFunction(
# 	LPVOID lpArgToCompletionRoutine,
#@@ -307,6 +480,7 @@
# 	)
# {
# 	LARGE_INTEGER LargeIntNowCount;
#+	ULONGLONG TimerTime;
# 	(void) lpArgToCompletionRoutine;	/* not used */
#
# 	if (dwTimerLowValue == lastLowTimer) return;
#@@ -317,6 +491,10 @@
# 	/* Save this for next time */
# 	lastLowTimer = dwTimerLowValue;
#
#+	TimerTime = ((ULONGLONG) dwTimerHighValue << 32) +
#+		(ULONGLONG) dwTimerLowValue;
#+	performance_filter(&LargeIntNowCount, TimerTime);
#+
# 	/* Check to see if the counter has rolled.  This happens
# 	   more often on Multi-CPU systems */
#
#@@ -339,8 +517,7 @@
#
# 	EnterCriticalSection(&TimerCritialSection);
# 	LastTimerCount = (ULONGLONG) LargeIntNowCount.QuadPart;
#-	LastTimerTime = ((ULONGLONG) dwTimerHighValue << 32) +
#-		(ULONGLONG) dwTimerLowValue;
#+	LastTimerTime = TimerTime;
# 	LeaveCriticalSection(&TimerCritialSection);
# }
#
#
# Diff checksum=ac9647d6
# Patch vers:	1.3
# Patch type:	REGULAR

== ChangeSet ==
stenn@whimsy.udel.edu|ChangeSet|19990526004811|57482|8983e65c737bb465
stenn@whimsy.udel.edu|ChangeSet|20031015095142|19094
D 1.1162 03/10/16 09:46:33+02:00 peda@sectra.se +1 -0
B stenn@whimsy.udel.edu|ChangeSet|19990526004811|57482|8983e65c737bb465
C
c A thread runs with high priority and detects when a tick has occurred;
c when it has, the thread takes a sample from the performance counter.
c The performance counter sample is then used to extrapolate the system
c time between ticks.
c
c The problem is that the thread only samples once every millisecond (on
c the best of days; on a loaded system it can be less often), so there is
c typically 1 ms of jitter just because of this.
c
c This patch filters out the best sample in the recent past and uses that
c sample as the base, until a better sample comes along or the sample
c grows too old (currently 20 seconds).
c
c When my system is quiet, I get accuracies around 10 microseconds,
c but when the usage pattern of the computer changes, the offset can
c temporarily grow to 50 us.
K 18162
P ChangeSet
------------------------------------------------

0a0
> stenn@whimsy.udel.edu|ports/winnt/ntpd/nt_clockstuff.c|19991016040804|07763|8b711e53 peda@sectra.se|ports/winnt/ntpd/nt_clockstuff.c|20031016074624|17871

== ports/winnt/ntpd/nt_clockstuff.c ==
stenn@whimsy.udel.edu|ports/winnt/ntpd/nt_clockstuff.c|19991016040804|07763|8b711e53
mayer@tecotoo.myibg.com|ports/winnt/ntpd/nt_clockstuff.c|20030416131816|23470
D 1.14 03/10/16 09:46:24+02:00 peda@sectra.se +179 -2
B stenn@whimsy.udel.edu|ChangeSet|19990526004811|57482|8983e65c737bb465
C
c A thread runs with high priority and detects when a tick has occurred;
c when it has, the thread takes a sample from the performance counter.
c The performance counter sample is then used to extrapolate the system
c time between ticks.
c
c The problem is that the thread only samples once every millisecond (on
c the best of days; on a loaded system it can be less often), so there is
c typically 1 ms of jitter just because of this.
c
c This patch filters out the best sample in the recent past and uses that
c sample as the base, until a better sample comes along or the sample
c grows too old (currently 20 seconds).
c
c When my system is quiet, I get accuracies around 10 microseconds,
c but when the usage pattern of the computer changes, the offset can
c temporarily grow to 50 us.
K 17871
O -rw-rw-r--
P ports/winnt/ntpd/nt_clockstuff.c
------------------------------------------------

I301 173
#define PERF_FILTER_SIZE (2048)
static LONGLONG perf_filter[PERF_FILTER_SIZE];
static int perf_oldest = -1;
static int perf_newest = -1;
static int perf_full = 0;
static LONGLONG perf_sum = 0;
static LONGLONG perf_offset = 0;
static LONGLONG perf_offset_index = -1;
static LONGLONG perf_offset_backup = 0;
static LONGLONG perf_offset_backup_index = -1;
static LONGLONG perf_count;
\
static void
perf_reset(LONGLONG perf_count_now)
{
	perf_oldest = -1;
	perf_newest = 0;
	perf_full = 0;
	perf_count = perf_count_now;
	perf_sum = 0;
	perf_offset_index = perf_newest;
	perf_offset = 0;
	perf_offset_backup_index = -1;
}
\
static void
perf_inc(int *perf_index)
{
	*perf_index = (*perf_index + 1) % PERF_FILTER_SIZE;
}
\
static LONGLONG
perf_insert(LONGLONG diff)
{
	LONGLONG sum;
	LONGLONG prev_sum = perf_sum;
\
	perf_inc(&perf_newest);
	if (perf_newest == perf_oldest) {
		perf_sum -= perf_filter[perf_oldest];
		perf_inc(&perf_oldest);
		perf_full = 1;
	}
	perf_count += diff;
	perf_filter[perf_newest] = diff;
	perf_sum += diff;
\
	if (!perf_full) {
		int count;
\
		if (perf_oldest == -1) {
			/* first call to perf_insert */
			perf_oldest = perf_newest;
			return perf_count;
		}
\
		count = (perf_newest - perf_oldest + PERF_FILTER_SIZE)
			% PERF_FILTER_SIZE + 1;
\
		sum = (perf_sum * PERF_FILTER_SIZE + count / 2)
			/ count;
		prev_sum = (prev_sum * PERF_FILTER_SIZE + (count - 1) / 2)
			/ (count - 1);
\
		if (count == 2)
			/* Init perf_offset so that it later becomes zero
			 * and that this sample therefore is used as a base.
			 */
			perf_offset = sum - diff * PERF_FILTER_SIZE;
	}
	else
		sum = perf_sum;
\
	perf_offset += diff * PERF_FILTER_SIZE - sum;
\
	if (perf_offset <= 0) {
		/* new base offset */
		perf_offset_index = perf_newest;
		perf_offset = 0;
		perf_offset_backup_index = -1;
	}
	else {
		if (perf_offset <= perf_offset_backup
		    || perf_offset_backup_index == -1) {
			/* new backup offset */
			perf_offset_backup_index = perf_newest;
			perf_offset_backup = perf_offset;
		}
\
		if (perf_offset_index == perf_newest) {
			/* base offset disappeared, use backup */
			if (perf_offset_backup_index == -1) {
				/* Aiee! No backup, use this sample!
				 */
				perf_offset_index = perf_newest;
				perf_offset = 0;
			}
			else {
				perf_offset_index = perf_offset_backup_index;
				perf_offset -= perf_offset_backup;
				perf_offset_backup_index = -1;
			}
		}
	}
\
	return perf_count - perf_offset / PERF_FILTER_SIZE;
}
\
static void
performance_filter(LARGE_INTEGER *perf_count_now, ULONGLONG TimerTime)
{
	LONGLONG time_diff;
	LONGLONG perf_diff;
	LONGLONG perf_diff_per_tick;
	int i, ticks;
	int diff_limit;
\
	if (!every)
		return;
\
	if (perf_newest == -1) {
		/* This is the 1st call */
		perf_reset(perf_count_now->QuadPart);
		return;
	}
\
	/* Estimate how many ticks there have been
	 * since the last call
	 */
	time_diff = TimerTime - LastTimerTime;
	ticks = 0;
	while (time_diff > every / 2) {
		++ticks;
		time_diff -= every;
		if (ticks > 200) {
			/* Too many ticks: clock stepped forward?
			 * reset filter.
			 */
			msyslog(LOG_NOTICE,
				"missed too many ticks (> 200), "
				"diff %I64d, filter reset",
				TimerTime - LastTimerTime);
			perf_reset(perf_count_now->QuadPart);
			return;
		}
	}
\
	diff_limit = every / 10;
	if (!ticks || time_diff > diff_limit || time_diff < -diff_limit) {
		/* No ticks: clock stepped back?
		 * Large drift: clock stepped forward?
		 * reset filter
		 */
		msyslog(LOG_NOTICE,
			"%d ticks, diff %I64d, filter reset",
			ticks, time_diff);
		perf_reset(perf_count_now->QuadPart);
		return;
	}
\
	/* Split up the performance counter difference
	 * between these ticks
	 */
	perf_diff = perf_count_now->QuadPart - perf_count;
	perf_diff_per_tick = perf_diff / ticks;
\
	i = ticks;
	while (--i > 0)
		perf_insert(perf_diff_per_tick);
\
	perf_count_now->QuadPart
		= perf_insert(perf_diff - perf_diff_per_tick * (ticks - 1));
}
\
I309 1
	ULONGLONG TimerTime;
I319 4
	TimerTime = ((ULONGLONG) dwTimerHighValue << 32) +
		(ULONGLONG) dwTimerLowValue;
	performance_filter(&LargeIntNowCount, TimerTime);
\
D342 2
I343 1
	LastTimerTime = TimerTime;

# Patch checksum=75e4622a
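
For context, the commit message above describes the baseline scheme this
patch refines: latch a (system time, performance counter) pair at each timer
tick and extrapolate the system time between ticks from
QueryPerformanceCounter().  The sketch below is a minimal standalone
illustration of that baseline, not code from the patch; the names (on_tick,
interpolated_time_100ns, tick_perf_count, perf_freq) are hypothetical and
error handling is omitted.

/*
 * Illustrative sketch only (not from the patch): extrapolate system
 * time between timer ticks using the performance counter.
 */
#include <windows.h>
#include <stdio.h>

static ULONGLONG tick_time_100ns;  /* system time latched at the last tick */
static LONGLONG  tick_perf_count;  /* performance counter at the last tick */
static LONGLONG  perf_freq;        /* performance counter ticks per second */

/* Tick handler: latch a (system time, performance counter) sample pair. */
static void
on_tick(ULONGLONG system_time_100ns)
{
	LARGE_INTEGER now;

	QueryPerformanceCounter(&now);
	tick_time_100ns = system_time_100ns;
	tick_perf_count = now.QuadPart;
}

/* Extrapolate the current system time, in 100 ns units, between ticks. */
static ULONGLONG
interpolated_time_100ns(void)
{
	LARGE_INTEGER now;
	LONGLONG elapsed;

	QueryPerformanceCounter(&now);
	elapsed = now.QuadPart - tick_perf_count;
	/* Scale counter ticks to 100 ns units.  Safe between ticks;
	 * would overflow if elapsed grew to hours at high frequencies. */
	return tick_time_100ns + elapsed * 10000000 / perf_freq;
}

int
main(void)
{
	LARGE_INTEGER freq;
	FILETIME ft;
	ULARGE_INTEGER st;

	QueryPerformanceFrequency(&freq);
	perf_freq = freq.QuadPart;

	GetSystemTimeAsFileTime(&ft);   /* pretend a tick fired right now */
	st.LowPart = ft.dwLowDateTime;
	st.HighPart = ft.dwHighDateTime;
	on_tick(st.QuadPart);

	Sleep(25);                      /* somewhere between two ticks */
	printf("interpolated time: %I64u (100 ns units)\n",
	       interpolated_time_100ns());
	return 0;
}

With only this scheme, each tick's sampling latency feeds straight into the
time base, giving the roughly 1 ms of jitter the commit message complains
about.  perf_insert() in the patch instead keeps the best (smallest-offset)
recent sample as the base until a better one arrives or it ages out of the
2048-entry window, which corresponds to the "20 seconds" mentioned above at a
typical 10 ms tick.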