Created
March 1, 2026 00:04
-
-
Save argusdusty/4c2b2e4b5b9a2cf943f2b45800fd8d0f to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| package main | |
| import ( | |
| "bufio" | |
| "bytes" | |
| "context" | |
| "flag" | |
| "fmt" | |
| "log" | |
| "math" | |
| "math/bits" | |
| "net/http" | |
| _ "net/http/pprof" | |
| "os" | |
| "os/exec" | |
| "path/filepath" | |
| "strconv" | |
| "strings" | |
| "sync" | |
| "sync/atomic" | |
| "time" | |
| ) | |
| // ── Math Utilities ────────────────────────────────────────────────────────── | |
// ModInv returns the multiplicative inverse of x modulo the prime p,
// computed with the extended Euclidean algorithm on unsigned integers.
// A "negative" coefficient shows up as a wrapped-around uint64 (> p),
// which the final adjustment folds back into [0, p).
func ModInv(x, p uint64) uint64 {
	prev, cur := uint64(0), uint64(1)
	rem := p
	for x != 0 {
		q := rem / x
		prev, cur = cur, prev-q*cur
		rem, x = x, rem%x
	}
	if prev > p {
		prev += p // undo unsigned wraparound of a negative coefficient
	}
	return prev
}
// PowMod10 returns 10^n mod p for prime p, using Fermat's little theorem
// to reduce the exponent (10^n ≡ 10^(n mod (p-1)) mod p) and left-to-right
// square-and-multiply with 128-bit intermediate products.
func PowMod10(n, p uint64) uint64 {
	n %= p - 1 // b^n === b^(n%(p-1)) mod prime p
	if n == 0 {
		return 1
	}
	acc := uint64(10 % p) // top bit of n is always set after the reduction
	for i := bits.Len64(n) - 2; i >= 0; i-- {
		// Square each step; fold in an extra factor of 10 when bit i is set.
		factor := acc
		if n&(1<<i) != 0 {
			factor = acc * 10
		}
		hi, lo := bits.Mul64(factor, acc)
		_, acc = bits.Div64(hi, lo, p)
	}
	return acc
}
// reverseUint returns v with its decimal digits reversed
// (trailing zeros are dropped, e.g. 1200 -> 21; 0 -> 0).
func reverseUint(v uint64) uint64 {
	var rev uint64
	for ; v != 0; v /= 10 {
		rev = rev*10 + v%10
	}
	return rev
}
// kRange returns the half-open range [kMin, kMax) of k values with exactly
// kDigits decimal digits: kMin = 10^(kDigits-1), kMax = 10^kDigits.
func kRange(kDigits int) (kMin, kMax uint64) {
	kMax = 1
	for i := 0; i < kDigits; i++ {
		kMax *= 10
	}
	kMin = kMax / 10
	return kMin, kMax
}
| // ── Logging ───────────────────────────────────────────────────────────────── | |
// Logger periodically invokes a progress callback on a background goroutine
// until Stop is called. When logging is disabled (interval <= 0 in
// StartLogging), ticker and done are nil and Stop is a no-op.
type Logger struct {
	ticker *time.Ticker  // nil when logging is disabled
	done   chan struct{} // closed by Stop to terminate the ticker goroutine
	fn     func(bool)    // progress callback; argument is true only for the final call
}
| func StartLogging(interval time.Duration, fn func(isEnd bool)) *Logger { | |
| if interval <= 0 { | |
| return &Logger{fn: fn} | |
| } | |
| l := &Logger{ticker: time.NewTicker(interval), done: make(chan struct{}), fn: fn} | |
| go func() { | |
| for { | |
| select { | |
| case <-l.ticker.C: | |
| fn(false) | |
| case <-l.done: | |
| return | |
| } | |
| } | |
| }() | |
| return l | |
| } | |
| func (l *Logger) Stop(verbose bool) { | |
| if l.ticker != nil { | |
| l.ticker.Stop() | |
| close(l.done) | |
| if verbose { | |
| l.fn(true) | |
| } | |
| } | |
| } | |
| // ── Generic Worker Pool ───────────────────────────────────────────────────── | |
| // Worker processes tasks of type T with lifecycle hooks. | |
// Worker processes tasks of type T with lifecycle hooks. All three methods
// are invoked on the same pool goroutine, so implementations may keep
// per-worker mutable state without locking.
type Worker[T any] interface {
	// Setup is called once, before any task is processed.
	Setup()
	// Run processes a single task.
	Run(T)
	// Finish is called once, after the task channel is drained.
	Finish()
}
| // Pool distributes tasks across a fixed number of worker goroutines. | |
// Pool distributes tasks across a fixed number of worker goroutines.
type Pool[T any] struct {
	tasks chan T         // buffered work queue shared by all workers
	wg    sync.WaitGroup // tracks worker goroutines for Close
}
| // NewPool creates a pool of numWorkers goroutines, each backed by a Worker | |
| // returned from newWorker. Workers process tasks until the pool is closed. | |
| func NewPool[T any](numWorkers int, newWorker func(id int) Worker[T]) *Pool[T] { | |
| bufSize := max(256, numWorkers*2) | |
| p := &Pool[T]{tasks: make(chan T, bufSize)} | |
| for i := range numWorkers { | |
| p.wg.Add(1) | |
| go func() { | |
| defer p.wg.Done() | |
| w := newWorker(i) | |
| w.Setup() | |
| defer w.Finish() | |
| for task := range p.tasks { | |
| w.Run(task) | |
| } | |
| }() | |
| } | |
| return p | |
| } | |
| func (p *Pool[T]) Submit(task T) { p.tasks <- task } | |
// Close signals no more tasks and waits for all workers to finish.
// Submit must not be called after Close (send on closed channel panics).
func (p *Pool[T]) Close() {
	close(p.tasks)
	p.wg.Wait()
}
// dynamicChunkSize returns a chunk size that shrinks as remaining decreases,
// capped at maxSize. The size is 2^(bitlen(remaining)/2), i.e. roughly
// sqrt(remaining) rounded to a power of two: remaining=2^14 → 2^7,
// remaining=2^12 → 2^6.
func dynamicChunkSize(remaining, maxSize int) int {
	halfBits := bits.Len(uint(remaining)) / 2
	if size := 1 << halfBits; size < maxSize {
		return size
	}
	return maxSize
}
// BroadcastPool distributes identical tasks to all worker goroutines:
// every Submit is delivered to every worker (unlike Pool, where each task
// goes to exactly one worker).
type BroadcastPool[T any] struct {
	tasks []chan T       // one buffered channel per worker
	wg    sync.WaitGroup // tracks worker goroutines for Close
}
| // NewBroadcastPool creates a pool of numWorkers goroutines, each backed by a Worker. | |
| func NewBroadcastPool[T any](numWorkers int, newWorker func(id int) Worker[T]) *BroadcastPool[T] { | |
| p := &BroadcastPool[T]{ | |
| tasks: make([]chan T, numWorkers), | |
| } | |
| for i := range numWorkers { | |
| p.tasks[i] = make(chan T, max(256, numWorkers*2)) | |
| p.wg.Add(1) | |
| go func(id int) { | |
| defer p.wg.Done() | |
| w := newWorker(id) | |
| w.Setup() | |
| defer w.Finish() | |
| for task := range p.tasks[id] { | |
| w.Run(task) | |
| } | |
| }(i) | |
| } | |
| return p | |
| } | |
| func (p *BroadcastPool[T]) Submit(task T) { | |
| for _, ch := range p.tasks { | |
| ch <- task | |
| } | |
| } | |
| // Close signals no more tasks and waits for all workers to finish. | |
| func (p *BroadcastPool[T]) Close() { | |
| for _, ch := range p.tasks { | |
| close(ch) | |
| } | |
| p.wg.Wait() | |
| } | |
| // ── Sieve ─────────────────────────────────────────────────────────────────── | |
// PrimeData carries one sieve prime together with the smallest non-negative
// k (computed in primeListWorker.Run) for which P divides the candidate
// 10^n + k*10^m + 1; all k ≡ KStart (mod P) can then be eliminated.
type PrimeData struct {
	P      uint64 // the sieve prime
	KStart uint64 // first eliminated k, in [0, P)
}
// SieveTask is a half-open index range [Low, High) of odd numbers to sieve;
// index i represents the odd number 2i+3.
type SieveTask struct {
	Low, High int
}
// primeListWorker sieves one segment of odd numbers and, for each prime it
// finds, precomputes the KStart offset consumed by the filtering stage.
type primeListWorker struct {
	basePrimes  []uint64           // odd primes up to sqrt of the sieve limit
	kMin        uint64             // lower end of the k range (a power of 10, per kRange)
	m           uint64             // exponent m of the candidate form 10^n + k*10^m + 1
	maxSegWords int                // capacity, in 64-bit words, of the reusable segment
	segment     []uint64           // per-worker scratch bitset (allocated in Setup)
	out         chan<- []PrimeData // primes found, with their KStart offsets
	progress    *uint64            // atomic counter of odd indices processed
}
// Setup allocates the reusable per-worker sieve segment buffer once, so Run
// never allocates the bitset itself.
func (w *primeListWorker) Setup() {
	w.segment = make([]uint64, w.maxSegWords)
}
| func (w *primeListWorker) Run(task SieveTask) { | |
| low_i, high_i := task.Low, task.High | |
| segmentWords := (high_i - low_i + 63) / 64 | |
| segment := w.segment[:segmentWords] | |
| for k := range segmentWords { | |
| segment[k] = ^uint64(0) | |
| } | |
| for _, p := range w.basePrimes { | |
| pi := int(p) | |
| start_i := (pi*pi - 3) / 2 | |
| if start_i >= high_i { | |
| break | |
| } | |
| if start_i < low_i { | |
| mv := (2*low_i + pi + 2) / pi | |
| mv += 1 - (mv & 1) | |
| start_i = (mv*pi - 3) / 2 | |
| } | |
| clearBits(segment, uint64(high_i-low_i), uint64(start_i-low_i), uint64(pi)) | |
| } | |
| var pd []PrimeData | |
| for i := range high_i - low_i { | |
| if segment[i>>6]&(1<<(i&63)) != 0 { | |
| p := uint64(2*(low_i+i) + 3) | |
| if p == 5 { | |
| continue | |
| } | |
| tenM := PowMod10(w.m, p) | |
| invTenM := ModInv(tenM, p) | |
| hi, lo := bits.Mul64(tenM, tenM) | |
| _, M2 := bits.Div64(hi, lo, p) | |
| hi, lo = bits.Mul64(M2, w.kMin%p) | |
| _, tenN := bits.Div64(hi, lo, p) | |
| kStart := p - (((tenN + 1) * invTenM) % p) | |
| if kStart == p { | |
| kStart = 0 | |
| } | |
| pd = append(pd, PrimeData{P: p, KStart: kStart}) | |
| } | |
| } | |
| atomic.AddUint64(w.progress, uint64(high_i-low_i)) | |
| w.out <- pd | |
| } | |
| func (w *primeListWorker) Finish() {} | |
// clearBits performs non-atomic bit clearing: it clears the bits at indices
// kStart, kStart+p, kStart+2p, ... of the little-endian bitset arr.
//
// For p < 64 it works a whole word at a time: ^0/((1<<p)-1) yields a mask
// with a bit set every p positions, which is pre-rotated once and then
// re-phased per word so the set bits land on indices ≡ kStart (mod p).
// NOTE(review): this fast path iterates to the end of arr and ignores
// arrLen, so padding bits past the logical length may also be cleared —
// harmless here since callers never read past their logical bit length.
// For p >= 64 bits are cleared one at a time, bounded by arrLen.
func clearBits(arr []uint64, arrLen, kStart, p uint64) {
	if p < 64 {
		mask := ((^uint64(0) / ((1 << p) - 1)) >> (64 % p)) | 1<<(64-(64%p))
		startWord := kStart >> 6
		for i := startWord; i < uint64(len(arr)); i++ {
			// Shift the repeating mask into phase for word i.
			m := mask << ((kStart + p - (64 * i % p)) % p)
			if i == startWord {
				// Keep bits below kStart in the first word untouched.
				m &^= (1 << (kStart & 63)) - 1
			}
			arr[i] &^= m
		}
	} else {
		for j := kStart; j < arrLen; j += p {
			arr[j>>6] &^= 1 << (j & 63)
		}
	}
}
// processBatchWorker clears eliminated k values from its own disjoint word
// range [wStart, wEnd) of the shared valid bitset. Because every broadcast
// worker owns a distinct range, the non-atomic writes never race.
type processBatchWorker struct {
	valid       []uint64 // shared bitset of still-valid k values (bit index == k)
	wStart      uint64   // first word of this worker's range (inclusive)
	wEnd        uint64   // last word of this worker's range (exclusive)
	totalWords  uint64   // len(valid), to detect the final (partial) range
	kMax        uint64   // upper bound of the k range (exclusive)
	doubleEmirp bool     // also eliminate k divisible by a sieve prime
	progress    *uint64  // atomic counter of primes processed (per worker)
}
// Setup is a no-op; all state is provided at construction.
func (w *processBatchWorker) Setup() {}

// Finish is a no-op.
func (w *processBatchWorker) Finish() {}
// Run clears, within this worker's word range, every bit k ≡ pd.KStart
// (mod pd.P) for each prime in the batch. With doubleEmirp it additionally
// clears k divisible by the prime itself (k must then be prime too).
func (w *processBatchWorker) Run(primes []PrimeData) {
	if w.wStart >= w.wEnd {
		return // more workers than words: nothing assigned
	}
	// kBase is the absolute k corresponding to bit 0 of this worker's slice.
	kBase := w.wStart * 64
	bitLen := (w.wEnd - w.wStart) * 64
	if w.wEnd == w.totalWords {
		// Final range: trim to the real k bound instead of a full word span.
		bitLen = w.kMax - kBase
	}
	arr := w.valid[w.wStart:w.wEnd]
	for _, pd := range primes {
		p := pd.P
		var start uint64
		// Calculate starting offset for this worker's segment: advance
		// pd.KStart by whole multiples of p to the first hit at/after kBase.
		if pd.KStart >= kBase {
			start = pd.KStart - kBase
		} else {
			start = (kBase - pd.KStart) % p
			if start != 0 {
				start = p - start
			}
		}
		if start < bitLen {
			clearBits(arr, bitLen, start, p)
		}
	}
	if w.doubleEmirp {
		for _, pd := range primes {
			// Calculate starting offset for double emirp condition: clear
			// k ≡ 0 (mod p), starting at 2p so k = p (prime) survives.
			// KStart == 0 means the first pass already did exactly this.
			if pd.KStart != 0 {
				p := pd.P
				dpStart := p * 2
				var startD uint64
				if dpStart >= kBase {
					startD = dpStart - kBase
				} else {
					startD = (kBase - dpStart) % p
					if startD != 0 {
						startD = p - startD
					}
				}
				if startD < bitLen {
					clearBits(arr, bitLen, startD, p)
				}
			}
		}
	}
	atomic.AddUint64(w.progress, uint64(len(primes)))
}
| // RunSieve runs the segmented prime sieve and updates valid[] for each found prime. | |
| func RunSieve(sieveBits, numWorkers int, valid []uint64, kMin, kMax uint64, m int, doubleEmirp bool, logInterval time.Duration, startTime time.Time) { | |
| N := 1 << sieveBits | |
| sqrtMax := int(math.Sqrt(float64(N))) | |
| nOddBase := sqrtMax / 2 | |
| baseWords := make([]uint64, (nOddBase+63)/64) | |
| for i := range baseWords { | |
| baseWords[i] = ^uint64(0) | |
| } | |
| for i := 0; i <= (int(math.Sqrt(float64(sqrtMax)))-3)/2; i++ { | |
| if baseWords[i>>6]&(1<<(i&63)) == 0 { | |
| continue | |
| } | |
| clearBits(baseWords, uint64(nOddBase), uint64(2*i*(i+3)+3), uint64(2*i+3)) | |
| } | |
| var basePrimes []uint64 | |
| for i := range nOddBase { | |
| if baseWords[i>>6]&(1<<(i&63)) != 0 { | |
| basePrimes = append(basePrimes, uint64(2*i+3)) | |
| } | |
| } | |
| nOdd := int((N - 1) / 2) | |
| const maxS = 1 << 28 | |
| maxSegWords := int(maxS / 64) | |
| pdOut := make(chan []PrimeData, 1024) | |
| var genProgress uint64 | |
| var filterProgress uint64 | |
| var foundPrimes uint64 | |
| logger := StartLogging(logInterval, func(isEnd bool) { | |
| gen := atomic.LoadUint64(&genProgress) | |
| filt := float64(atomic.LoadUint64(&filterProgress)) / float64(numWorkers) | |
| pct := float64(0) | |
| if nOdd > 0 { | |
| pct = float64(gen) / float64(nOdd) * 100 | |
| } | |
| if isEnd { | |
| fmt.Printf("Generating sieve primes: %d/%d (%.2f%%) | Found sieve primes: %d | Filtered using primes: %d (%.2f%%) | Elapsed: %v\n", gen, nOdd, pct, foundPrimes, uint64(filt), float64(filt)/float64(foundPrimes)*100, time.Since(startTime)) | |
| } else { | |
| fmt.Printf("Generating sieve primes: %d/%d (%.2f%%) | Found sieve primes: %d | Filtered using primes (approx.): %.1f (%.2f%%) | Elapsed: %v\n", gen, nOdd, pct, foundPrimes, filt, float64(filt)/float64(foundPrimes)*100, time.Since(startTime)) | |
| } | |
| }) | |
| defer logger.Stop(true) | |
| totalWords := uint64(len(valid)) | |
| wordsPerWorker := (totalWords + uint64(numWorkers) - 1) / uint64(numWorkers) | |
| filterPool := NewBroadcastPool(numWorkers, func(id int) Worker[[]PrimeData] { | |
| wStart := uint64(id) * wordsPerWorker | |
| wEnd := min(wStart+wordsPerWorker, totalWords) | |
| return &processBatchWorker{ | |
| valid: valid, | |
| wStart: wStart, | |
| wEnd: wEnd, | |
| totalWords: totalWords, | |
| kMax: kMax, | |
| doubleEmirp: doubleEmirp, | |
| progress: &filterProgress, | |
| } | |
| }) | |
| var wgPD sync.WaitGroup | |
| wgPD.Add(1) | |
| const filterBatchSize = 262144 | |
| go func() { | |
| defer wgPD.Done() | |
| batch := make([]PrimeData, 0, 2*filterBatchSize) | |
| for pd := range pdOut { | |
| foundPrimes += uint64(len(pd)) | |
| batch = append(batch, pd...) | |
| if len(batch) >= filterBatchSize { | |
| filterPool.Submit(batch) | |
| batch = make([]PrimeData, 0, 2*filterBatchSize) // GC handles the old backing array, appending will allocate a new one (TODO: Figure out a way to cycle through pre-allocated batch pools, up to the filterPool buffer limit) | |
| } | |
| } | |
| if doubleEmirp { | |
| // Remove ks divisible by 2 or 5 | |
| batch = append(batch, PrimeData{P: 2, KStart: 4}, PrimeData{P: 5, KStart: 10}) | |
| foundPrimes += 2 | |
| } | |
| if len(batch) > 0 { | |
| filterPool.Submit(batch) | |
| } | |
| filterPool.Close() | |
| }() | |
| pool := NewPool(numWorkers, func(id int) Worker[SieveTask] { | |
| return &primeListWorker{ | |
| basePrimes: basePrimes, | |
| kMin: kMin, | |
| m: uint64(m), | |
| maxSegWords: maxSegWords, | |
| out: pdOut, | |
| progress: &genProgress, | |
| } | |
| }) | |
| pos := 0 | |
| for pos < nOdd { | |
| S := min(1<<max(bits.Len(uint(pos))/2, 1), maxS) | |
| end := min(pos+S, nOdd) | |
| pool.Submit(SieveTask{Low: pos, High: end}) | |
| pos = end | |
| } | |
| pool.Close() | |
| close(pdOut) | |
| wgPD.Wait() | |
| } | |
| // ── Extract and test Candidates ──────────────────────────────────────────────────────── | |
// BuildTask is a half-open range [StartK, EndK) of k values to scan for
// surviving candidates.
type BuildTask struct {
	StartK, EndK uint64
}
// buildWorker scans the valid bitset for surviving k values and emits
// (k, reverse(k)) pairs when both members survived the sieve.
type buildWorker struct {
	valid   []uint64          // shared read-only bitset (bit index == k)
	kMin    uint64            // lower bound of the k range; reverses below it are rejected
	out     chan<- [2]uint64  // candidate pairs, larger k first
	countKs *uint64           // atomic counter of all surviving k values
}
// Setup is a no-op; buildWorker carries no per-goroutine state.
func (w *buildWorker) Setup() {}

// Finish is a no-op.
func (w *buildWorker) Finish() {}
| func (w *buildWorker) Run(task BuildTask) { | |
| for k := task.StartK; k < task.EndK; k++ { | |
| if w.valid[k>>6]&(1<<(k&63)) == 0 { | |
| continue | |
| } | |
| atomic.AddUint64(w.countKs, 1) | |
| r := reverseUint(k) | |
| if r < k && r >= w.kMin && (w.valid[r>>6]&(1<<(r&63)) != 0) { | |
| w.out <- [2]uint64{k, r} | |
| } | |
| } | |
| } | |
| // RunBuildCandidates extracts (k, r) candidate pairs from the valid bitset. | |
| // Returns a channel that streams pairs as they are found. | |
| func RunBuildCandidates(valid []uint64, kMin, kMax uint64, numWorkers int, countKs *uint64) <-chan [2]uint64 { | |
| out := make(chan [2]uint64, 1024) | |
| go func() { | |
| pool := NewPool(numWorkers, func(id int) Worker[BuildTask] { | |
| return &buildWorker{valid: valid, kMin: kMin, out: out, countKs: countKs} | |
| }) | |
| remaining := int(kMax - kMin) | |
| offset := uint64(0) | |
| for remaining > 0 { | |
| chunkSize := dynamicChunkSize(remaining, 1<<16) | |
| startK := kMin + offset | |
| endK := min(startK+uint64(chunkSize), kMax) | |
| pool.Submit(BuildTask{StartK: startK, EndK: endK}) | |
| offset += uint64(chunkSize) | |
| remaining -= chunkSize | |
| } | |
| pool.Close() | |
| close(out) | |
| }() | |
| return out | |
| } | |
| // ── Check ─────────────────────────────────────────────────────────────────── | |
// checkWorker drives one pfgw process per batch inside a private working
// directory. NOTE(review): storing ctx/cancel in the struct is unidiomatic
// Go, but the Worker interface's Run(T) has no context parameter, so the
// context must travel with the worker.
type checkWorker struct {
	id          int                // worker index; names the working directory
	workerDir   string             // "worker<id>", created in Setup, removed in Finish
	pfgwPath    string             // absolute path to the pfgw binary
	header      string             // ABC header line written before the candidates
	tests       *uint64            // atomic: completed primality tests
	primes      *uint64            // atomic: tests that reported prime/PRP
	emirps      *uint64            // atomic: complete emirp pairs found
	exitOnFirst bool               // cancel all workers after the first emirp
	ctx         context.Context    // shared cancellation across all workers
	cancel      context.CancelFunc // invoked when exitOnFirst triggers
	emirpsOut   chan<- [2]uint64   // found emirp pairs, streamed to the printer
}
| func (w *checkWorker) Setup() { | |
| w.workerDir = fmt.Sprintf("worker%d", w.id) | |
| os.MkdirAll(w.workerDir, 0755) | |
| } | |
// Finish removes the worker's directory; the error is deliberately ignored
// (best-effort cleanup of temporary files).
func (w *checkWorker) Finish() {
	os.RemoveAll(w.workerDir)
}
// Run writes one batch of candidate pairs to <workerDir>/helper.txt in pfgw
// ABC format and launches pfgw, parsing its stdout line-by-line to count
// tests, primes, and completed ("- Complete Set -") emirp pairs.
//
// NOTE(review): helper.txt is written but never passed on the pfgw command
// line — this appears to rely on pfgw's default input-file behavior; verify
// against the pfgw documentation. The os.WriteFile error is also ignored: a
// failed write would silently run pfgw against stale data.
func (w *checkWorker) Run(batch [][2]uint64) {
	if w.ctx.Err() != nil {
		return // cancelled (exitOnFirst): drop remaining batches
	}
	candPath := filepath.Join(w.workerDir, "helper.txt")
	var buf bytes.Buffer
	buf.WriteString(w.header + "\n")
	for _, pair := range batch {
		fmt.Fprintf(&buf, "%d %d\n", pair[0], pair[1])
	}
	os.WriteFile(candPath, buf.Bytes(), 0644)
	cmdArgs := []string{"-Cverbose", "-f0", "-N", "-u0"}
	cmd := exec.CommandContext(w.ctx, w.pfgwPath, cmdArgs...)
	cmd.Dir = w.workerDir
	stdout, err := cmd.StdoutPipe()
	if err != nil || cmd.Start() != nil {
		return
	}
	scanner := bufio.NewScanner(stdout)
	// Custom split: treat both "\n" and "\r\n" (and bare "\r") as line ends,
	// since pfgw may emit carriage returns for progress output.
	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
		if i := bytes.IndexAny(data, "\r\n"); i >= 0 {
			if data[i] == '\r' && i+1 < len(data) && data[i+1] == '\n' {
				return i + 2, data[:i], nil
			}
			return i + 1, data[:i], nil
		}
		if atEOF && len(data) > 0 {
			return len(data), data, nil
		}
		return 0, nil, nil
	})
	// curK/prevK track the k values of the last two prime results; a
	// "- Complete Set -" line means both members of a pair were prime.
	var curK, prevK uint64
	for scanner.Scan() {
		line := scanner.Text()
		if line = strings.TrimSpace(line); line == "" {
			continue
		}
		if strings.Contains(line, "is composite") {
			atomic.AddUint64(w.tests, 1)
		} else if strings.Contains(line, "is prime") || strings.Contains(line, "PRP") {
			atomic.AddUint64(w.tests, 1)
			atomic.AddUint64(w.primes, 1)
			prevK = curK
			// Extract K from an expression like "10^N+K*10^M+1 ...":
			// token before first space → part after first '+' → before '*'.
			curK, _ = strconv.ParseUint(strings.Split(strings.Split(strings.Split(line, " ")[0], "+")[1], "*")[0], 10, 64)
		} else if strings.Contains(line, "- Complete Set -") {
			atomic.AddUint64(w.emirps, 1)
			w.emirpsOut <- [2]uint64{prevK, curK}
			if w.exitOnFirst {
				w.cancel() // stops every worker sharing this context
			}
		}
	}
	cmd.Wait()
}
| // RunCheck reads candidate pairs from the channel, batches them, and runs pfgw. | |
| func RunCheck(candidates <-chan [2]uint64, n, m, numWorkers int, pfgwPath string, exitOnFirst bool, logInterval time.Duration, countKs *uint64, appStart time.Time) { | |
| pfgwAbs, err := filepath.Abs(pfgwPath) | |
| if _, statErr := os.Stat(pfgwAbs); err != nil || os.IsNotExist(statErr) { | |
| log.Fatalf("Error: PFGW binary %s not found", pfgwPath) | |
| } | |
| var tests, primes, emirps, totalCands uint64 | |
| startTime := time.Now() | |
| header := fmt.Sprintf("ABC 10^%d+$a*10^%d+1&10^%d+$b*10^%d+1", n, m, n, m) | |
| logger := StartLogging(logInterval, func(isEnd bool) { | |
| count := atomic.LoadUint64(&tests) - atomic.LoadUint64(&primes) + atomic.LoadUint64(&emirps) | |
| total := atomic.LoadUint64(&totalCands) | |
| pct := float64(0) | |
| if total > 0 { | |
| pct = float64(count) / float64(total) * 100 | |
| } | |
| elapsed := time.Since(startTime) | |
| var avg time.Duration | |
| if count > 0 { | |
| avg = elapsed / time.Duration(count) | |
| } | |
| fmt.Printf("Found candidates: %d | Tested candidate pairs: %d (%.4f%%) | Primes: %d | Emirps: %d | Elapsed: %v | Avg per pair: %v (%v/worker)\n", total, count, pct, atomic.LoadUint64(&primes), atomic.LoadUint64(&emirps), elapsed, avg, avg*time.Duration(numWorkers)) | |
| }) | |
| defer logger.Stop(true) | |
| ctx, cancel := context.WithCancel(context.Background()) | |
| defer cancel() | |
| emirpsOut := make(chan [2]uint64, 1024) | |
| var wgEmirp sync.WaitGroup | |
| wgEmirp.Add(1) | |
| go func() { | |
| defer wgEmirp.Done() | |
| for pair := range emirpsOut { | |
| fmt.Printf("Emirp found! 10^%d+%d*10^%d+1 -> 10^%d+%d*10^%d+1\n", n, pair[0], m, n, pair[1], m) | |
| } | |
| }() | |
| pool := NewPool(numWorkers, func(id int) Worker[[][2]uint64] { | |
| return &checkWorker{ | |
| id: id, | |
| pfgwPath: pfgwAbs, | |
| header: header, | |
| tests: &tests, | |
| primes: &primes, | |
| emirps: &emirps, | |
| exitOnFirst: exitOnFirst, | |
| ctx: ctx, | |
| cancel: cancel, | |
| emirpsOut: emirpsOut, | |
| } | |
| }) | |
| // Batch candidates from the channel and submit to the check pool. | |
| var batch [][2]uint64 | |
| // Try to get batches running between 5 and 60 seconds. | |
| batchSize := 1 << 8 | |
| for time.Duration((batchSize*n*n)/40)*time.Microsecond > time.Minute && batchSize > 2 { | |
| batchSize >>= 1 | |
| } | |
| for time.Duration((batchSize*n*n)/40)*time.Microsecond < 5*time.Second { | |
| batchSize <<= 1 | |
| } | |
| fmt.Println("Using batch size:", batchSize) | |
| for pair := range candidates { | |
| if ctx.Err() != nil { | |
| break | |
| } | |
| atomic.AddUint64(&totalCands, 1) | |
| batch = append(batch, pair) | |
| if len(batch) >= batchSize { | |
| batchCopy := make([][2]uint64, len(batch)) | |
| copy(batchCopy, batch) | |
| pool.Submit(batchCopy) | |
| batch = batch[:0] | |
| } | |
| } | |
| // Submit remaining with dynamic chunk sizing for balanced tail. | |
| if ctx.Err() == nil { | |
| size := len(batch) / numWorkers | |
| rem := len(batch) % numWorkers | |
| fmt.Println("Dividing remaining", len(batch), "candidates among", numWorkers, "workers") | |
| for i := range numWorkers { | |
| workerSize := size | |
| if i < rem { | |
| workerSize++ | |
| } | |
| batchCopy := make([][2]uint64, workerSize) | |
| copy(batchCopy, batch[:workerSize]) | |
| pool.Submit(batchCopy) | |
| batch = batch[workerSize:] | |
| } | |
| } | |
| pool.Close() | |
| close(emirpsOut) | |
| wgEmirp.Wait() | |
| elapsed := time.Since(appStart) | |
| totalKsVal := atomic.LoadUint64(countKs) | |
| tested := atomic.LoadUint64(&totalCands) | |
| var perCand time.Duration | |
| if tested > 0 { | |
| perCand = elapsed / time.Duration(tested) | |
| } | |
| defer fmt.Printf("Total valid k's: %d | Candidates tested: %d | Time per candidate: %v | Total elapsed: %v\n", totalKsVal, tested, perCand, elapsed) | |
| } | |
// ── Estimation (unchanged) ──────────────────────────────────────────────────
// invExpEulerMascheroniln2_2 = exp(-2*(eulergamma+ln(ln(2)))): a
// Mertens-style sieve-survival correction used by EmirpExpected (squared,
// hence the factor 2, because both members of a pair must survive).
const invExpEulerMascheroniln2_2 = 0.6561239966346915376567180419362681197107838240377582899166952545
| func EmirpExpected(n, m, sieveBits int) (E1, E2, E3, E4 float64) { | |
| const ln10_2 = math.Log10E * math.Log10E | |
| λ2 := ln10_2 / float64((n-1)*(n-1)) // Probability that an n-digit number is prime | |
| μ2 := ln10_2 / float64((m-1)*(m-1)) // Probability that an m-digit number is prime | |
| T := 9 * math.Pow(10, float64(m-2)) // Number of k's | |
| κ := (1485.0 / 32.0) // Adjustment for regular emirps | |
| κ2 := (3993.0 / 512.0) * (10.0 - float64(m&1)) // Adjustment for 'double'-emirps | |
| sieveFactor := invExpEulerMascheroniln2_2 / float64(sieveBits*sieveBits) | |
| E1 = T * κ * λ2 | |
| E2 = T * κ2 * μ2 * λ2 | |
| E3 = T * κ * sieveFactor | |
| E4 = T * κ2 * μ2 * sieveFactor | |
| return | |
| } | |
// CalculateOptimalSieveBits scans sieve sizes M = 10..60 bits, estimates
// the total sieve+test wall time for each using EmirpExpected plus
// hard-coded machine cost constants, and prints the best choice. When
// *trueM is 0 (user let the program decide) the best M is written back
// through trueM. N is the total digit count and K the k-digit count.
// NOTE(review): the cost coefficients (2.1e-8, 4.4e-8, 4.0e-10) are
// empirical timings for some reference machine — expect estimates to be
// off by a constant factor elsewhere.
func CalculateOptimalSieveBits(N, K, numWorkers int, trueM *int, exitOnFirst, doubleEmirp bool) {
	var bestM int
	var minSeconds float64
	var minTime, estTime time.Duration
	var bestSieveTime, bestTestTime, estSieveTime, estTestTime time.Duration
	for M := 10; M <= 60; M++ {
		var numEmirps, numCands float64
		if !doubleEmirp {
			numEmirps, _, numCands, _ = EmirpExpected(N, K, M)
		} else {
			_, numEmirps, _, numCands = EmirpExpected(N, K, M)
		}
		// With exitOnFirst only ~1/(numEmirps+1) of candidates get tested.
		expectedFraction := 1.
		if exitOnFirst {
			expectedFraction = 1 / (numEmirps + 1)
		}
		// doubleEmirp runs the bit-clearing pass twice (see processBatchWorker).
		sieveMult := 1.
		if doubleEmirp {
			sieveMult = 2.
		}
		sieveSeconds := (2.1e-8*sieveMult*math.Pow(10, float64(K))*math.Log(float64(M)) + 4.4e-8*math.Pow(2, float64(M))*math.Log(float64(N))/float64(M)) / float64(numWorkers)
		testSeconds := numCands * expectedFraction * (4.0e-10 * float64(N*N) * math.Log(float64(N)) * math.Log(math.Log(float64(N)))) / float64(numWorkers)
		totalSeconds := sieveSeconds + testSeconds
		if minSeconds == 0 || totalSeconds < minSeconds {
			minSeconds = totalSeconds
			bestM = M
			bestSieveTime = time.Duration(sieveSeconds * float64(time.Second))
			bestTestTime = time.Duration(testSeconds * float64(time.Second))
		}
		// Also record the estimate for the user-specified sieve size.
		if M == *trueM {
			estSieveTime = time.Duration(sieveSeconds * float64(time.Second))
			estTestTime = time.Duration(testSeconds * float64(time.Second))
			estTime = time.Duration(float64(time.Second) * totalSeconds)
		}
	}
	minTime = time.Duration(minSeconds * float64(time.Second))
	if *trueM == 0 {
		// No user override: adopt the computed optimum.
		*trueM = bestM
		estTime = minTime
		estSieveTime = bestSieveTime
		estTestTime = bestTestTime
	}
	fmt.Printf("Calculated optimal sieve bits: %d (using %d) with expected runtime: %v (current: %v)\n", bestM, *trueM, minTime, estTime)
	fmt.Printf("Estimated breakdown for sieveBits=%d: Sieve: %v, Test: %v\n", bestM, bestSieveTime, bestTestTime)
	fmt.Printf("Estimated breakdown for sieveBits=%d: Sieve: %v, Test: %v\n", *trueM, estSieveTime, estTestTime)
}
| // ── Main ──────────────────────────────────────────────────────────────────── | |
// main parses flags and runs the three-stage emirp search:
// sieve (RunSieve) → candidate pairing (RunBuildCandidates) → PRP testing
// with pfgw (RunCheck). Candidates have the form 10^n + k*10^m + 1 with
// n = digits-1, m = (digits-kDigits)/2, so n = 2m + kDigits - 1 and the
// decimal reverse of a candidate is 10^n + reverse(k)*10^m + 1.
func main() {
	var pfgwBinary string
	var digits, kDigits, sieveBits, numWorkers int
	var exitOnFirst, doubleEmirp bool
	var logInterval time.Duration
	flag.IntVar(&digits, "digits", 0, "Total digits (n = digits - 1)")
	flag.IntVar(&kDigits, "kDigits", 0, "Number of digits for k")
	flag.IntVar(&sieveBits, "sieveBits", 0, "Sieve limit (2^sieveBits) (0 to use an automatically determined value)")
	flag.StringVar(&pfgwBinary, "pfgw", "pfgw64.exe", "Path to PFGW executable")
	flag.IntVar(&numWorkers, "workers", 1, "Number of worker threads")
	flag.BoolVar(&exitOnFirst, "exitOnFirst", false, "Exit immediately when first emirp is found")
	flag.DurationVar(&logInterval, "logInterval", 30*time.Second, "Interval to print progress logs (use 0 to not log)")
	flag.BoolVar(&doubleEmirp, "doubleEmirp", false, "Only search for 10^n+k*10^m+1 emirps where k is also an emirp")
	flag.Parse()
	if numWorkers < 1 {
		log.Fatalf("workers must be > 0")
	}
	if digits == 0 {
		log.Fatalf("digits must be > 0")
	}
	if kDigits == 0 {
		log.Fatalf("kDigits must be > 0")
	}
	// m must be an integer, hence the parity requirement.
	if (digits-kDigits)%2 != 0 {
		log.Fatalf("Invalid parameters: digits - kDigits must be even to compute a valid m.")
	}
	// Start pprof server for runtime profiling.
	go func() {
		log.Println(http.ListenAndServe("localhost:6060", nil))
	}()
	n := digits - 1
	m := (digits - kDigits) / 2
	kMin, kMax := kRange(kDigits)
	// Picks sieveBits automatically when the flag was left at 0.
	CalculateOptimalSieveBits(digits, kDigits, numWorkers, &sieveBits, exitOnFirst, doubleEmirp)
	numEmirps, numDoubleEmirps, numCands, numDoubleCands := EmirpExpected(digits, kDigits, sieveBits)
	if doubleEmirp {
		numEmirps = numDoubleEmirps
		numCands = numDoubleCands
	}
	// P(at least one emirp) under a Poisson model with mean numEmirps.
	probEmirp := (1 - math.Exp(-numEmirps)) * 100
	fmt.Printf("Expected number of emirp pairs in range: %.2f, probability of any emirp: %.2f%%, expected number of candidates %.2f\n", numEmirps, probEmirp, numCands)
	fmt.Println(numEmirps, numDoubleEmirps, numCands, numDoubleCands)
	start := time.Now()
	// ── Sieve ──
	// One bit per k in [0, kMax]; all k start valid.
	valid := make([]uint64, (kMax>>6)+1)
	for i := range valid {
		valid[i] = ^uint64(0)
	}
	RunSieve(sieveBits, numWorkers, valid, kMin, kMax, m, doubleEmirp, logInterval, start)
	fmt.Printf("Sieving completed in %v\n", time.Since(start))
	// ── Build candidates → Check ──
	var countKs uint64
	candidates := RunBuildCandidates(valid, kMin, kMax, numWorkers, &countKs)
	RunCheck(candidates, n, m, numWorkers, pfgwBinary, exitOnFirst, logInterval, &countKs, start)
}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment