Skip to content

Instantly share code, notes, and snippets.

View samueleresca's full-sized avatar

Samuele Resca samueleresca

View GitHub Profile
from quoracle import QuorumSystem, Node

if __name__ == '__main__':
    # Defect fixed: the body of the __main__ guard was unindented in the
    # original paste, which is a SyntaxError in Python.

    # Three replicas; any pair of them (a*b, b*c, or a*c) forms a read quorum,
    # i.e. a classic 2-of-3 majority quorum system.
    a, b, c = Node('a'), Node('b'), Node('c')
    majority = QuorumSystem(reads=a*b + b*c + a*c)

    # For a 100% read workload the most-loaded node serves 2/3 of requests,
    # so throughput capacity is the reciprocal, 3/2.
    print(majority.load(read_fraction=1))      # 2/3
    print(majority.capacity(read_fraction=1))  # 3/2
import time
from typing import Iterable
import pytest
from phi_accrual_failure_detector import PhiAccrualFailureDetector
class TestPhiAccrualFailureDetector:
import math
import time
from atomos.atomic import AtomicReference
from src._heartbeat_history import _HeartbeatHistory
from src._state import _State
class PhiAccrualFailureDetector:
import math
import time
from atomos.atomic import AtomicReference
from src._heartbeat_history import _HeartbeatHistory
from src._state import _State
class PhiAccrualFailureDetector:
from src._heartbeat_history import _HeartbeatHistory
from typing import Optional
class _State:
"""
Represents the accrual failure detector's state. It wraps the _HeartbeatHistory and the latest (most recent) timestamp.
See: https://github.com/akka/akka/blob/0326e113879f08f39ca80667512cc960f267c81b/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala#L120
Attributes:
from __future__ import annotations
import math
class _HeartbeatHistory:
"""
Represents the sample window defined in the φ Accrual failure detector paper.
It stores a limited list of heartbeats in a list of length max_sample_size.
See:
// Lets now assume that a single partition changes. We only need to recompute the state of this
// partition in order to update the metrics for the whole table.
DataFrame updatedUsManufacturers = LoadData(new[]
{
new object[] {3, "ManufacturerDNew", "US"}, new object[] {4, null, "US"},
new object[] {5, "ManufacturerFNew http://clickme.com", "US"},
});
// Recompute state of partition
InMemoryStateProvider updatedUsStates = new InMemoryStateProvider();
DataFrame dataSetDE = LoadDataSetDE();
DataFrame dataSetUS = LoadDataSetUS();
DataFrame dataSetCN = LoadDataSetCN();
// We initialize a new check for the following data fields
var check = new Check(CheckLevel.Warning, "generic check")
.IsComplete("manufacturerName")
.ContainsURL("manufacturerName", val => val == 0.0)
.IsContainedIn("countryCode", new[] { "DE", "US", "CN" });
internal abstract class ScanShareableAnalyzer<S, M> : Analyzer<S, M>, ...
{
...
public override Option<S> ComputeStateFrom(DataFrame dataFrame)
{
IEnumerable<Column> aggregations = AggregationFunctions();
Row result = dataFrame
.Agg(aggregations.First(), aggregations.Skip(1).ToArray())
.Collect()
using Microsoft.Spark.Sql;
using Microsoft.Spark.Sql.Types;
using static Microsoft.Spark.Sql.Functions;
namespace deequ.Analyzers
{
internal sealed class MaxLength : StandardScanShareableAnalyzer<MaxState>, IFilterableAnalyzer
{
public readonly string Column;
public readonly Option<string> Where;