README.md (4 additions, 4 deletions)
@@ -421,7 +421,7 @@ should be conducted in.

| Property | Example value | Description |
|----------------------------------------|------------------|---------------------------------------------------------------------------------------------------------------------------------------------|
| `name` | `read_only` | The name of the test suite (overriden by test-specific values) |
| `name` | `read_only` | The name of the test suite (overridden by test-specific values) |
| `repetitions` | `1` | The number of times this test suite should run (1 or more) |
| `duration` | `300s` | The amount of time this test suite should run for (45m, 1h, 2d, etc.) |
| `clients` | `1` | The number of loadgenerator workers to use in this test suite (hosted on `loadgenerator_hosts` |
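
For context, the suite-level properties in the table above are typically combined into a single test-suite entry. The following is a hedged sketch only, assuming these keys sit alongside the `test:` block shown later in this README; the exact nesting and file name may differ in your simulator version:

```yaml
# Hypothetical sketch: key placement is assumed from the property table above,
# not copied from a real test-suite file shipped with the project.
- name: read_only        # suite name; overridden by test-specific values
  repetitions: 1         # run the suite once
  duration: 300s         # run for 300 seconds (45m, 1h, 2d, ... also accepted)
  clients: 1             # number of loadgenerator workers
  test:
    - class: com.hazelcast.simulator.tests.map.IntByteMapTest
```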
@@ -528,7 +528,7 @@ and preload 1 million entries with a value size of exactly 10 KB, we would edit
```yaml
test:
- class: com.hazelcast.simulator.tests.map.IntByteMapTest
# probabilites and thread count settings
# probabilities and thread count settings
minSize: 10_000
maxSize: 10_000
keyCount: 1_000_000
@@ -1044,7 +1044,7 @@ The `config` object can be configured as shown below:
```yaml
test:
- class: example.SomeTest
config.nestedConfig.valu: 1000
config.nestedConfig.value: 1000
```

If a property is not used in a test, the test fails during its startup. The reason is that if you would make a typing
@@ -1703,7 +1703,7 @@ When testing the throughput, results are constrained by factors including CPU, m
Therefore, it's crucial to know these constraints and analyse the test results in their context.

Cloud providers specify the availability of CPU and memory for different instance types,
howeveer they are much less verbose on network-related limits.
however they are much less verbose on network-related limits.

There are two main limitations in play related to network resources: bandwidth (bits/s) and packet count (packets/s).
Hazelcast-simulator contains a tool that allows measuring the limits of bandwidth and packet count, based on `iperf3`.
@@ -1,2 +1,2 @@
# Agents go here. In case of a static configuration, add the ip address of each machine on a seperate line.
# Agents go here. In case of a static configuration, add the ip address of each machine on a separate line.
# For a local setup nothing needs
java/drivers/driver-hazelcast4plus/conf/worker.sh (1 addition, 1 deletion)
@@ -56,7 +56,7 @@ while IFS='=' read -r key value; do
done < "parameters"

# Determine if persistence is enabled based on mount_volume and mount_path
# If ony one is set, this is a misconfiguration and we exit to warn the user.
# If only one is set, this is a misconfiguration and we exit to warn the user.
PERSISTENCE_ENABLED=false
if [[ -n "${mount_volume:-}" && -n "${mount_path:-}" ]]; then
PERSISTENCE_ENABLED=true
@@ -24,7 +24,7 @@ public class IdentifiedDataSerializableFactory implements DataSerializableFactor

public static final int SAMPLE_STRING_TYPE = 1;
public static final int SAMPLE_LONG_TYPE = 2;
public static final int SAMPLE_MUTLIPLE_INTS_TYPE = 3;
public static final int SAMPLE_MULTIPLE_INTS_TYPE = 3;

@Override
public IdentifiedDataSerializable create(int typeId) {
@@ -33,7 +33,7 @@ public IdentifiedDataSerializable create(int typeId) {
return new IdentifiedDataSerializablePojo();
case SAMPLE_LONG_TYPE:
return new IdentifiedDataWithLongSerializablePojo();
case SAMPLE_MUTLIPLE_INTS_TYPE:
case SAMPLE_MULTIPLE_INTS_TYPE:
return new IdentifiedDataSerializableMultipleIntsPojo();
default:
return null;
@@ -61,7 +61,7 @@ public int getFactoryId() {

@Override
public int getClassId() {
return IdentifiedDataSerializableFactory.SAMPLE_MUTLIPLE_INTS_TYPE;
return IdentifiedDataSerializableFactory.SAMPLE_MULTIPLE_INTS_TYPE;
}

@Override
@@ -48,7 +48,7 @@ public class ITopicTest extends HazelcastTest {
public int listenersPerTopic = 1;
public int maxProcessingDelayNanos = 0;
public int maxPublicationDelayNanos = 1000;
// the maximum period the verification process is going to wait till the correct number of messags
// the maximum period the verification process is going to wait till the correct number of messages
// have been received. A negative value indicates that no verification should be done.
public int maxVerificationTimeSeconds = 60;

@@ -20,7 +20,7 @@
public class NpyArchiveDatasetReader extends DatasetReader {

private Path trainDatasetFilename;
private Path testDatesetFilename;
private Path testDatasetFilename;

public NpyArchiveDatasetReader(String url, String directory, boolean normalizeVector, boolean testOnly) {
super(url, directory, normalizeVector, testOnly);
@@ -29,7 +29,7 @@ public NpyArchiveDatasetReader(String url, String directory, boolean normalizeVe
@Override
protected void preprocessDatasetFile() {
this.trainDatasetFilename = Path.of(workingDirectory.toString(), "vectors.npy");
this.testDatesetFilename = Path.of(workingDirectory.toString(), "tests.jsonl");
this.testDatasetFilename = Path.of(workingDirectory.toString(), "tests.jsonl");

if (!trainDatasetFilename.toFile().exists()) {
unpack();
@@ -66,7 +66,7 @@ protected void parseTrainDataset() {
protected void parseTestDataset() {
try {
var parser = new JsonParser();
List<String> queryList = FileUtils.readLines(testDatesetFilename.toFile(), Charset.defaultCharset());
List<String> queryList = FileUtils.readLines(testDatasetFilename.toFile(), Charset.defaultCharset());
int size = queryList.size();
var searchVectors = new float[size][dimension];
var searchClosestIds = new int[size][];
@@ -76,7 +76,7 @@ public void recordValue(long latencyNanos) {

// Negative values should normally not happen.
// But it could happen when the clock jump or when there is an
// overflow. So lets convert it to a postive value and record it.
// overflow. So lets convert it to a positive value and record it.
if (latencyNanos == Long.MIN_VALUE) {
latencyNanos = HIGHEST_TRACKABLE_VALUE_NANOS;
} else {
@@ -74,7 +74,7 @@ public Connection newConnection(String brokerURL, ExceptionListener exceptionLis
private String toUrl(String brokerURL) {
// here we configure the 'failover'
// http://activemq.apache.org/failover-transport-reference.html
// In this case we'll allow for 30 attempts with a maximum of 1 second between the attemps.
// In this case we'll allow for 30 attempts with a maximum of 1 second between the attempts.
// so if we can't send in 30 seconds; give up.
return "failover:(" + brokerURL + ")?initialReconnectDelay=100"
+ "&maxReconnectAttempts=" + maxReconnectAttempts
@@ -33,7 +33,7 @@ public void test_methodProbabilitiesToMethodRatios() {
}

@Test
public void test_methodProbabilitiesToMethodRatios_highPricision() {
public void test_methodProbabilitiesToMethodRatios_highPrecision() {
int[] ratios;

ratios = methodProbabilitiesToMethodRatios(0.1, 0.9);
@@ -68,7 +68,7 @@ public void test_methodProbabilitiesToMethodRatios_highPricision() {
}

@Test
public void test_methodProbabilitiesToMethodRatios_simplication() {
public void test_methodProbabilitiesToMethodRatios_simplification() {
int[] ratios;

ratios = methodProbabilitiesToMethodRatios(0.10, 0.90);
@@ -41,7 +41,7 @@ public void loadAsDouble_existing() {
}

@Test
public void loadAsDouble_existing_wihUnderscores() {
public void loadAsDouble_existing_withUnderscores() {
TestCase testCase = new TestCase("foo")
.setProperty("doubleValue", "5_0d");
PropertyBinding binding = new PropertyBinding(testCase);
src/perf_analysis_cli.py (3 additions, 3 deletions)
@@ -246,7 +246,7 @@ def __init__(self, argv):
help="The number of permutations for the change point detection", type=int, default=100)
parser.add_argument("--pvalue", nargs=1,
help="The pvalue for the change point detection", type=float, default=0.05)
parser.add_argument("--n_std_treshhold", nargs=1,
parser.add_argument("--n_std_threshold", nargs=1,
help="The number of standard deviations away from the mean to be considered an anomaly",
type=float, default=4)
parser.add_argument("-o", "--output", help="The directory to write the output", nargs=1,
@@ -273,7 +273,7 @@ def __init__(self, argv):
exit_with_error("pvalue can't be larger than 1")
self.anomalies_per_metric = {}
self.changepoints_per_metric = {}
self.n_std_treshhold = args.n_std_treshhold
self.n_std_threshold = args.n_std_threshold

self.trim()
self.changepoint_detection()
@@ -340,7 +340,7 @@ def make_plots(self):
def anomaly_detection(self):
info("Anomaly detection")
for metric, ts in self.ts_per_metric.items():
aps = anomaly_detection(ts, min_history_length=10, max_n=self.n_std_treshhold)
aps = anomaly_detection(ts, min_history_length=10, max_n=self.n_std_threshold)
self.anomalies_per_metric[metric] = aps

def changepoint_detection(self):
src/simulator/perftest_report_common.py (6 additions, 6 deletions)
@@ -51,8 +51,8 @@ def __init__(self, report_dir):


class ColumnDesc:
seperator = "::"
kv_seperator = "=="
separator = "::"
kv_separator = "=="

def __init__(self, group, metric, attributes=None):
if attributes is None:
@@ -62,22 +62,22 @@ def __init__(self, group, metric, attributes=None):
self.attributes = attributes

def to_string(self):
result = f"{self.group}{ColumnDesc.seperator}{self.metric_id}"
result = f"{self.group}{ColumnDesc.separator}{self.metric_id}"

if self.attributes is not None:
for key, value in self.attributes.items():
if not value is None:
result = result + f"{ColumnDesc.seperator}{key}{ColumnDesc.kv_seperator}{value}"
result = result + f"{ColumnDesc.separator}{key}{ColumnDesc.kv_separator}{value}"
return result

@staticmethod
def from_string(column_name):
args = column_name.split(ColumnDesc.seperator)
args = column_name.split(ColumnDesc.separator)
group = args[0]
metric = args[1]
attributes = {}
for k in range(2, len(args)):
pair = args[k].split(ColumnDesc.kv_seperator)
pair = args[k].split(ColumnDesc.kv_separator)
attributes[pair[0]] = pair[1]

return ColumnDesc(group, metric, attributes)
templates/hazelcast5-cp-ec2/inventory_plan.yaml (6 additions, 6 deletions)
@@ -26,28 +26,28 @@ nodes:
count: 3
instance_type: c5.4xlarge
# default AWS AMI
# ami: ami-05cafdf7c9f772ad2
# ami: ami-0083ee179c14acc6a
# ubuntu
ami: ami-0d527b8c289b4af7f
ami: ami-0083ee179c14acc6a
user: ubuntu
tenancy: null

loadgenerators:
count: 1
instance_type: c5.4xlarge
# default AWS AMI
# ami: ami-05cafdf7c9f772ad2
# ami: ami-0083ee179c14acc6a
# ubuntu
ami: ami-0d527b8c289b4af7f
ami: ami-0083ee179c14acc6a
user: ubuntu
tenancy: null

mc:
instance_type: c5.4xlarge
count: 1
# default AWS AMI
# ami: ami-05cafdf7c9f772ad2
# ami: ami-0083ee179c14acc6a
# ubuntu
ami: ami-0d527b8c289b4af7f
ami: ami-0083ee179c14acc6a
user: ubuntu
tenancy: null
templates/hazelcast5-ec2/inventory_plan.yaml (3 additions, 3 deletions)
@@ -25,20 +25,20 @@ keypair:
nodes:
count: 1
instance_type: c5.9xlarge
ami: ami-04e601abe3e1a910f
ami: ami-0083ee179c14acc6a
user: ubuntu
tenancy: null

loadgenerators:
count: 1
instance_type: c5.9xlarge
ami: ami-04e601abe3e1a910f
ami: ami-0083ee179c14acc6a
user: ubuntu
tenancy: null

mc:
instance_type: c5.4xlarge
count: 0
ami: ami-04e601abe3e1a910f
ami: ami-0083ee179c14acc6a
user: ubuntu
tenancy: null
templates/hazelcast5-hd-ec2/inventory_plan.yaml (6 additions, 6 deletions)
@@ -26,31 +26,31 @@ nodes:
count: 1
instance_type: c5.4xlarge
# default AWS AMI
# ami: ami-05cafdf7c9f772ad2
# ami: ami-0083ee179c14acc6a
# user: ec2-user
# ubuntu
ami: ami-04e601abe3e1a910f
ami: ami-0083ee179c14acc6a
user: ubuntu
tenancy: null

loadgenerators:
count: 1
instance_type: c5.4xlarge
# default AWS AMI
# ami: ami-05cafdf7c9f772ad2
# ami: ami-0083ee179c14acc6a
# user: ec2-user
# ubuntu
ami: ami-04e601abe3e1a910f
ami: ami-0083ee179c14acc6a
user: ubuntu
tenancy: null

mc:
instance_type: c5.4xlarge
count: 1
# default AWS AMI
# ami: ami-05cafdf7c9f772ad2
# ami: ami-0083ee179c14acc6a
# user: ec2-user
# ubuntu
ami: ami-04e601abe3e1a910f
ami: ami-0083ee179c14acc6a
user: ubuntu
tenancy: null
templates/hazelcast5-sql-ec2-tstore/inventory_plan.yaml (3 additions, 3 deletions)
@@ -21,19 +21,19 @@ keypair:
nodes:
count: 1
instance_type: i3.xlarge
ami: ami-04505e74c0741db8d
ami: ami-034568121cfdea9c3
user: ubuntu
tenancy: null

loadgenerators:
count: 1
instance_type: c5.2xlarge
ami: ami-04505e74c0741db8d
ami: ami-034568121cfdea9c3
user: ubuntu
tenancy: null

mc:
instance_type: c5.4xlarge
count: 1
ami: ami-04505e74c0741db8d
ami: ami-034568121cfdea9c3
user: ubuntu
templates/hazelcast5-sql-ec2/inventory_plan.yaml (3 additions, 3 deletions)
@@ -23,19 +23,19 @@ keypair:
nodes:
count: 4
instance_type: c5.9xlarge
ami: ami-04e601abe3e1a910f
ami: ami-0083ee179c14acc6a
user: ubuntu
tenancy: null

loadgenerators:
count: 1
instance_type: c5.9xlarge
ami: ami-04e601abe3e1a910f
ami: ami-0083ee179c14acc6a
user: ubuntu
tenancy: null

mc:
instance_type: c5.4xlarge
count: 0
ami: ami-04e601abe3e1a910f
ami: ami-0083ee179c14acc6a
user: ubuntu
templates/hazelcast5-sql-prunability-ec2/inventory_plan.yaml (3 additions, 3 deletions)
@@ -23,19 +23,19 @@ keypair:
nodes:
count: 5
instance_type: c5.9xlarge
ami: ami-04e601abe3e1a910f
ami: ami-0083ee179c14acc6a
user: ubuntu
tenancy: null

loadgenerators:
count: 1
instance_type: c5.4xlarge
ami: ami-04e601abe3e1a910f
ami: ami-0083ee179c14acc6a
user: ubuntu
tenancy: null

mc:
instance_type: c5.4xlarge
count: 0
ami: ami-04e601abe3e1a910f
ami: ami-0083ee179c14acc6a
user: ubuntu