> allConcepts, Table
/**
* select, then send buckets.
*/
- public static WorkerId sendBucket(Bucket bucket, WorkerInformation responsibleWorker) {
+ public static WriteFuture sendBucket(Bucket bucket, WorkerInformation responsibleWorker) {
responsibleWorker.awaitFreeJobQueue();
log.trace("Sending Bucket[{}] to {}", bucket.getId(), responsibleWorker.getId());
- responsibleWorker.send(new ImportBucket(bucket.getId().toString(), bucket));
+ return responsibleWorker.send(new ImportBucket(bucket.getId().toString(), bucket));
- return responsibleWorker.getId();
}
@SneakyThrows
@Override
public void addImport(Namespace namespace, InputStream inputStream) {
- handleImport(namespace, inputStream, false, datasetRegistry);
+ handleImport(namespace, inputStream, false);
}
@Override
diff --git a/backend/src/main/java/com/bakdata/conquery/models/auth/AuthorizationController.java b/backend/src/main/java/com/bakdata/conquery/models/auth/AuthorizationController.java
index 655e93da5b..72b3f186b2 100644
--- a/backend/src/main/java/com/bakdata/conquery/models/auth/AuthorizationController.java
+++ b/backend/src/main/java/com/bakdata/conquery/models/auth/AuthorizationController.java
@@ -34,11 +34,12 @@
import org.apache.shiro.SecurityUtils;
import org.apache.shiro.authc.pam.FirstSuccessfulStrategy;
import org.apache.shiro.authc.pam.ModularRealmAuthenticator;
+import org.apache.shiro.lang.util.LifecycleUtils;
import org.apache.shiro.mgt.DefaultSecurityManager;
import org.apache.shiro.realm.AuthorizingRealm;
import org.apache.shiro.realm.Realm;
-import org.apache.shiro.util.LifecycleUtils;
import org.glassfish.hk2.utilities.binding.AbstractBinder;
+import org.jetbrains.annotations.NotNull;
/**
* The central class for the initialization of authorization and authentication.
@@ -78,7 +79,7 @@ public final class AuthorizationController implements Managed {
@Getter
private DropwizardResourceConfig unprotectedAuthAdmin;
- public AuthorizationController(MetaStorage storage, ConqueryConfig config, Environment environment, AdminServlet adminServlet) {
+ public AuthorizationController(@NotNull MetaStorage storage, @NotNull ConqueryConfig config, @NotNull Environment environment, AdminServlet adminServlet) {
this.storage = storage;
this.config = config;
this.environment = environment;
diff --git a/backend/src/main/java/com/bakdata/conquery/models/auth/basic/LocalAuthenticationRealm.java b/backend/src/main/java/com/bakdata/conquery/models/auth/basic/LocalAuthenticationRealm.java
index ca4759999b..e015226e9b 100644
--- a/backend/src/main/java/com/bakdata/conquery/models/auth/basic/LocalAuthenticationRealm.java
+++ b/backend/src/main/java/com/bakdata/conquery/models/auth/basic/LocalAuthenticationRealm.java
@@ -41,8 +41,8 @@
import org.apache.shiro.authc.AuthenticationToken;
import org.apache.shiro.authc.CredentialsException;
import org.apache.shiro.authc.IncorrectCredentialsException;
+import org.apache.shiro.lang.util.Destroyable;
import org.apache.shiro.realm.AuthenticatingRealm;
-import org.apache.shiro.util.Destroyable;
/**
* This realm stores credentials in a local database ({@link XodusStore}). Upon
@@ -52,7 +52,7 @@
* authorization related user information that is saved in the
* {@link MetaStorage}. So adding or removing a user in this realm does
* not change the {@link MetaStorage}. {@link Conquery} interacts with
- * this realm using the Shiro frame work. However, endusers can interface it
+ * this realm using the Shiro framework. However, endusers can interface it
* through specific endpoints that are registerd by this realm.
*/
@Slf4j
diff --git a/backend/src/main/java/com/bakdata/conquery/models/config/ClusterConfig.java b/backend/src/main/java/com/bakdata/conquery/models/config/ClusterConfig.java
index f404aa1a48..e94c1ee3ce 100644
--- a/backend/src/main/java/com/bakdata/conquery/models/config/ClusterConfig.java
+++ b/backend/src/main/java/com/bakdata/conquery/models/config/ClusterConfig.java
@@ -1,16 +1,30 @@
package com.bakdata.conquery.models.config;
+import java.io.IOException;
import java.net.InetAddress;
+import java.net.InetSocketAddress;
import jakarta.validation.Valid;
+import jakarta.validation.constraints.Max;
import jakarta.validation.constraints.Min;
import jakarta.validation.constraints.NotNull;
+import com.bakdata.conquery.io.mina.ChunkingFilter;
+import com.bakdata.conquery.io.mina.JacksonProtocolDecoder;
+import com.bakdata.conquery.io.mina.JacksonProtocolEncoder;
+import com.bakdata.conquery.io.mina.MdcFilter;
+import com.bakdata.conquery.models.messages.network.NetworkMessage;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.databind.ObjectMapper;
import io.dropwizard.core.Configuration;
-import io.dropwizard.util.DataSize;
import io.dropwizard.util.Duration;
import io.dropwizard.validation.PortRange;
import lombok.Getter;
import lombok.Setter;
+import org.apache.mina.core.service.IoHandler;
+import org.apache.mina.filter.codec.ProtocolCodecFilter;
+import org.apache.mina.transport.socket.DefaultSocketSessionConfig;
+import org.apache.mina.transport.socket.nio.NioSocketAcceptor;
+import org.apache.mina.transport.socket.nio.NioSocketConnector;
@Getter
@Setter
@@ -22,7 +36,7 @@ public class ClusterConfig extends Configuration {
private InetAddress managerURL = InetAddress.getLoopbackAddress();
@Valid
@NotNull
- private MinaConfig mina = new MinaConfig();
+ private DefaultSocketSessionConfig mina = new DefaultSocketSessionConfig();
@Min(1)
private int entityBucketSize = 1000;
@@ -30,6 +44,24 @@ public class ClusterConfig extends Configuration {
private Duration heartbeatTimeout = Duration.minutes(1);
private Duration connectRetryTimeout = Duration.seconds(30);
+ /**
+ * Defines the maximum buffer size, including 4 bytes for the header. Objects larger than this size cannot be sent over the cluster.
+ *
+ * This should only be changed for testing purposes.
+ */
+ @Max(Integer.MAX_VALUE - 4)
+ @Min(64) // smaller values are not practical
+ private int maxIoBufferSizeBytes = Integer.MAX_VALUE - 4;
+
+ /**
+ * Defines the initial buffer allocation size. A larger value can reduce reallocations, but increases memory demand.
+ *
+ * This should only be changed for testing purposes.
+ */
+ @Max(Integer.MAX_VALUE - 4)
+ @Min(64) // Mina's default
+ private int initialIoBufferSizeBytes = 8192; // 8 KiB
+
/**
* @see com.bakdata.conquery.models.messages.namespaces.specific.CollectColumnValuesJob
*
@@ -44,27 +76,6 @@ public class ClusterConfig extends Configuration {
*/
private int networkSessionMaxQueueLength = 5;
- /**
- * {@link org.apache.mina.core.buffer.IoBuffer} size, that mina allocates.
- * We assume a pagesize of 4096 bytes == 4 kibibytes
- */
- @NotNull
- @Valid
- private DataSize messageChunkSize = DataSize.kibibytes(4);
-
- /**
- * How long the soft pool cleaner waits before reducing the pool size down to softPoolBaselineSize.
- */
- @NotNull
- @Valid
- private Duration softPoolCleanerPause = Duration.seconds(10);
-
- /**
- * The number of soft references the soft pool should retain after cleaning.
- * The actual number of {@link org.apache.mina.core.buffer.IoBuffer}
- */
- private long softPoolBaselineSize = 100;
-
/**
* Amount of backpressure before jobs can volunteer to block to send messages to their shards.
*
@@ -72,4 +83,56 @@ public class ClusterConfig extends Configuration {
*/
@Min(0)
private int backpressure = 1500;
+
+ @JsonIgnore
+ public NioSocketConnector getClusterConnector(ObjectMapper om, IoHandler ioHandler, String mdcLocation) {
+
+ final NioSocketConnector connector = new NioSocketConnector();
+
+ JacksonProtocolEncoder encoder = new JacksonProtocolEncoder(om.writerFor(NetworkMessage.class));
+ encoder.setMaxObjectSize(maxIoBufferSizeBytes);
+ encoder.setInitialBufferCapacityBytes(initialIoBufferSizeBytes);
+
+ ProtocolCodecFilter codecFilter = new ProtocolCodecFilter(
+ encoder,
+ new JacksonProtocolDecoder(om.readerFor(NetworkMessage.class))
+ );
+ connector.getFilterChain().addFirst("mdc", new MdcFilter(mdcLocation));
+ if (mina.getSendBufferSize() > 0) {
+ connector.getFilterChain().addLast("chunk", new ChunkingFilter(mina.getSendBufferSize()));
+ }
+ connector.getFilterChain().addLast("codec", codecFilter);
+
+ connector.setHandler(ioHandler);
+ connector.getSessionConfig().setAll(getMina());
+
+ return connector;
+ }
+
+ @JsonIgnore
+ public NioSocketAcceptor getClusterAcceptor(ObjectMapper om, IoHandler ioHandler, String mdcLocation) throws IOException {
+ NioSocketAcceptor acceptor = new NioSocketAcceptor();
+
+
+ JacksonProtocolEncoder encoder = new JacksonProtocolEncoder(om.writerFor(NetworkMessage.class));
+ encoder.setMaxObjectSize(maxIoBufferSizeBytes);
+ encoder.setInitialBufferCapacityBytes(initialIoBufferSizeBytes);
+
+ ProtocolCodecFilter codecFilter = new ProtocolCodecFilter(
+ encoder,
+ new JacksonProtocolDecoder(om.readerFor(NetworkMessage.class))
+ );
+
+ acceptor.getFilterChain().addFirst("mdc", new MdcFilter(mdcLocation));
+ if (mina.getSendBufferSize() > 0) {
+ acceptor.getFilterChain().addLast("chunk", new ChunkingFilter(mina.getSendBufferSize()));
+ }
+ acceptor.getFilterChain().addLast("codec", codecFilter);
+
+ acceptor.setHandler(ioHandler);
+ acceptor.getSessionConfig().setAll(getMina());
+ acceptor.bind(new InetSocketAddress(getPort()));
+
+ return acceptor;
+ }
}
diff --git a/backend/src/main/java/com/bakdata/conquery/models/config/XodusStoreFactory.java b/backend/src/main/java/com/bakdata/conquery/models/config/XodusStoreFactory.java
index 4488c38e11..236cf62019 100644
--- a/backend/src/main/java/com/bakdata/conquery/models/config/XodusStoreFactory.java
+++ b/backend/src/main/java/com/bakdata/conquery/models/config/XodusStoreFactory.java
@@ -221,7 +221,7 @@ public SingletonStore createIdMappingStore(String pathName, ObjectM
openStoresInEnv.put(bigStore.getDataXodusStore().getEnvironment(), bigStore.getDataXodusStore());
openStoresInEnv.put(bigStore.getMetaXodusStore().getEnvironment(), bigStore.getMetaXodusStore());
- return new SingletonStore<>(bigStore);
+ return new SingletonStore<>(new CachedStore<>(bigStore));
}
}
diff --git a/backend/src/main/java/com/bakdata/conquery/models/forms/util/DateContext.java b/backend/src/main/java/com/bakdata/conquery/models/forms/util/DateContext.java
index 1a1c1c2f82..96270441f3 100644
--- a/backend/src/main/java/com/bakdata/conquery/models/forms/util/DateContext.java
+++ b/backend/src/main/java/com/bakdata/conquery/models/forms/util/DateContext.java
@@ -96,7 +96,7 @@ public static Function> getDateRangeSubdivider(Align
int alignedPerResolution = resolution.getAmountForAlignment(alignment).orElseThrow(() -> new ConqueryError.ExecutionCreationPlanDateContextError(alignment, resolution));
if (alignedPerResolution == 1) {
- // When the alignment fits the resolution we can use the the alignment subdivision directly
+ // When the alignment fits the resolution we can use the alignment subdivision directly
return (dateRange) -> alignment.getSubdivider().apply(dateRange);
}
diff --git a/backend/src/main/java/com/bakdata/conquery/models/messages/namespaces/specific/CollectColumnValuesJob.java b/backend/src/main/java/com/bakdata/conquery/models/messages/namespaces/specific/CollectColumnValuesJob.java
index 95ca03ecf4..526be8e7b5 100644
--- a/backend/src/main/java/com/bakdata/conquery/models/messages/namespaces/specific/CollectColumnValuesJob.java
+++ b/backend/src/main/java/com/bakdata/conquery/models/messages/namespaces/specific/CollectColumnValuesJob.java
@@ -36,6 +36,7 @@
import lombok.Getter;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang3.concurrent.BasicThreadFactory;
import org.apache.mina.core.future.WriteFuture;
/**
@@ -67,7 +68,8 @@ public void react(Worker context) throws Exception {
final Map> table2Buckets = context.getStorage().getAllBuckets()
.collect(Collectors.groupingBy(Bucket::getTable));
- final ListeningExecutorService jobsExecutorService = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(MAX_THREADS));
+ BasicThreadFactory threadFactory = (new BasicThreadFactory.Builder()).namingPattern(this.getClass().getSimpleName() + "-Worker-%d").build();
+ final ListeningExecutorService jobsExecutorService = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(MAX_THREADS, threadFactory));
final AtomicInteger done = new AtomicInteger();
@@ -84,7 +86,7 @@ public void react(Worker context) throws Exception {
.flatMap(bucket -> ((StringStore) bucket.getStore(column)).streamValues())
.collect(Collectors.toSet());
- log.trace("Finished collections values for column {} as number {}", column, done.incrementAndGet());
+ log.trace("Finished collecting {} values for column {}", values.size(), column);
// Chunk values, to produce smaller messages
Iterable> partition = Iterables.partition(values, columValueChunkSize);
@@ -93,14 +95,18 @@ public void react(Worker context) throws Exception {
column.getId(), values.size(), columValueChunkSize
);
+ int i = 0;
for (List chunk : partition) {
// Send values to manager
RegisterColumnValues message =
new RegisterColumnValues(getMessageId(), context.getInfo().getId(), column.getId(), chunk);
WriteFuture send = context.send(message);
- send.awaitUninterruptibly();
+ log.trace("Finished sending chunk {} for column '{}'", i++, column.getId());
}
+
+ getProgressReporter().report(1);
+ log.trace("Finished collecting values for column {} as number {}", column, done.incrementAndGet());
});
}
)
@@ -124,6 +130,7 @@ public void react(Worker context) throws Exception {
// We may do this, because we own this specific ExecutorService.
jobsExecutorService.shutdown();
+ getProgressReporter().done();
log.info("Finished collecting values from these columns: {}", Arrays.toString(columns.toArray()));
context.send(new FinalizeReactionMessage(getMessageId(), context.getInfo().getId()));
diff --git a/backend/src/main/java/com/bakdata/conquery/models/messages/namespaces/specific/UpdateMatchingStatsMessage.java b/backend/src/main/java/com/bakdata/conquery/models/messages/namespaces/specific/UpdateMatchingStatsMessage.java
index 94af290f66..c59b181ea2 100644
--- a/backend/src/main/java/com/bakdata/conquery/models/messages/namespaces/specific/UpdateMatchingStatsMessage.java
+++ b/backend/src/main/java/com/bakdata/conquery/models/messages/namespaces/specific/UpdateMatchingStatsMessage.java
@@ -80,7 +80,6 @@ public void execute() throws Exception {
calculateConceptMatches(resolved, matchingStats, worker);
final WriteFuture writeFuture = worker.send(new UpdateElementMatchingStats(worker.getInfo().getId(), matchingStats));
- writeFuture.awaitUninterruptibly();
progressReporter.report(1);
}, worker.getJobsExecutorService())
diff --git a/backend/src/main/java/com/bakdata/conquery/models/query/results/FormShardResult.java b/backend/src/main/java/com/bakdata/conquery/models/query/results/FormShardResult.java
index 4984115649..5404597fa7 100644
--- a/backend/src/main/java/com/bakdata/conquery/models/query/results/FormShardResult.java
+++ b/backend/src/main/java/com/bakdata/conquery/models/query/results/FormShardResult.java
@@ -27,7 +27,7 @@ public FormShardResult(ManagedExecutionId formId, ManagedExecutionId subQueryId,
* Distribute the result to a sub query.
*/
@Override
- public void addResult(DistributedExecutionManager executionManager) {
+ protected void addResult(DistributedExecutionManager executionManager) {
final ManagedInternalForm> managedInternalForm = (ManagedInternalForm>) executionManager.getExecution(getFormId());
final ManagedQuery subQuery = managedInternalForm.getSubQuery(getQueryId());
diff --git a/backend/src/main/java/com/bakdata/conquery/models/query/results/ShardResult.java b/backend/src/main/java/com/bakdata/conquery/models/query/results/ShardResult.java
index 8c5a192593..4c827857d6 100644
--- a/backend/src/main/java/com/bakdata/conquery/models/query/results/ShardResult.java
+++ b/backend/src/main/java/com/bakdata/conquery/models/query/results/ShardResult.java
@@ -82,7 +82,7 @@ public synchronized void finish(@NonNull List results, Optional> nodeProvider;
- public void addRoles(List roles) {
-
- for (Role role : roles) {
- try {
- addRole(role);
- }
- catch (Exception e) {
- log.error(String.format("Failed to add Role: %s", role), e);
- }
- }
- }
-
public synchronized void addRole(Role role) throws JSONException {
ValidatorHelper.failOnError(log, validator.validate(role));
log.trace("New role:\tLabel: {}\tName: {}\tId: {} ", role.getLabel(), role.getName(), role.getId());
@@ -296,7 +284,7 @@ public Object executeScript(String script) {
groovy.setProperty("managerNode", getManagerNode());
groovy.setProperty("datasetRegistry", getDatasetRegistry());
groovy.setProperty("jobManager", getJobManager());
- groovy.setProperty("config", getConfig());
+ groovy.setProperty("conqueryConfig", getConfig());
groovy.setProperty("storage", getStorage());
try {
diff --git a/backend/src/main/java/com/bakdata/conquery/tasks/PermissionCleanupTask.java b/backend/src/main/java/com/bakdata/conquery/tasks/PermissionCleanupTask.java
index 98c00a29ad..784e8f9620 100644
--- a/backend/src/main/java/com/bakdata/conquery/tasks/PermissionCleanupTask.java
+++ b/backend/src/main/java/com/bakdata/conquery/tasks/PermissionCleanupTask.java
@@ -133,7 +133,7 @@ public static & Owned, ID extends Id> int del
if (wpermission.getInstances().size() != 1) {
log.trace("Skipping permission {} because it refers to multiple instances.", wpermission);
}
- ID executionId = null;
+ ID executionId;
try {
executionId = idParser.parse(wpermission.getInstances().iterator().next());
}
@@ -144,14 +144,17 @@ public static & Owned, ID extends Id> int del
E execution = instanceStorageExtractor.apply(executionId);
if (execution == null) {
- log.trace("The execution referenced in permission {} does not exist. Skipping permission");
+ log.trace("The execution referenced in permission {} does not exist. Skipping permission", wpermission);
continue;
}
if (!user.isOwner(execution)) {
- log.trace("The user is not owner of the instance. Keeping the permission. User: {}, Owner: {}, Instance: {}, Permission: {}", user.getId(), execution
- .getOwner(), execution
- .getId(), wpermission);
+ log.trace("The user is not owner of the instance. Keeping the permission. User: {}, Owner: {}, Instance: {}, Permission: {}",
+ user.getId(),
+ execution.getOwner(),
+ execution.getId(),
+ wpermission
+ );
continue;
}
diff --git a/backend/src/main/java/com/bakdata/conquery/util/SoftPool.java b/backend/src/main/java/com/bakdata/conquery/util/SoftPool.java
deleted file mode 100644
index 9f58bd4d72..0000000000
--- a/backend/src/main/java/com/bakdata/conquery/util/SoftPool.java
+++ /dev/null
@@ -1,93 +0,0 @@
-package com.bakdata.conquery.util;
-
-import java.lang.ref.SoftReference;
-import java.util.concurrent.ConcurrentLinkedDeque;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.function.Supplier;
-
-import com.bakdata.conquery.models.config.ClusterConfig;
-import com.google.common.util.concurrent.Uninterruptibles;
-import lombok.extern.slf4j.Slf4j;
-
-@Slf4j
-public class SoftPool {
-
- private final ConcurrentLinkedDeque> pool = new ConcurrentLinkedDeque<>();
- private final AtomicLong poolSize = new AtomicLong(0);
- private final Supplier supplier;
- private final Thread poolCleaner;
- private final long softPoolBaselineSize;
- private final long cleanerPauseSeconds;
-
- public SoftPool(ClusterConfig config, Supplier supplier) {
- this.supplier = supplier;
-
- softPoolBaselineSize = config.getSoftPoolBaselineSize();
- cleanerPauseSeconds = config.getSoftPoolCleanerPause().toSeconds();
-
- if (softPoolBaselineSize <= 0 || cleanerPauseSeconds <= 0) {
- log.debug("Not creating a Cleaner.");
- poolCleaner = null;
- return;
- }
-
- poolCleaner = new Thread(this::cleanPool, "SoftPool Cleaner");
- // Should not prevent the JVM shutdown -> daemon
- poolCleaner.setDaemon(true);
- poolCleaner.start();
- }
-
- /**
- * Offer/return a reusable object to the pool.
- *
- * @param v the object to return to the pool.
- */
- public void offer(T v) {
- pool.addLast(new SoftReference<>(v));
-
- final long currentPoolSize = poolSize.incrementAndGet();
-
- log.trace("Pool size: {} (offer)", currentPoolSize);
- }
-
- /**
- * Returns a reusable element from the pool if available or
- * returns a new element from the provided supplier.
- */
- public T borrow() {
- SoftReference result;
-
- // First check the pool for available/returned elements
- while ((result = pool.poll()) != null) {
- final long currentPoolSize = poolSize.decrementAndGet();
-
- log.trace("Pool size: {} (borrow)", currentPoolSize);
-
- // The pool had an element, inspect if it is still valid
- final T elem = result.get();
- if (elem != null) {
- // Return valid element
- return elem;
- }
- // Referenced element was already garbage collected. Poll further
- }
- // Pool was empty -- request a new element
- return supplier.get();
- }
-
- /**
- * Trims the pool in a custom interval so that soft references get purged earlier
- */
- private void cleanPool() {
- while (true) {
- Uninterruptibles.sleepUninterruptibly(cleanerPauseSeconds, TimeUnit.SECONDS);
-
- log.trace("Running pool cleaner");
- while (poolSize.get() > softPoolBaselineSize) {
- // Poll until we reached the baseline
- borrow();
- }
- }
- }
-}
diff --git a/backend/src/test/java/com/bakdata/conquery/integration/sql/SqlStandaloneCommand.java b/backend/src/test/java/com/bakdata/conquery/integration/sql/SqlStandaloneCommand.java
index 6504e804a2..5b21e5491b 100644
--- a/backend/src/test/java/com/bakdata/conquery/integration/sql/SqlStandaloneCommand.java
+++ b/backend/src/test/java/com/bakdata/conquery/integration/sql/SqlStandaloneCommand.java
@@ -3,7 +3,6 @@
import java.util.Collections;
import java.util.List;
-import com.bakdata.conquery.Conquery;
import com.bakdata.conquery.commands.ManagerNode;
import com.bakdata.conquery.commands.ShardNode;
import com.bakdata.conquery.commands.StandaloneCommand;
@@ -12,9 +11,9 @@
import com.bakdata.conquery.mode.local.LocalManagerProvider;
import com.bakdata.conquery.models.config.ConqueryConfig;
import com.bakdata.conquery.models.worker.LocalNamespace;
+import com.bakdata.conquery.util.commands.NoOpConquery;
import com.bakdata.conquery.util.io.ConqueryMDC;
import io.dropwizard.core.cli.ServerCommand;
-import io.dropwizard.core.setup.Bootstrap;
import io.dropwizard.core.setup.Environment;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
@@ -24,27 +23,11 @@
@Getter
public class SqlStandaloneCommand extends ServerCommand implements StandaloneCommand {
- private final Conquery conquery;
- private ManagerNode managerNode = new ManagerNode();
+ private final ManagerNode managerNode = new ManagerNode();
private DelegateManager manager;
- private Environment environment;
- public SqlStandaloneCommand(Conquery conquery) {
- super(conquery, "standalone", "starts a sql server and a client at the same time.");
- this.conquery = conquery;
- }
-
- @Override
- public void startStandalone(Environment environment, Namespace namespace, ConqueryConfig config) throws Exception {
- ConqueryMDC.setLocation("ManagerNode");
- log.debug("Starting ManagerNode");
- this.manager = new LocalManagerProvider(new TestSqlDialectFactory()).provideManager(config, environment);
- this.conquery.setManagerNode(managerNode);
- this.conquery.run(manager);
- // starts the Jersey Server
- log.debug("Starting REST Server");
- ConqueryMDC.setLocation(null);
- super.run(environment, namespace, config);
+ public SqlStandaloneCommand() {
+ super(new NoOpConquery(), "standalone", "starts a sql server and a client at the same time.");
}
@Override
@@ -53,23 +36,14 @@ public List getShardNodes() {
}
@Override
- public void run(Bootstrap bootstrap, Namespace namespace, ConqueryConfig configuration) throws Exception {
- environment = new Environment(
- bootstrap.getApplication().getName(),
- bootstrap.getObjectMapper(),
- bootstrap.getValidatorFactory(),
- bootstrap.getMetricRegistry(),
- bootstrap.getClassLoader(),
- bootstrap.getHealthCheckRegistry(),
- configuration
- );
- configuration.getMetricsFactory().configure(environment.lifecycle(), bootstrap.getMetricRegistry());
- configuration.getServerFactory().configure(environment);
-
- bootstrap.run(configuration, environment);
- startStandalone(environment, namespace, configuration);
+ protected void run(Environment environment, Namespace namespace, ConqueryConfig configuration) throws Exception {
+ ConqueryMDC.setLocation("ManagerNode");
+ log.debug("Starting ManagerNode");
+ this.manager = new LocalManagerProvider(new TestSqlDialectFactory()).provideManager(configuration, environment);
+ managerNode.run(manager);
+ // starts the Jersey Server
+ log.debug("Starting REST Server");
+ ConqueryMDC.setLocation(null);
+ super.run(environment, namespace, configuration);
}
-
-
-
}
diff --git a/backend/src/test/java/com/bakdata/conquery/integration/tests/ScriptEndTest.java b/backend/src/test/java/com/bakdata/conquery/integration/tests/ScriptEndTest.java
new file mode 100644
index 0000000000..2aa4ff5f99
--- /dev/null
+++ b/backend/src/test/java/com/bakdata/conquery/integration/tests/ScriptEndTest.java
@@ -0,0 +1,30 @@
+package com.bakdata.conquery.integration.tests;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.net.URI;
+import jakarta.ws.rs.client.Entity;
+import jakarta.ws.rs.core.MediaType;
+import jakarta.ws.rs.core.Response;
+
+import com.bakdata.conquery.integration.IntegrationTest;
+import com.bakdata.conquery.io.storage.MetaStorage;
+import com.bakdata.conquery.resources.admin.rest.AdminResource;
+import com.bakdata.conquery.resources.hierarchies.HierarchyHelper;
+import com.bakdata.conquery.util.support.StandaloneSupport;
+
+public class ScriptEndTest extends IntegrationTest.Simple implements ProgrammaticIntegrationTest {
+ @Override
+ public void execute(StandaloneSupport conquery) throws Exception {
+ final URI scriptUri = HierarchyHelper.hierarchicalPath(conquery.defaultAdminURIBuilder()
+ , AdminResource.class, "executeScript")
+ .build();
+
+ try(Response resp = conquery.getClient().target(scriptUri).request(MediaType.TEXT_PLAIN_TYPE).post(Entity.entity("storage", MediaType.TEXT_PLAIN_TYPE))){
+ assertThat(resp.getStatusInfo().getFamily()).isEqualTo(Response.Status.Family.SUCCESSFUL);
+
+ assertThat(resp.readEntity(String.class))
+ .contains(MetaStorage.class.getSimpleName());
+ }
+ }
+}
diff --git a/backend/src/test/java/com/bakdata/conquery/io/mina/MinaStackTest.java b/backend/src/test/java/com/bakdata/conquery/io/mina/MinaStackTest.java
new file mode 100644
index 0000000000..4c7f35048e
--- /dev/null
+++ b/backend/src/test/java/com/bakdata/conquery/io/mina/MinaStackTest.java
@@ -0,0 +1,280 @@
+package com.bakdata.conquery.io.mina;
+
+import static java.lang.Math.toIntExact;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.fail;
+import static org.awaitility.Awaitility.await;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Stream;
+
+import com.bakdata.conquery.io.cps.CPSType;
+import com.bakdata.conquery.io.jackson.Jackson;
+import com.bakdata.conquery.models.config.ClusterConfig;
+import com.bakdata.conquery.models.messages.network.NetworkMessage;
+import com.bakdata.conquery.models.messages.network.NetworkMessageContext;
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import io.dropwizard.util.DataSize;
+import lombok.EqualsAndHashCode;
+import lombok.Getter;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.mina.core.future.ConnectFuture;
+import org.apache.mina.core.future.WriteFuture;
+import org.apache.mina.core.service.IoHandlerAdapter;
+import org.apache.mina.core.session.IoSession;
+import org.apache.mina.transport.socket.nio.NioSocketAcceptor;
+import org.apache.mina.transport.socket.nio.NioSocketConnector;
+import org.assertj.core.api.Assertions;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+
+@Slf4j
+public class MinaStackTest {
+
+ private static final ClusterConfig CLUSTER_CONFIG = new ClusterConfig();
+ private static final ObjectMapper OM = Jackson.BINARY_MAPPER.copy();
+ private static final ConcurrentLinkedQueue> SERVER_RECEIVED_MESSAGES = new ConcurrentLinkedQueue<>();
+
+ private static NioSocketAcceptor SERVER;
+
+ @BeforeAll
+ public static void beforeAll() throws IOException {
+
+ CLUSTER_CONFIG.setPort(0);
+ CLUSTER_CONFIG.setMaxIoBufferSizeBytes(toIntExact(DataSize.mebibytes(10).toBytes()));
+
+ // This enables the Chunking filter, which triggers for messages > 1 MebiByte
+ CLUSTER_CONFIG.getMina().setSendBufferSize(toIntExact(DataSize.mebibytes(1).toBytes()));
+
+ // Server
+ SERVER = CLUSTER_CONFIG.getClusterAcceptor(OM, new IoHandlerAdapter() {
+ @Override
+ public void sessionOpened(IoSession session) {
+ log.info("Session to {} established", session.getRemoteAddress());
+ }
+
+ @Override
+ public void messageReceived(IoSession session, Object message) {
+ SERVER_RECEIVED_MESSAGES.add((NetworkMessage>) message);
+ log.trace("Received {} messages", SERVER_RECEIVED_MESSAGES.size());
+ }
+
+ @Override
+ public void exceptionCaught(IoSession session, Throwable cause) {
+ fail("Server caught an Exception", cause);
+ }
+ }, "Server");
+
+ }
+
+ @BeforeEach
+ public void beforeEach() {
+ SERVER_RECEIVED_MESSAGES.clear();
+ }
+
+ @Test
+ void smokeTest() {
+
+ NioSocketConnector client = CLUSTER_CONFIG.getClusterConnector(OM, new IoHandlerAdapter() {
+ @Override
+ public void sessionOpened(IoSession session) {
+ log.info("Session to {} established", session.getRemoteAddress());
+ }
+ }, "Client");
+
+ try {
+
+ ConnectFuture connect = client.connect(SERVER.getLocalAddress());
+
+ connect.awaitUninterruptibly();
+ IoSession clientSession = connect.getSession();
+
+ NetworkMessage> input = new TestMessage(RandomStringUtils.randomAscii(1000));
+
+ WriteFuture write = clientSession.write(input);
+
+ write.awaitUninterruptibly();
+
+ await().atMost(1, TimeUnit.SECONDS).until(() -> !SERVER_RECEIVED_MESSAGES.isEmpty());
+ assertThat(SERVER_RECEIVED_MESSAGES).containsExactlyInAnyOrder(input);
+
+ clientSession.closeNow().awaitUninterruptibly();
+ }
+ finally {
+ client.dispose();
+
+ }
+ }
+
+ /**
+ * This test requires a noticeable amount of RAM because we hold the messages twice to compare sender and receiver payloads.
+ */
+ @Test
+ void concurrentWriting(){
+ final int clientCount = 20;
+ final int messagesPerClient = 500;
+ final int minMessageLength = toIntExact(DataSize.kibibytes(1).toBytes());
+ final int maxMessageLength = toIntExact(DataSize.kibibytes(100).toBytes());
+
+ final ConcurrentLinkedQueue> messagesWritten = new ConcurrentLinkedQueue<>();
+ final List> clientThreads = new ArrayList<>();
+
+ ExecutorService executorService = Executors.newFixedThreadPool(10);
+ try {
+ for (int clientI = 0; clientI < clientCount; clientI++) {
+ final int clientNumber = clientI;
+ CompletableFuture> clientThread = CompletableFuture.runAsync(() -> {
+ NioSocketConnector client = CLUSTER_CONFIG.getClusterConnector(OM, new IoHandlerAdapter() {
+ @Override
+ public void sessionOpened(IoSession session) {
+ log.info("Session to {} established", session.getRemoteAddress());
+ }
+
+ @Override
+ public void messageSent(IoSession session, Object message) {
+ log.trace("Message written: {} bytes", ((TestMessage)message).data.getBytes().length);
+ }
+
+ @Override
+ public void exceptionCaught(IoSession session, Throwable cause) {
+ fail("Client[%d] caught an Exception".formatted(clientNumber), cause);
+ }
+ }, "Client");
+ try {
+ // Connect
+ ConnectFuture connect = client.connect(SERVER.getLocalAddress());
+ connect.awaitUninterruptibly();
+ IoSession clientSession = connect.getSession();
+
+ for (int i = 0; i < messagesPerClient; i++) {
+ NetworkMessage> input = new TestMessage(RandomStringUtils.randomAscii(minMessageLength, maxMessageLength));
+
+ WriteFuture writeFuture = clientSession.write(input);
+ writeFuture.addListener((f) -> {
+ if (!((WriteFuture) f).isWritten()) {
+ fail("Failed to write a message");
+ }
+ messagesWritten.add(input);
+ });
+ writeFuture.awaitUninterruptibly();
+ }
+ }
+ finally {
+ client.dispose();
+ }
+ }, executorService);
+ clientThreads.add(clientThread);
+ }
+
+ // Wait until all clients completed writing
+ CompletableFuture.allOf(clientThreads.toArray(new CompletableFuture[0])).join();
+
+ log.info("Waiting to receive all send messages");
+ // Wait until all messages are received
+ await().atMost(10,TimeUnit.SECONDS).alias("Send and received same amount of messages").until(() -> SERVER_RECEIVED_MESSAGES.size() == messagesWritten.size());
+
+ // Check that the messages are correct
+ assertThat(SERVER_RECEIVED_MESSAGES).containsExactlyInAnyOrderElementsOf(messagesWritten);
+
+ }
+ finally {
+ executorService.shutdownNow();
+ }
+
+ }
+
+	/**
+	 * Payload sizes for {@link #messageSizes}, paired with the expectation whether
+	 * a message of that size can be written successfully.
+	 * NOTE(review): return type parameter restored — the pasted diff had lost the
+	 * contents of angle brackets.
+	 */
+	private static Stream<Arguments> dataSizes() {
+		return Stream.of(
+				Arguments.of(DataSize.bytes(10), true),
+				Arguments.of(DataSize.kibibytes(10), true),
+				Arguments.of(DataSize.mebibytes(9), true), // Uses chunking
+				Arguments.of(DataSize.mebibytes(10), false) // Is too large for jackson encoder
+		);
+	}
+
+	/**
+	 * Writes a single message of the given size and asserts it is accepted or
+	 * rejected as expected.
+	 *
+	 * @param dataSize   payload size of the generated message
+	 * @param shouldPass whether the write is expected to succeed for this size
+	 */
+	@ParameterizedTest
+	@MethodSource("dataSizes")
+	void messageSizes(DataSize dataSize, boolean shouldPass) {
+		NioSocketConnector client = CLUSTER_CONFIG.getClusterConnector(OM, new IoHandlerAdapter() {
+			@Override
+			public void sessionOpened(IoSession session) {
+				log.info("Session to {} established", session.getRemoteAddress());
+			}
+
+			@Override
+			public void exceptionCaught(IoSession session, Throwable cause) {
+				log.trace("Failed to write message (probably expected)", cause);
+			}
+		}, "Client");
+
+		try {
+			ConnectFuture connect = client.connect(SERVER.getLocalAddress());
+			connect.awaitUninterruptibly();
+			IoSession clientSession = connect.getSession();
+
+			// NOTE(review): generic parameter restored — the pasted diff had lost the contents of angle brackets.
+			NetworkMessage<?> input = new TestMessage(RandomStringUtils.randomAscii(toIntExact(dataSize.toBytes())));
+
+			WriteFuture write = clientSession.write(input);
+			write.awaitUninterruptibly();
+
+			// describedAs is lazy: the exception message is only fetched if this assertion fails.
+			assertThat(write.isWritten())
+					.describedAs(() -> write.getException().getMessage())
+					.isEqualTo(shouldPass);
+
+			Assertions.setMaxStackTraceElementsDisplayed(200);
+			if (!shouldPass) {
+				assertThat(write.getException()).hasCauseInstanceOf(IllegalArgumentException.class);
+			}
+
+			clientSession.closeNow().awaitUninterruptibly();
+		}
+		finally {
+			client.dispose();
+		}
+	}
+
+ /**
+  * Shuts down the shared test server once all tests in this class have run.
+  */
+ @AfterAll
+ public static void afterAll() {
+ SERVER.dispose();
+ }
+
+ /**
+  * Minimal message context used by {@code TestMessage#react}; wires only the
+  * session, no backend state.
+  * NOTE(review): the pasted diff lost all angle-bracket contents, so a type
+  * parameter on {@code NetworkMessageContext} may have been stripped here —
+  * verify against the original commit.
+  */
+ public static class TestNetworkMessageContext extends NetworkMessageContext {
+
+ public TestNetworkMessageContext(NetworkSession session) {
+ super(session, 0);
+ }
+ }
+
+	/**
+	 * Trivial payload-carrying message used to exercise the cluster codec.
+	 * NOTE(review): the supertype's generic parameter was lost to angle-bracket
+	 * stripping in the pasted diff; it is restored here from the signature of
+	 * {@link #react(TestNetworkMessageContext)}, which carries {@code @Override}.
+	 */
+	@CPSType(id = "TEST_MSG", base = NetworkMessage.class)
+	@RequiredArgsConstructor(onConstructor_ = @JsonCreator)
+	@Getter
+	@EqualsAndHashCode(callSuper = false)
+	public static class TestMessage extends NetworkMessage<TestNetworkMessageContext> {
+
+		private final String data;
+
+		@Override
+		public void react(TestNetworkMessageContext context) {
+			// Do nothing
+		}
+	}
+}
diff --git a/backend/src/test/java/com/bakdata/conquery/models/auth/LocalAuthRealmTest.java b/backend/src/test/java/com/bakdata/conquery/models/auth/LocalAuthRealmTest.java
index fa560e3c01..1c51ac9d9b 100644
--- a/backend/src/test/java/com/bakdata/conquery/models/auth/LocalAuthRealmTest.java
+++ b/backend/src/test/java/com/bakdata/conquery/models/auth/LocalAuthRealmTest.java
@@ -21,7 +21,7 @@
import org.apache.shiro.authc.BearerToken;
import org.apache.shiro.authc.CredentialsException;
import org.apache.shiro.authc.IncorrectCredentialsException;
-import org.apache.shiro.util.LifecycleUtils;
+import org.apache.shiro.lang.util.LifecycleUtils;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
diff --git a/backend/src/test/java/com/bakdata/conquery/util/support/TestConquery.java b/backend/src/test/java/com/bakdata/conquery/util/support/TestConquery.java
index 90b6f61be1..955cf7300a 100644
--- a/backend/src/test/java/com/bakdata/conquery/util/support/TestConquery.java
+++ b/backend/src/test/java/com/bakdata/conquery/util/support/TestConquery.java
@@ -16,7 +16,6 @@
import jakarta.ws.rs.client.Client;
import jakarta.ws.rs.core.UriBuilder;
-import com.bakdata.conquery.Conquery;
import com.bakdata.conquery.commands.DistributedStandaloneCommand;
import com.bakdata.conquery.commands.ShardNode;
import com.bakdata.conquery.commands.StandaloneCommand;
@@ -177,12 +176,12 @@ public void waitUntilWorkDone() {
if (Duration.ofNanos(System.nanoTime() - started).toSeconds() > 10) {
started = System.nanoTime();
- log.warn("waiting for done work for a long time", new Exception());
+ log.warn("Waiting for done work for a long time", new Exception("This Exception marks the stacktrace, to show where we are waiting."));
}
} while (true);
}
- log.trace("all jobs finished");
+ log.trace("All jobs finished");
}
public UriBuilder defaultAdminURIBuilder() {
@@ -230,10 +229,10 @@ public void beforeAll() throws Exception {
// define server
dropwizard = new DropwizardTestSupport<>(TestBootstrappingConquery.class, config, app -> {
if (config.getSqlConnectorConfig().isEnabled()) {
- standaloneCommand = new SqlStandaloneCommand((Conquery) app);
+ standaloneCommand = new SqlStandaloneCommand();
}
else {
- standaloneCommand = new DistributedStandaloneCommand((Conquery) app);
+ standaloneCommand = new DistributedStandaloneCommand();
}
return (Command) standaloneCommand;
});
diff --git a/pom.xml b/pom.xml
index 9329496399..935b4718d3 100644
--- a/pom.xml
+++ b/pom.xml
@@ -17,12 +17,12 @@
+		<!-- NOTE(review): property tag names below reconstructed; the pasted diff lost all angle-bracket markup — verify against the original pom.xml -->
-		<java.required>17</java.required>
-		<lombok.version>1.18.24</lombok.version>
+		<java.required>21</java.required>
+		<lombok.version>1.18.36</lombok.version>
 		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
 		<maven.compiler.source>${java.required}</maven.compiler.source>
 		<maven.compiler.target>${java.required}</maven.compiler.target>
-		<surefire.version>3.3.0</surefire.version>
+		<surefire.version>3.8.1</surefire.version>
 		<maven.build.timestamp.format>yyyy-MM-dd'T'HH:mm:ssXXX</maven.build.timestamp.format>
 		<build.timestamp>${maven.build.timestamp}</build.timestamp>
 		<revision>0.0.0-SNAPSHOT</revision>
|