Commit b99be67b authored by calocedre TAC

Merge branch 'feature/clea-batch/unit-tests' into 'develop'

Unit tests - configuration properties - code cleanup

See merge request stemcovid19/tac-server/backend-server!220
parents 4df8604a 1adcc866
@@ -5,11 +5,6 @@ import lombok.experimental.UtilityClass;
@UtilityClass
public class BatchConstants {
public static final String EXPOSED_VISITS_TABLE = "exposed_visits";
public static final String SINGLE_PLACE_CLUSTER_PERIOD_TABLE = "cluster_periods";
public static final String PERIOD_COLUMN = "period_start";
public static final String TIMESLOT_COLUMN = "timeslot";
public static final String LTID_COL = "ltid";
public static final String VENUE_TYPE_COL = "venue_type";
public static final String VENUE_CAT1_COL = "venue_category1";
@@ -25,4 +20,17 @@ public class BatchConstants {
public static final String PREFIXES_PARTITION_KEY = "prefixes";
public static final String LTIDS_LIST_PARTITION_KEY = "ltids";
// SQL properties
public static final String EXPOSED_VISITS_TABLE = "exposed_visits";
public static final String SINGLE_PLACE_CLUSTER_PERIOD_TABLE = "cluster_periods";
public static final String PERIOD_COLUMN = "period_start";
public static final String TIMESLOT_COLUMN = "timeslot";
// JDBC SQL Queries
public static final String SQL_SELECT_BY_LTID_IN_SINGLEPLACECLUSTERPERIOD = "select * from " + SINGLE_PLACE_CLUSTER_PERIOD_TABLE + " WHERE ltid= ?";
public static final String SQL_SELECT_DISTINCT_LTID_FROM_EXPOSEDVISITS = "select distinct " + LTID_COL + " from " + EXPOSED_VISITS_TABLE + " order by " + LTID_COL;
public static final String SQL_SELECT_DISTINCT_FROM_CLUSTERPERIODS_ORDERBY_LTID = "select distinct " + LTID_COL + " from " + SINGLE_PLACE_CLUSTER_PERIOD_TABLE + " ORDER BY " + LTID_COL;
public static final String SQL_SELECT_FROM_EXPOSEDVISITS_WHERE_LTID_ORDERBY_PERIOD_AND_TIMESLOT = "select * from " + EXPOSED_VISITS_TABLE + " WHERE ltid= ? ORDER BY " + PERIOD_COLUMN + ", " + TIMESLOT_COLUMN;
public static final String SQL_TRUNCATE_TABLE_CLUSTERPERIODS = "truncate table " + SINGLE_PLACE_CLUSTER_PERIOD_TABLE + ";";
}
@@ -27,4 +27,6 @@ public class BatchProperties {
private int identificationStepChunkSize;
private int indexationStepChunkSize;
private int prefixesComputingStepChunkSize;
}
@@ -22,15 +22,18 @@ public class ClusterIndexGenerationStepBatchConfig {
@Autowired
private BatchProperties batchProperties;
@Autowired
private ObjectMapper objectMapper;
@Bean
public Step clusterIndexGeneration(final ObjectMapper objectMapper) {
public Step clusterIndexGeneration() {
return stepBuilderFactory.get("clusterIndexGeneration")
.tasklet(generateClusterIndex(objectMapper))
.tasklet(generateClusterIndex())
.build();
}
@Bean
public Tasklet generateClusterIndex(final ObjectMapper objectMapper) {
public Tasklet generateClusterIndex() {
return new GenerateClusterIndexTasklet(batchProperties, prefixesStorageService, objectMapper);
}
}
@@ -22,12 +22,12 @@ import org.springframework.context.annotation.Configuration;
import org.springframework.core.task.SimpleAsyncTaskExecutor;
import org.springframework.core.task.TaskExecutor;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate;
import javax.sql.DataSource;
import java.util.List;
import static fr.gouv.clea.config.BatchConstants.EXPOSED_VISITS_TABLE;
import static fr.gouv.clea.config.BatchConstants.LTID_COL;
import static fr.gouv.clea.config.BatchConstants.SQL_SELECT_DISTINCT_LTID_FROM_EXPOSEDVISITS;
@Configuration
public class IdentificationStepBatchConfig {
@@ -62,7 +62,7 @@ public class IdentificationStepBatchConfig {
.<String, List<SinglePlaceClusterPeriod>>chunk(properties.getIdentificationStepChunkSize())
.reader(reader)
.processor(compositeProcessor)
.writer(new SinglePlaceClusterPeriodListWriter(dataSource))
.writer(new SinglePlaceClusterPeriodListWriter(new NamedParameterJdbcTemplate(dataSource)))
.taskExecutor(taskExecutor())
.throttleLimit(20)
.build();
@@ -73,7 +73,7 @@ public class IdentificationStepBatchConfig {
JdbcCursorItemReader<String> reader = new JdbcCursorItemReader<>();
reader.setDataSource(dataSource);
reader.setSql("select distinct " + LTID_COL + " from " + EXPOSED_VISITS_TABLE + " order by " + LTID_COL);
reader.setSql(SQL_SELECT_DISTINCT_LTID_FROM_EXPOSEDVISITS);
reader.setRowMapper((rs, i) -> rs.getString(1));
return reader;
}
......
@@ -21,6 +21,7 @@ import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.task.SimpleAsyncTaskExecutor;
import org.springframework.jdbc.core.JdbcTemplate;
import javax.sql.DataSource;
import java.util.List;
@@ -41,16 +42,19 @@ public class IndexationStepBatchConfig {
private BatchProperties properties;
@Autowired
private DataSource dataSource;
private ClusterPeriodModelsMapper mapper;
@Autowired
private ClusterPeriodModelsMapper mapper;
private ObjectMapper objectMapper;
@Autowired
private JdbcTemplate jdbcTemplate;
@Bean
public Step clustersIndexation(final ObjectMapper objectMapper) {
public Step clustersIndexation() {
return this.stepBuilderFactory.get("clustersIndexation")
.partitioner("partitioner", prefixPartitioner())
.partitionHandler(partitionHandler(objectMapper))
.partitionHandler(partitionHandler())
.build();
}
@@ -61,16 +65,16 @@ public class IndexationStepBatchConfig {
}
@Bean
public TaskExecutorPartitionHandler partitionHandler(final ObjectMapper objectMapper) {
public TaskExecutorPartitionHandler partitionHandler() {
final TaskExecutorPartitionHandler partitionHandler = new TaskExecutorPartitionHandler();
partitionHandler.setGridSize(properties.getGridSize());
partitionHandler.setStep(partitionedClustersIndexation(objectMapper));
partitionHandler.setStep(partitionedClustersIndexation());
partitionHandler.setTaskExecutor(indexationTaskExecutor());
return partitionHandler;
}
@Bean
public Step partitionedClustersIndexation(final ObjectMapper objectMapper) {
public Step partitionedClustersIndexation() {
return stepBuilderFactory.get("partitionedClustersIndexation")
.<Map.Entry<String, List<String>>, ClusterFile>chunk(properties.getIndexationStepChunkSize())
.reader(memoryMapItemReader(null, null))
@@ -90,7 +94,7 @@ public class IndexationStepBatchConfig {
@Bean
public ItemProcessor<Map.Entry<String, List<String>>, ClusterFile> singlePlaceClusterBuilder() {
return new SinglePlaceClusterBuilder(dataSource, mapper, properties);
return new SinglePlaceClusterBuilder(jdbcTemplate, mapper);
}
@Bean
......
@@ -7,13 +7,11 @@ import org.springframework.batch.core.repository.support.JobRepositoryFactoryBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.io.IOException;
import javax.sql.DataSource;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Map;
import javax.sql.DataSource;
@Component
public class NoPersistenceBatchConfigurer extends DefaultBatchConfigurer {
......
@@ -14,8 +14,7 @@ import org.springframework.context.annotation.Configuration;
import javax.sql.DataSource;
import java.util.List;
import static fr.gouv.clea.config.BatchConstants.LTID_COL;
import static fr.gouv.clea.config.BatchConstants.SINGLE_PLACE_CLUSTER_PERIOD_TABLE;
import static fr.gouv.clea.config.BatchConstants.SQL_SELECT_DISTINCT_FROM_CLUSTERPERIODS_ORDERBY_LTID;
@Configuration
public class PrefixesStepBatchConfig {
@@ -35,7 +34,7 @@ public class PrefixesStepBatchConfig {
@Bean
public Step prefixesComputing() {
return stepBuilderFactory.get("prefixesComputing")
.<List<String>, List<String>>chunk(1000)
.<List<String>, List<String>>chunk(properties.getPrefixesComputingStepChunkSize())
.reader(ltidListDBReader())
.writer(new PrefixesMemoryWriter(properties, prefixesStorageService))
.build();
@@ -52,7 +51,7 @@ public class PrefixesStepBatchConfig {
reader.setSaveState(false);
reader.setDataSource(dataSource);
reader.setVerifyCursorPosition(false);
reader.setSql("select distinct " + LTID_COL + " from " + SINGLE_PLACE_CLUSTER_PERIOD_TABLE + " ORDER BY " + LTID_COL);
reader.setSql(SQL_SELECT_DISTINCT_FROM_CLUSTERPERIODS_ORDERBY_LTID);
reader.setRowMapper((rs, i) -> rs.getString(1));
return new ListItemReader(reader);
}
......
@@ -7,12 +7,9 @@ import org.springframework.batch.repeat.RepeatStatus;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.jdbc.core.JdbcOperations;
import org.springframework.jdbc.core.JdbcTemplate;
import javax.sql.DataSource;
import static fr.gouv.clea.config.BatchConstants.SINGLE_PLACE_CLUSTER_PERIOD_TABLE;
import static fr.gouv.clea.config.BatchConstants.SQL_TRUNCATE_TABLE_CLUSTERPERIODS;
@Configuration
public class PurgeIntermediateTableStepBatchConfig {
@@ -20,17 +17,20 @@ public class PurgeIntermediateTableStepBatchConfig {
@Autowired
private StepBuilderFactory stepBuilderFactory;
@Autowired
private JdbcTemplate jdbcTemplate;
@Bean
public Step purgeIntermediateTable(final JdbcTemplate jdbcTemplate) {
public Step purgeIntermediateTable() {
return stepBuilderFactory.get("purgeIntermediateTable")
.tasklet(clearTable(jdbcTemplate))
.tasklet(clearTable())
.build();
}
@Bean
public Tasklet clearTable(final JdbcTemplate jdbcTemplate) {
public Tasklet clearTable() {
return (contribution, chunkContext) -> {
jdbcTemplate.execute("truncate table " + SINGLE_PLACE_CLUSTER_PERIOD_TABLE + ";");
jdbcTemplate.execute(SQL_TRUNCATE_TABLE_CLUSTERPERIODS);
return RepeatStatus.FINISHED;
};
}
......
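With the JdbcTemplate field-injected and the truncate statement read from BatchConstants, the tasklet above can be unit-tested without a DataSource. A minimal sketch of such a test, assuming the config class lives in fr.gouv.clea.config; the test class and method names are illustrative, not code from this merge request:

import static fr.gouv.clea.config.BatchConstants.SQL_TRUNCATE_TABLE_CLUSTERPERIODS;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.jupiter.MockitoExtension;
import org.springframework.batch.repeat.RepeatStatus;
import org.springframework.jdbc.core.JdbcTemplate;

@ExtendWith(MockitoExtension.class)
class PurgeIntermediateTableStepBatchConfigTest {

    @Mock
    private JdbcTemplate jdbcTemplate;

    @InjectMocks
    private PurgeIntermediateTableStepBatchConfig config;

    @Test
    void clearTable_truncates_the_cluster_periods_table() throws Exception {
        // the lambda tasklet ignores its arguments, so nulls are acceptable here
        final RepeatStatus status = config.clearTable().execute(null, null);

        Assertions.assertEquals(RepeatStatus.FINISHED, status);
        Mockito.verify(jdbcTemplate).execute(SQL_TRUNCATE_TABLE_CLUSTERPERIODS);
    }
}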
@@ -11,12 +11,11 @@ import org.springframework.batch.item.ExecutionContext;
import org.springframework.batch.item.ItemProcessor;
import org.springframework.jdbc.core.JdbcTemplate;
import javax.sql.DataSource;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicLong;
import static fr.gouv.clea.config.BatchConstants.*;
import static fr.gouv.clea.config.BatchConstants.SQL_SELECT_FROM_EXPOSEDVISITS_WHERE_LTID_ORDERBY_PERIOD_AND_TIMESLOT;
/**
* This class is executing in many Threads
@@ -38,8 +37,7 @@ public class SinglePlaceExposedVisitsBuilder implements ItemProcessor<String, SinglePlaceExposedVisits> {
@Override
public SinglePlaceExposedVisits process(final String ltid) {
final List<ExposedVisit> list = jdbcTemplate.query("select * from " + EXPOSED_VISITS_TABLE
+ " WHERE ltid= ? ORDER BY " + PERIOD_COLUMN + ", " + TIMESLOT_COLUMN,
final List<ExposedVisit> list = jdbcTemplate.query(SQL_SELECT_FROM_EXPOSEDVISITS_WHERE_LTID_ORDERBY_PERIOD_AND_TIMESLOT,
rowMapper, UUID.fromString(ltid));
ExposedVisit firstExposedVisit = list.stream().findFirst().orElse(null);
if (null != firstExposedVisit) {
......
package fr.gouv.clea.identification.writer;
import fr.gouv.clea.dto.SinglePlaceClusterPeriod;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.batch.item.ItemWriter;
import org.springframework.jdbc.core.namedparam.NamedParameterJdbcOperations;
import org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate;
import org.springframework.jdbc.core.namedparam.SqlParameterSource;
import org.springframework.jdbc.core.namedparam.SqlParameterSourceUtils;
import javax.sql.DataSource;
import java.util.List;
import java.util.stream.Collectors;
import static fr.gouv.clea.config.BatchConstants.*;
@Slf4j
@RequiredArgsConstructor
public class SinglePlaceClusterPeriodListWriter implements ItemWriter<List<SinglePlaceClusterPeriod>> {
private final NamedParameterJdbcOperations jdbcTemplate;
public SinglePlaceClusterPeriodListWriter(DataSource datasource) {
this.jdbcTemplate = new NamedParameterJdbcTemplate(datasource);
}
@Override
public void write(List<? extends List<SinglePlaceClusterPeriod>> lists) {
final List<SinglePlaceClusterPeriod> flatList = lists.stream().flatMap(List::stream).collect(Collectors.toList());
......
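The constructor taking a DataSource is removed in favour of an injected NamedParameterJdbcOperations (see the writer instantiation change in IdentificationStepBatchConfig above), which lets tests pass a mock. The rest of write(...) is elided here; judging from the SqlParameterSource imports it presumably batch-inserts the flattened list. A hedged sketch of that tail end, with a placeholder INSERT statement rather than the project's real constant and column list:

    // Placeholder statement: the real SQL and columns are not shown in this diff.
    private static final String INSERT_SQL_SKETCH = "insert into " + SINGLE_PLACE_CLUSTER_PERIOD_TABLE
            + " (ltid, period_start) values (:locationTemporaryPublicId, :periodStart)";

    void writeBatchSketch(final List<SinglePlaceClusterPeriod> flatList) {
        // one SqlParameterSource per bean; bean properties bind to the :named parameters
        final SqlParameterSource[] batch = SqlParameterSourceUtils.createBatch(flatList.toArray());
        jdbcTemplate.batchUpdate(INSERT_SQL_SKETCH, batch);
    }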
@@ -74,12 +74,13 @@ public class IndexationPartitioner implements Partitioner {
}
private void dispatchRemainingEntries(final Iterator<Map.Entry<String, List<String>>> prefixLtidsMapIterator, final Map<String, ExecutionContext> result) {
final int partitionIndex = 0;
int partitionIndex = 0;
while (prefixLtidsMapIterator.hasNext()) {
final Map.Entry<String, List<String>> currentValue = prefixLtidsMapIterator.next();
final ExecutionContext executionContext = result.get("partition-" + partitionIndex);
((List<String>) executionContext.get(PREFIXES_PARTITION_KEY)).add(currentValue.getKey());
((List<List<String>>) executionContext.get(LTIDS_LIST_PARTITION_KEY)).add(currentValue.getValue());
partitionIndex++;
}
}
}
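This hunk fixes a real bug: partitionIndex was declared final and never advanced, so every remaining entry was appended to "partition-0". A small self-contained illustration of the corrected dispatch, assuming at least as many pre-created contexts as leftover entries; the "prefixes"/"ltids" keys mirror PREFIXES_PARTITION_KEY and LTIDS_LIST_PARTITION_KEY from BatchConstants:

import org.springframework.batch.item.ExecutionContext;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class DispatchIllustration {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        final Map<String, ExecutionContext> result = new HashMap<>();
        for (int i = 0; i < 2; i++) {
            final ExecutionContext ctx = new ExecutionContext();
            ctx.put("prefixes", new ArrayList<String>());
            ctx.put("ltids", new ArrayList<List<String>>());
            result.put("partition-" + i, ctx);
        }
        final Map<String, List<String>> leftovers = Map.of("ab", List.of("ltid-1"), "cd", List.of("ltid-2"));
        int partitionIndex = 0;
        for (final Map.Entry<String, List<String>> e : leftovers.entrySet()) {
            final ExecutionContext ctx = result.get("partition-" + partitionIndex);
            ((List<String>) ctx.get("prefixes")).add(e.getKey());
            ((List<List<String>>) ctx.get("ltids")).add(e.getValue());
            partitionIndex++; // the increment this merge request adds; without it, everything lands in partition-0
        }
    }
}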
package fr.gouv.clea.indexation.index;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import fr.gouv.clea.config.BatchProperties;
import fr.gouv.clea.indexation.model.output.ClusterFileIndex;
import fr.gouv.clea.service.PrefixesStorageService;
@@ -46,7 +45,7 @@ public class GenerateClusterIndexTasklet implements Tasklet {
return RepeatStatus.FINISHED;
}
private void generateClusterIndex(final Long jobId, final Set<String> prefixes) throws IOException {
void generateClusterIndex(final Long jobId, final Set<String> prefixes) throws IOException {
ClusterFileIndex clusterFileIndex = ClusterFileIndex.builder()
.iteration(jobId.intValue())
@@ -55,9 +54,8 @@ public class GenerateClusterIndexTasklet implements Tasklet {
log.info("Generating cluster index : " + outputPath + File.separator + CLUSTER_INDEX_FILENAME);
Path jsonPath = Paths.get(outputPath + File.separator + CLUSTER_INDEX_FILENAME);
Path jsonPath = Path.of(outputPath, CLUSTER_INDEX_FILENAME);
File jsonIndex = jsonPath.toFile();
objectMapper.enable(SerializationFeature.INDENT_OUTPUT);
objectMapper.writeValue(jsonIndex, clusterFileIndex);
}
}
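Three changes here: generateClusterIndex is widened from private to package-private so unit tests can call it directly, Path.of replaces string concatenation with File.separator, and the INDENT_OUTPUT toggle moves to the spring.jackson.serialization.indent-output setting added to application.yml below. A self-contained check that the two path forms are equivalent, using "clusterIndex.json" as a stand-in for CLUSTER_INDEX_FILENAME (whose value this diff does not show):

import java.io.File;
import java.nio.file.Path;
import java.nio.file.Paths;

class PathEquivalence {
    public static void main(String[] args) {
        final Path viaConcat = Paths.get("/tmp/v1" + File.separator + "clusterIndex.json");
        final Path viaPathOf = Path.of("/tmp/v1", "clusterIndex.json");
        System.out.println(viaConcat.equals(viaPathOf)); // true
    }
}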
package fr.gouv.clea.indexation.processor;
import fr.gouv.clea.config.BatchProperties;
import fr.gouv.clea.dto.ClusterPeriod;
import fr.gouv.clea.dto.SinglePlaceCluster;
import fr.gouv.clea.dto.SinglePlaceClusterPeriod;
import fr.gouv.clea.indexation.SinglePlaceClusterPeriodRowMapper;
import fr.gouv.clea.indexation.model.output.ClusterFile;
import fr.gouv.clea.indexation.model.output.ClusterFileItem;
import fr.gouv.clea.indexation.model.output.Prefix;
import fr.gouv.clea.mapper.ClusterPeriodModelsMapper;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.batch.item.ItemProcessor;
import org.springframework.jdbc.core.JdbcTemplate;
import javax.sql.DataSource;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import static fr.gouv.clea.config.BatchConstants.SINGLE_PLACE_CLUSTER_PERIOD_TABLE;
import static fr.gouv.clea.config.BatchConstants.SQL_SELECT_BY_LTID_IN_SINGLEPLACECLUSTERPERIOD;
@Slf4j
@RequiredArgsConstructor
public class SinglePlaceClusterBuilder implements ItemProcessor<Map.Entry<String, List<String>>, ClusterFile> {
private final JdbcTemplate jdbcTemplate;
private final ClusterPeriodModelsMapper mapper;
private final BatchProperties properties;
public SinglePlaceClusterBuilder(
final DataSource dataSource,
final ClusterPeriodModelsMapper mapper,
final BatchProperties properties) {
jdbcTemplate = new JdbcTemplate(dataSource);
this.mapper = mapper;
this.properties = properties;
}
@Override
public ClusterFile process(final Map.Entry<String, List<String>> prefixLtidsEntry) {
log.debug("Processing prefix {} files...", prefixLtidsEntry.getKey());
ClusterFile clusterFile = new ClusterFile();
clusterFile.setName(Prefix.of(prefixLtidsEntry.getValue().get(0), properties.getStaticPrefixLength()));
clusterFile.setName(prefixLtidsEntry.getKey());
prefixLtidsEntry.getValue().forEach(createClusterFile(clusterFile));
return clusterFile;
@@ -61,7 +50,7 @@ public class SinglePlaceClusterBuilder implements ItemProcessor<Map.Entry<String, List<String>>, ClusterFile> {
};
}
private ClusterFileItem createClusterFileItem(SinglePlaceClusterPeriod firstPeriod, List<ClusterPeriod> clusterPeriods) {
ClusterFileItem createClusterFileItem(SinglePlaceClusterPeriod firstPeriod, List<ClusterPeriod> clusterPeriods) {
return ClusterFileItem.ofCluster(SinglePlaceCluster.builder()
.locationTemporaryPublicId(firstPeriod.getLocationTemporaryPublicId())
.venueCategory1(firstPeriod.getVenueCategory1())
@@ -71,9 +60,8 @@ public class SinglePlaceClusterBuilder implements ItemProcessor<Map.Entry<String, List<String>>, ClusterFile> {
.build());
}
private List<SinglePlaceClusterPeriod> getSinglePlaceClusterPeriods(final String ltid) {
return jdbcTemplate.query("select * from " + SINGLE_PLACE_CLUSTER_PERIOD_TABLE + " WHERE ltid= ?",
new SinglePlaceClusterPeriodRowMapper(), UUID.fromString(ltid));
List<SinglePlaceClusterPeriod> getSinglePlaceClusterPeriods(final String ltid) {
return jdbcTemplate.query(SQL_SELECT_BY_LTID_IN_SINGLEPLACECLUSTERPERIOD, new SinglePlaceClusterPeriodRowMapper(), UUID.fromString(ltid));
}
private List<ClusterPeriod> buildClusterPeriods(final List<SinglePlaceClusterPeriod> clusterPeriodList) {
......
package fr.gouv.clea.indexation.reader;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.springframework.batch.item.ItemReader;
@@ -10,6 +11,7 @@ import java.util.Map;
@Slf4j
public class StepExecutionContextReader implements ItemReader<Map.Entry<String, List<String>>> {
@Getter
private int index = 0;
private final List<String> prefixes;
@@ -24,9 +26,9 @@ public class StepExecutionContextReader implements ItemReader<Map.Entry<String, List<String>>> {
@Override
public Map.Entry<String, List<String>> read() {
if (!prefixes.isEmpty() && index < prefixes.size()) {
AbstractMap.SimpleEntry<String, List<String>> singleMap = new AbstractMap.SimpleEntry<>(prefixes.get(index), ltids.get(index));
AbstractMap.SimpleEntry<String, List<String>> mapEntry = new AbstractMap.SimpleEntry<>(prefixes.get(index), ltids.get(index));
index++;
return singleMap;
return mapEntry;
} else {
return null;
}
......
package fr.gouv.clea.indexation.writer;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import fr.gouv.clea.config.BatchProperties;
import fr.gouv.clea.indexation.model.output.ClusterFile;
import lombok.extern.slf4j.Slf4j;
@@ -14,7 +13,6 @@ import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import static fr.gouv.clea.config.BatchConstants.JSON_FILE_EXTENSION;
@@ -48,16 +46,14 @@ public class IndexationWriter implements ItemWriter<ClusterFile> {
log.info("Creating directories if not exists: {}", jobDirectoryOutputPath);
Files.createDirectories(jobDirectoryOutputPath);
clusterFile.forEach(clusterFile1 -> generateClusterFile(clusterFile1, jobDirectoryOutputPath));
}
private void generateClusterFile(final ClusterFile clusterFile, final Path directoryOutputPath) {
void generateClusterFile(final ClusterFile clusterFile, final Path directoryOutputPath) {
final Path jsonClusterPath = Path.of(directoryOutputPath.toString(), clusterFile.getName()+JSON_FILE_EXTENSION);
final Path jsonClusterPath = Path.of(directoryOutputPath.toString(), clusterFile.getName() + JSON_FILE_EXTENSION);
log.debug("Generating cluster file : {}", jsonClusterPath);
File jsonClusterFile = jsonClusterPath.toFile();
objectMapper.enable(SerializationFeature.INDENT_OUTPUT);
try {
objectMapper.writeValue(jsonClusterFile, clusterFile.getItems());
} catch (IOException e) {
@@ -65,5 +61,4 @@ public class IndexationWriter implements ItemWriter<ClusterFile> {
e.printStackTrace();
}
}
}
@@ -6,7 +6,6 @@ spring:
jpa:
hibernate.ddl-auto: none
database-platform: org.hibernate.dialect.PostgreSQL95Dialect # even with h2 database
batch:
# Manage its batch metadata tables
initialize-schema: never
@@ -17,6 +16,9 @@ spring:
enabled: "false"
baseline-on-migrate: "true"
locations: classpath:db/migration/{vendor}
jackson:
serialization:
indent-output: true
clea:
batch:
@@ -24,7 +26,7 @@ clea:
duration-unit-in-seconds: 1800
static-prefix-length: ${CLEA_BATCH_PREFIX_LENGTH:2}
files-output-path: ${CLEA_BATCH_OUTPUT_PATH:/tmp/v1}
#TODO: totally arbitrary values, find out actual plausible values
grid-size: 6
identification-step-chunk-size: 1000
indexation-step-chunk-size: 1000
\ No newline at end of file
indexation-step-chunk-size: 1000
prefixes-computing-step-chunk-size: 1000
\ No newline at end of file
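The new prefixes-computing-step-chunk-size key feeds the BatchProperties field added earlier in this merge request, replacing the hard-coded chunk(1000) in PrefixesStepBatchConfig. A sketch of the binding as it is presumably declared; the annotations and the clea.batch prefix are inferred from this YAML, not shown in the diff, and the field list is abridged:

import lombok.Getter;
import lombok.Setter;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;

@Getter
@Setter
@Configuration
@ConfigurationProperties(prefix = "clea.batch")
public class BatchProperties {
    // Spring's relaxed binding maps the kebab-case keys onto these camelCase fields
    private int gridSize;
    private int identificationStepChunkSize;
    private int indexationStepChunkSize;
    private int prefixesComputingStepChunkSize; // new: consumed by prefixesComputing()
    // other fields (files-output-path, static-prefix-length, ...) omitted
}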
@@ -18,6 +18,7 @@ import java.util.List;
import java.util.Random;
import java.util.UUID;
@Disabled("for local development purpose")
@SpringBootTest
//@ExtendWith(SpringExtension.class)
//@RunWith(SpringRunner.class)
@@ -43,8 +44,7 @@ public class ExposedVisitGenerator {
// }
@Test
@Disabled("for local development purpose")
public void fillRandomVisits() {
void fillRandomVisits() {
// hour of now : 3826008000
// hour of 21-01-01 : 3818448000
// diff: 7560000
@@ -57,9 +57,9 @@ public class ExposedVisitGenerator {
final long janv21 = 3818448000l;
JdbcTemplate jdbcTemplate = new JdbcTemplate(ds);
log.info("Starting to fill EXPOSED_VISITS...");
for (int l = 0; l < NB_LOCATIONS; l++) {
UUID lieu = UUID.randomUUID();
int venueType = r.nextInt(18)+1; // 1 to 18
@@ -108,7 +108,7 @@ }
}
});
//@formatter:on
}
log.info("Nb records in EXPOSED_VISITS: " + jdbcTemplate.queryForObject("select count(*) from EXPOSED_VISITS", Integer.class));
......
@@ -5,12 +5,13 @@ import fr.gouv.clea.dto.SinglePlaceCluster;
import fr.gouv.clea.dto.SinglePlaceClusterPeriod;
import fr.gouv.clea.mapper.ClusterPeriodModelsMapper;
import fr.gouv.clea.mapper.ClusterPeriodModelsMapperImpl;
import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.DisplayNameGeneration;
import org.junit.jupiter.api.DisplayNameGenerator;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.junit.jupiter.MockitoExtension;
import org.springframework.boot.autoconfigure.data.redis.RedisProperties;
import java.util.List;
import java.util.UUID;
@@ -37,11 +38,12 @@ class ClusterToPeriodsProcessorTest {
final int lastTimeSlot = 1001;
final int riskLevel = 4;
final long periodStart = 1L;
final ClusterPeriod clusterPeriod = buildPeriod(clusterStart, clusterDurationInSeconds, firstTimeSlot, lastTimeSlot, riskLevel, periodStart);
final SinglePlaceCluster cluster = buildCluster(ltid, venueType, venueCat1, venueCat2, clusterPeriod);
final ClusterPeriod clusterPeriod = new ClusterPeriod(periodStart, firstTimeSlot, lastTimeSlot, clusterStart, clusterDurationInSeconds, riskLevel);