-
Notifications
You must be signed in to change notification settings - Fork 47
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Кислов Данил ИТМО ФИТиП HW6 #203
Changes from all commits
539a511
d1b578c
358cb52
027bb2e
d4290ec
86ef1b8
ede656b
9526d2f
509c89d
b36c845
efb1235
fdad055
90040e7
37d8300
100ef07
7690126
eb160dd
f1f9441
13e06b4
88a9bf1
87071b6
e529b8d
7da9134
57ef90b
088f7d8
f4d9204
1169831
b6e87d8
b24abec
82e5a8a
deaf6c0
bb9693a
bde8718
cbafa19
1d82a96
5ebcc2d
e301c0d
de11933
fc75fe3
b6b12a7
8865e46
4b307a4
3abbeb1
8098cf7
ac5066e
c1e7684
8aa31ca
c5d88ef
2625f87
ca8e3f7
c123bed
cba8894
2336b9d
b0b5320
b1b9f26
f288aef
6e6c1f0
40861fc
049c1e1
7cd7b19
59999cb
08c7d3a
6825310
947a1ea
d93a444
08a6cdc
c4f1e91
500c21d
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,8 @@ | ||
-- wrk2 load script: issues identical GET /v0/entities?start=0 range requests.
wrk.method = "GET"

-- NOTE(review): the seed is set but nothing below draws random numbers;
-- presumably a leftover from a randomized-key script. Kept for compatibility.
math.randomseed(os.time())

request = function()
    -- 'local' keeps the path out of the global environment (was a global leak).
    local path = "/v0/entities?start=0"
    return wrk.format(nil, path)
end
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,54 @@ | ||
./wrk http://localhost:8080 -s getrange.lua -d 1m -R 10000 -t 4 -c 64 -L | ||
Running 1m test @ http://localhost:8080 | ||
4 threads and 64 connections | ||
Thread calibration: mean lat.: 6892.544ms, rate sampling interval: 18874ms | ||
Thread calibration: mean lat.: 6873.429ms, rate sampling interval: 18907ms | ||
Thread calibration: mean lat.: 6961.834ms, rate sampling interval: 18907ms | ||
Thread calibration: mean lat.: 6934.528ms, rate sampling interval: 18825ms | ||
Thread Stats Avg Stdev Max +/- Stdev | ||
Latency 12.84s 87.18ms 12.97s 62.50% | ||
Req/Sec 0.00 0.00 0.00 100.00% | ||
Latency Distribution (HdrHistogram - Recorded Latency) | ||
50.000% 12.85s | ||
75.000% 12.89s | ||
90.000% 12.94s | ||
99.000% 12.98s | ||
99.900% 12.98s | ||
99.990% 12.98s | ||
99.999% 12.98s | ||
100.000% 12.98s | ||
|
||
Detailed Percentile spectrum: | ||
Value Percentile TotalCount 1/(1-Percentile) | ||
|
||
12656.639 0.000000 1 1.00 | ||
12681.215 0.100000 2 1.11 | ||
12795.903 0.200000 4 1.25 | ||
12828.671 0.300000 5 1.43 | ||
12853.247 0.400000 8 1.67 | ||
12853.247 0.500000 8 2.00 | ||
12877.823 0.550000 9 2.22 | ||
12886.015 0.600000 12 2.50 | ||
12886.015 0.650000 12 2.86 | ||
12886.015 0.700000 12 3.33 | ||
12886.015 0.750000 12 4.00 | ||
12894.207 0.775000 13 4.44 | ||
12894.207 0.800000 13 5.00 | ||
12943.359 0.825000 14 5.71 | ||
12943.359 0.850000 14 6.67 | ||
12943.359 0.875000 14 8.00 | ||
12959.743 0.887500 15 8.89 | ||
12959.743 0.900000 15 10.00 | ||
12959.743 0.912500 15 11.43 | ||
12959.743 0.925000 15 13.33 | ||
12959.743 0.937500 15 16.00 | ||
12976.127 0.943750 16 17.78 | ||
12976.127 1.000000 16 inf | ||
#[Mean = 12844.544, StdDeviation = 87.177] | ||
#[Max = 12967.936, Total count = 16] | ||
#[Buckets = 27, SubBuckets = 2048] | ||
---------------------------------------------------------- | ||
64 requests in 1.00m, 9.07GB read | ||
Socket errors: connect 0, read 0, write 0, timeout 1856 | ||
Requests/sec: 1.07 | ||
Transfer/sec: 154.59MB |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,64 @@ | ||
./wrk http://localhost:8080 -s getrange.lua -d 1m -R 10000 -t 4 -c 64 -L | ||
Running 1m test @ http://localhost:8080 | ||
4 threads and 64 connections | ||
Thread calibration: mean lat.: 9223372036854776.000ms, rate sampling interval: 10ms | ||
Thread calibration: mean lat.: 9223372036854776.000ms, rate sampling interval: 10ms | ||
Thread calibration: mean lat.: 9223372036854776.000ms, rate sampling interval: 10ms | ||
Thread calibration: mean lat.: 9223372036854776.000ms, rate sampling interval: 10ms | ||
Thread Stats Avg Stdev Max +/- Stdev | ||
Latency 34.19s 13.56s 0.92m 40.62% | ||
Req/Sec 0.34 6.14 200.00 99.68% | ||
Latency Distribution (HdrHistogram - Recorded Latency) | ||
50.000% 37.26s | ||
75.000% 41.68s | ||
90.000% 0.90m | ||
99.000% 0.92m | ||
99.900% 0.92m | ||
99.990% 0.92m | ||
99.999% 0.92m | ||
100.000% 0.92m | ||
|
||
Detailed Percentile spectrum: | ||
Value Percentile TotalCount 1/(1-Percentile) | ||
|
||
16318.463 0.000000 1 1.00 | ||
18317.311 0.100000 7 1.11 | ||
19333.119 0.200000 13 1.25 | ||
19464.191 0.300000 20 1.43 | ||
27508.735 0.400000 26 1.67 | ||
37257.215 0.500000 32 2.00 | ||
38305.791 0.550000 36 2.22 | ||
38567.935 0.600000 39 2.50 | ||
39288.831 0.650000 42 2.86 | ||
40108.031 0.700000 45 3.33 | ||
41680.895 0.750000 48 4.00 | ||
47579.135 0.775000 50 4.44 | ||
51085.311 0.800000 52 5.00 | ||
51544.063 0.825000 53 5.71 | ||
53051.391 0.850000 55 6.67 | ||
53084.159 0.875000 56 8.00 | ||
53870.591 0.887500 57 8.89 | ||
54263.807 0.900000 58 10.00 | ||
54329.343 0.912500 59 11.43 | ||
54558.719 0.925000 60 13.33 | ||
54558.719 0.937500 60 16.00 | ||
54657.023 0.943750 62 17.78 | ||
54657.023 0.950000 62 20.00 | ||
54657.023 0.956250 62 22.86 | ||
54657.023 0.962500 62 26.67 | ||
54657.023 0.968750 62 32.00 | ||
55050.239 0.971875 63 35.56 | ||
55050.239 0.975000 63 40.00 | ||
55050.239 0.978125 63 45.71 | ||
55050.239 0.981250 63 53.33 | ||
55050.239 0.984375 63 64.00 | ||
55181.311 0.985938 64 71.11 | ||
55181.311 1.000000 64 inf | ||
#[Mean = 34186.048, StdDeviation = 13561.120] | ||
#[Max = 55148.544, Total count = 64] | ||
#[Buckets = 27, SubBuckets = 2048] | ||
---------------------------------------------------------- | ||
64 requests in 1.00m, 9.07GB read | ||
Socket errors: connect 0, read 0, write 0, timeout 1840 | ||
Requests/sec: 1.06 | ||
Transfer/sec: 154.47MB |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,48 @@ | ||
# Отчёт о нагрузочном тестировании | ||
|
||
## Этап 6 | ||
|
||
* Тестирование производилось при 10000 RPS (GET) на 4 потока с 64 соединениями. | ||
* Один инстанс базы данных | ||
* flushThresholdBytes 10Mb | ||
* База заполнена на 190 Mb всеми ключами от 0 до 100000. | ||
* Обработкой запросов занимается ThreadPoolExecutor с очередью на 100000 задач, | ||
пулом 24 потока | ||
* Для тестирования была использована утилита wrk2. | ||
* Для профилирования был использован async-profiler внутри IntelliJ IDEA | ||
|
||
### Скрипты | ||
|
||
* [getrange.lua](../scripts/getrange.lua) | ||
|
||
### Результаты | ||
|
||
[Вывод wrk2 для GET](getrange.txt) | ||
|
||
 | ||
|
||
#### Флеймграфы для GET запросов | ||
|
||
##### CPU | ||
|
||
 | ||
|
||
##### Allocations | ||
|
||
 | ||
|
||
### Вывод | ||
|
||
Изначальная реализация DAO не читала в оперативную память | ||
весь range, поэтому проблем с решением для этапа не возникло. | ||
Большая часть времени уходит на передачу данных; чтобы уменьшить latency | ||
можно попробовать добавить сжатие данных или как-то ускорить передачу | ||
данных | ||
|
||
Можно заметить, что довольно много памяти выделяется при отправке | ||
чанка, т.к. его содержание копируется в буфер. Это осознанное решение, | ||
если отправлять данные последовательно вызывая session.write на массивах байтов | ||
[уходит в 4-5 раз больше времени](getrange_sqeuntial_write.txt). | ||
Нагрузочное тестирование с wrk2 на данном этапе имеет мало смысла т.к. почти всё | ||
время уходит на запись 190Mb в сессию и для всех запросов уходит одинаковое время, | ||
приложил вывод только чтобы можно было посмотреть latency такого запроса. |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,43 @@ | ||
package ru.vk.itmo.test.kislovdanil.service; | ||
|
||
import one.nio.http.HttpSession; | ||
import ru.vk.itmo.test.kislovdanil.dao.Entry; | ||
|
||
import java.io.IOException; | ||
import java.lang.foreign.MemorySegment; | ||
import java.lang.foreign.ValueLayout; | ||
import java.nio.ByteBuffer; | ||
import java.nio.charset.StandardCharsets; | ||
|
||
public class ChunkTransformUtility { | ||
private static final byte[] CHUNK_SEPARATOR = "\r\n".getBytes(StandardCharsets.UTF_8); | ||
private static final byte[] KEY_VALUE_SEPARATOR = "\n".getBytes(StandardCharsets.UTF_8); | ||
public static final byte[] EMPTY_CONTENT = "0\r\n\r\n".getBytes(StandardCharsets.UTF_8); | ||
public static final byte[] HEADERS = """ | ||
HTTP/1.1 200 OK\r | ||
Transfer-Encoding: chunked\r | ||
\r | ||
""".getBytes(StandardCharsets.UTF_8); | ||
|
||
private ChunkTransformUtility() { | ||
|
||
} | ||
|
||
private static void writeFull(byte[] data, HttpSession session) throws IOException { | ||
session.write(data, 0, data.length); | ||
} | ||
|
||
public static void writeContent(Entry<MemorySegment> entry, HttpSession session) throws IOException { | ||
int entrySize = (int) (entry.key().byteSize() + entry.value().byteSize()) + KEY_VALUE_SEPARATOR.length; | ||
byte[] entrySizeHex = Long.toHexString(entrySize).getBytes(StandardCharsets.UTF_8); | ||
byte[] content = new byte[entrySize + entrySizeHex.length + CHUNK_SEPARATOR.length * 2]; | ||
ByteBuffer buffer = ByteBuffer.wrap(content); | ||
buffer.put(entrySizeHex) | ||
.put(CHUNK_SEPARATOR) | ||
.put(entry.key().toArray(ValueLayout.JAVA_BYTE)) | ||
.put(KEY_VALUE_SEPARATOR) | ||
.put(entry.value().toArray(ValueLayout.JAVA_BYTE)) | ||
.put(CHUNK_SEPARATOR); | ||
writeFull(content, session); | ||
} | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -19,6 +19,7 @@ | |
import java.lang.foreign.MemorySegment; | ||
import java.lang.foreign.ValueLayout; | ||
import java.nio.charset.StandardCharsets; | ||
import java.util.Iterator; | ||
import java.util.List; | ||
import java.util.concurrent.CompletableFuture; | ||
import java.util.concurrent.ExecutionException; | ||
|
@@ -30,8 +31,9 @@ public class DatabaseHttpServer extends HttpServer { | |
private final PersistentDao dao; | ||
private final Sharder sharder; | ||
private static final String ENTITY_ACCESS_URL = "/v0/entity"; | ||
private static final int CORE_POOL_SIZE = 12; | ||
private static final int MAX_POOL_SIZE = 12; | ||
private static final String ENTITIES_ACCESS_URL = "/v0/entities"; | ||
private static final int CORE_POOL_SIZE = 24; | ||
private static final int MAX_POOL_SIZE = 24; | ||
private static final int KEEP_ALIVE_TIME_MS = 50; | ||
private final ThreadPoolExecutor queryExecutor = new ThreadPoolExecutor(CORE_POOL_SIZE, MAX_POOL_SIZE, | ||
KEEP_ALIVE_TIME_MS, TimeUnit.MILLISECONDS, new LinkedBlockingStack<>()); | ||
|
@@ -110,7 +112,7 @@ public void handleEntityRequest(Request request, HttpSession session, | |
from = fromParam == null ? clusterSize : fromParam; | ||
acknowledge = acknowledgeParam == null ? from / 2 + 1 : acknowledgeParam; | ||
final boolean notProxy = notProxyParam != null && notProxyParam; | ||
if (acknowledge <= 0 || acknowledge > from || from > clusterSize) { | ||
if (acknowledge <= 0 || acknowledge > from || from > clusterSize || entityKey.isEmpty()) { | ||
sendResponse(new Response(Response.BAD_REQUEST, Response.EMPTY), session); | ||
} | ||
try { | ||
|
@@ -123,6 +125,37 @@ public void handleEntityRequest(Request request, HttpSession session, | |
} | ||
} | ||
|
||
private void handleEntitiesRequestTask(MemorySegment start, MemorySegment end, HttpSession session) { | ||
try { | ||
session.write(ChunkTransformUtility.HEADERS, 0, ChunkTransformUtility.HEADERS.length); | ||
for (Iterator<Entry<MemorySegment>> it = dao.get(start, end); it.hasNext(); ) { | ||
ChunkTransformUtility.writeContent(it.next(), session); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Так, а если в какой-то записи лежит очень большое value. Ты будешь его целиком в сокет запихивать? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Да, а какие варианты? Доставать куски из MemorySegment и их по очереди записывать чтобы не переполнить память? Настолько большой value в текущей логике, кажется, просто не получится записать в бд There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Поговорил с коллегами. Обработку данного случая, мы от вас требовать не будем. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. C Однако я считаю, что перебор от вас такое требовать) |
||
} | ||
session.write(ChunkTransformUtility.EMPTY_CONTENT, 0, ChunkTransformUtility.EMPTY_CONTENT.length); | ||
session.close(); | ||
} catch (IOException e) { | ||
throw new NetworkException(); | ||
} | ||
} | ||
|
||
@Path(ENTITIES_ACCESS_URL) | ||
public void handleEntitiesRequest(Request request, HttpSession session, | ||
@Param(value = "start", required = true) String start, | ||
@Param(value = "end") String end) { | ||
if (start.isBlank()) { | ||
sendResponse(new Response(Response.BAD_REQUEST, Response.EMPTY), session); | ||
} | ||
try { | ||
queryExecutor.execute(() -> handleEntitiesRequestTask(fromString(start), | ||
fromString(end), session)); | ||
} catch (RejectedExecutionException e) { | ||
sendResponse(new Response(Response.SERVICE_UNAVAILABLE, | ||
"Service temporary unavailable, retry later" | ||
.getBytes(StandardCharsets.UTF_8)), session); | ||
} | ||
|
||
} | ||
|
||
private Response putEntity(MemorySegment entityKey, byte[] entityValue) { | ||
dao.upsert(new BaseEntry<>(entityKey, MemorySegment.ofArray(entityValue), System.currentTimeMillis())); | ||
return new Response(Response.CREATED, Response.EMPTY); | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Непонятно поможет ли сжатие, так как тогда добавится время на компрессию и декомпрессию.
Из того, что я видел, обычно сжатие используют не для уменьшения latency, а для реализации холодного хранилища. В этом случае о latency как раз не думают, оно напротив увеличивается. Здесь оптимизируется занимаемое место.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
В целом да, хотя я не уверен что нет алгоритмов которые бы сжимали бы быстрее чем было бы передавать, просто гипотетическое предположение