mirror of https://github.com/MarginaliaSearch/MarginaliaSearch.git synced 2025-10-05 21:22:39 +02:00

Compare commits


272 Commits

Author SHA1 Message Date
Viktor Lofgren
bc49406881 (build) Compatibility hack for Debian server 2025-08-11 23:26:53 +02:00
Viktor Lofgren
90325be447 (minor) Fix comments 2025-08-11 23:19:53 +02:00
Viktor Lofgren
dc89587af3 (index) Improve disk locality of the positions data 2025-08-11 21:17:12 +02:00
Viktor Lofgren
7b552afd6b (index) Improve disk locality of the positions data 2025-08-11 20:59:11 +02:00
Viktor Lofgren
73557edc67 (index) Improve disk locality of the positions data 2025-08-11 20:57:32 +02:00
Viktor Lofgren
83919e448a (index) Use O_DIRECT buffered reads for spans 2025-08-11 18:04:25 +02:00
Viktor Lofgren
6f5b75b84d (cleanup) Remove accidentally committed print stmt 2025-08-11 18:04:25 +02:00
Viktor Lofgren
db315e2813 (index) Use O_DIRECT position reads 2025-08-11 18:04:25 +02:00
Viktor Lofgren
e9977e08b7 (index) Block-align positions data
This will make reads more efficient, and possibly pave way for O_DIRECT reads of this data
2025-08-11 14:36:45 +02:00
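A minimal sketch of the block-alignment idea (class name and layout are hypothetical, not the actual index code): records that would straddle a 4096-byte boundary are padded to the next block, and the file is padded to a whole number of blocks, so every record can later be fetched with the aligned reads O_DIRECT requires.

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.file.Path;
    import java.nio.file.StandardOpenOption;

    // Hypothetical writer that keeps records from straddling 4096-byte blocks.
    class BlockAlignedWriter implements AutoCloseable {
        private static final int BLOCK_SIZE = 4096;
        private final FileChannel channel;
        private long position = 0;

        BlockAlignedWriter(Path file) throws IOException {
            channel = FileChannel.open(file, StandardOpenOption.CREATE, StandardOpenOption.WRITE);
        }

        void write(ByteBuffer record) throws IOException {
            int remainingInBlock = BLOCK_SIZE - (int) (position % BLOCK_SIZE);
            // Pad to the next boundary if a block-sized-or-smaller record would straddle it
            if (record.remaining() <= BLOCK_SIZE && record.remaining() > remainingInBlock) {
                position += remainingInBlock;
            }
            while (record.hasRemaining()) {
                position += channel.write(record, position);
            }
        }

        @Override
        public void close() throws IOException {
            // Pad the file itself to a multiple of BLOCK_SIZE before closing
            long size = channel.size();
            long padded = (size + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE;
            if (padded > size) {
                channel.write(ByteBuffer.allocate((int) (padded - size)), size);
            }
            channel.close();
        }
    }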
Viktor Lofgren
1df3757e5f (native) Clean up io_uring code and check in execution queue, currently unused but nifty 2025-08-11 13:54:05 +02:00
Viktor Lofgren
ca283f9684 (native) Clean up native helpers and break them into their own library 2025-08-10 20:55:34 +02:00
Viktor Lofgren
85360e61b2 (index) Grow span writer buffer size
Apparently outlier spans can grow quite large.
2025-08-10 17:20:38 +02:00
Viktor Lofgren
e2ccff21bc (index) Wait until ranking is finished in query execution 2025-08-09 23:40:30 +02:00
Viktor Lofgren
c5b5b0c699 (index) Permit fast termination of rejection filter execution 2025-08-09 23:36:59 +02:00
Viktor Lofgren
9a65946e22 (uring) Reduce queue size to 2048 to avoid ENOMEM on systems with default ulimits 2025-08-09 20:41:24 +02:00
Viktor Lofgren
1d2ab21e27 (index) Aggregate termdata reads into a single io_uring operation instead of one for each term 2025-08-09 17:43:18 +02:00
Viktor Lofgren
0610cc19ad (index) Fix double close errors 2025-08-09 17:05:38 +02:00
Viktor Lofgren
a676306a7f (skiplist) Fix bugs in seek operations 2025-08-09 17:00:27 +02:00
Viktor Lofgren
8d68cd14fb (skiplist) Even more aggressive forward pointers 2025-08-09 16:11:41 +02:00
Viktor Lofgren
4773c5a52b (index) Backport some changes made during performance evaluations 2025-08-09 15:19:41 +02:00
Viktor Lofgren
74bd562ae4 (index) Move I/O to separate threads to hopefully reduce contention a bit 2025-08-09 15:19:41 +02:00
Viktor Lofgren
c9751287b0 (index) Boost the buffer size used in PrioIndexEntrySource 2025-08-09 01:46:12 +02:00
Viktor Lofgren
5da24e3fc4 (index) Segregate full and priority query ranking 2025-08-09 00:39:31 +02:00
Viktor Lofgren
20a4e86eec (index) Use a confined arena in IndexResultRankingService 2025-08-08 22:08:35 +02:00
Viktor Lofgren
477a184948 (experiment) Allow early termination of include conditions in lookups 2025-08-08 19:12:54 +02:00
Viktor Lofgren
8940ce99db (perf) More statistics in perf test 2025-08-08 18:57:25 +02:00
Viktor Lofgren
0ac0fa4dca (perf) More statistics in perf test 2025-08-08 18:56:17 +02:00
Viktor Lofgren
942f15ef14 (skiplist) Use a linear-quadratic forward pointer scheme instead of an exponential 2025-08-08 16:57:15 +02:00
Viktor Lofgren
f668f33d5b (index) Tweaks and optimizations 2025-08-08 15:32:23 +02:00
Viktor Lofgren
6789975cd2 (index) Tweaks and optimizations 2025-08-08 15:30:48 +02:00
Viktor Lofgren
c3ba608776 (index) Split up evaluation tasks 2025-08-08 15:20:33 +02:00
Viktor Lofgren
733d2687fe (skiplist) Roll back the design change that segregated the values associated with documents into a separate file 2025-08-08 14:45:11 +02:00
Viktor Lofgren
f6daac8ed0 (index) MADVISE_RANDOM the index btrees 2025-08-07 21:14:28 +02:00
Viktor Lofgren
c2eeee4a06 (uring) Disable result set combination 2025-08-07 21:13:30 +02:00
Viktor Lofgren
3b0c701df4 (uring) Update uring timeout threshold 2025-08-07 20:13:25 +02:00
Viktor Lofgren
c6fb2db43b (index) Use a more SLA-aware execution scheduler 2025-08-07 20:13:15 +02:00
Viktor Lofgren
9bc8fe05ae (skiplist) Clean up search logic 2025-08-07 19:35:25 +02:00
Viktor Lofgren
440ffcf6f8 (skiplist) Fix bug in intersection-like algorithms 2025-08-07 02:18:14 +02:00
Viktor Lofgren
b07709cc72 (native) Disable expensive debug checks from uring code 2025-08-06 21:05:28 +02:00
Viktor Lofgren
9a6acdcbe0 (skiplist) Tag slow fuzz test as "slow" 2025-08-06 20:59:52 +02:00
Viktor Lofgren
23b9b0bf1b (index) Parametrize skip list block size and buffer pool sizes 2025-08-06 20:59:33 +02:00
Viktor Lofgren
749c8ed954 (pool) Correct buffer pool alignment 2025-08-06 20:56:34 +02:00
Viktor Lofgren
9f4b6939ca (skiplist) Fix condition for truncated block writing 2025-08-06 16:25:53 +02:00
Viktor Lofgren
1d08e44e8d (uring) Fadvise random access for uring buffered reads 2025-08-06 15:54:24 +02:00
Viktor Lofgren
fc2e156e78 (skiplist) Ensure docs file is a multiple of BLOCK_SIZE bytes 2025-08-06 15:13:32 +02:00
Viktor Lofgren
5e68a89e9f (index) Improve error handling 2025-08-06 15:05:16 +02:00
Viktor Lofgren
d380661307 (index) Improve error handling 2025-08-06 14:31:06 +02:00
Viktor Lofgren
cccdf5c329 (pool) Check interrupt status in PoolLru's reclamation thread 2025-08-06 13:26:00 +02:00
Viktor Lofgren
f085b4ea12 (skiplist) Fix tests 2025-08-06 13:24:14 +02:00
Viktor Lofgren
e208f7d3ba (skiplist) Code cleanup and added validation 2025-08-06 12:55:04 +02:00
Viktor Lofgren
b577085cb2 (pool) Use one contiguous memory allocation to encourage a HugePage allocation and reduce TLB thrashing 2025-08-06 12:49:46 +02:00
Viktor Lofgren
b9240476f6 (pool) Use one contiguous memory allocation to encourage a HugePage allocation and reduce TLB thrashing 2025-08-06 12:48:14 +02:00
Viktor Lofgren
8f50f86d0b (index) Fix error handling 2025-08-05 22:19:23 +02:00
Viktor Lofgren
e3b7ead7a9 (skiplist) Fix aggressive forward pointering 2025-08-05 20:47:38 +02:00
Viktor Lofgren
9a845ba604 (skiplist) EXPERIMENTAL - Store data in a separate file from document ids 2025-08-05 19:10:58 +02:00
Viktor Lofgren
b9381f1603 (skiplist) EXPERIMENTAL - Store data in a separate file from document ids 2025-08-05 17:35:13 +02:00
Viktor Lofgren
6a60127267 (skiplist) EXPERIMENTAL - Store data in a separate file from document ids 2025-08-05 16:54:39 +02:00
Viktor Lofgren
e8ffcfbb19 (skiplist) Correct binary search implementation, fix intersection logic 2025-08-04 14:49:09 +02:00
Viktor Lofgren
caf0850f81 (index) Clean up code 2025-08-04 00:12:35 +02:00
Viktor Lofgren
62e3bb675e (btree) Remove O_DIRECT btree implementation 2025-08-03 23:43:31 +02:00
Viktor Lofgren
4dc3e7da7a (perf) Remove warmup from perf test, it's not doing much 2025-08-03 21:19:54 +02:00
Viktor Lofgren
92b09883ec (index) Switch from AIO to io_uring
Turns out AIO is just bad, especially with buffered I/O; io_uring performs strictly better in this scenario.
2025-08-03 21:19:54 +02:00
Viktor Lofgren
87082b4ef8 (index) Use AIO for reading spans and positions
This performs slightly worse in benchmarks, but that's likely caused by hitting the page cache.

AIO will tend to perform better when we see cache misses, which is the expected case in production on real-world data.
2025-08-03 21:19:54 +02:00
Viktor Lofgren
84d3f6087f (skiplist) Parametrize skip list block size, increase to 4K pages 2025-08-03 21:19:54 +02:00
Viktor Lofgren
f93ba371a5 (pool) Fix the LRU to not deadlock and be shit 2025-08-03 21:19:54 +02:00
Viktor Lofgren
5eec27c68d (pool) Fix for 32 bit rollover in clockHand for LRU 2025-08-03 21:19:54 +02:00
Viktor Lofgren
ab01576f91 (pool) Use one global buffer pool instead of many small ones, improved LRU with gclock reclamation, skip list optimization 2025-08-03 21:19:54 +02:00
Viktor Lofgren
054e5ccf44 (pool) Testing synchronized to see if I can find the deadlock 2025-08-03 21:19:54 +02:00
Viktor Lofgren
4351ea5128 (pool) Fix buffer leak 2025-08-03 21:19:54 +02:00
Viktor Lofgren
49cfa3a5e9 (pool) Decrease LQB size 2025-08-03 21:19:54 +02:00
Viktor Lofgren
683854b23f (pool) Fix logging 2025-08-03 21:19:54 +02:00
Viktor Lofgren
e880fa8945 (pool) Simplify locking in PoolLru 2025-08-03 21:19:54 +02:00
Viktor Lofgren
2482dc572e (pool) Grow free queue size 2025-08-03 21:19:54 +02:00
Viktor Lofgren
4589f11898 (pool) More stats 2025-08-03 21:19:54 +02:00
Viktor Lofgren
e43b6e610b (pool) Adjust pool reclamation strategy 2025-08-03 21:19:53 +02:00
Viktor Lofgren
4772117a1f (skiplist) First stab at a skiplist replacement for btrees in the documents lists 2025-08-03 21:19:53 +02:00
Viktor Lofgren
3fc7ea521c (pool) Remove readahead and simplify the code 2025-08-03 21:19:53 +02:00
Viktor Lofgren
4372f5af03 (pool) More performant LRU pool + better instructions queue 2025-08-03 21:19:53 +02:00
Viktor Lofgren
4ad89b6c75 (pool) More performant LRU pool 2025-08-03 21:19:53 +02:00
Viktor Lofgren
ad0519e031 (index) Optimizations 2025-08-03 21:19:53 +02:00
Viktor Lofgren
596ece1230 (pool) Fix deadlock during pool starvation 2025-08-03 21:19:53 +02:00
Viktor Lofgren
07b6e1585b (pool) Bump pool sizes 2025-08-03 21:19:53 +02:00
Viktor Lofgren
cb5e2778eb (pool) Align the buffers with 512b 2025-08-03 21:19:53 +02:00
Viktor Lofgren
8f5ea7896c (btree) More debug information on numEntries = 0 scenario 2025-08-03 21:19:53 +02:00
Viktor Lofgren
76c398e0b1 (index) Fix lingering issues with previous optimizations 2025-08-03 21:19:53 +02:00
Viktor Lofgren
4a94f04a8d (btree) Debug logging 2025-08-03 21:19:53 +02:00
Viktor Lofgren
df72f670d4 (btree) Fix queryData 2025-08-03 21:19:53 +02:00
Viktor Lofgren
eaa22c2f5a (*) Logging 2025-08-03 21:19:53 +02:00
Viktor Lofgren
7be173aeca (pool) Only dump statistics if they say anything 2025-08-03 21:19:53 +02:00
Viktor Lofgren
36685bdca7 (btree) Fix retain implementation 2025-08-03 21:19:53 +02:00
Viktor Lofgren
ad04057609 (btree) Add short circuits when retain/rejecting on an empty tree 2025-08-03 21:19:53 +02:00
Viktor Lofgren
eb76ae22e2 (perf) Use lqb size 512 in perf test 2025-08-03 21:19:53 +02:00
Viktor Lofgren
4b858ab341 (btree) Cache retain/reject reads 2025-08-03 21:19:53 +02:00
Viktor Lofgren
c6e3c8aa3b (index) Focus pools to try to increase reuse 2025-08-03 21:19:53 +02:00
Viktor Lofgren
9128d3907c (index) Periodically dump buffer metrics 2025-08-03 21:19:53 +02:00
Viktor Lofgren
4ef16d13d4 (index) O_DIRECT based buffer pool for index reads 2025-07-30 15:04:23 +02:00
Viktor Lofgren
838a5626ec (index) Reduce query buffer size 2025-07-27 21:42:04 +02:00
Viktor Lofgren
6b426209c7 (index) Restore threshold for work stealing in query execution 2025-07-27 21:41:46 +02:00
Viktor Lofgren
452b5731d9 (index) Lower threshold for work stealing in query execution 2025-07-27 21:35:11 +02:00
Viktor Lofgren
c91cf49630 (search) Disable scribe.rip substitution
It does not appear to work well
2025-07-27 19:40:58 +02:00
Viktor Lofgren
8503030f18 (search) Fix rare exception in scribe.rip substitution 2025-07-27 19:38:52 +02:00
Viktor Lofgren
744f7d3ef7 (search) Fix rare exception in scribe.rip substitution 2025-07-27 19:34:03 +02:00
Viktor Lofgren
215e12afe9 (index) Shrink query buffer size 2025-07-27 17:33:46 +02:00
Viktor Lofgren
2716bce918 (index) Adjust timeout logic for evaluation 2025-07-27 17:28:34 +02:00
Viktor Lofgren
caf2e6fbb7 (index) Adjust timeout logic for evaluation 2025-07-27 17:27:07 +02:00
Viktor Lofgren
233f0acfb1 (index) Further reduce query buffer size 2025-07-27 17:13:08 +02:00
Viktor Lofgren
e3a4ff02e9 (index) Abandon ongoing evaluation tasks if time is up 2025-07-27 17:04:01 +02:00
Viktor Lofgren
c786283ae1 (index) Reduce query buffer size 2025-07-27 16:57:55 +02:00
Viktor Lofgren
a3f65ac0e0 (deploy) Trigger index deployment 2025-07-27 16:50:23 +02:00
Viktor
aba1a32af0 Merge pull request #217 from MarginaliaSearch/uncompressed-spans-file
Index optimizations
2025-07-27 16:49:27 +02:00
Viktor Lofgren
c9c442345b (perf) Change execution test to use processing rate instead of count 2025-07-27 16:39:51 +02:00
Viktor Lofgren
2e126ba30e (perf) Change execution test to use processing rate instead of count 2025-07-27 16:37:20 +02:00
Viktor Lofgren
2087985f49 (index) Implement work stealing in IndexQueryExecution as a better approach to backpressure 2025-07-27 16:29:57 +02:00
Viktor Lofgren
2b13ebd18b (index) Tweak evaluation backlog handling 2025-07-27 16:08:16 +02:00
Viktor Lofgren
6d92c125fe (perf) Fix perf test 2025-07-27 15:50:28 +02:00
Viktor Lofgren
f638cfa39a (index) Avoid possibility of negative timeout 2025-07-27 15:39:12 +02:00
Viktor Lofgren
89447c12af (index) Avoid possibility of negative timeout 2025-07-27 15:24:47 +02:00
Viktor Lofgren
c71fc46f04 (perf) Update perf test with execution scenario 2025-07-27 15:22:07 +02:00
Viktor Lofgren
f96874d828 (sequence) Implement a largestValue abort condition for minDistance()
This is something like 3500% faster in certain common scenarios
2025-07-27 15:05:50 +02:00
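The implementation isn't shown here, but a plausible sketch of such an abort condition for two sorted position lists looks like this (the real SequenceOperations.minDistance works over the index's own sequence types; everything below is illustrative): once one list's current value passes the other list's largest value, the only remaining candidate pair is against that largest value, so the scan can stop immediately instead of draining the other list.

    // Illustrative two-pointer minimum distance over sorted, non-empty arrays,
    // with a largest-value early abort.
    static int minDistance(int[] a, int[] b) {
        int largestA = a[a.length - 1];
        int largestB = b[b.length - 1];
        int best = Integer.MAX_VALUE;
        int i = 0, j = 0;
        while (i < a.length && j < b.length) {
            best = Math.min(best, Math.abs(a[i] - b[j]));
            if (best == 0)
                return 0;
            if (a[i] > largestB)  // nothing left in b can get closer than b's largest value
                return Math.min(best, a[i] - largestB);
            if (b[j] > largestA)  // symmetric abort for the other list
                return Math.min(best, b[j] - largestA);
            if (a[i] < b[j]) i++;
            else j++;
        }
        return best;
    }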
Viktor Lofgren
583a84d5a0 (index) Clean up of the index query execution logic 2025-07-27 15:05:50 +02:00
Viktor Lofgren
f65b946448 (index) Clean up code 2025-07-27 15:05:50 +02:00
Viktor Lofgren
3682815855 (index) Optimize sequence intersection for the n=1 case 2025-07-26 19:14:32 +02:00
Viktor Lofgren
3a94357660 (index) Perf test tool (WIP!) 2025-07-26 11:49:33 +02:00
Viktor Lofgren
673b0d3de1 (index) Perf test tool (WIP!) 2025-07-26 11:49:31 +02:00
Viktor Lofgren
ea942bc664 (spans) Add signature to the footer of the spans file, including a version byte so we can detect whether to use the old or new decoding logic 2025-07-25 12:07:18 +02:00
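A footer carrying a signature and version byte is a small amount of machinery; a hedged sketch of the reading side (the magic value, layout, and sizes are made up for illustration):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;

    // Hypothetical footer layout: ...payload... | 1-byte version | 8-byte magic
    class SpansFooter {
        static final long MAGIC = 0x4D5253_50414E53L; // placeholder signature value
        static final int FOOTER_SIZE = 9;

        /** Returns the format version, or 0 for legacy files without a footer. */
        static int readVersion(FileChannel channel) throws IOException {
            if (channel.size() < FOOTER_SIZE)
                return 0;
            ByteBuffer buf = ByteBuffer.allocate(FOOTER_SIZE);
            channel.read(buf, channel.size() - FOOTER_SIZE);
            buf.flip();
            int version = buf.get() & 0xFF;
            if (buf.getLong() != MAGIC)
                return 0;   // no signature: fall back to the old decoding logic
            return version; // signature present: dispatch on the version byte
        }
    }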
Viktor Lofgren
7ed5083c54 (index) Don't split results into chunks 2025-07-25 11:45:07 +02:00
Viktor Lofgren
08bb2c097b (refac) Clean up the data model used in the index service 2025-07-25 10:54:07 +02:00
Viktor Lofgren
495fb325be (sequence) Correct sequence intersection bug introduced in optimizations 2025-07-25 10:48:33 +02:00
Viktor Lofgren
05c25bbaec (chore) Clean up 2025-07-24 23:43:27 +02:00
Viktor Lofgren
2a028b84f3 (chore) Clean up 2025-07-24 20:12:56 +02:00
Viktor Lofgren
a091a23623 (ranking) Remove unnecessary metadata retrievals 2025-07-24 20:08:09 +02:00
Viktor Lofgren
e8897acb45 (ranking) Remove unnecessary metadata retrievals 2025-07-24 20:05:39 +02:00
Viktor Lofgren
b89ffcf2be (index) Evaluate hash based idx mapping in ForwardIndexReader 2025-07-24 19:47:27 +02:00
Viktor Lofgren
dbcc9055b0 (index) Evaluate using MinMaxPriorityQueue as guts of ResultPriorityQueue 2025-07-24 19:31:51 +02:00
Viktor Lofgren
d9740557f4 (sequence) Optimize intersection logic with a fast abort condition 2025-07-24 19:04:10 +02:00
Viktor Lofgren
0d6cd015fd (index) Evaluate reading all spans at once 2025-07-24 18:34:11 +02:00
Viktor Lofgren
c6034efcc8 (index) Cache value of bitset cardinality for speed 2025-07-24 17:24:55 +02:00
Viktor Lofgren
76068014ad (index) More spans optimizations 2025-07-24 15:03:43 +02:00
Viktor Lofgren
1c3ed67127 (index) Byte align document spans 2025-07-24 14:06:14 +02:00
Viktor Lofgren
fc0cb6bd9a (index) Reserve a larger size for IntArrayList in SequenceOperations.findIntersections 2025-07-24 14:03:44 +02:00
Viktor Lofgren
c2601bac78 (converter) Remove unnecessary allocation of a 16 KB byte buffer 2025-07-24 13:25:37 +02:00
Viktor Lofgren
f5641b72e9 (index) Fix broken test 2025-07-24 13:21:05 +02:00
Viktor Lofgren
36efe2e219 (index) Optimize PositionsFileReader for concurrent reads
In benchmarks this is roughly twice as fast as the previous approach. The main caveat is that we need multiple file descriptors to avoid read instruction serialization by the kernel. This is undesirable since the reads are completely scattershot and can't be reordered by the kernel in a way that optimizes anything.
2025-07-24 13:20:54 +02:00
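A hedged sketch of the multiple-descriptor approach (class and parameters hypothetical): each thread's positional read is issued against one of several channels opened on the same file, so concurrent reads don't all queue up behind a single descriptor.

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.file.Path;
    import java.nio.file.StandardOpenOption;
    import java.util.concurrent.atomic.AtomicInteger;

    // Hypothetical striped reader; spreads pread()-style calls over N descriptors.
    class StripedPositionsReader implements AutoCloseable {
        private final FileChannel[] channels;
        private final AtomicInteger next = new AtomicInteger();

        StripedPositionsReader(Path file, int stripes) throws IOException {
            channels = new FileChannel[stripes];
            for (int i = 0; i < stripes; i++)
                channels[i] = FileChannel.open(file, StandardOpenOption.READ);
        }

        /** Thread-safe positional read into dst (assumed to start at position 0). */
        void read(ByteBuffer dst, long offset) throws IOException {
            FileChannel ch = channels[Math.floorMod(next.getAndIncrement(), channels.length)];
            while (dst.hasRemaining()) {
                if (ch.read(dst, offset + dst.position()) < 0)
                    break; // EOF
            }
        }

        @Override
        public void close() throws IOException {
            for (FileChannel ch : channels)
                ch.close();
        }
    }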
Viktor Lofgren
983fe3829e (spans) Evaluate uncompressed spans files
Span decompression appears to be somewhat of a performance bottleneck.  This change removes compression of the spans file.  The spans are still compressed in transit between the converter and index constructor at this stage.  The change is intentionally kept small to just evaluate the performance implications, change in file sizes, etc.
2025-07-23 18:10:41 +02:00
Viktor Lofgren
668c87aa86 (ssr) Drop Executor from SSR as it no longer exists 2025-07-23 13:55:41 +02:00
Viktor Lofgren
9d3f9adb05 Force redeploy of everything 2025-07-23 13:36:02 +02:00
Viktor
a43a1773f1 Merge pull request #216 from MarginaliaSearch/deprecate-executor
Architecture: Remove the separate executor service and roll it into the index service.
2025-07-23 13:32:42 +02:00
Viktor Lofgren
1e7a3a3c4f (docs) Update docs to reflect the change 2025-07-23 13:18:23 +02:00
Viktor Lofgren
62b696b1c3 (architecture) Remove the separate executor service and merge it into the index service
The primary motivation for this is that in production, the large number of partitioned services has led to intermittent exhaustion of available database connections, as each service has its own connection pool.

The decision to have a separate executor service dates back to when the index service was very slow to start, and the executor didn't always spin off its memory-hungry tasks into separate processes. This meant the executor would sometimes OOM and crash, and it was undesirable to bring the index down with it.
2025-07-23 12:57:13 +02:00
Viktor Lofgren
f1a900f383 (search) Clean up front page mobile design a bit 2025-07-23 12:20:40 +02:00
Viktor Lofgren
700364b86d (sample) Remove debug logging
The problem sat in the desk chair all along
2025-07-21 15:08:20 +02:00
Viktor Lofgren
7e725ddaed (sample) Remove debug logging
The problem sat in the desk chair all along
2025-07-21 14:41:59 +02:00
Viktor Lofgren
120209e138 (sample) Diagnosing compression errors 2025-07-21 14:34:08 +02:00
Viktor Lofgren
a771a5b6ce (sample) Test different approach to decoding 2025-07-21 14:19:01 +02:00
Viktor Lofgren
dac5b54128 (sample) Better logging for sample errors 2025-07-21 14:03:58 +02:00
Viktor Lofgren
6cfb143c15 (sample) Compress sample HTML data and introduce new API for only getting requests 2025-07-21 13:55:25 +02:00
Viktor Lofgren
23c818281b (converter) Reduce DomSample logging for NOT_FOUND 2025-07-21 13:37:55 +02:00
Viktor Lofgren
8aad253cf6 (converter) Add more logging around dom sample data retrieval errors 2025-07-21 13:26:38 +02:00
Viktor Lofgren
556d7af9dc Reapply "(grpc) Use grpc-netty instead of grpc-netty-shaded"
This reverts commit b7a5219ed3.
2025-07-21 13:23:32 +02:00
Viktor Lofgren
b7a5219ed3 Revert "(grpc) Use grpc-netty instead of grpc-netty-shaded"
Reverting this change to see if it's the cause of some instability issues observed.
2025-07-21 13:10:41 +02:00
Viktor Lofgren
a23ec521fe (converter) Ensure features is mutable on DetailsWithWords as this is assumed later 2025-07-21 12:50:04 +02:00
Viktor Lofgren
fff3babc6d (classifier) Add rule for */pixel.gif as likely tracking pixels 2025-07-21 12:35:57 +02:00
Viktor Lofgren
b2bfb8217c (special) Trigger CD run 2025-07-21 12:28:24 +02:00
Viktor
3b2ac414dc Merge pull request #210 from MarginaliaSearch/ads-fingerprinting
Implement advertisement and popover identification based on DOM sample data
2025-07-21 12:25:31 +02:00
Viktor Lofgren
0ba6515a01 (converter) Ensure converter works well even when dom sample data is unavailable 2025-07-21 12:11:17 +02:00
Viktor Lofgren
16c6b0f151 (search) Add link to new discord community 2025-07-20 20:54:42 +02:00
Viktor Lofgren
e998692900 (converter) Ensure converter works well even when dom sample data is unavailable 2025-07-20 19:24:40 +02:00
Viktor Lofgren
eeb1695a87 (search) Clean up dead code 2025-07-20 19:15:01 +02:00
Viktor Lofgren
a0ab910940 (search) Clean up code 2025-07-20 19:14:13 +02:00
Viktor Lofgren
b9f31048d7 (search) Clean up overlong class names 2025-07-20 19:13:04 +02:00
Viktor Lofgren
12c304289a (grpc) Use grpc-netty instead of grpc-netty-shaded
This will help reduce runaway thread pool sizes
2025-07-20 17:36:25 +02:00
Viktor Lofgren
6ee01dabea (search) Drastically reduce worker thread count in search-service 2025-07-20 17:16:58 +02:00
Viktor Lofgren
1b80e282a7 (search) Drastically reduce worker thread count in search-service 2025-07-20 16:58:33 +02:00
Viktor Lofgren
a65d18f1d1 (client) Use virtual threads in a few more clients 2025-07-20 14:10:02 +02:00
Viktor Lofgren
90a1ff220b (ui) Clean up UI 2025-07-19 18:41:36 +02:00
Viktor Lofgren
d6c7092335 (classifier) More rules 2025-07-19 18:41:36 +02:00
Viktor Lofgren
b716333856 (classifier) Match regexes against the path + query only, as well as the full URL 2025-07-19 18:41:36 +02:00
Viktor Lofgren
b504b8482c (classifier) Add new tracker 2025-07-19 18:41:36 +02:00
Viktor Lofgren
80da1e9ad1 (ui) UI cleanup 2025-07-19 18:41:36 +02:00
Viktor Lofgren
d3f744a441 (ui) Add traffic report to overview menu 2025-07-19 18:41:36 +02:00
Viktor Lofgren
60fb539875 (ui) Add explanatory blurb 2025-07-19 18:41:35 +02:00
Viktor Lofgren
7f5094fedf (ui) Clean up UI 2025-07-19 18:41:35 +02:00
Viktor Lofgren
45066636a5 (classifier) Add classification for domains that make 3rd party requests 2025-07-19 18:41:35 +02:00
Viktor Lofgren
e2d6898c51 (search) Change tag colors to more pleasant ones 2025-07-19 18:41:35 +02:00
Viktor Lofgren
58ef767b94 (search) Improve traffic report UI 2025-07-19 18:41:35 +02:00
Viktor Lofgren
f9f268c67a (grpc) Improve error handling 2025-07-19 18:41:35 +02:00
Viktor Lofgren
f44c2bdee9 (chore) Cleanup 2025-07-19 18:41:35 +02:00
Viktor Lofgren
6fdf477c18 (refac) Move DomSampleClassification to top level 2025-07-19 18:41:35 +02:00
Viktor Lofgren
6b6e455e3f (classifier) Clean up xml 2025-07-19 18:41:35 +02:00
Viktor Lofgren
a3a126540c (classifier) Add README.md 2025-07-19 18:41:35 +02:00
Viktor Lofgren
842b19da40 (search) Mobile layout + phrasing 2025-07-19 18:41:35 +02:00
Viktor Lofgren
2a30e93bf0 (classifier) 2025-07-19 18:41:34 +02:00
Viktor Lofgren
3d998f12c0 (search) Use display name where possible 2025-07-19 18:41:34 +02:00
Viktor Lofgren
cbccc2ac23 (classification) Add /ccm/collect as an ads-related request 2025-07-19 18:41:34 +02:00
Viktor Lofgren
2cfc23f9b7 (search) Fix layout for mobile 2025-07-18 19:06:23 +02:00
Viktor Lofgren
88fe394cdb (request-classifier) Add rule for /pagead/ 2025-07-18 19:01:33 +02:00
Viktor Lofgren
f30fcebd4f Remove dead code 2025-07-18 18:56:42 +02:00
Viktor Lofgren
5d885927b4 (search) Fix layout and presentation 2025-07-18 17:54:47 +02:00
Viktor Lofgren
7622c8358e (request-classifier) Adjust flagging of a few hosts 2025-07-18 17:54:46 +02:00
Viktor Lofgren
69ed9aef47 (ddgt) Load global tracker data 2025-07-18 17:02:50 +02:00
Viktor Lofgren
4c78c223da (search) Fix endpoint collection 2025-07-18 16:59:05 +02:00
Viktor Lofgren
71b9935dd6 (search) Add warmup to programmatic tailwind classes, fix word break 2025-07-18 16:49:31 +02:00
Viktor Lofgren
ad38f2fd83 (search) Hide classification tag on unclassified requests 2025-07-18 15:45:40 +02:00
Viktor Lofgren
9c47388846 (search) Improve display ordering 2025-07-18 15:44:55 +02:00
Viktor Lofgren
d9ab10e33f (search) Fix tracker data for the correct domain 2025-07-18 15:29:15 +02:00
Viktor Lofgren
e13ea7f42b (search) Sort results by classifications 2025-07-18 14:51:35 +02:00
Viktor Lofgren
f38daeb036 (WIP) First stab at a GUI for viewing network traffic
The change also moves the dom classifier to a separate package so that it can be accessed from both the search service and converter.

The change also adds a parser for DDG's tracker radar data.
2025-07-18 13:58:57 +02:00
Viktor Lofgren
6e214293e5 (ping) Fix backoff value overflow 2025-07-16 19:50:12 +02:00
Viktor Lofgren
52582a6d7d (experiment) Also add clients to loom experiment 2025-07-16 18:08:00 +02:00
Viktor Lofgren
ec0e39ad32 (experiment) Also add clients to loom experiment 2025-07-16 17:28:57 +02:00
Viktor Lofgren
6a15aee4b0 (ping) Fix arithmetic errors in backoff strategy due to long overflow 2025-07-16 17:23:36 +02:00
Viktor Lofgren
bd5111e8a2 (experimental) Add flag for using loom/virtual threads in gRPC executor 2025-07-16 17:12:07 +02:00
Viktor Lofgren
1ecbeb0272 (doc) Update ROADMAP.md 2025-07-14 13:38:34 +02:00
Viktor Lofgren
b91354925d (converter) Index documents even when they are short
... but assign short documents a special flag and penalize them in index lookups
2025-07-14 12:24:25 +02:00
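A hedged sketch of the flag-and-penalize pattern; the flag corresponds to the SHORT_DOCUMENT ("special:shorty") feature added in the HtmlFeature diff further down, but the word-count cutoff and penalty factor here are invented for illustration.

    import java.util.EnumSet;
    import java.util.Set;

    enum DocFlag { SHORT_DOCUMENT }

    // Conversion side: tag short documents instead of rejecting them outright
    static Set<DocFlag> classify(int wordCount) {
        Set<DocFlag> flags = EnumSet.noneOf(DocFlag.class);
        if (wordCount < 250) // hypothetical cutoff
            flags.add(DocFlag.SHORT_DOCUMENT);
        return flags;
    }

    // Index side: keep the document retrievable, but rank it down
    static double adjustScore(double score, Set<DocFlag> flags) {
        return flags.contains(DocFlag.SHORT_DOCUMENT) ? score * 0.5 : score; // hypothetical penalty
    }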
Viktor Lofgren
3f85c9c154 (refac) Clean up code 2025-07-14 11:55:21 +02:00
Viktor Lofgren
390f053406 (api) Add query parameter 'dc' for specifying the max number of results per domain 2025-07-14 10:09:30 +02:00
Viktor Lofgren
89e03d6914 (chore) Idiomatic error handling in gRPC clients
responseObserver.onError(...) should be passed Status.WHATEVER.foo().asRuntimeException(), not arbitrary throwables as was done before.
2025-07-13 02:59:22 +02:00
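Concretely, the idiom uses gRPC's Status API; the handler shape and the exception mapping below are illustrative (Request, Response, and doWork are stand-ins):

    import io.grpc.Status;
    import io.grpc.stub.StreamObserver;

    void handle(Request request, StreamObserver<Response> responseObserver) {
        try {
            responseObserver.onNext(doWork(request));
            responseObserver.onCompleted();
        }
        catch (IllegalArgumentException e) {
            responseObserver.onError(Status.INVALID_ARGUMENT
                    .withDescription(e.getMessage())
                    .asRuntimeException());
        }
        catch (Exception e) {
            responseObserver.onError(Status.INTERNAL
                    .withCause(e)
                    .asRuntimeException());
        }
    }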
Viktor Lofgren
14e0bc9f26 (index) Add comment about encoding caveat 2025-07-13 02:47:00 +02:00
Viktor Lofgren
7065b46c6f (index) Add penalties for new feature flags from dom sample 2025-07-13 02:37:30 +02:00
Viktor Lofgren
0372190c90 (index, refac) Move domain ranking to a better named package 2025-07-13 02:37:29 +02:00
Viktor Lofgren
ceaf32fb90 (converter) Integrate dom sample features into the converter 2025-07-13 01:38:28 +02:00
Viktor Lofgren
b03c43224c (search) Fix redirects in new search UI 2025-07-11 23:44:45 +02:00
Viktor Lofgren
9b4ce9e9eb (search) Fix !w redirect 2025-07-11 23:28:09 +02:00
Viktor
81ac02a695 Merge pull request #209 from us3r1d/master
added converter.insertFoundDomains property
2025-07-11 21:34:04 +02:00
krystal
47f624fb3b changed converter.insertFoundDomains to loader.insertFoundDomains 2025-07-11 12:13:45 -07:00
Viktor Lofgren
b57db01415 (converter) Clean out some old and redundant advertisement and tracking detection code 2025-07-11 19:32:25 +02:00
Viktor Lofgren
ce7d522608 (converter) First basic hook-in of the new dom sample classifier into the converter workflow 2025-07-11 16:57:37 +02:00
Viktor Lofgren
18649b6ee9 (converter) Move DomSampleClassifier to converter's code tree 2025-07-11 16:12:48 +02:00
Viktor Lofgren
f6417aef1a (converter) Additional code cleanup 2025-07-11 15:58:48 +02:00
Viktor Lofgren
2aa7e376b0 (converter) Clean up code around document deduplication 2025-07-11 15:54:28 +02:00
Viktor Lofgren
f33bc44860 (dom-sample) Create API for fetching DOM sample data across services 2025-07-11 15:41:10 +02:00
Viktor Lofgren
a2826efd44 (dom-sample) First stab at classifying outgoing requests from DOM sample data 2025-07-11 15:41:10 +02:00
krystal
c866f19cbb added converter.insertFoundDomains property 2025-07-10 15:36:59 -07:00
Viktor Lofgren
518278493b (converter) Increase the max byte length when parsing crawled documents to 500 kB from 200 kB. 2025-07-08 21:22:02 +02:00
Viktor Lofgren
1ac0bab0b8 (converter) Also exclude length checks when lenient processing is enabled 2025-07-08 20:37:53 +02:00
Viktor Lofgren
08b45ed10a (converter) Add system property converter.lenientProcessing to disable most disqualification checks 2025-07-08 19:44:51 +02:00
Viktor Lofgren
f2cfb91973 (converter) Add audit log of converter errors and rejections 2025-07-08 19:15:41 +02:00
Viktor Lofgren
2f79524eb3 (refac) Rename ProcessService to ProcessSpawnerService for clarity 2025-07-07 15:48:44 +02:00
Viktor Lofgren
3b00142c96 (search) Don't say unknown domains are in the crawler queue 2025-07-06 18:42:36 +02:00
Viktor Lofgren
294ab19177 (status) Use old-search for status service instead of marginalia-search.com 2025-07-06 15:40:53 +02:00
Viktor Lofgren
6f1659ecb2 (control) Add GUI for NSFW Filter Update trigger 2025-06-25 16:03:27 +02:00
Viktor Lofgren
982dcb28f0 (live-crawler) Use Apache HttpClient + code cleanup 2025-06-24 13:04:19 +02:00
Viktor Lofgren
fc686d8b2e (live-crawler) Fix startup race condition
The fix makes sure we wait for the feeds API to be available before fetching from it, so that the process doesn't crash on a cold system reboot.
2025-06-24 11:42:41 +02:00
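A hedged sketch of that kind of startup guard (names invented): retry the dependency call with backoff until it answers or a deadline passes, rather than letting the first failed call kill the process.

    import java.time.Duration;
    import java.util.function.Supplier;

    // Blocks until the call succeeds or the deadline expires, with exponential backoff.
    static <T> T waitFor(Supplier<T> call, Duration timeout) throws InterruptedException {
        long deadline = System.nanoTime() + timeout.toNanos();
        Duration pause = Duration.ofSeconds(1);
        while (true) {
            try {
                return call.get();
            }
            catch (RuntimeException e) {
                if (System.nanoTime() > deadline)
                    throw e; // dependency never came up; give up
                Thread.sleep(pause.toMillis());
                pause = pause.multipliedBy(2);
            }
        }
    }

    // Usage (hypothetical client): var feeds = waitFor(feedsClient::fetchUpdates, Duration.ofMinutes(5));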
Viktor Lofgren
69ef0f334a (rss) Make feed fetcher use Apache's HttpClient 2025-06-23 18:49:55 +02:00
Viktor Lofgren
446746f3bd (control) Fix so that sideload actions show up in Mixed profile nodes 2025-06-23 18:08:09 +02:00
Viktor Lofgren
24ab8398bb (ndp) Use LinkGraphClient to populate NDP table 2025-06-23 16:44:38 +02:00
Viktor Lofgren
d2ceeff4cf (ndp) Add toggle for excluding nodes from assignment via NDP 2025-06-23 15:38:02 +02:00
Viktor Lofgren
cf64214b1c (ndp) Update documentation 2025-06-23 15:18:35 +02:00
Viktor Lofgren
e50d09cc01 (crawler) Remove illegal requests when denied via robots.txt
The commit removes attempts at probing the root document, feed URLs, and favicon if we are not permitted to do so via robots.txt
2025-06-22 17:10:44 +02:00
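A hedged sketch of gating those probes (interfaces hypothetical): every side-request the crawler makes on its own initiative is checked against the parsed robots.txt first.

    interface RobotsRules {
        boolean isAllowed(String path);
    }

    // Only probe paths robots.txt permits; previously these were fetched unconditionally.
    void probeSite(RobotsRules robots) {
        if (robots.isAllowed("/"))            fetchProbe("/");            // root document
        if (robots.isAllowed("/feed"))        fetchProbe("/feed");        // feed discovery
        if (robots.isAllowed("/favicon.ico")) fetchProbe("/favicon.ico"); // favicon
    }

    void fetchProbe(String path) {
        // ... issue the HTTP request ...
    }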
Viktor Lofgren
bce3892ce0 (ndp) Simplify code 2025-06-22 16:08:55 +02:00
Viktor Lofgren
36581b25c2 (ndp) Fix process tracking in domain discovery process 2025-06-21 14:35:25 +02:00
Viktor Lofgren
52ff7fb4dd (ndp) Add a process for adding new domains to be crawled
This is a working "work in progress" commit that will need more refinement, but given the usual difficulty of testing crawler-adjacent code without actually crawling, it needs some maturation time in production.
2025-06-21 14:10:27 +02:00
Viktor Lofgren
a4e49e658a (ping) Add README for ping 2025-06-19 11:21:52 +02:00
Viktor Lofgren
e2c56dc3ca (search) Clean up the rate limiting
We fail quietly to make life harder for the bot farmers
2025-06-18 11:26:30 +02:00
Viktor Lofgren
470b866008 (search) Clean up the rate limiting
We fail quietly to make life harder for the bot farmers
2025-06-18 11:22:26 +02:00
Viktor Lofgren
4895a2ac7a (search) Clean up the rate limiting
We fail quietly to make life harder for the bot farmers
2025-06-18 11:20:24 +02:00
Viktor Lofgren
fd32ae9fa7 (search) Add automatic rate limiting to /site
Fix typo
2025-06-18 11:10:08 +02:00
Viktor Lofgren
470651ea4c (search) Add automatic rate limiting to /site 2025-06-18 11:04:36 +02:00
Viktor Lofgren
8d4829e783 (ping) Change cookie specification to ignore cookies 2025-06-17 12:26:34 +02:00
Viktor Lofgren
1290bc15dc (ping) Reduce retries for SocketException and pals 2025-06-16 22:35:33 +02:00
Viktor Lofgren
e7fa558954 (ping) Disable some cert validation logic for now 2025-06-16 22:00:32 +02:00
Viktor Lofgren
720685bf3f (ping) Persist more detailed information about why a cert is invalid
The change also alters the validator to be less judgmental, accepting some invalid chains when it looks like we simply lack access to a (valid) intermediate cert.
2025-06-16 19:44:22 +02:00
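A hedged sketch of one such leniency heuristic (entirely illustrative, not the validator's actual logic): a chain that never reaches a self-signed root, where the leaf itself isn't self-signed either, often just means the server didn't send its intermediate cert.

    import java.security.cert.X509Certificate;

    // Heuristic: distinguish "probably missing an intermediate" from a truly bad chain.
    static boolean looksLikeMissingIntermediate(X509Certificate[] chain) {
        if (chain.length == 0)
            return false;
        X509Certificate leaf = chain[0];
        boolean leafSelfSigned = leaf.getSubjectX500Principal()
                .equals(leaf.getIssuerX500Principal());
        X509Certificate last = chain[chain.length - 1];
        boolean chainTerminates = last.getSubjectX500Principal()
                .equals(last.getIssuerX500Principal());
        // Not self-signed, and the chain never reaches a root: the intermediate
        // is likely just absent, rather than the chain being forged.
        return !leafSelfSigned && !chainTerminates;
    }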
Viktor Lofgren
cbec63c7da (ping) Pull root certificates from cacerts.pem 2025-06-16 19:21:05 +02:00
Viktor Lofgren
b03ca75785 (ping) Correct test so that it does not spam an innocent webmaster with requests 2025-06-16 17:06:14 +02:00
Viktor Lofgren
184aedc071 (ping) Deploy new custom cert validator for fingerprinting purposes 2025-06-16 16:36:23 +02:00
Viktor Lofgren
0275bad281 (ping) Limit SSL certificate validity dates to a maximum timestamp as permitted by database 2025-06-16 00:32:03 +02:00
Viktor Lofgren
fd83a9d0b8 (ping) Handle null case for Subject Alternative Names in SSL certificates 2025-06-16 00:27:37 +02:00
Viktor Lofgren
d556f8ae3a (ping) Ping server should not validate certificates 2025-06-16 00:08:30 +02:00
Viktor Lofgren
e37559837b (crawler) Crawler should validate certificates 2025-06-16 00:06:57 +02:00
Viktor Lofgren
3564c4aaee (ping) Route SSLHandshakeException to ConnectionError as well
This means we retry these over an unencrypted HTTP connection.
2025-06-15 20:31:33 +02:00
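A hedged sketch of that error routing (enum and classifier invented): handshake failures get binned with plain connection errors, and the caller's existing fallback then retries the probe over http://.

    import javax.net.ssl.SSLHandshakeException;
    import java.net.SocketException;

    enum ErrorClass { CONNECTION_ERROR, HTTP_ERROR, OTHER }

    static ErrorClass classify(Exception e) {
        if (e instanceof SSLHandshakeException)
            return ErrorClass.CONNECTION_ERROR; // triggers the unencrypted-HTTP retry path
        if (e instanceof SocketException)
            return ErrorClass.CONNECTION_ERROR;
        return ErrorClass.OTHER;
    }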
Viktor Lofgren
92c54563ab (ping) Reduce retry count on connection errors 2025-06-15 18:39:54 +02:00
Viktor Lofgren
d7a5d90b07 (ping) Store redirect location in availability record 2025-06-15 18:39:33 +02:00
338 changed files with 11332 additions and 3378 deletions

View File

@@ -48,10 +48,6 @@ filter for any API consumer.
I've talked to the stract dev and he does not think it's a good idea to mimic their optics language, which is quite ad-hoc, but instead to work together to find some new common description language for this.
## Show favicons next to search results
This is expected from search engines. Basic proof of concept sketch of fetching this data has been done, but the feature is some way from being reality.
## Specialized crawler for github
One of the search engine's biggest limitations right now is that it does not index github at all. A specialized crawler that fetches at least the readme.md would go a long way toward providing search capabilities in this domain.
@@ -66,6 +62,10 @@ The documents database probably should have some sort of flag indicating it's a
PDF parsing is known to be a bit of a security liability so some thought needs to be put in
that direction as well.
## Show favicons next to search results (COMPLETED 2025-03)
This is expected from search engines. Basic proof of concept sketch of fetching this data has been done, but the feature is some way from being reality.
## Web Design Overhaul (COMPLETED 2025-01)
The design is kinda clunky and hard to maintain, and needlessly outdated-looking.

View File

@@ -45,7 +45,7 @@ public class NodeConfigurationService {
public List<NodeConfiguration> getAll() {
try (var conn = dataSource.getConnection();
var qs = conn.prepareStatement("""
SELECT ID, DESCRIPTION, ACCEPT_QUERIES, AUTO_CLEAN, PRECESSION, KEEP_WARCS, NODE_PROFILE, DISABLED
SELECT ID, DESCRIPTION, ACCEPT_QUERIES, AUTO_CLEAN, PRECESSION, AUTO_ASSIGN_DOMAINS, KEEP_WARCS, NODE_PROFILE, DISABLED
FROM NODE_CONFIGURATION
""")) {
var rs = qs.executeQuery();
@@ -59,6 +59,7 @@ public class NodeConfigurationService {
rs.getBoolean("ACCEPT_QUERIES"),
rs.getBoolean("AUTO_CLEAN"),
rs.getBoolean("PRECESSION"),
rs.getBoolean("AUTO_ASSIGN_DOMAINS"),
rs.getBoolean("KEEP_WARCS"),
NodeProfile.valueOf(rs.getString("NODE_PROFILE")),
rs.getBoolean("DISABLED")
@@ -75,7 +76,7 @@ public class NodeConfigurationService {
public NodeConfiguration get(int nodeId) throws SQLException {
try (var conn = dataSource.getConnection();
var qs = conn.prepareStatement("""
SELECT ID, DESCRIPTION, ACCEPT_QUERIES, AUTO_CLEAN, PRECESSION, KEEP_WARCS, NODE_PROFILE, DISABLED
SELECT ID, DESCRIPTION, ACCEPT_QUERIES, AUTO_CLEAN, PRECESSION, AUTO_ASSIGN_DOMAINS, KEEP_WARCS, NODE_PROFILE, DISABLED
FROM NODE_CONFIGURATION
WHERE ID=?
""")) {
@@ -88,6 +89,7 @@ public class NodeConfigurationService {
rs.getBoolean("ACCEPT_QUERIES"),
rs.getBoolean("AUTO_CLEAN"),
rs.getBoolean("PRECESSION"),
rs.getBoolean("AUTO_ASSIGN_DOMAINS"),
rs.getBoolean("KEEP_WARCS"),
NodeProfile.valueOf(rs.getString("NODE_PROFILE")),
rs.getBoolean("DISABLED")
@@ -102,7 +104,7 @@ public class NodeConfigurationService {
try (var conn = dataSource.getConnection();
var us = conn.prepareStatement("""
UPDATE NODE_CONFIGURATION
SET DESCRIPTION=?, ACCEPT_QUERIES=?, AUTO_CLEAN=?, PRECESSION=?, KEEP_WARCS=?, DISABLED=?, NODE_PROFILE=?
SET DESCRIPTION=?, ACCEPT_QUERIES=?, AUTO_CLEAN=?, PRECESSION=?, AUTO_ASSIGN_DOMAINS=?, KEEP_WARCS=?, DISABLED=?, NODE_PROFILE=?
WHERE ID=?
"""))
{
@@ -110,10 +112,11 @@ public class NodeConfigurationService {
us.setBoolean(2, config.acceptQueries());
us.setBoolean(3, config.autoClean());
us.setBoolean(4, config.includeInPrecession());
us.setBoolean(5, config.keepWarcs());
us.setBoolean(6, config.disabled());
us.setString(7, config.profile().name());
us.setInt(8, config.node());
us.setBoolean(5, config.autoAssignDomains());
us.setBoolean(6, config.keepWarcs());
us.setBoolean(7, config.disabled());
us.setString(8, config.profile().name());
us.setInt(9, config.node());
if (us.executeUpdate() <= 0)
throw new IllegalStateException("Failed to update configuration");

View File

@@ -5,6 +5,7 @@ public record NodeConfiguration(int node,
boolean acceptQueries,
boolean autoClean,
boolean includeInPrecession,
boolean autoAssignDomains,
boolean keepWarcs,
NodeProfile profile,
boolean disabled

View File

@@ -20,9 +20,7 @@ public enum NodeProfile {
}
public boolean permitBatchCrawl() {
return isBatchCrawl() ||isMixed();
}
public boolean permitSideload() {
return isMixed() || isSideload();
return isBatchCrawl() || isMixed();
}
public boolean permitSideload() { return isSideload() || isMixed(); }
}

View File

@@ -2,6 +2,7 @@ package nu.marginalia.nodecfg;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import nu.marginalia.nodecfg.model.NodeConfiguration;
import nu.marginalia.nodecfg.model.NodeProfile;
import nu.marginalia.test.TestMigrationLoader;
import org.junit.jupiter.api.BeforeAll;
@@ -62,6 +63,63 @@ public class NodeConfigurationServiceTest {
assertEquals(2, list.size());
assertEquals(a, list.get(0));
assertEquals(b, list.get(1));
}
// Test all the fields that are only exposed via save()
@Test
public void testSaveChanges() throws SQLException {
var original = nodeConfigurationService.create(1, "Test", false, false, NodeProfile.MIXED);
assertEquals(1, original.node());
assertEquals("Test", original.description());
assertFalse(original.acceptQueries());
var precession = new NodeConfiguration(
original.node(),
"Foo",
true,
original.autoClean(),
original.includeInPrecession(),
!original.autoAssignDomains(),
original.keepWarcs(),
original.profile(),
original.disabled()
);
nodeConfigurationService.save(precession);
precession = nodeConfigurationService.get(original.node());
assertNotEquals(original.autoAssignDomains(), precession.autoAssignDomains());
var autoClean = new NodeConfiguration(
original.node(),
"Foo",
true,
!original.autoClean(),
original.includeInPrecession(),
original.autoAssignDomains(),
original.keepWarcs(),
original.profile(),
original.disabled()
);
nodeConfigurationService.save(autoClean);
autoClean = nodeConfigurationService.get(original.node());
assertNotEquals(original.autoClean(), autoClean.autoClean());
var disabled = new NodeConfiguration(
original.node(),
"Foo",
true,
autoClean.autoClean(),
autoClean.includeInPrecession(),
autoClean.autoAssignDomains(),
autoClean.keepWarcs(),
autoClean.profile(),
!autoClean.disabled()
);
nodeConfigurationService.save(disabled);
disabled = nodeConfigurationService.get(original.node());
assertNotEquals(autoClean.disabled(), disabled.disabled());
}
}

View File

@@ -0,0 +1,7 @@
-- Add additional summary columns to DOMAIN_SECURITY_INFORMATION table
-- to make it easier to get more information about the SSL certificate's validity
ALTER TABLE DOMAIN_SECURITY_INFORMATION ADD COLUMN SSL_CHAIN_VALID BOOLEAN DEFAULT NULL;
ALTER TABLE DOMAIN_SECURITY_INFORMATION ADD COLUMN SSL_HOST_VALID BOOLEAN DEFAULT NULL;
ALTER TABLE DOMAIN_SECURITY_INFORMATION ADD COLUMN SSL_DATE_VALID BOOLEAN DEFAULT NULL;
OPTIMIZE TABLE DOMAIN_SECURITY_INFORMATION;

View File

@@ -0,0 +1,12 @@
-- Table holding domains to be processed by the NDP in order to figure out whether to add them to
-- be crawled.
CREATE TABLE IF NOT EXISTS NDP_NEW_DOMAINS(
DOMAIN_ID INT NOT NULL PRIMARY KEY,
STATE ENUM ('NEW', 'ACCEPTED', 'REJECTED') NOT NULL DEFAULT 'NEW',
PRIORITY INT NOT NULL DEFAULT 0,
TS_CHANGE TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
CHECK_COUNT INT NOT NULL DEFAULT 0
);
CREATE INDEX IF NOT EXISTS NDP_NEW_DOMAINS__STATE_PRIORITY ON NDP_NEW_DOMAINS (STATE, PRIORITY DESC);

View File

@@ -0,0 +1,3 @@
-- Migration script to add AUTO_ASSIGN_DOMAINS column to NODE_CONFIGURATION table
ALTER TABLE NODE_CONFIGURATION ADD COLUMN AUTO_ASSIGN_DOMAINS BOOLEAN NOT NULL DEFAULT TRUE;

View File

@@ -5,13 +5,15 @@ import java.util.Collection;
public enum HtmlFeature {
// Note, the first 32 of these features are bit encoded in the database
// so be sure to keep anything that's potentially important toward the top
// of the list
// of the list; but adding new values will shift the encoded values and break
// binary compatibility! Scroll down for a marker where you should add new values
// if they need to be accessible from IndexResultScoreCalculator!
MEDIA( "special:media"),
JS("special:scripts"),
AFFILIATE_LINK( "special:affiliate"),
TRACKING("special:tracking"),
TRACKING_ADTECH("special:ads"), // We'll call this ads for now
TRACKING_ADTECH("special:adtech"),
KEBAB_CASE_URL("special:kcurl"), // https://www.example.com/urls-that-look-like-this/
LONG_URL("special:longurl"),
@@ -30,6 +32,15 @@ public enum HtmlFeature {
PDF("format:pdf"),
POPOVER("special:popover"),
CONSENT("special:consent"),
SHORT_DOCUMENT("special:shorty"),
THIRD_PARTY_REQUESTS("special:3pr"),
// Here! It is generally safe to add additional values here without
// disrupting the encoded values used by the DocumentValuator
// class in the index!
/** For fingerprinting and ranking */
OPENGRAPH("special:opengraph"),
OPENGRAPH_IMAGE("special:opengraph:image"),
@@ -67,6 +78,7 @@ public enum HtmlFeature {
S3_FEATURE("special:s3"),
MISSING_DOM_SAMPLE("special:nosample"),
UNKNOWN("special:uncategorized");
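The warning at the top of the enum is about positional bit encoding; a hedged sketch of the scheme it implies (the real class may differ in detail): each of the first 32 constants maps to bit 1 << ordinal, so inserting a constant above an existing one silently changes the meaning of every bitmask already stored in the database.

    import java.util.EnumSet;
    import java.util.Set;

    enum Feature { MEDIA, JS, AFFILIATE_LINK, TRACKING, TRACKING_ADTECH /* ... */ }

    // Encode: bit position follows declaration order, hence the compatibility warning.
    static int encode(Set<Feature> features) {
        int mask = 0;
        for (Feature f : features)
            if (f.ordinal() < 32)
                mask |= 1 << f.ordinal();
        return mask;
    }

    static Set<Feature> decode(int mask) {
        Set<Feature> set = EnumSet.noneOf(Feature.class);
        for (Feature f : Feature.values())
            if (f.ordinal() < 32 && (mask & (1 << f.ordinal())) != 0)
                set.add(f);
        return set;
    }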

View File

@@ -7,7 +7,6 @@ public enum ServiceId {
Search("search-service"),
Index("index-service"),
Query("query-service"),
Executor("executor-service"),
Control("control-service"),

View File

@@ -13,6 +13,7 @@ import nu.marginalia.service.discovery.property.ServicePartition;
import nu.marginalia.util.NamedExecutorFactory;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import java.util.function.Function;
@Singleton
@@ -20,10 +21,15 @@ public class GrpcChannelPoolFactory {
private final NodeConfigurationWatcher nodeConfigurationWatcher;
private final ServiceRegistryIf serviceRegistryIf;
private static final Executor executor = NamedExecutorFactory.createFixed("gRPC-Channel-Pool",
Math.clamp(Runtime.getRuntime().availableProcessors() / 2, 2, 32));
private static final Executor offloadExecutor = NamedExecutorFactory.createFixed("gRPC-Offload-Pool",
Math.clamp(Runtime.getRuntime().availableProcessors() / 2, 2, 32));
private static final boolean useLoom = Boolean.getBoolean("system.experimentalUseLoom");
private static final Executor executor = useLoom
? Executors.newVirtualThreadPerTaskExecutor()
: NamedExecutorFactory.createFixed("gRPC-Channel-Pool", Math.clamp(Runtime.getRuntime().availableProcessors() / 2, 2, 32));
private static final Executor offloadExecutor = useLoom
? Executors.newVirtualThreadPerTaskExecutor()
: NamedExecutorFactory.createFixed("gRPC-Offload-Pool", Math.clamp(Runtime.getRuntime().availableProcessors() / 2, 2, 32));
@Inject
public GrpcChannelPoolFactory(NodeConfigurationWatcher nodeConfigurationWatcher,

View File

@@ -2,6 +2,7 @@ package nu.marginalia.service.client;
import com.google.common.collect.Sets;
import io.grpc.ManagedChannel;
import io.grpc.StatusRuntimeException;
import nu.marginalia.service.discovery.ServiceRegistryIf;
import nu.marginalia.service.discovery.monitor.ServiceChangeMonitor;
import nu.marginalia.service.discovery.property.PartitionTraits;
@@ -206,6 +207,11 @@ public class GrpcSingleNodeChannelPool<STUB> extends ServiceChangeMonitor {
}
for (var e : exceptions) {
if (e instanceof StatusRuntimeException se) {
throw se; // Re-throw SRE as-is
}
// If there are other exceptions, log them
logger.error(grpcMarker, "Failed to call service {}", serviceKey, e);
}

View File

@@ -1,9 +1,9 @@
package nu.marginalia.service.server;
import io.grpc.Server;
import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder;
import io.grpc.netty.shaded.io.netty.channel.nio.NioEventLoopGroup;
import io.grpc.netty.shaded.io.netty.channel.socket.nio.NioServerSocketChannel;
import io.grpc.netty.NettyServerBuilder;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import nu.marginalia.service.discovery.ServiceRegistryIf;
import nu.marginalia.service.discovery.property.ServiceKey;
import nu.marginalia.service.discovery.property.ServicePartition;
@@ -13,9 +13,14 @@ import nu.marginalia.util.NamedExecutorFactory;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
public class GrpcServer {
private final Server server;
private static final boolean useLoom = Boolean.getBoolean("system.experimentalUseLoom");
public GrpcServer(ServiceConfiguration config,
ServiceRegistryIf serviceRegistry,
ServicePartition partition,
@@ -26,13 +31,19 @@ public class GrpcServer {
int nThreads = Math.clamp(Runtime.getRuntime().availableProcessors() / 2, 2, 16);
// Start the gRPC server
ExecutorService workExecutor = useLoom ?
Executors.newVirtualThreadPerTaskExecutor() :
NamedExecutorFactory.createFixed("nettyExecutor", nThreads);
var grpcServerBuilder = NettyServerBuilder.forAddress(new InetSocketAddress(config.bindAddress(), port))
.executor(NamedExecutorFactory.createFixed("nettyExecutor", nThreads))
.executor(workExecutor)
.workerEventLoopGroup(new NioEventLoopGroup(nThreads, NamedExecutorFactory.createFixed("Worker-ELG", nThreads)))
.bossEventLoopGroup(new NioEventLoopGroup(nThreads, NamedExecutorFactory.createFixed("Boss-ELG", nThreads)))
.channelType(NioServerSocketChannel.class);
for (var grpcService : grpcServices) {
if (!grpcService.shouldRegisterService()) {
continue;
}

View File

@@ -125,8 +125,7 @@ public class JoobyService {
// Set a cap on the number of worker threads, as Jooby's default value does not seem to consider
// multi-tenant servers with high thread counts, and spins up an exorbitant number of threads in that
// scenario
options.setWorkerThreads(Math.min(128, options.getWorkerThreads()));
options.setWorkerThreads(Math.min(16, options.getWorkerThreads()));
jooby.setServerOptions(options);

View File

@@ -7,6 +7,7 @@
<MarkerFilter marker="QUERY" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="HTTP" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="CRAWLER" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="CONVERTER" onMatch="DENY" onMismatch="NEUTRAL" />
</Filters>
</Console>
<Console name="ProcessConsole" target="SYSTEM_OUT">
@@ -23,6 +24,7 @@
<MarkerFilter marker="HTTP" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="CRAWLER" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="PROCESS" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="CONVERTER" onMatch="DENY" onMismatch="NEUTRAL" />
</Filters>
<SizeBasedTriggeringPolicy size="10MB" />
</RollingFile>
@@ -36,6 +38,16 @@
<MarkerFilter marker="CRAWLER" onMatch="ALLOW" onMismatch="DENY" />
</Filters>
</RollingFile>
<RollingFile name="LogToFile" fileName="${env:WMSA_LOG_DIR:-/var/log/wmsa}/converter-audit-${env:WMSA_SERVICE_NODE:-0}.log" filePattern="/var/log/wmsa/converter-audit-${env:WMSA_SERVICE_NODE:-0}-log-%d{MM-dd-yy-HH-mm-ss}-%i.log.gz"
ignoreExceptions="false">
<PatternLayout>
<Pattern>%d{yyyy-MM-dd HH:mm:ss,SSS}: %msg{nolookups}%n</Pattern>
</PatternLayout>
<SizeBasedTriggeringPolicy size="100MB" />
<Filters>
<MarkerFilter marker="CONVERTER" onMatch="ALLOW" onMismatch="DENY" />
</Filters>
</RollingFile>
</Appenders>
<Loggers>
<Logger name="org.apache.zookeeper" level="WARN" />

View File

@@ -8,6 +8,7 @@
<MarkerFilter marker="QUERY" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="HTTP" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="CRAWLER" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="CONVERTER" onMatch="DENY" onMismatch="NEUTRAL" />
</Filters>
</Console>
<Console name="ConsoleWarn" target="SYSTEM_OUT">
@@ -18,6 +19,7 @@
<MarkerFilter marker="QUERY" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="HTTP" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="CRAWLER" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="CONVERTER" onMatch="DENY" onMismatch="NEUTRAL" />
</Filters>
</Console>
<Console name="ConsoleError" target="SYSTEM_OUT">
@@ -28,6 +30,7 @@
<MarkerFilter marker="QUERY" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="HTTP" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="CRAWLER" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="CONVERTER" onMatch="DENY" onMismatch="NEUTRAL" />
</Filters>
</Console>
<Console name="ConsoleFatal" target="SYSTEM_OUT">
@@ -38,6 +41,7 @@
<MarkerFilter marker="QUERY" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="HTTP" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="CRAWLER" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="CONVERTER" onMatch="DENY" onMismatch="NEUTRAL" />
</Filters>
</Console>
<Console name="ProcessConsole" target="SYSTEM_OUT">
@@ -57,6 +61,7 @@
<MarkerFilter marker="QUERY" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="HTTP" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="CRAWLER" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="CONVERTER" onMatch="DENY" onMismatch="NEUTRAL" />
</Filters>
</RollingFile>
<RollingFile name="LogToFile" fileName="${env:WMSA_LOG_DIR:-/var/log/wmsa}/crawler-audit-${env:WMSA_SERVICE_NODE:-0}.log" filePattern="/var/log/wmsa/crawler-audit-${env:WMSA_SERVICE_NODE:-0}-log-%d{MM-dd-yy-HH-mm-ss}-%i.log.gz"
@@ -69,6 +74,16 @@
<MarkerFilter marker="CRAWLER" onMatch="ALLOW" onMismatch="DENY" />
</Filters>
</RollingFile>
<RollingFile name="LogToFile" fileName="${env:WMSA_LOG_DIR:-/var/log/wmsa}/converter-audit-${env:WMSA_SERVICE_NODE:-0}.log" filePattern="/var/log/wmsa/converter-audit-${env:WMSA_SERVICE_NODE:-0}-log-%d{MM-dd-yy-HH-mm-ss}-%i.log.gz"
ignoreExceptions="false">
<PatternLayout>
<Pattern>%d{yyyy-MM-dd HH:mm:ss,SSS}: %msg{nolookups}%n</Pattern>
</PatternLayout>
<SizeBasedTriggeringPolicy size="100MB" />
<Filters>
<MarkerFilter marker="CONVERTER" onMatch="ALLOW" onMismatch="DENY" />
</Filters>
</RollingFile>
</Appenders>
<Loggers>
<Logger name="org.apache.zookeeper" level="WARN" />

View File

@@ -9,6 +9,7 @@ import nu.marginalia.executor.storage.FileStorageFile;
import nu.marginalia.executor.upload.UploadDirContents;
import nu.marginalia.executor.upload.UploadDirItem;
import nu.marginalia.functions.execution.api.*;
import nu.marginalia.mq.persistence.MqPersistence;
import nu.marginalia.service.ServiceId;
import nu.marginalia.service.client.GrpcChannelPoolFactory;
import nu.marginalia.service.client.GrpcMultiNodeChannelPool;
@@ -25,27 +26,37 @@ import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.List;
import static nu.marginalia.functions.execution.api.ExecutorApiGrpc.ExecutorApiBlockingStub;
@Singleton
public class ExecutorClient {
private final MqPersistence persistence;
private final GrpcMultiNodeChannelPool<ExecutorApiBlockingStub> channelPool;
private static final Logger logger = LoggerFactory.getLogger(ExecutorClient.class);
private final ServiceRegistryIf registry;
@Inject
public ExecutorClient(ServiceRegistryIf registry,
MqPersistence persistence,
GrpcChannelPoolFactory grpcChannelPoolFactory)
{
this.registry = registry;
this.persistence = persistence;
this.channelPool = grpcChannelPoolFactory
.createMulti(
ServiceKey.forGrpcApi(ExecutorApiGrpc.class, ServicePartition.multi()),
ExecutorApiGrpc::newBlockingStub);
}
private long createTrackingTokenMsg(String task, int node, Duration ttl) throws Exception {
return persistence.sendNewMessage("task-tracking[" + node + "]", "export-client", null, task, "", ttl);
}
public void startFsm(int node, String actorName) {
channelPool.call(ExecutorApiBlockingStub::startFsm)
.forNode(node)
@@ -96,6 +107,16 @@ public class ExecutorClient {
.build());
}
public long updateNsfwFilters() throws Exception {
long msgId = createTrackingTokenMsg("nsfw-filters", 1, Duration.ofHours(6));
channelPool.call(ExecutorApiBlockingStub::updateNsfwFilters)
.forNode(1)
.run(RpcUpdateNsfwFilters.newBuilder().setMsgId(msgId).build());
return msgId;
}
public ActorRunStates getActorStates(int node) {
try {
var rs = channelPool.call(ExecutorApiBlockingStub::getActorStates)
@@ -168,7 +189,7 @@ public class ExecutorClient {
String uriPath = "/transfer/file/" + fileStorage.id();
String uriQuery = "path=" + URLEncoder.encode(path, StandardCharsets.UTF_8);
var endpoints = registry.getEndpoints(ServiceKey.forRest(ServiceId.Executor, fileStorage.node()));
var endpoints = registry.getEndpoints(ServiceKey.forRest(ServiceId.Index, fileStorage.node()));
if (endpoints.isEmpty()) {
throw new RuntimeException("No endpoints for node " + fileStorage.node());
}

View File

@@ -18,6 +18,8 @@ service ExecutorApi {
rpc calculateAdjacencies(Empty) returns (Empty) {}
rpc restoreBackup(RpcFileStorageId) returns (Empty) {}
rpc updateNsfwFilters(RpcUpdateNsfwFilters) returns (Empty) {}
rpc restartExecutorService(Empty) returns (Empty) {}
}
@@ -66,6 +68,9 @@ message RpcExportRequest {
int64 fileStorageId = 1;
int64 msgId = 2;
}
message RpcUpdateNsfwFilters {
int64 msgId = 1;
}
message RpcFileStorageIdWithDomainName {
int64 fileStorageId = 1;
string targetDomainName = 2;

View File

@@ -20,6 +20,7 @@ dependencies {
implementation project(':code:processes:live-crawling-process')
implementation project(':code:processes:loading-process')
implementation project(':code:processes:ping-process')
implementation project(':code:processes:new-domain-process')
implementation project(':code:processes:converting-process')
implementation project(':code:processes:index-constructor-process')
@@ -41,7 +42,6 @@ dependencies {
implementation project(':code:functions:nsfw-domain-filter')
implementation project(':code:execution:api')
implementation project(':code:processes:crawling-process:model')
implementation project(':code:processes:crawling-process:model')
implementation project(':code:processes:crawling-process:ft-link-parser')
implementation project(':code:index:index-journal')

View File

@@ -2,10 +2,11 @@ package nu.marginalia.actor;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import nu.marginalia.functions.execution.api.*;
import nu.marginalia.functions.execution.api.RpcFsmName;
import nu.marginalia.functions.execution.api.RpcProcessId;
import nu.marginalia.mq.MqMessageState;
import nu.marginalia.mq.persistence.MqPersistence;
import nu.marginalia.process.ProcessService;
import nu.marginalia.process.ProcessSpawnerService;
import nu.marginalia.service.module.ServiceConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -14,18 +15,18 @@ import spark.Spark;
@Singleton
public class ActorApi {
private final ExecutorActorControlService actors;
private final ProcessService processService;
private final ProcessSpawnerService processSpawnerService;
private final MqPersistence mqPersistence;
private final ServiceConfiguration serviceConfiguration;
private final Logger logger = LoggerFactory.getLogger(getClass());
@Inject
public ActorApi(ExecutorActorControlService actors,
ProcessService processService,
ProcessSpawnerService processSpawnerService,
MqPersistence mqPersistence,
ServiceConfiguration serviceConfiguration)
{
this.actors = actors;
this.processService = processService;
this.processSpawnerService = processSpawnerService;
this.mqPersistence = mqPersistence;
this.serviceConfiguration = serviceConfiguration;
}
@@ -43,7 +44,7 @@ public class ActorApi {
}
public Object stopProcess(RpcProcessId processId) {
ProcessService.ProcessId id = ProcessService.translateExternalIdBase(processId.getProcessId());
ProcessSpawnerService.ProcessId id = ProcessSpawnerService.translateExternalIdBase(processId.getProcessId());
try {
String inbox = id.name().toLowerCase() + ":" + serviceConfiguration.node();
@@ -60,7 +61,7 @@ public class ActorApi {
}
}
processService.kill(id);
processSpawnerService.kill(id);
}
catch (Exception ex) {
logger.error("Failed to stop process {}", id, ex);

View File

@@ -6,7 +6,7 @@ import java.util.Set;
public enum ExecutorActor {
PREC_EXPORT_ALL(NodeProfile.BATCH_CRAWL, NodeProfile.MIXED),
SYNC_NSFW_LISTS(NodeProfile.BATCH_CRAWL, NodeProfile.MIXED),
UPDATE_NSFW_LISTS(NodeProfile.BATCH_CRAWL, NodeProfile.MIXED, NodeProfile.SIDELOAD, NodeProfile.REALTIME),
CRAWL(NodeProfile.BATCH_CRAWL, NodeProfile.MIXED),
RECRAWL(NodeProfile.BATCH_CRAWL, NodeProfile.MIXED),
@@ -14,6 +14,7 @@ public enum ExecutorActor {
PROC_CRAWLER_SPAWNER(NodeProfile.BATCH_CRAWL, NodeProfile.MIXED),
PROC_PING_SPAWNER(NodeProfile.BATCH_CRAWL, NodeProfile.MIXED, NodeProfile.REALTIME),
PROC_EXPORT_TASKS_SPAWNER(NodeProfile.BATCH_CRAWL, NodeProfile.MIXED),
PROC_NDP_SPAWNER(NodeProfile.MIXED, NodeProfile.REALTIME),
ADJACENCY_CALCULATION(NodeProfile.BATCH_CRAWL, NodeProfile.MIXED),
EXPORT_DATA(NodeProfile.BATCH_CRAWL, NodeProfile.MIXED),
EXPORT_SEGMENTATION_MODEL(NodeProfile.BATCH_CRAWL, NodeProfile.MIXED),

View File

@@ -49,6 +49,7 @@ public class ExecutorActorControlService {
RecrawlSingleDomainActor recrawlSingleDomainActor,
RestoreBackupActor restoreBackupActor,
ConverterMonitorActor converterMonitorFSM,
NdpMonitorActor ndpMonitorActor,
PingMonitorActor pingMonitorActor,
CrawlerMonitorActor crawlerMonitorActor,
LiveCrawlerMonitorActor liveCrawlerMonitorActor,
@@ -93,7 +94,7 @@ public class ExecutorActorControlService {
register(ExecutorActor.PROC_PING_SPAWNER, pingMonitorActor);
register(ExecutorActor.PROC_LIVE_CRAWL_SPAWNER, liveCrawlerMonitorActor);
register(ExecutorActor.PROC_EXPORT_TASKS_SPAWNER, exportTasksMonitorActor);
register(ExecutorActor.PROC_NDP_SPAWNER, ndpMonitorActor);
register(ExecutorActor.MONITOR_PROCESS_LIVENESS, processMonitorFSM);
register(ExecutorActor.MONITOR_FILE_STORAGE, fileStorageMonitorActor);
@@ -112,7 +113,7 @@ public class ExecutorActorControlService {
register(ExecutorActor.UPDATE_RSS, updateRssActor);
register(ExecutorActor.MIGRATE_CRAWL_DATA, migrateCrawlDataActor);
register(ExecutorActor.SYNC_NSFW_LISTS, updateNsfwFiltersActor);
register(ExecutorActor.UPDATE_NSFW_LISTS, updateNsfwFiltersActor);
if (serviceConfiguration.node() == 1) {
register(ExecutorActor.PREC_EXPORT_ALL, exportAllPrecessionActor);

View File

@@ -4,11 +4,14 @@ import com.google.gson.Gson;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import nu.marginalia.actor.prototype.RecordActorPrototype;
import nu.marginalia.actor.state.*;
import nu.marginalia.mq.persistence.MqMessageHandlerRegistry;
import nu.marginalia.process.ProcessService;
import nu.marginalia.actor.state.ActorResumeBehavior;
import nu.marginalia.actor.state.ActorStep;
import nu.marginalia.actor.state.Resume;
import nu.marginalia.actor.state.Terminal;
import nu.marginalia.mq.MqMessageState;
import nu.marginalia.mq.persistence.MqMessageHandlerRegistry;
import nu.marginalia.mq.persistence.MqPersistence;
import nu.marginalia.process.ProcessSpawnerService;
import nu.marginalia.service.module.ServiceConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -24,13 +27,13 @@ import java.util.concurrent.atomic.AtomicBoolean;
public class AbstractProcessSpawnerActor extends RecordActorPrototype {
private final MqPersistence persistence;
private final ProcessService processService;
private final ProcessSpawnerService processSpawnerService;
private final Logger logger = LoggerFactory.getLogger(getClass());
public static final int MAX_ATTEMPTS = 3;
private final String inboxName;
private final ProcessService.ProcessId processId;
private final ProcessSpawnerService.ProcessId processId;
private final ExecutorService executorService = Executors.newSingleThreadExecutor();
private final int node;
@@ -50,7 +53,7 @@ public class AbstractProcessSpawnerActor extends RecordActorPrototype {
for (;;) {
var messages = persistence.eavesdrop(inboxName, 1);
if (messages.isEmpty() && !processService.isRunning(processId)) {
if (messages.isEmpty() && !processSpawnerService.isRunning(processId)) {
synchronized (processId) {
processId.wait(5000);
}
@@ -92,7 +95,7 @@ public class AbstractProcessSpawnerActor extends RecordActorPrototype {
catch (InterruptedException ex) {
// We get this exception when the process is cancelled by the user
processService.kill(processId);
processSpawnerService.kill(processId);
setCurrentMessageToDead();
yield new Aborted();
@@ -112,13 +115,13 @@ public class AbstractProcessSpawnerActor extends RecordActorPrototype {
public AbstractProcessSpawnerActor(Gson gson,
ServiceConfiguration configuration,
MqPersistence persistence,
ProcessService processService,
ProcessSpawnerService processSpawnerService,
String inboxName,
ProcessService.ProcessId processId) {
ProcessSpawnerService.ProcessId processId) {
super(gson);
this.node = configuration.node();
this.persistence = persistence;
this.processService = processService;
this.processSpawnerService = processSpawnerService;
this.inboxName = inboxName + ":" + node;
this.processId = processId;
}
@@ -149,7 +152,7 @@ public class AbstractProcessSpawnerActor extends RecordActorPrototype {
// Run this call in a separate thread so that this thread can be interrupted waiting for it
executorService.submit(() -> {
try {
processService.trigger(processId);
processSpawnerService.trigger(processId);
} catch (Exception e) {
logger.warn("Error in triggering process", e);
error.set(true);
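
The submit-then-join shape at the end of this actor is what keeps spawning interruptible: the blocking trigger call runs on the actor's single-thread executor, so the actor thread itself can still be interrupted (e.g. on user cancellation) while it waits. A compact, self-contained sketch of the same idea, with a Runnable standing in for the real processSpawnerService.trigger() call:

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicBoolean;

class InterruptibleTrigger {
    private final ExecutorService executorService = Executors.newSingleThreadExecutor();

    /** Run a blocking spawn call on a worker thread so the calling thread
     *  remains interruptible while it waits for completion. */
    boolean triggerInterruptibly(Runnable spawnCall) throws InterruptedException {
        AtomicBoolean error = new AtomicBoolean();
        Future<?> f = executorService.submit(() -> {
            try {
                spawnCall.run();   // stands in for processSpawnerService.trigger(processId)
            } catch (Exception e) {
                error.set(true);
            }
        });
        try {
            f.get();               // interruptible wait, unlike the raw blocking call
        } catch (ExecutionException e) {
            error.set(true);
        }
        return !error.get();
    }
}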

View File

@@ -4,9 +4,9 @@ import com.google.gson.Gson;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import nu.marginalia.actor.monitor.AbstractProcessSpawnerActor;
import nu.marginalia.process.ProcessService;
import nu.marginalia.mq.persistence.MqPersistence;
import nu.marginalia.mqapi.ProcessInboxNames;
import nu.marginalia.process.ProcessSpawnerService;
import nu.marginalia.service.module.ServiceConfiguration;
@Singleton
@@ -17,13 +17,13 @@ public class ConverterMonitorActor extends AbstractProcessSpawnerActor {
public ConverterMonitorActor(Gson gson,
ServiceConfiguration configuration,
MqPersistence persistence,
ProcessService processService) {
ProcessSpawnerService processSpawnerService) {
super(gson,
configuration,
persistence,
processService,
processSpawnerService,
ProcessInboxNames.CONVERTER_INBOX,
ProcessService.ProcessId.CONVERTER);
ProcessSpawnerService.ProcessId.CONVERTER);
}

View File

@@ -4,9 +4,9 @@ import com.google.gson.Gson;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import nu.marginalia.actor.monitor.AbstractProcessSpawnerActor;
import nu.marginalia.process.ProcessService;
import nu.marginalia.mq.persistence.MqPersistence;
import nu.marginalia.mqapi.ProcessInboxNames;
import nu.marginalia.process.ProcessSpawnerService;
import nu.marginalia.service.module.ServiceConfiguration;
@Singleton
@@ -16,13 +16,13 @@ public class CrawlerMonitorActor extends AbstractProcessSpawnerActor {
public CrawlerMonitorActor(Gson gson,
ServiceConfiguration configuration,
MqPersistence persistence,
ProcessService processService) {
ProcessSpawnerService processSpawnerService) {
super(gson,
configuration,
persistence,
processService,
processSpawnerService,
ProcessInboxNames.CRAWLER_INBOX,
ProcessService.ProcessId.CRAWLER);
ProcessSpawnerService.ProcessId.CRAWLER);
}

View File

@@ -6,7 +6,7 @@ import com.google.inject.Singleton;
import nu.marginalia.actor.monitor.AbstractProcessSpawnerActor;
import nu.marginalia.mq.persistence.MqPersistence;
import nu.marginalia.mqapi.ProcessInboxNames;
import nu.marginalia.process.ProcessService;
import nu.marginalia.process.ProcessSpawnerService;
import nu.marginalia.service.module.ServiceConfiguration;
@Singleton
@@ -16,13 +16,13 @@ public class ExportTaskMonitorActor extends AbstractProcessSpawnerActor {
public ExportTaskMonitorActor(Gson gson,
ServiceConfiguration configuration,
MqPersistence persistence,
ProcessService processService) {
ProcessSpawnerService processSpawnerService) {
super(gson,
configuration,
persistence,
processService,
processSpawnerService,
ProcessInboxNames.EXPORT_TASK_INBOX,
ProcessService.ProcessId.EXPORT_TASKS);
ProcessSpawnerService.ProcessId.EXPORT_TASKS);
}

View File

@@ -4,9 +4,9 @@ import com.google.gson.Gson;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import nu.marginalia.actor.monitor.AbstractProcessSpawnerActor;
import nu.marginalia.process.ProcessService;
import nu.marginalia.mq.persistence.MqPersistence;
import nu.marginalia.mqapi.ProcessInboxNames;
import nu.marginalia.process.ProcessSpawnerService;
import nu.marginalia.service.module.ServiceConfiguration;
@Singleton
@@ -17,13 +17,13 @@ public class IndexConstructorMonitorActor extends AbstractProcessSpawnerActor {
public IndexConstructorMonitorActor(Gson gson,
ServiceConfiguration configuration,
MqPersistence persistence,
ProcessService processService) {
ProcessSpawnerService processSpawnerService) {
super(gson,
configuration,
persistence,
processService,
processSpawnerService,
ProcessInboxNames.INDEX_CONSTRUCTOR_INBOX,
ProcessService.ProcessId.INDEX_CONSTRUCTOR);
ProcessSpawnerService.ProcessId.INDEX_CONSTRUCTOR);
}

View File

@@ -6,7 +6,7 @@ import com.google.inject.Singleton;
import nu.marginalia.actor.monitor.AbstractProcessSpawnerActor;
import nu.marginalia.mq.persistence.MqPersistence;
import nu.marginalia.mqapi.ProcessInboxNames;
import nu.marginalia.process.ProcessService;
import nu.marginalia.process.ProcessSpawnerService;
import nu.marginalia.service.module.ServiceConfiguration;
@Singleton
@@ -16,13 +16,13 @@ public class LiveCrawlerMonitorActor extends AbstractProcessSpawnerActor {
public LiveCrawlerMonitorActor(Gson gson,
ServiceConfiguration configuration,
MqPersistence persistence,
ProcessService processService) {
ProcessSpawnerService processSpawnerService) {
super(gson,
configuration,
persistence,
processService,
processSpawnerService,
ProcessInboxNames.LIVE_CRAWLER_INBOX,
ProcessService.ProcessId.LIVE_CRAWLER);
ProcessSpawnerService.ProcessId.LIVE_CRAWLER);
}

View File

@@ -4,9 +4,9 @@ import com.google.gson.Gson;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import nu.marginalia.actor.monitor.AbstractProcessSpawnerActor;
import nu.marginalia.process.ProcessService;
import nu.marginalia.mq.persistence.MqPersistence;
import nu.marginalia.mqapi.ProcessInboxNames;
import nu.marginalia.process.ProcessSpawnerService;
import nu.marginalia.service.module.ServiceConfiguration;
@Singleton
@@ -17,13 +17,13 @@ public class LoaderMonitorActor extends AbstractProcessSpawnerActor {
public LoaderMonitorActor(Gson gson,
ServiceConfiguration configuration,
MqPersistence persistence,
ProcessService processService) {
ProcessSpawnerService processSpawnerService) {
super(gson,
configuration,
persistence, processService,
persistence, processSpawnerService,
ProcessInboxNames.LOADER_INBOX,
ProcessService.ProcessId.LOADER);
ProcessSpawnerService.ProcessId.LOADER);
}
}

View File

@@ -0,0 +1,29 @@
package nu.marginalia.actor.proc;
import com.google.gson.Gson;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import nu.marginalia.actor.monitor.AbstractProcessSpawnerActor;
import nu.marginalia.mq.persistence.MqPersistence;
import nu.marginalia.mqapi.ProcessInboxNames;
import nu.marginalia.process.ProcessSpawnerService;
import nu.marginalia.service.module.ServiceConfiguration;
@Singleton
public class NdpMonitorActor extends AbstractProcessSpawnerActor {
@Inject
public NdpMonitorActor(Gson gson,
ServiceConfiguration configuration,
MqPersistence persistence,
ProcessSpawnerService processSpawnerService) {
super(gson,
configuration,
persistence,
processSpawnerService,
ProcessInboxNames.NDP_INBOX,
ProcessSpawnerService.ProcessId.NDP);
}
}

View File

@@ -13,7 +13,7 @@ import nu.marginalia.mq.persistence.MqMessageHandlerRegistry;
import nu.marginalia.mq.persistence.MqPersistence;
import nu.marginalia.mqapi.ProcessInboxNames;
import nu.marginalia.mqapi.ping.PingRequest;
import nu.marginalia.process.ProcessService;
import nu.marginalia.process.ProcessSpawnerService;
import nu.marginalia.service.module.ServiceConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -25,17 +25,21 @@ import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
// Unlike the other monitor actors, the ping monitor does not merely wait for a request
// to be sent, but sends one itself; hence we can't extend AbstractProcessSpawnerActor
// and have to reimplement much of the same logic ourselves.
@Singleton
public class PingMonitorActor extends RecordActorPrototype {
private final MqPersistence persistence;
private final ProcessService processService;
private final ProcessSpawnerService processSpawnerService;
private final Logger logger = LoggerFactory.getLogger(getClass());
public static final int MAX_ATTEMPTS = 3;
private final String inboxName;
private final ProcessService.ProcessId processId;
private final ProcessSpawnerService.ProcessId processId;
private final ExecutorService executorService = Executors.newSingleThreadExecutor();
private final int node;
private final Gson gson;
@@ -53,7 +57,6 @@ public class PingMonitorActor extends RecordActorPrototype {
return switch (self) {
case Initial i -> {
PingRequest request = new PingRequest();
persistence.sendNewMessage(inboxName, null, null,
"PingRequest",
gson.toJson(request),
@@ -65,7 +68,7 @@ public class PingMonitorActor extends RecordActorPrototype {
for (;;) {
var messages = persistence.eavesdrop(inboxName, 1);
if (messages.isEmpty() && !processService.isRunning(processId)) {
if (messages.isEmpty() && !processSpawnerService.isRunning(processId)) {
synchronized (processId) {
processId.wait(5000);
}
@@ -107,7 +110,7 @@ public class PingMonitorActor extends RecordActorPrototype {
catch (InterruptedException ex) {
// We get this exception when the process is cancelled by the user
processService.kill(processId);
processSpawnerService.kill(processId);
setCurrentMessageToDead();
yield new Aborted();
@@ -127,14 +130,14 @@ public class PingMonitorActor extends RecordActorPrototype {
public PingMonitorActor(Gson gson,
ServiceConfiguration configuration,
MqPersistence persistence,
ProcessService processService) throws SQLException {
ProcessSpawnerService processSpawnerService) throws SQLException {
super(gson);
this.gson = gson;
this.node = configuration.node();
this.persistence = persistence;
this.processService = processService;
this.processSpawnerService = processSpawnerService;
this.inboxName = ProcessInboxNames.PING_INBOX + ":" + node;
this.processId = ProcessService.ProcessId.PING;
this.processId = ProcessSpawnerService.ProcessId.PING;
}
/** Sets the message to dead in the database to avoid
@@ -163,7 +166,7 @@ public class PingMonitorActor extends RecordActorPrototype {
// Run this call in a separate thread so that this thread can be interrupted waiting for it
executorService.submit(() -> {
try {
processService.trigger(processId);
processSpawnerService.trigger(processId);
} catch (Exception e) {
logger.warn("Error in triggering process", e);
error.set(true);

View File

@@ -8,7 +8,7 @@ import nu.marginalia.actor.prototype.RecordActorPrototype;
import nu.marginalia.actor.state.ActorResumeBehavior;
import nu.marginalia.actor.state.ActorStep;
import nu.marginalia.actor.state.Resume;
import nu.marginalia.process.ProcessService;
import nu.marginalia.process.ProcessSpawnerService;
import nu.marginalia.service.control.ServiceEventLog;
import nu.marginalia.service.module.ServiceConfiguration;
@@ -21,7 +21,7 @@ import java.util.concurrent.TimeUnit;
public class ProcessLivenessMonitorActor extends RecordActorPrototype {
private final ServiceEventLog eventLogService;
private final ProcessService processService;
private final ProcessSpawnerService processSpawnerService;
private final HikariDataSource dataSource;
private final int node;
@@ -49,7 +49,7 @@ public class ProcessLivenessMonitorActor extends RecordActorPrototype {
var processId = heartbeat.getProcessId();
if (null == processId) continue;
if (processService.isRunning(processId) && heartbeat.lastSeenMillis() < 10_000)
if (processSpawnerService.isRunning(processId) && heartbeat.lastSeenMillis() < 10_000)
continue;
flagProcessAsStopped(heartbeat);
@@ -72,12 +72,12 @@ public class ProcessLivenessMonitorActor extends RecordActorPrototype {
public ProcessLivenessMonitorActor(Gson gson,
ServiceEventLog eventLogService,
ServiceConfiguration configuration,
ProcessService processService,
ProcessSpawnerService processSpawnerService,
HikariDataSource dataSource) {
super(gson);
this.node = configuration.node();
this.eventLogService = eventLogService;
this.processService = processService;
this.processSpawnerService = processSpawnerService;
this.dataSource = dataSource;
}
@@ -208,8 +208,8 @@ public class ProcessLivenessMonitorActor extends RecordActorPrototype {
public boolean isRunning() {
return "RUNNING".equals(status);
}
public ProcessService.ProcessId getProcessId() {
return ProcessService.translateExternalIdBase(processBase);
public ProcessSpawnerService.ProcessId getProcessId() {
return ProcessSpawnerService.translateExternalIdBase(processBase);
}
}

View File

@@ -47,6 +47,8 @@ public class ScrapeFeedsActor extends RecordActorPrototype {
private final Path feedPath = WmsaHome.getHomePath().resolve("data/scrape-urls.txt");
private static boolean insertFoundDomains = Boolean.getBoolean("loader.insertFoundDomains");
public record Initial() implements ActorStep {}
@Resume(behavior = ActorResumeBehavior.RETRY)
public record Wait(String ts) implements ActorStep {}
@@ -57,6 +59,8 @@ public class ScrapeFeedsActor extends RecordActorPrototype {
public ActorStep transition(ActorStep self) throws Exception {
return switch(self) {
case Initial() -> {
if (!insertFoundDomains) yield new Error("Domain insertion prohibited, aborting");
if (nodeConfigurationService.get(nodeId).profile() != NodeProfile.REALTIME) {
yield new Error("Invalid node profile for RSS update");
}
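
Since insertFoundDomains is read via Boolean.getBoolean, the guard above is presumably enabled by launching the service with -Dloader.insertFoundDomains=true; without that flag the actor now aborts at the Initial step.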

View File

@@ -3,11 +3,11 @@ package nu.marginalia.actor.task;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import nu.marginalia.actor.state.ActorControlFlowException;
import nu.marginalia.mq.MqMessageState;
import nu.marginalia.mq.persistence.MqPersistence;
import nu.marginalia.process.ProcessService;
import nu.marginalia.mq.MqMessage;
import nu.marginalia.mq.MqMessageState;
import nu.marginalia.mq.outbox.MqOutbox;
import nu.marginalia.mq.persistence.MqPersistence;
import nu.marginalia.process.ProcessSpawnerService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -20,13 +20,13 @@ public class ActorProcessWatcher {
private static final Logger logger = LoggerFactory.getLogger(ActorProcessWatcher.class);
private final MqPersistence persistence;
private final ProcessService processService;
private final ProcessSpawnerService processSpawnerService;
@Inject
public ActorProcessWatcher(MqPersistence persistence,
ProcessService processService) {
ProcessSpawnerService processSpawnerService) {
this.persistence = persistence;
this.processService = processService;
this.processSpawnerService = processSpawnerService;
}
/** Wait for a process to start, and then wait for a response from the process,
@@ -36,7 +36,7 @@ public class ActorProcessWatcher {
* <p>
* When interrupted, the process is killed and the message is marked as dead.
*/
public MqMessage waitResponse(MqOutbox outbox, ProcessService.ProcessId processId, long msgId)
public MqMessage waitResponse(MqOutbox outbox, ProcessSpawnerService.ProcessId processId, long msgId)
throws ActorControlFlowException, InterruptedException, SQLException
{
// enums values only have a single instance,
@@ -65,7 +65,7 @@ public class ActorProcessWatcher {
// This will prevent the monitor process from attempting to respawn the process as we kill it
outbox.flagAsDead(msgId);
processService.kill(processId);
processSpawnerService.kill(processId);
logger.info("Process {} killed due to interrupt", processId);
}
@@ -94,12 +94,12 @@ public class ActorProcessWatcher {
}
/** Wait the specified time for the specified process to start running (does not start the process) */
private boolean waitForProcess(ProcessService.ProcessId processId, TimeUnit unit, int duration) throws InterruptedException {
private boolean waitForProcess(ProcessSpawnerService.ProcessId processId, TimeUnit unit, int duration) throws InterruptedException {
// Wait for process to start
long deadline = System.currentTimeMillis() + unit.toMillis(duration);
while (System.currentTimeMillis() < deadline) {
if (processService.isRunning(processId))
if (processSpawnerService.isRunning(processId))
return true;
TimeUnit.MILLISECONDS.sleep(100);

View File

@@ -12,7 +12,7 @@ import nu.marginalia.mq.MqMessageState;
import nu.marginalia.mq.outbox.MqOutbox;
import nu.marginalia.mqapi.converting.ConvertRequest;
import nu.marginalia.process.ProcessOutboxes;
import nu.marginalia.process.ProcessService;
import nu.marginalia.process.ProcessSpawnerService;
import nu.marginalia.sideload.RedditSideloadHelper;
import nu.marginalia.sideload.SideloadHelper;
import nu.marginalia.sideload.StackExchangeSideloadHelper;
@@ -218,7 +218,7 @@ public class ConvertActor extends RecordActorPrototype {
);
}
case ConvertWait(FileStorageId destFid, long msgId) -> {
var rsp = processWatcher.waitResponse(mqConverterOutbox, ProcessService.ProcessId.CONVERTER, msgId);
var rsp = processWatcher.waitResponse(mqConverterOutbox, ProcessSpawnerService.ProcessId.CONVERTER, msgId);
if (rsp.state() != MqMessageState.OK) {
yield new Error("Converter failed");

View File

@@ -18,7 +18,7 @@ import nu.marginalia.mqapi.index.IndexName;
import nu.marginalia.mqapi.loading.LoadRequest;
import nu.marginalia.nodecfg.NodeConfigurationService;
import nu.marginalia.process.ProcessOutboxes;
import nu.marginalia.process.ProcessService;
import nu.marginalia.process.ProcessSpawnerService;
import nu.marginalia.service.module.ServiceConfiguration;
import nu.marginalia.storage.FileStorageService;
import nu.marginalia.storage.model.FileStorageId;
@@ -95,7 +95,7 @@ public class ConvertAndLoadActor extends RecordActorPrototype {
case Convert(FileStorageId crawlId, FileStorageId processedId, long msgId) when msgId < 0 ->
new Convert(crawlId, processedId, mqConverterOutbox.sendAsync(ConvertRequest.forCrawlData(crawlId, processedId)));
case Convert(FileStorageId crawlId, FileStorageId processedId, long msgId) -> {
var rsp = processWatcher.waitResponse(mqConverterOutbox, ProcessService.ProcessId.CONVERTER, msgId);
var rsp = processWatcher.waitResponse(mqConverterOutbox, ProcessSpawnerService.ProcessId.CONVERTER, msgId);
if (rsp.state() != MqMessageState.OK)
yield new Error("Converter failed");
@@ -129,7 +129,7 @@ public class ConvertAndLoadActor extends RecordActorPrototype {
yield new Load(processedIds, id);
}
case Load(List<FileStorageId> processedIds, long msgId) -> {
var rsp = processWatcher.waitResponse(mqLoaderOutbox, ProcessService.ProcessId.LOADER, msgId);
var rsp = processWatcher.waitResponse(mqLoaderOutbox, ProcessSpawnerService.ProcessId.LOADER, msgId);
if (rsp.state() != MqMessageState.OK) {
yield new Error("Loader failed");
@@ -165,7 +165,7 @@ public class ConvertAndLoadActor extends RecordActorPrototype {
}
case ReindexFwd(long id) when id < 0 -> new ReindexFwd(createIndex(IndexName.FORWARD));
case ReindexFwd(long id) -> {
var rsp = processWatcher.waitResponse(mqIndexConstructorOutbox, ProcessService.ProcessId.INDEX_CONSTRUCTOR, id);
var rsp = processWatcher.waitResponse(mqIndexConstructorOutbox, ProcessSpawnerService.ProcessId.INDEX_CONSTRUCTOR, id);
if (rsp.state() != MqMessageState.OK)
yield new Error("Forward index construction failed");
@@ -174,7 +174,7 @@ public class ConvertAndLoadActor extends RecordActorPrototype {
}
case ReindexFull(long id) when id < 0 -> new ReindexFull(createIndex(IndexName.REVERSE_FULL));
case ReindexFull(long id) -> {
var rsp = processWatcher.waitResponse(mqIndexConstructorOutbox, ProcessService.ProcessId.INDEX_CONSTRUCTOR, id);
var rsp = processWatcher.waitResponse(mqIndexConstructorOutbox, ProcessSpawnerService.ProcessId.INDEX_CONSTRUCTOR, id);
if (rsp.state() != MqMessageState.OK)
yield new Error("Full index construction failed");
@@ -183,7 +183,7 @@ public class ConvertAndLoadActor extends RecordActorPrototype {
}
case ReindexPrio(long id) when id < 0 -> new ReindexPrio(createIndex(IndexName.REVERSE_PRIO));
case ReindexPrio(long id) -> {
var rsp = processWatcher.waitResponse(mqIndexConstructorOutbox, ProcessService.ProcessId.INDEX_CONSTRUCTOR, id);
var rsp = processWatcher.waitResponse(mqIndexConstructorOutbox, ProcessSpawnerService.ProcessId.INDEX_CONSTRUCTOR, id);
if (rsp.state() != MqMessageState.OK)
yield new Error("Prio index construction failed");

View File

@@ -13,7 +13,7 @@ import nu.marginalia.mq.MqMessageState;
import nu.marginalia.mq.outbox.MqOutbox;
import nu.marginalia.mqapi.crawling.CrawlRequest;
import nu.marginalia.process.ProcessOutboxes;
import nu.marginalia.process.ProcessService;
import nu.marginalia.process.ProcessSpawnerService;
import nu.marginalia.storage.FileStorageService;
import nu.marginalia.storage.model.FileStorageId;
import nu.marginalia.storage.model.FileStorageType;
@@ -76,7 +76,7 @@ public class CrawlActor extends RecordActorPrototype {
case Crawl (long msgId, FileStorageId fid, boolean cascadeLoad) -> {
var rsp = processWatcher.waitResponse(
mqCrawlerOutbox,
ProcessService.ProcessId.CRAWLER,
ProcessSpawnerService.ProcessId.CRAWLER,
msgId);
if (rsp.state() != MqMessageState.OK) {

View File

@@ -10,7 +10,7 @@ import nu.marginalia.mq.outbox.MqOutbox;
import nu.marginalia.mq.persistence.MqPersistence;
import nu.marginalia.mqapi.tasks.ExportTaskRequest;
import nu.marginalia.process.ProcessOutboxes;
import nu.marginalia.process.ProcessService;
import nu.marginalia.process.ProcessSpawnerService;
import nu.marginalia.storage.FileStorageService;
import nu.marginalia.storage.model.FileStorageId;
import nu.marginalia.storage.model.FileStorageState;
@@ -55,7 +55,7 @@ public class ExportAtagsActor extends RecordActorPrototype {
yield new Run(responseMsgId, crawlId, destId, newMsgId);
}
case Run(long responseMsgId, FileStorageId crawlId, FileStorageId destId, long msgId) -> {
var rsp = processWatcher.waitResponse(exportTasksOutbox, ProcessService.ProcessId.EXPORT_TASKS, msgId);
var rsp = processWatcher.waitResponse(exportTasksOutbox, ProcessSpawnerService.ProcessId.EXPORT_TASKS, msgId);
if (rsp.state() != MqMessageState.OK) {
storageService.flagFileForDeletion(destId);

View File

@@ -10,7 +10,7 @@ import nu.marginalia.mq.outbox.MqOutbox;
import nu.marginalia.mq.persistence.MqPersistence;
import nu.marginalia.mqapi.tasks.ExportTaskRequest;
import nu.marginalia.process.ProcessOutboxes;
import nu.marginalia.process.ProcessService;
import nu.marginalia.process.ProcessSpawnerService;
import nu.marginalia.storage.FileStorageService;
import nu.marginalia.storage.model.FileStorageId;
import nu.marginalia.storage.model.FileStorageState;
@@ -54,7 +54,7 @@ public class ExportFeedsActor extends RecordActorPrototype {
yield new Run(responseMsgId, crawlId, destId, newMsgId);
}
case Run(long responseMsgId, _, FileStorageId destId, long msgId) -> {
var rsp = processWatcher.waitResponse(exportTasksOutbox, ProcessService.ProcessId.EXPORT_TASKS, msgId);
var rsp = processWatcher.waitResponse(exportTasksOutbox, ProcessSpawnerService.ProcessId.EXPORT_TASKS, msgId);
if (rsp.state() != MqMessageState.OK) {
storageService.flagFileForDeletion(destId);

View File

@@ -9,7 +9,7 @@ import nu.marginalia.mq.MqMessageState;
import nu.marginalia.mq.outbox.MqOutbox;
import nu.marginalia.mqapi.tasks.ExportTaskRequest;
import nu.marginalia.process.ProcessOutboxes;
import nu.marginalia.process.ProcessService;
import nu.marginalia.process.ProcessSpawnerService;
import nu.marginalia.storage.FileStorageService;
import nu.marginalia.storage.model.FileStorageId;
import nu.marginalia.storage.model.FileStorageState;
@@ -52,7 +52,7 @@ public class ExportSampleDataActor extends RecordActorPrototype {
yield new Run(crawlId, destId, size, ctFilter, name, newMsgId);
}
case Run(_, FileStorageId destId, _, _, _, long msgId) -> {
var rsp = processWatcher.waitResponse(exportTasksOutbox, ProcessService.ProcessId.EXPORT_TASKS, msgId);
var rsp = processWatcher.waitResponse(exportTasksOutbox, ProcessSpawnerService.ProcessId.EXPORT_TASKS, msgId);
if (rsp.state() != MqMessageState.OK) {
storageService.flagFileForDeletion(destId);

View File

@@ -10,7 +10,7 @@ import nu.marginalia.mq.outbox.MqOutbox;
import nu.marginalia.mq.persistence.MqPersistence;
import nu.marginalia.mqapi.tasks.ExportTaskRequest;
import nu.marginalia.process.ProcessOutboxes;
import nu.marginalia.process.ProcessService;
import nu.marginalia.process.ProcessSpawnerService;
import nu.marginalia.storage.FileStorageService;
import nu.marginalia.storage.model.FileStorageId;
import nu.marginalia.storage.model.FileStorageState;
@@ -52,7 +52,7 @@ public class ExportTermFreqActor extends RecordActorPrototype {
yield new Run(responseMsgId, crawlId, destId, newMsgId);
}
case Run(long responseMsgId, _, FileStorageId destId, long msgId) -> {
var rsp = processWatcher.waitResponse(exportTasksOutbox, ProcessService.ProcessId.EXPORT_TASKS, msgId);
var rsp = processWatcher.waitResponse(exportTasksOutbox, ProcessSpawnerService.ProcessId.EXPORT_TASKS, msgId);
if (rsp.state() != MqMessageState.OK) {
storageService.flagFileForDeletion(destId);

View File

@@ -13,7 +13,7 @@ import nu.marginalia.mq.MqMessageState;
import nu.marginalia.mq.outbox.MqOutbox;
import nu.marginalia.mqapi.crawling.LiveCrawlRequest;
import nu.marginalia.process.ProcessOutboxes;
import nu.marginalia.process.ProcessService;
import nu.marginalia.process.ProcessSpawnerService;
import nu.marginalia.storage.FileStorageService;
import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
@@ -44,7 +44,6 @@ public class LiveCrawlActor extends RecordActorPrototype {
@Override
public ActorStep transition(ActorStep self) throws Exception {
logger.info("{}", self);
return switch (self) {
case Initial() -> {
yield new Monitor("-");
@@ -75,7 +74,7 @@ public class LiveCrawlActor extends RecordActorPrototype {
yield new LiveCrawl(feedsHash, id);
}
case LiveCrawl(String feedsHash, long msgId) -> {
var rsp = processWatcher.waitResponse(mqLiveCrawlerOutbox, ProcessService.ProcessId.LIVE_CRAWLER, msgId);
var rsp = processWatcher.waitResponse(mqLiveCrawlerOutbox, ProcessSpawnerService.ProcessId.LIVE_CRAWLER, msgId);
if (rsp.state() != MqMessageState.OK) {
yield new Error("Crawler failed");

View File

@@ -11,7 +11,7 @@ import nu.marginalia.mq.MqMessageState;
import nu.marginalia.mq.outbox.MqOutbox;
import nu.marginalia.mqapi.crawling.CrawlRequest;
import nu.marginalia.process.ProcessOutboxes;
import nu.marginalia.process.ProcessService;
import nu.marginalia.process.ProcessSpawnerService;
import nu.marginalia.storage.FileStorageService;
import nu.marginalia.storage.model.FileStorageId;
import nu.marginalia.storage.model.FileStorageType;
@@ -51,7 +51,7 @@ public class RecrawlSingleDomainActor extends RecordActorPrototype {
case Crawl (long msgId) -> {
var rsp = processWatcher.waitResponse(
mqCrawlerOutbox,
ProcessService.ProcessId.CRAWLER,
ProcessSpawnerService.ProcessId.CRAWLER,
msgId);
if (rsp.state() != MqMessageState.OK) {

View File

@@ -9,7 +9,7 @@ import nu.marginalia.mq.MqMessageState;
import nu.marginalia.mq.outbox.MqOutbox;
import nu.marginalia.mqapi.tasks.ExportTaskRequest;
import nu.marginalia.process.ProcessOutboxes;
import nu.marginalia.process.ProcessService;
import nu.marginalia.process.ProcessSpawnerService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -34,7 +34,7 @@ public class TriggerAdjacencyCalculationActor extends RecordActorPrototype {
yield new Run(newMsgId);
}
case Run(long msgId) -> {
var rsp = processWatcher.waitResponse(exportTasksOutbox, ProcessService.ProcessId.EXPORT_TASKS, msgId);
var rsp = processWatcher.waitResponse(exportTasksOutbox, ProcessSpawnerService.ProcessId.EXPORT_TASKS, msgId);
if (rsp.state() != MqMessageState.OK) {
yield new Error("Exporter failed");

View File

@@ -5,6 +5,8 @@ import com.google.inject.Inject;
import com.google.inject.Singleton;
import nu.marginalia.actor.prototype.RecordActorPrototype;
import nu.marginalia.actor.state.ActorStep;
import nu.marginalia.mq.MqMessageState;
import nu.marginalia.mq.persistence.MqPersistence;
import nu.marginalia.nsfw.NsfwDomainFilter;
import nu.marginalia.service.module.ServiceConfiguration;
@@ -12,23 +14,26 @@ import nu.marginalia.service.module.ServiceConfiguration;
public class UpdateNsfwFiltersActor extends RecordActorPrototype {
private final ServiceConfiguration serviceConfiguration;
private final NsfwDomainFilter nsfwDomainFilter;
private final MqPersistence persistence;
public record Initial() implements ActorStep {}
public record Run() implements ActorStep {}
public record Initial(long respondMsgId) implements ActorStep {}
public record Run(long respondMsgId) implements ActorStep {}
@Override
public ActorStep transition(ActorStep self) throws Exception {
return switch(self) {
case Initial() -> {
case Initial(long respondMsgId) -> {
if (serviceConfiguration.node() != 1) {
persistence.updateMessageState(respondMsgId, MqMessageState.ERR);
yield new Error("This actor can only run on node 1");
}
else {
yield new Run();
yield new Run(respondMsgId);
}
}
case Run() -> {
case Run(long respondMsgId) -> {
nsfwDomainFilter.fetchLists();
persistence.updateMessageState(respondMsgId, MqMessageState.OK);
yield new End();
}
default -> new Error();
@@ -43,11 +48,13 @@ public class UpdateNsfwFiltersActor extends RecordActorPrototype {
@Inject
public UpdateNsfwFiltersActor(Gson gson,
ServiceConfiguration serviceConfiguration,
NsfwDomainFilter nsfwDomainFilter)
NsfwDomainFilter nsfwDomainFilter,
MqPersistence persistence)
{
super(gson);
this.serviceConfiguration = serviceConfiguration;
this.nsfwDomainFilter = nsfwDomainFilter;
this.persistence = persistence;
}
}

View File

@@ -1,6 +1,7 @@
package nu.marginalia.execution;
import com.google.inject.Inject;
import io.grpc.Status;
import io.grpc.stub.StreamObserver;
import nu.marginalia.actor.ExecutorActor;
import nu.marginalia.actor.ExecutorActorControlService;
@@ -36,7 +37,7 @@ public class ExecutorCrawlGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -52,7 +53,7 @@ public class ExecutorCrawlGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -66,7 +67,7 @@ public class ExecutorCrawlGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -80,7 +81,7 @@ public class ExecutorCrawlGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -98,7 +99,7 @@ public class ExecutorCrawlGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
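
The switch from onError(e) to an explicit Status.INTERNAL is not cosmetic: grpc-java maps any non-Status throwable passed to onError to a bare UNKNOWN on the wire, whereas an explicit status code survives to the caller (the attached cause itself still stays server-side). A client-side sketch of what this enables; callExecutor and the Runnable wrapper are illustrative, not part of the changeset:

import io.grpc.Status;
import io.grpc.StatusRuntimeException;

class ExecutorCallHelper {
    /** grpcCall wraps any blocking-stub invocation against these services. */
    static void callExecutor(Runnable grpcCall) {
        try {
            grpcCall.run();
        }
        catch (StatusRuntimeException sre) {
            // Because the server now wraps failures in Status.INTERNAL, the code
            // is meaningful here; a raw onError(e) would have arrived as UNKNOWN.
            if (sre.getStatus().getCode() == Status.Code.INTERNAL) {
                System.err.println("Executor failed: " + sre.getStatus());
            } else {
                throw sre;
            }
        }
    }
}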

View File

@@ -2,6 +2,7 @@ package nu.marginalia.execution;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import io.grpc.Status;
import io.grpc.stub.StreamObserver;
import nu.marginalia.actor.ExecutorActor;
import nu.marginalia.actor.ExecutorActorControlService;
@@ -38,7 +39,7 @@ public class ExecutorExportGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -57,7 +58,7 @@ public class ExecutorExportGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -73,7 +74,7 @@ public class ExecutorExportGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -87,7 +88,7 @@ public class ExecutorExportGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -99,7 +100,7 @@ public class ExecutorExportGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -114,14 +115,14 @@ public class ExecutorExportGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@Override
public void exportAllAtags(Empty request, StreamObserver<Empty> responseObserver) {
if (serviceConfiguration.node() != 1) {
responseObserver.onError(new IllegalArgumentException("Export all atags is only available on node 1"));
responseObserver.onError(Status.UNAVAILABLE.withDescription("Export all atags is only available on node 1").asRuntimeException());
return; // bail out; falling through to startFrom() after onError() would fail the stream twice
}
try {
actorControlService.startFrom(ExecutorActor.PREC_EXPORT_ALL,
@@ -131,7 +132,7 @@ public class ExecutorExportGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -145,7 +146,7 @@ public class ExecutorExportGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -159,7 +160,7 @@ public class ExecutorExportGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
}

View File

@@ -1,6 +1,7 @@
package nu.marginalia.execution;
import com.google.inject.Inject;
import io.grpc.Status;
import io.grpc.stub.StreamObserver;
import nu.marginalia.WmsaHome;
import nu.marginalia.actor.ActorApi;
@@ -10,6 +11,7 @@ import nu.marginalia.actor.state.ActorStateInstance;
import nu.marginalia.actor.task.DownloadSampleActor;
import nu.marginalia.actor.task.RestoreBackupActor;
import nu.marginalia.actor.task.TriggerAdjacencyCalculationActor;
import nu.marginalia.actor.task.UpdateNsfwFiltersActor;
import nu.marginalia.functions.execution.api.*;
import nu.marginalia.service.module.ServiceConfiguration;
import nu.marginalia.service.server.DiscoverableService;
@@ -57,7 +59,7 @@ public class ExecutorGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -69,7 +71,7 @@ public class ExecutorGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -81,7 +83,7 @@ public class ExecutorGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -95,7 +97,7 @@ public class ExecutorGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -111,7 +113,7 @@ public class ExecutorGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -127,7 +129,7 @@ public class ExecutorGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -202,7 +204,7 @@ public class ExecutorGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -228,7 +230,7 @@ public class ExecutorGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -263,4 +265,19 @@ public class ExecutorGrpcService
System.exit(0);
}
@Override
public void updateNsfwFilters(RpcUpdateNsfwFilters request, StreamObserver<Empty> responseObserver) {
logger.info("Got request {}", request);
try {
actorControlService.startFrom(ExecutorActor.UPDATE_NSFW_LISTS,
new UpdateNsfwFiltersActor.Initial(request.getMsgId()));
responseObserver.onNext(Empty.getDefaultInstance());
responseObserver.onCompleted();
}
catch (Exception e) {
logger.error("Failed to update nsfw filters", e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
}

View File

@@ -1,6 +1,7 @@
package nu.marginalia.execution;
import com.google.inject.Inject;
import io.grpc.Status;
import io.grpc.stub.StreamObserver;
import nu.marginalia.actor.ExecutorActor;
import nu.marginalia.actor.ExecutorActorControlService;
@@ -33,7 +34,7 @@ public class ExecutorSideloadGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -48,7 +49,7 @@ public class ExecutorSideloadGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -63,7 +64,7 @@ public class ExecutorSideloadGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -78,7 +79,7 @@ public class ExecutorSideloadGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -93,7 +94,7 @@ public class ExecutorSideloadGrpcService
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}

View File

@@ -8,6 +8,7 @@ import nu.marginalia.crawl.CrawlerMain;
import nu.marginalia.index.IndexConstructorMain;
import nu.marginalia.livecrawler.LiveCrawlerMain;
import nu.marginalia.loading.LoaderMain;
import nu.marginalia.ndp.NdpMain;
import nu.marginalia.ping.PingMain;
import nu.marginalia.service.control.ServiceEventLog;
import nu.marginalia.service.server.BaseServiceParams;
@@ -28,7 +29,7 @@ import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
@Singleton
public class ProcessService {
public class ProcessSpawnerService {
private final Logger logger = LoggerFactory.getLogger(getClass());
private final Marker processMarker = MarkerFactory.getMarker("PROCESS");
@@ -57,6 +58,7 @@ public class ProcessService {
CONVERTER(ConverterMain.class),
LOADER(LoaderMain.class),
INDEX_CONSTRUCTOR(IndexConstructorMain.class),
NDP(NdpMain.class),
EXPORT_TASKS(ExportTasksMain.class),
;
@@ -72,6 +74,7 @@ public class ProcessService {
case CONVERTER -> "CONVERTER_PROCESS_OPTS";
case LOADER -> "LOADER_PROCESS_OPTS";
case PING -> "PING_PROCESS_OPTS";
case NDP -> "NDP_PROCESS_OPTS";
case INDEX_CONSTRUCTOR -> "INDEX_CONSTRUCTION_PROCESS_OPTS";
case EXPORT_TASKS -> "EXPORT_TASKS_PROCESS_OPTS";
};
@@ -85,7 +88,7 @@ public class ProcessService {
}
@Inject
public ProcessService(BaseServiceParams params) {
public ProcessSpawnerService(BaseServiceParams params) {
this.eventLog = params.eventLog;
this.node = params.configuration.node();
}

View File

@@ -1,4 +1,4 @@
package nu.marginalia.executor;
package nu.marginalia.svc;
import com.google.inject.Inject;
import nu.marginalia.storage.FileStorageService;

View File

@@ -1,5 +1,5 @@
The execution subsystem is responsible for the execution of long running tasks on each
index node. It lives in the [executor-service](../services-core/executor-service) module.
index node. It lives in the [index-service](../services-core/index-service) module.
It accomplishes this using the [message queue and actor library](../libraries/message-queue/),
which permits program state to survive crashes and reboots.
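
As a rough illustration of what that buys, a minimal, hypothetical actor in the style of those in this changeset is sketched below; enqueueWork() and waitForResponse() are stand-ins for real message-queue calls, and whatever extra boilerplate RecordActorPrototype requires (such as a description method) is elided:

import com.google.gson.Gson;
import nu.marginalia.actor.prototype.RecordActorPrototype;
import nu.marginalia.actor.state.*;

public class ExampleActor extends RecordActorPrototype {
    public record Initial() implements ActorStep {}
    @Resume(behavior = ActorResumeBehavior.RETRY)
    public record Work(long msgId) implements ActorStep {}

    public ExampleActor(Gson gson) { super(gson); }

    @Override
    public ActorStep transition(ActorStep self) throws Exception {
        return switch (self) {
            // Each step is a record the framework persists, so a crash during
            // Work resumes at Work instead of re-running Initial.
            case Initial() -> new Work(enqueueWork());
            case Work(long msgId) -> {
                waitForResponse(msgId);
                yield new End();
            }
            default -> new Error();
        };
    }

    private long enqueueWork() { return 1L; }          // stand-in for an MQ send
    private void waitForResponse(long msgId) {}        // stand-in for processWatcher.waitResponse()
}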

View File

@@ -1,4 +1,4 @@
package nu.marginalia.executor;
package nu.marginalia.svc;
import nu.marginalia.storage.FileStorageService;
import nu.marginalia.storage.model.FileStorage;

View File

@@ -2,6 +2,8 @@ package nu.marginalia.api.domains;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import nu.marginalia.api.domains.model.DomainInformation;
import nu.marginalia.api.domains.model.SimilarDomain;
import nu.marginalia.service.client.GrpcChannelPoolFactory;
import nu.marginalia.service.client.GrpcSingleNodeChannelPool;
import nu.marginalia.service.discovery.property.ServiceKey;
@@ -10,16 +12,19 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.concurrent.*;
import nu.marginalia.api.domains.model.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
@Singleton
public class DomainInfoClient {
private static final Logger logger = LoggerFactory.getLogger(DomainInfoClient.class);
private final GrpcSingleNodeChannelPool<DomainInfoAPIGrpc.DomainInfoAPIBlockingStub> channelPool;
private final ExecutorService executor = Executors.newWorkStealingPool(8);
private static final boolean useLoom = Boolean.getBoolean("system.experimentalUseLoom");
private static final ExecutorService executor = useLoom ? Executors.newVirtualThreadPerTaskExecutor() : Executors.newWorkStealingPool(8);
@Inject
public DomainInfoClient(GrpcChannelPoolFactory factory) {

View File

@@ -0,0 +1,114 @@
package nu.marginalia.api.domsample;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import nu.marginalia.service.client.GrpcChannelPoolFactory;
import nu.marginalia.service.client.GrpcSingleNodeChannelPool;
import nu.marginalia.service.discovery.property.ServiceKey;
import nu.marginalia.service.discovery.property.ServicePartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
@Singleton
public class DomSampleClient {
private final GrpcSingleNodeChannelPool<DomSampleApiGrpc.DomSampleApiBlockingStub> channelPool;
private static final Logger logger = LoggerFactory.getLogger(DomSampleClient.class);
@Inject
public DomSampleClient(GrpcChannelPoolFactory factory) {
// The client is only interested in the primary node
var key = ServiceKey.forGrpcApi(DomSampleApiGrpc.class, ServicePartition.any());
this.channelPool = factory.createSingle(key, DomSampleApiGrpc::newBlockingStub);
}
public Optional<RpcDomainSample> getSample(String domainName) {
try {
var val = channelPool.call(DomSampleApiGrpc.DomSampleApiBlockingStub::getSample)
.run(RpcDomainName.newBuilder().setDomainName(domainName).build());
return Optional.of(val);
}
catch (StatusRuntimeException sre) {
if (sre.getStatus().getCode() != Status.Code.NOT_FOUND) { // compare codes; Status instances are never identity-equal over the wire
logger.error("Failed to fetch DOM sample", sre);
}
return Optional.empty();
}
}
public Optional<RpcDomainSampleRequests> getSampleRequests(String domainName) {
try {
var val = channelPool.call(DomSampleApiGrpc.DomSampleApiBlockingStub::getSampleRequests)
.run(RpcDomainName.newBuilder().setDomainName(domainName).build());
return Optional.of(val);
}
catch (StatusRuntimeException sre) {
if (sre.getStatus().getCode() != Status.Code.NOT_FOUND) { // compare codes; Status instances are never identity-equal over the wire
logger.error("Failed to fetch DOM sample", sre);
}
return Optional.empty();
}
}
public boolean hasSample(String domainName) {
try {
return channelPool.call(DomSampleApiGrpc.DomSampleApiBlockingStub::hasSample)
.run(RpcDomainName.newBuilder().setDomainName(domainName).build())
.getAnswer();
}
catch (StatusRuntimeException sre) {
return false;
}
}
public CompletableFuture<Boolean> hasSample(String domainName, ExecutorService executor) {
try {
return channelPool.call(DomSampleApiGrpc.DomSampleApiBlockingStub::hasSample)
.async(executor)
.run(RpcDomainName.newBuilder().setDomainName(domainName).build())
.thenApply(RpcBooleanRsp::getAnswer);
}
catch (StatusRuntimeException sre) {
return CompletableFuture.completedFuture(false);
}
}
public CompletableFuture<RpcDomainSample> getSampleAsync(String domainName, ExecutorService executorService) {
return channelPool.call(DomSampleApiGrpc.DomSampleApiBlockingStub::getSample)
.async(executorService)
.run(RpcDomainName.newBuilder().setDomainName(domainName).build());
}
public List<RpcDomainSample> getAllSamples(String domainName) {
try {
Iterator<RpcDomainSample> val = channelPool.call(DomSampleApiGrpc.DomSampleApiBlockingStub::getAllSamples)
.run(RpcDomainName.newBuilder().setDomainName(domainName).build());
List<RpcDomainSample> ret = new ArrayList<>();
val.forEachRemaining(ret::add);
return ret;
}
catch (StatusRuntimeException sre) {
logger.error("Failed to fetch DOM sample");
return List.of();
}
}
public boolean waitReady(Duration duration) throws InterruptedException {
return channelPool.awaitChannel(duration);
}
}
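
A usage sketch for the new client, assuming it is obtained via dependency injection like the other API clients in this changeset:

import java.time.Duration;

static void printSample(DomSampleClient client) throws InterruptedException {
    if (client.waitReady(Duration.ofSeconds(30))) {
        client.getSample("www.marginalia.nu").ifPresent(sample ->
                System.out.println(sample.getUrl() + ": "
                        + sample.getOutgoingRequestsCount() + " outgoing requests"));
    }
}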

View File

@@ -11,6 +11,7 @@ import nu.marginalia.service.discovery.property.ServicePartition;
import nu.marginalia.service.module.ServiceConfiguration;
import javax.annotation.CheckReturnValue;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
@@ -23,7 +24,9 @@ import java.util.function.BiConsumer;
@Singleton
public class FeedsClient {
private final ExecutorService executorService = Executors.newCachedThreadPool();
private static final boolean useLoom = Boolean.getBoolean("system.experimentalUseLoom");
private static final ExecutorService executorService = useLoom ? Executors.newVirtualThreadPerTaskExecutor() : Executors.newCachedThreadPool();
private final GrpcSingleNodeChannelPool<FeedApiGrpc.FeedApiBlockingStub> channelPool;
private final MqOutbox updateFeedsOutbox;
@@ -59,6 +62,11 @@ public class FeedsClient {
.forEachRemaining(rsp -> consumer.accept(rsp.getDomain(), new ArrayList<>(rsp.getUrlList())));
}
public boolean waitReady(Duration duration) throws InterruptedException {
return channelPool.awaitChannel(duration);
}
/** Get the hash of the feed data, for identifying when the data has been updated */
public String getFeedDataHash() {
return channelPool.call(FeedApiGrpc.FeedApiBlockingStub::getFeedDataHash)
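
DomainInfoClient above and FeedsClient here gate on the same toggle; since Boolean.getBoolean reads JVM system properties, switching both executors to virtual threads should be a matter of launching with -Dsystem.experimentalUseLoom=true.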

View File

@@ -0,0 +1,47 @@
syntax="proto3";
package nu.marginalia.api.domsample;
option java_package="nu.marginalia.api.domsample";
option java_multiple_files=true;
service DomSampleApi {
rpc getSample(RpcDomainName) returns (RpcDomainSample) {}
rpc getSampleRequests(RpcDomainName) returns (RpcDomainSampleRequests) {}
rpc hasSample(RpcDomainName) returns (RpcBooleanRsp) {}
rpc getAllSamples(RpcDomainName) returns (stream RpcDomainSample) {}
}
message RpcDomainName {
string domainName = 1;
}
message RpcBooleanRsp {
bool answer = 1;
}
message RpcDomainSampleRequests {
string domainName = 1;
string url = 2;
repeated RpcOutgoingRequest outgoingRequests = 5;
}
message RpcDomainSample {
string domainName = 1;
string url = 2;
bytes htmlSampleZstd = 3;
bool accepted_popover = 4;
repeated RpcOutgoingRequest outgoingRequests = 5;
}
message RpcOutgoingRequest {
RequestMethod method = 1;
int64 timestamp = 2;
string url = 3;
enum RequestMethod {
GET = 0;
POST = 1;
OTHER = 2;
};
}
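
Of these, getAllSamples is the only server-streaming RPC; on the blocking stub it surfaces as an Iterator over RpcDomainSample messages, which is exactly what DomSampleClient.getAllSamples above drains into a list.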

View File

@@ -31,10 +31,12 @@ dependencies {
implementation libs.jsoup
implementation libs.opencsv
implementation libs.slop
implementation libs.zstd
implementation libs.sqlite
implementation libs.bundles.slf4j
implementation libs.commons.lang3
implementation libs.commons.io
implementation libs.httpclient
implementation libs.wiremock
implementation libs.prometheus

View File

@@ -0,0 +1,176 @@
package nu.marginalia.domsample;
import com.github.luben.zstd.Zstd;
import com.google.inject.Inject;
import com.google.protobuf.ByteString;
import io.grpc.Status;
import io.grpc.stub.StreamObserver;
import nu.marginalia.api.domsample.*;
import nu.marginalia.domsample.db.DomSampleDb;
import nu.marginalia.service.server.DiscoverableService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.charset.StandardCharsets;
import java.util.List;
public class DomSampleGrpcService
extends DomSampleApiGrpc.DomSampleApiImplBase
implements DiscoverableService
{
private static final Logger logger = LoggerFactory.getLogger(DomSampleGrpcService.class);
private final DomSampleDb domSampleDb;
@Inject
public DomSampleGrpcService(DomSampleDb domSampleDb) {
this.domSampleDb = domSampleDb;
}
@Override
public void getSample(RpcDomainName request, StreamObserver<RpcDomainSample> responseObserver) {
String domainName = request.getDomainName();
if (domainName.isBlank()) {
responseObserver.onError(Status.INVALID_ARGUMENT
.withDescription("Invalid domain name")
.asRuntimeException());
return;
}
try {
List<DomSampleDb.Sample> dbRecords = domSampleDb.getSamples(domainName);
if (dbRecords.isEmpty()) {
responseObserver.onError(Status.NOT_FOUND.withDescription("No sample found").asRuntimeException());
return;
}
// Grab the first sample
RpcDomainSample.Builder response = convertFullSample(dbRecords.getFirst());
responseObserver.onNext(response.build());
responseObserver.onCompleted();
}
catch (Exception e) {
logger.error("Error in getSample()", e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@Override
public void getSampleRequests(RpcDomainName request, StreamObserver<RpcDomainSampleRequests> responseObserver) {
String domainName = request.getDomainName();
if (domainName.isBlank()) {
responseObserver.onError(Status.INVALID_ARGUMENT
.withDescription("Invalid domain name")
.asRuntimeException());
return;
}
try {
List<DomSampleDb.Sample> dbRecords = domSampleDb.getSamples(domainName);
if (dbRecords.isEmpty()) {
responseObserver.onError(Status.NOT_FOUND.withDescription("No sample found").asRuntimeException());
return;
}
// Grab the first sample
RpcDomainSampleRequests.Builder response = convertRequestData(dbRecords.getFirst());
responseObserver.onNext(response.build());
responseObserver.onCompleted();
}
catch (Exception e) {
logger.error("Error in getSample()", e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@Override
public void hasSample(RpcDomainName request, StreamObserver<RpcBooleanRsp> responseObserver) {
String domainName = request.getDomainName();
if (domainName.isBlank()) {
responseObserver.onError(Status.INVALID_ARGUMENT
.withDescription("Invalid domain name")
.asRuntimeException());
return;
}
try {
responseObserver.onNext(RpcBooleanRsp.newBuilder()
.setAnswer(domSampleDb.hasSample(domainName)).build());
responseObserver.onCompleted();
}
catch (Exception e) {
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@Override
public void getAllSamples(RpcDomainName request, StreamObserver<RpcDomainSample> responseObserver) {
String domainName = request.getDomainName();
if (domainName.isBlank()) {
responseObserver.onError(Status.INVALID_ARGUMENT
.withDescription("Invalid domain name")
.asRuntimeException());
return;
}
try {
List<DomSampleDb.Sample> dbRecords = domSampleDb.getSamples(domainName);
for (var record : dbRecords) {
responseObserver.onNext(convertFullSample(record).build());
}
responseObserver.onCompleted();
}
catch (Exception e) {
logger.error("Error in getSample()", e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
private RpcDomainSample.Builder convertFullSample(DomSampleDb.Sample dbSample) {
ByteString htmlZstd = ByteString.copyFrom(Zstd.compress(dbSample.sample().getBytes(StandardCharsets.UTF_8)));
var sampleBuilder = RpcDomainSample.newBuilder()
.setDomainName(dbSample.domain())
.setAcceptedPopover(dbSample.acceptedPopover())
.setHtmlSampleZstd(htmlZstd);
for (var req : dbSample.parseRequests()) {
sampleBuilder.addOutgoingRequestsBuilder()
.setUrl(req.uri().toString())
.setMethod(switch (req.method().toUpperCase())
{
case "GET" -> RpcOutgoingRequest.RequestMethod.GET;
case "POST" -> RpcOutgoingRequest.RequestMethod.POST;
default -> RpcOutgoingRequest.RequestMethod.OTHER;
})
.setTimestamp(req.timestamp());
}
return sampleBuilder;
}
private RpcDomainSampleRequests.Builder convertRequestData(DomSampleDb.Sample dbSample) {
var sampleBuilder = RpcDomainSampleRequests.newBuilder()
.setDomainName(dbSample.domain());
for (var req : dbSample.parseRequests()) {
sampleBuilder.addOutgoingRequestsBuilder()
.setUrl(req.uri().toString())
.setMethod(switch (req.method().toUpperCase())
{
case "GET" -> RpcOutgoingRequest.RequestMethod.GET;
case "POST" -> RpcOutgoingRequest.RequestMethod.POST;
default -> RpcOutgoingRequest.RequestMethod.OTHER;
})
.setTimestamp(req.timestamp());
}
return sampleBuilder;
}
}
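
Since htmlSampleZstd is produced with Zstd.compress() in convertFullSample() above, a consumer has to reverse that; a helper-method sketch using the streaming API from the same zstd-jni library:

import com.github.luben.zstd.ZstdInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

static String decodeHtml(RpcDomainSample sample) throws IOException {
    // htmlSampleZstd holds a single Zstd frame of the UTF-8 HTML sample
    try (var in = new ZstdInputStream(new ByteArrayInputStream(sample.getHtmlSampleZstd().toByteArray()))) {
        return new String(in.readAllBytes(), StandardCharsets.UTF_8);
    }
}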

View File

@@ -1,17 +1,28 @@
package nu.marginalia.domsample.db;
import nu.marginalia.WmsaHome;
import nu.marginalia.model.EdgeUrl;
import org.apache.commons.lang3.StringUtils;
import org.jsoup.Jsoup;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.file.Path;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.*;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.Predicate;
public class DomSampleDb implements AutoCloseable {
private static final String dbFileName = "dom-sample.db";
private final Connection connection;
private static final Logger logger = LoggerFactory.getLogger(DomSampleDb.class);
public DomSampleDb() throws SQLException{
this(WmsaHome.getDataPath().resolve(dbFileName));
@@ -88,14 +99,78 @@ public class DomSampleDb implements AutoCloseable {
}
public record Sample(String url, String domain, String sample, String requests, boolean acceptedPopover) {}
public record Sample(String url, String domain, String sample, String requests, boolean acceptedPopover) {
public List<SampleRequest> parseRequests() {
List<SampleRequest> requests = new ArrayList<>();
// Request format is METHOD\tTIMESTAMP\tURI\n
for (var line : StringUtils.split(this.requests, '\n')) {
String[] parts = StringUtils.split(line, "\t", 3);
if (parts.length != 3) continue;
try {
String method = parts[0];
long ts = Long.parseLong(parts[1]);
String linkUrl = parts[2];
URI uri = parseURI(linkUrl);
requests.add(new SampleRequest(method, ts, uri));
}
catch (Exception e) {
logger.warn("Failed to parse requests", e);
}
}
return requests;
}
private static URI parseURI(String uri) throws URISyntaxException {
try {
return new URI(uri);
}
catch (URISyntaxException ex) {
return new EdgeUrl(uri).asURI();
}
}
}
public record SampleRequest(String method, long timestamp, URI uri) {}
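// To illustrate the journal format parsed above, a made-up line
//   GET\t1723400000000\thttps://example.com/style.css
// decodes to SampleRequest(method = "GET", timestamp = 1723400000000L,
// uri = https://example.com/style.css); lines that do not split into exactly
// three tab-separated fields are skipped, and parse failures are logged.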
/**
* @param consumer - consume the sample, return true to continue consumption
* @throws SQLException
*/
public void forEachSample(Predicate<Sample> consumer) throws SQLException {
try (var stmt = connection.prepareStatement("""
SELECT url, domain, sample, requests, accepted_popover
FROM samples
"""))
{
var rs = stmt.executeQuery();
while (rs.next()) {
var sample = new Sample(
rs.getString("url"),
rs.getString("domain"),
rs.getString("sample"),
rs.getString("requests"),
rs.getBoolean("accepted_popover")
);
if (!consumer.test(sample)) break;
}
}
}
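// A sketch of the predicate doubling as a continuation signal; illustrative
// only, where db is an open DomSampleDb:
//
//   AtomicInteger n = new AtomicInteger();
//   db.forEachSample(sample -> {
//       if (sample.acceptedPopover()) n.incrementAndGet();
//       return n.get() < 1000; // returning false ends the scan early
//   });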
public List<Sample> getSamples(String domain) throws SQLException {
List<Sample> samples = new ArrayList<>();
try (var stmt = connection.prepareStatement("""
SELECT url, sample, requests, accepted_popover
FROM samples
WHERE domain = ?
"""))
{
@@ -116,6 +191,21 @@ public class DomSampleDb implements AutoCloseable {
return samples;
}
public boolean hasSample(String domain) throws SQLException {
try (var stmt = connection.prepareStatement("""
SELECT 1
FROM samples
WHERE domain = ?
"""))
{
stmt.setString(1, domain);
var rs = stmt.executeQuery();
return rs.next();
}
}
public void saveSample(String domain, String url, String rawContent) throws SQLException {
var doc = Jsoup.parse(rawContent);

View File

@@ -20,19 +20,36 @@ import nu.marginalia.storage.FileStorageService;
import nu.marginalia.storage.model.FileStorage;
import nu.marginalia.storage.model.FileStorageType;
import nu.marginalia.util.SimpleBlockingThreadPool;
import org.apache.hc.client5.http.ConnectionKeepAliveStrategy;
import org.apache.hc.client5.http.classic.HttpClient;
import org.apache.hc.client5.http.config.ConnectionConfig;
import org.apache.hc.client5.http.config.RequestConfig;
import org.apache.hc.client5.http.cookie.StandardCookieSpec;
import org.apache.hc.client5.http.impl.classic.HttpClients;
import org.apache.hc.client5.http.impl.io.PoolingHttpClientConnectionManagerBuilder;
import org.apache.hc.core5.http.Header;
import org.apache.hc.core5.http.HeaderElement;
import org.apache.hc.core5.http.HeaderElements;
import org.apache.hc.core5.http.HttpResponse;
import org.apache.hc.core5.http.io.SocketConfig;
import org.apache.hc.core5.http.io.entity.EntityUtils;
import org.apache.hc.core5.http.io.support.ClassicRequestBuilder;
import org.apache.hc.core5.http.message.MessageSupport;
import org.apache.hc.core5.http.protocol.HttpContext;
import org.apache.hc.core5.util.TimeValue;
import org.apache.hc.core5.util.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nullable;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.sql.SQLException;
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.*;
import java.util.concurrent.ExecutorService;
@@ -55,6 +72,8 @@ public class FeedFetcherService {
private final DomainCoordinator domainCoordinator;
private final HttpClient httpClient;
private volatile boolean updating;
@Inject
@@ -71,6 +90,83 @@ public class FeedFetcherService {
this.serviceHeartbeat = serviceHeartbeat;
this.executorClient = executorClient;
this.domainCoordinator = domainCoordinator;
final ConnectionConfig connectionConfig = ConnectionConfig.custom()
.setSocketTimeout(15, TimeUnit.SECONDS)
.setConnectTimeout(15, TimeUnit.SECONDS)
.setValidateAfterInactivity(TimeValue.ofSeconds(5))
.build();
var connectionManager = PoolingHttpClientConnectionManagerBuilder.create()
.setMaxConnPerRoute(2)
.setMaxConnTotal(50)
.setDefaultConnectionConfig(connectionConfig)
.build();
connectionManager.setDefaultSocketConfig(SocketConfig.custom()
.setSoLinger(TimeValue.ofSeconds(-1))
.setSoTimeout(Timeout.ofSeconds(10))
.build()
);
Thread.ofPlatform().daemon(true).start(() -> {
try {
for (;;) {
TimeUnit.SECONDS.sleep(15);
logger.info("Connection pool stats: {}", connectionManager.getTotalStats());
}
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
});
final RequestConfig defaultRequestConfig = RequestConfig.custom()
.setCookieSpec(StandardCookieSpec.IGNORE)
.setResponseTimeout(10, TimeUnit.SECONDS)
.setConnectionRequestTimeout(5, TimeUnit.MINUTES)
.build();
httpClient = HttpClients.custom()
.setDefaultRequestConfig(defaultRequestConfig)
.setConnectionManager(connectionManager)
.setUserAgent(WmsaHome.getUserAgent().uaIdentifier())
.setKeepAliveStrategy(new ConnectionKeepAliveStrategy() {
// Default keep-alive duration is 3 minutes, but this is too long for us,
// as we are either going to re-use it fairly quickly or close it for a long time.
//
// So we set it to 30 seconds or clamp the server-provided value to a minimum of 10 seconds.
private static final TimeValue defaultValue = TimeValue.ofSeconds(30);
@Override
public TimeValue getKeepAliveDuration(HttpResponse response, HttpContext context) {
final Iterator<HeaderElement> it = MessageSupport.iterate(response, HeaderElements.KEEP_ALIVE);
while (it.hasNext()) {
final HeaderElement he = it.next();
final String param = he.getName();
final String value = he.getValue();
if (value == null)
continue;
if (!"timeout".equalsIgnoreCase(param))
continue;
try {
long timeout = Long.parseLong(value);
timeout = Math.clamp(timeout, 10, defaultValue.toSeconds());
return TimeValue.ofSeconds(timeout);
} catch (final NumberFormatException ignore) {
break;
}
}
return defaultValue;
}
})
.build();
}
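// Illustrative mapping of server-provided Keep-Alive hints under the strategy above:
//   "Keep-Alive: timeout=120" -> 30 s (capped at the 30 s default)
//   "Keep-Alive: timeout=5"   -> 10 s (raised to the 10 s floor)
//   no timeout parameter      -> 30 s (defaultValue)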
public enum UpdateMode {
@@ -86,13 +182,7 @@ public class FeedFetcherService {
try (FeedDbWriter writer = feedDb.createWriter();
ExecutorService fetchExecutor = Executors.newVirtualThreadPerTaskExecutor();
FeedJournal feedJournal = FeedJournal.create();
var heartbeat = serviceHeartbeat.createServiceAdHocTaskHeartbeat("Update Rss Feeds")
) {
@@ -137,7 +227,8 @@ public class FeedFetcherService {
FetchResult feedData;
try (DomainLock domainLock = domainCoordinator.lockDomain(new EdgeDomain(feed.domain()))) {
feedData = fetchFeedData(feed, fetchExecutor, ifModifiedSinceDate, ifNoneMatchTag);
TimeUnit.SECONDS.sleep(1); // Sleep before we yield the lock to avoid hammering the server from multiple processes
} catch (Exception ex) {
feedData = new FetchResult.TransientError();
}
@@ -216,7 +307,6 @@ public class FeedFetcherService {
}
private FetchResult fetchFeedData(FeedDefinition feed,
ExecutorService executorService,
@Nullable String ifModifiedSinceDate,
@Nullable String ifNoneMatchTag)
@@ -224,59 +314,63 @@ public class FeedFetcherService {
try {
URI uri = new URI(feed.feedUrl());
var requestBuilder = ClassicRequestBuilder.get(uri)
.setHeader("User-Agent", WmsaHome.getUserAgent().uaIdentifier())
.setHeader("Accept-Encoding", "gzip")
.setHeader("Accept", "text/*, */*;q=0.9");
// Set the If-Modified-Since or If-None-Match headers if we have them
// though since there are certain idiosyncrasies in server implementations,
// we avoid setting both at the same time as that may turn a 304 into a 200.
if (ifNoneMatchTag != null) {
requestBuilder.header("If-None-Match", ifNoneMatchTag);
requestBuilder.addHeader("If-None-Match", ifNoneMatchTag);
} else if (ifModifiedSinceDate != null) {
requestBuilder.header("If-Modified-Since", ifModifiedSinceDate);
requestBuilder.addHeader("If-Modified-Since", ifModifiedSinceDate);
}
return httpClient.execute(requestBuilder.build(), rsp -> {
try {
logger.info("Code: {}, URL: {}", rsp.getCode(), uri);
switch (rsp.getCode()) {
case 200 -> {
if (rsp.getEntity() == null) {
return new FetchResult.TransientError(); // No content to read, treat as transient error
}
byte[] responseData = EntityUtils.toByteArray(rsp.getEntity());
// Decode the response body based on the Content-Type header
Header contentTypeHeader = rsp.getFirstHeader("Content-Type");
if (contentTypeHeader == null) {
return new FetchResult.TransientError();
}
String contentType = contentTypeHeader.getValue();
String bodyText = DocumentBodyToString.getStringData(ContentType.parse(contentType), responseData);
// Grab the ETag header if it exists
Header etagHeader = rsp.getFirstHeader("ETag");
String newEtagValue = etagHeader == null ? null : etagHeader.getValue();
return new FetchResult.Success(bodyText, newEtagValue);
}
case 304 -> {
return new FetchResult.NotModified(); // via If-Modified-Since semantics
}
case 404 -> {
return new FetchResult.PermanentError(); // never try again
}
default -> {
return new FetchResult.TransientError(); // we try again later
}
}
}
catch (Exception ex) {
return new FetchResult.PermanentError(); // treat as permanent error
}
finally {
EntityUtils.consumeQuietly(rsp.getEntity());
}
});
}
catch (Exception ex) {
logger.debug("Error fetching feed", ex);
@@ -285,19 +379,6 @@ public class FeedFetcherService {
return new FetchResult.TransientError();
}
public sealed interface FetchResult {
record Success(String value, String etag) implements FetchResult {}
record NotModified() implements FetchResult {}

View File

@@ -1,6 +1,7 @@
package nu.marginalia.rss.svc;
import com.google.inject.Inject;
import io.grpc.Status;
import io.grpc.stub.StreamObserver;
import nu.marginalia.api.feeds.*;
import nu.marginalia.db.DbDomainQueries;
@@ -69,7 +70,7 @@ public class FeedsGrpcService extends FeedApiGrpc.FeedApiImplBase implements Dis
@Override
public void getFeedDataHash(Empty request, StreamObserver<RpcFeedDataHash> responseObserver) {
if (!feedDb.isEnabled()) {
responseObserver.onError(Status.INTERNAL.withDescription("Feed database is disabled on this node").asRuntimeException());
return;
}
@@ -80,7 +81,7 @@ public class FeedsGrpcService extends FeedApiGrpc.FeedApiImplBase implements Dis
}
catch (Exception e) {
logger.error("Error getting feed data hash", e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -101,7 +102,7 @@ public class FeedsGrpcService extends FeedApiGrpc.FeedApiImplBase implements Dis
}
catch (Exception e) {
logger.error("Error getting updated links", e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}
@@ -109,13 +110,13 @@ public class FeedsGrpcService extends FeedApiGrpc.FeedApiImplBase implements Dis
public void getFeed(RpcDomainId request,
StreamObserver<RpcFeed> responseObserver) {
if (!feedDb.isEnabled()) {
responseObserver.onError(Status.INTERNAL.withDescription("Feed database is disabled on this node").asRuntimeException());
return;
}
Optional<EdgeDomain> domainName = domainQueries.getDomain(request.getDomainId());
if (domainName.isEmpty()) {
responseObserver.onError(Status.NOT_FOUND.withDescription("Domain not found").asRuntimeException());
return;
}

View File

@@ -5,6 +5,8 @@ import com.google.inject.Guice;
import com.google.inject.name.Names;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import nu.marginalia.coordination.DomainCoordinator;
import nu.marginalia.coordination.LocalDomainCoordinator;
import nu.marginalia.model.EdgeDomain;
import nu.marginalia.rss.db.FeedDb;
import nu.marginalia.rss.model.FeedItems;
@@ -82,9 +84,10 @@ class FeedFetcherServiceTest extends AbstractModule {
}
public void configure() {
bind(DomainCoordinator.class).to(LocalDomainCoordinator.class);
bind(HikariDataSource.class).toInstance(dataSource);
bind(ServiceRegistryIf.class).toInstance(Mockito.mock(ServiceRegistryIf.class));
bind(ServiceConfiguration.class).toInstance(new ServiceConfiguration(ServiceId.Index, 1, "", "", 0, UUID.randomUUID()));
bind(Integer.class).annotatedWith(Names.named("wmsa-system-node")).toInstance(1);
}

View File

@@ -26,7 +26,9 @@ public class MathClient {
private static final Logger logger = LoggerFactory.getLogger(MathClient.class);
private final GrpcSingleNodeChannelPool<MathApiGrpc.MathApiBlockingStub> channelPool;
private static final boolean useLoom = Boolean.getBoolean("system.experimentalUseLoom");
private static final ExecutorService executor = useLoom ? Executors.newVirtualThreadPerTaskExecutor() : Executors.newWorkStealingPool(8);
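// The Loom path is opted into at launch time, e.g.:
//   java -Dsystem.experimentalUseLoom=true ...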
@Inject
public MathClient(GrpcChannelPoolFactory factory) {

View File

@@ -304,7 +304,6 @@ public class QueryProtobufCodec {
IndexProtobufCodec.convertRpcQuery(specs.getQuery()),
specs.getDomainsList(),
specs.getSearchSetIdentifier(),
IndexProtobufCodec.convertSpecLimit(specs.getQuality()),
IndexProtobufCodec.convertSpecLimit(specs.getYear()),
IndexProtobufCodec.convertSpecLimit(specs.getSize()),

View File

@@ -18,8 +18,6 @@ public class SearchSpecification {
public String searchSetIdentifier;
public SpecificationLimit quality;
public SpecificationLimit year;
public SpecificationLimit size;
@@ -35,7 +33,6 @@ public class SearchSpecification {
public SearchSpecification(SearchQuery query,
List<Integer> domains,
String searchSetIdentifier,
SpecificationLimit quality,
SpecificationLimit year,
SpecificationLimit size,
@@ -47,7 +44,6 @@ public class SearchSpecification {
this.query = query;
this.domains = domains;
this.searchSetIdentifier = searchSetIdentifier;
this.quality = quality;
this.year = year;
this.size = size;
@@ -73,10 +69,6 @@ public class SearchSpecification {
return this.searchSetIdentifier;
}
public SpecificationLimit getQuality() {
return this.quality;
}
@@ -106,14 +98,13 @@ public class SearchSpecification {
}
public String toString() {
return "SearchSpecification(query=" + this.getQuery() + ", domains=" + this.getDomains() + ", searchSetIdentifier=" + this.getSearchSetIdentifier() + ", humanQuery=" + this.getHumanQuery() + ", quality=" + this.getQuality() + ", year=" + this.getYear() + ", size=" + this.getSize() + ", rank=" + this.getRank() + ", queryLimits=" + this.getQueryLimits() + ", queryStrategy=" + this.getQueryStrategy() + ", rankingParams=" + this.getRankingParams() + ")";
return "SearchSpecification(query=" + this.getQuery() + ", domains=" + this.getDomains() + ", searchSetIdentifier=" + this.getSearchSetIdentifier() + ", quality=" + this.getQuality() + ", year=" + this.getYear() + ", size=" + this.getSize() + ", rank=" + this.getRank() + ", queryLimits=" + this.getQueryLimits() + ", queryStrategy=" + this.getQueryStrategy() + ", rankingParams=" + this.getRankingParams() + ")";
}
public static class SearchSpecificationBuilder {
private SearchQuery query;
private List<Integer> domains;
private String searchSetIdentifier;
private SpecificationLimit quality$value;
private boolean quality$set;
private SpecificationLimit year$value;
@@ -144,11 +135,6 @@ public class SearchSpecification {
return this;
}
public SearchSpecificationBuilder quality(SpecificationLimit quality) {
this.quality$value = quality;
this.quality$set = true;
@@ -205,11 +191,7 @@ public class SearchSpecification {
if (!this.rank$set) {
rank$value = SpecificationLimit.none();
}
return new SearchSpecification(this.query, this.domains, this.searchSetIdentifier, quality$value, year$value, size$value, rank$value, this.queryLimits, this.queryStrategy, this.rankingParams);
}
}
}

View File

@@ -1,56 +0,0 @@
package nu.marginalia.api.searchquery.model.results;
import nu.marginalia.api.searchquery.RpcResultRankingParameters;
import nu.marginalia.api.searchquery.model.compiled.CqDataInt;
import java.util.BitSet;
public class ResultRankingContext {
private final int docCount;
public final RpcResultRankingParameters params;
public final BitSet regularMask;
public final BitSet ngramsMask;
/** CqDataInt associated with frequency information of the terms in the query
* in the full index. The dataset is indexed by the compiled query. */
public final CqDataInt fullCounts;
/** CqDataInt associated with frequency information of the terms in the query
* in the full index. The dataset is indexed by the compiled query. */
public final CqDataInt priorityCounts;
public ResultRankingContext(int docCount,
RpcResultRankingParameters params,
BitSet ngramsMask,
BitSet regularMask,
CqDataInt fullCounts,
CqDataInt prioCounts)
{
this.docCount = docCount;
this.params = params;
this.ngramsMask = ngramsMask;
this.regularMask = regularMask;
this.fullCounts = fullCounts;
this.priorityCounts = prioCounts;
}
public int termFreqDocCount() {
return docCount;
}
@Override
public String toString() {
return "ResultRankingContext{" +
"docCount=" + docCount +
", params=" + params +
", regularMask=" + regularMask +
", ngramsMask=" + ngramsMask +
", fullCounts=" + fullCounts +
", priorityCounts=" + priorityCounts +
'}';
}
}

View File

@@ -34,8 +34,6 @@ public class QueryFactory {
this.queryExpansion = queryExpansion;
}
public ProcessedQuery createQuery(QueryParams params,
@Nullable RpcResultRankingParameters rankingParams) {
final var query = params.humanQuery();
@@ -153,7 +151,6 @@ public class QueryFactory {
var specsBuilder = SearchSpecification.builder()
.query(queryBuilder.build())
.quality(qualityLimit)
.year(year)
.size(size)

View File

@@ -3,6 +3,7 @@ package nu.marginalia.functions.searchquery;
import com.google.common.collect.Lists;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import io.grpc.Status;
import io.grpc.stub.StreamObserver;
import io.prometheus.client.Histogram;
import nu.marginalia.api.searchquery.*;
@@ -93,7 +94,7 @@ public class QueryGRPCService
});
} catch (Exception e) {
logger.error("Exception", e);
responseObserver.onError(Status.INTERNAL.withCause(e).asRuntimeException());
}
}

View File

@@ -241,7 +241,6 @@ public class QueryFactoryTest {
Assertions.assertTrue(subquery.query.compiledQuery.contains(" bob "));
Assertions.assertFalse(subquery.query.compiledQuery.contains(" bob's "));
Assertions.assertEquals("\"bob's cars\"", subquery.humanQuery);
}
@Test

View File

@@ -38,7 +38,9 @@ public class IndexClient {
.help("Count of results filtered by NSFW tier")
.register();
private static final boolean useLoom = Boolean.getBoolean("system.experimentalUseLoom");
private static final ExecutorService executor = useLoom ? Executors.newVirtualThreadPerTaskExecutor() : Executors.newCachedThreadPool();
@Inject
public IndexClient(GrpcChannelPoolFactory channelPoolFactory,

View File

@@ -14,6 +14,7 @@ apply from: "$rootProject.projectDir/srcsets.gradle"
dependencies {
implementation project(':code:libraries:array')
implementation project(':code:libraries:native')
implementation project(':code:libraries:btree')
implementation project(':code:libraries:coded-sequence')
implementation project(':code:libraries:language-processing')

View File

@@ -1,9 +1,11 @@
package nu.marginalia.index.forward;
import it.unimi.dsi.fastutil.longs.Long2IntOpenHashMap;
import nu.marginalia.array.LongArray;
import nu.marginalia.array.LongArrayFactory;
import nu.marginalia.ffi.LinuxSystemCalls;
import nu.marginalia.index.forward.spans.DocumentSpans;
import nu.marginalia.index.forward.spans.IndexSpansReader;
import nu.marginalia.model.id.UrlIdCodec;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -22,16 +24,15 @@ import static nu.marginalia.index.forward.ForwardIndexParameters.*;
* and a mapping between document identifiers to the index into the
* data array.
* <p/>
* Since the total data is relatively small, this is kept in memory to
* reduce the amount of disk thrashing.
* <p/>
* The metadata is a binary encoding of {@see nu.marginalia.idx.DocumentMetadata}
*/
public class ForwardIndexReader {
private final LongArray ids;
private final LongArray data;
private volatile Long2IntOpenHashMap idsMap;
private final IndexSpansReader spansReader;
private final Logger logger = LoggerFactory.getLogger(getClass());
@@ -64,7 +65,22 @@ public class ForwardIndexReader {
ids = loadIds(idsFile);
data = loadData(dataFile);
LinuxSystemCalls.madviseRandom(data.getMemorySegment());
LinuxSystemCalls.madviseRandom(ids.getMemorySegment());
spansReader = IndexSpansReader.open(spansFile);
Thread.ofPlatform().start(this::createIdsMap);
}
private void createIdsMap() {
Long2IntOpenHashMap idsMap = new Long2IntOpenHashMap((int) ids.size());
for (int i = 0; i < ids.size(); i++) {
idsMap.put(ids.get(i), i);
}
this.idsMap = idsMap;
logger.info("Forward index loaded into RAM");
}
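// Note: idsMap is handed off with a single volatile write once fully built;
// until then, idxForDoc() below falls back to binary search over the
// memory-mapped ids array.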
private static LongArray loadIds(Path idsFile) throws IOException {
@@ -106,7 +122,11 @@ public class ForwardIndexReader {
private int idxForDoc(long docId) {
assert UrlIdCodec.getRank(docId) == 0 : "Forward Index Reader fed dirty reverse index id";
if (idsMap != null) {
return idsMap.getOrDefault(docId, -1);
}
long offset = ids.binarySearch2(docId, 0, ids.size());
if (offset >= ids.size() || offset < 0 || ids.get(offset) != docId) {
if (getClass().desiredAssertionStatus()) {
@@ -118,22 +138,27 @@ public class ForwardIndexReader {
return (int) offset;
}
public DocumentSpans[] getDocumentSpans(Arena arena, long[] docIds) {
long[] offsets = new long[docIds.length];
for (int i = 0; i < docIds.length; i++) {
long offset = idxForDoc(docIds[i]);
if (offset >= 0) {
offsets[i] = data.get(ENTRY_SIZE * offset + SPANS_OFFSET);
}
else {
offsets[i] = -1;
}
}
try {
return spansReader.readSpans(arena, offsets);
}
catch (IOException ex) {
logger.error("Failed to read spans for doc " + docId, ex);
return new DocumentSpans();
logger.error("Failed to read spans for docIds", ex);
return new DocumentSpans[docIds.length];
}
}
public int totalDocCount() {
return (int) ids.size();
}
@@ -141,6 +166,8 @@ public class ForwardIndexReader {
public void close() {
if (data != null)
data.close();
if (ids != null)
ids.close();
}
public boolean isLoaded() {

View File

@@ -5,7 +5,7 @@ import nu.marginalia.array.LongArray;
import nu.marginalia.array.LongArrayFactory;
import nu.marginalia.index.domainrankings.DomainRankings;
import nu.marginalia.index.forward.ForwardIndexParameters;
import nu.marginalia.index.forward.spans.IndexSpansWriter;
import nu.marginalia.index.journal.IndexJournal;
import nu.marginalia.model.id.UrlIdCodec;
import nu.marginalia.model.idx.DocumentMetadata;
@@ -65,7 +65,7 @@ public class ForwardIndexConverter {
logger.info("Domain Rankings size = {}", domainRankings.size());
try (var progress = heartbeat.createProcessTaskHeartbeat(TaskSteps.class, "forwardIndexConverter");
var spansWriter = new IndexSpansWriter(outputFileSpansData)
) {
progress.progress(TaskSteps.GET_DOC_IDS);

View File

@@ -11,6 +11,9 @@ public class DocumentSpan {
/** A list of the interlaced start and end positions of each span in the document of this type */
private final IntList startsEnds;
public DocumentSpan(IntList startsEnds) {
this.startsEnds = startsEnds;
}
public DocumentSpan(CodedSequence startsEnds) {
this.startsEnds = startsEnds.values();
}
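// Illustrative: startsEnds = [1, 3, 5, 8] encodes two spans as interlaced
// start/end pairs, the first covering positions 1..3, the second 5..8.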

View File

@@ -1,5 +1,6 @@
package nu.marginalia.index.forward.spans;
import it.unimi.dsi.fastutil.ints.IntList;
import nu.marginalia.language.sentence.tag.HtmlTag;
import nu.marginalia.sequence.CodedSequence;
@@ -39,6 +40,23 @@ public class DocumentSpans {
return EMPTY_SPAN;
}
void accept(byte code, IntList positions) {
if (code == HtmlTag.HEADING.code)
this.heading = new DocumentSpan(positions);
else if (code == HtmlTag.TITLE.code)
this.title = new DocumentSpan(positions);
else if (code == HtmlTag.NAV.code)
this.nav = new DocumentSpan(positions);
else if (code == HtmlTag.CODE.code)
this.code = new DocumentSpan(positions);
else if (code == HtmlTag.ANCHOR.code)
this.anchor = new DocumentSpan(positions);
else if (code == HtmlTag.EXTERNAL_LINKTEXT.code)
this.externalLinkText = new DocumentSpan(positions);
else if (code == HtmlTag.BODY.code)
this.body = new DocumentSpan(positions);
}
void accept(byte code, CodedSequence positions) {
if (code == HtmlTag.HEADING.code)
this.heading = new DocumentSpan(positions);

View File

@@ -0,0 +1,25 @@
package nu.marginalia.index.forward.spans;
import java.io.IOException;
import java.lang.foreign.Arena;
import java.nio.file.Path;
public interface IndexSpansReader extends AutoCloseable {
DocumentSpans readSpans(Arena arena, long encodedOffset) throws IOException;
DocumentSpans[] readSpans(Arena arena, long[] encodedOffsets) throws IOException;
static IndexSpansReader open(Path fileName) throws IOException {
int version = SpansCodec.parseSpanFilesFooter(fileName);
if (version == SpansCodec.SpansCodecVersion.COMPRESSED.ordinal()) {
return new IndexSpansReaderCompressed(fileName);
}
else if (version == SpansCodec.SpansCodecVersion.PLAIN.ordinal()) {
return new IndexSpansReaderPlain(fileName);
}
else {
throw new IllegalArgumentException("Unsupported spans file version: " + version);
}
}
void close() throws IOException;
}
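A usage sketch for the version-dispatching factory above; spansFile, offsetA and offsetB are placeholders, and in practice the offsets come from the forward index:
try (IndexSpansReader reader = IndexSpansReader.open(spansFile);
     Arena arena = Arena.ofConfined()) {
    DocumentSpans[] spans = reader.readSpans(arena, new long[] { offsetA, offsetB });
}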

View File

@@ -10,11 +10,11 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
@SuppressWarnings("preview")
@Deprecated
public class IndexSpansReaderCompressed implements AutoCloseable, IndexSpansReader {
private final FileChannel spansFileChannel;
public IndexSpansReaderCompressed(Path spansFile) throws IOException {
this.spansFileChannel = (FileChannel) Files.newByteChannel(spansFile, StandardOpenOption.READ);
}
@@ -51,6 +51,17 @@ public class ForwardIndexSpansReader implements AutoCloseable {
return ret;
}
@Override
public DocumentSpans[] readSpans(Arena arena, long[] encodedOffsets) throws IOException {
DocumentSpans[] ret = new DocumentSpans[encodedOffsets.length];
for (int i = 0; i < encodedOffsets.length; i++) {
if (encodedOffsets[i] >= 0) {
ret[i] = readSpans(arena, encodedOffsets[i]);
}
}
return ret;
}
@Override
public void close() throws IOException {
spansFileChannel.close();

View File

@@ -0,0 +1,95 @@
package nu.marginalia.index.forward.spans;
import it.unimi.dsi.fastutil.ints.IntArrayList;
import nu.marginalia.uring.UringFileReader;
import java.io.IOException;
import java.lang.foreign.Arena;
import java.lang.foreign.MemorySegment;
import java.lang.foreign.ValueLayout;
import java.nio.file.Path;
import java.util.List;
public class IndexSpansReaderPlain implements IndexSpansReader {
private final UringFileReader uringReader;
public IndexSpansReaderPlain(Path spansFile) throws IOException {
uringReader = new UringFileReader(spansFile, true);
uringReader.fadviseWillneed();
}
@Override
public DocumentSpans readSpans(Arena arena, long encodedOffset) throws IOException {
// for testing, slow
return readSpans(arena, new long[] { encodedOffset})[0];
}
public DocumentSpans decode(MemorySegment ms) {
int count = ms.get(ValueLayout.JAVA_INT, 0);
int pos = 4;
DocumentSpans ret = new DocumentSpans();
// Decode each span
for (int spanIdx = 0; spanIdx < count; spanIdx++) {
byte code = ms.get(ValueLayout.JAVA_BYTE, pos);
short len = ms.get(ValueLayout.JAVA_SHORT, pos+2);
IntArrayList values = new IntArrayList(len);
pos += 4;
for (int i = 0; i < len; i++) {
values.add(ms.get(ValueLayout.JAVA_INT, pos + 4*i));
}
ret.accept(code, values);
pos += 4*len;
}
return ret;
}
@Override
public DocumentSpans[] readSpans(Arena arena, long[] encodedOffsets) {
int readCnt = 0;
for (long offset : encodedOffsets) {
if (offset < 0)
continue;
readCnt ++;
}
if (readCnt == 0) {
return new DocumentSpans[encodedOffsets.length];
}
long[] offsets = new long[readCnt];
int[] sizes = new int[readCnt];
for (int idx = 0, j = 0; idx < encodedOffsets.length; idx++) {
if (encodedOffsets[idx] < 0)
continue;
long offset = encodedOffsets[idx];
offsets[j] = SpansCodec.decodeStartOffset(offset);
sizes[j] = SpansCodec.decodeSize(offset);
j++;
}
List<MemorySegment> buffers = uringReader.readUnalignedInDirectMode(arena, offsets, sizes, 4096);
DocumentSpans[] ret = new DocumentSpans[encodedOffsets.length];
for (int idx = 0, j = 0; idx < encodedOffsets.length; idx++) {
if (encodedOffsets[idx] < 0)
continue;
ret[idx] = decode(buffers.get(j++));
}
return ret;
}
@Override
public void close() throws IOException {
uringReader.close();
}
}
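For reference, the record layout that decode() above walks, as implied by this reader and the matching IndexSpansWriter:
// int32        count       -- number of spans in the record
// then, per span:
//   byte       code        -- HtmlTag code
//   byte       pad         -- alignment byte
//   int16      len         -- number of position values
//   int32[len] positions   -- interlaced start/end positions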

View File

@@ -1,20 +1,23 @@
package nu.marginalia.index.forward.spans;
import nu.marginalia.sequence.VarintCodedSequence;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
public class IndexSpansWriter implements AutoCloseable {
private final FileChannel outputChannel;
private final ByteBuffer work = ByteBuffer.allocate(4*1024*1024).order(ByteOrder.nativeOrder());
private long stateStartOffset = -1;
private int stateLength = -1;
public IndexSpansWriter(Path outputFileSpansData) throws IOException {
this.outputChannel = (FileChannel) Files.newByteChannel(outputFileSpansData, StandardOpenOption.READ, StandardOpenOption.WRITE, StandardOpenOption.CREATE);
}
@@ -23,7 +26,7 @@ public class ForwardIndexSpansWriter implements AutoCloseable {
stateLength = 0;
work.clear();
work.putInt(count);
work.flip();
while (work.hasRemaining())
@@ -33,12 +36,17 @@ public class ForwardIndexSpansWriter implements AutoCloseable {
public void writeSpan(byte spanCode, ByteBuffer sequenceData) throws IOException {
work.clear();
work.put(spanCode);
work.put((byte) 0); // Ensure we're byte aligned
var sequence = new VarintCodedSequence(sequenceData);
work.putShort((short) sequence.valueCount());
var iter = sequence.iterator();
while (iter.hasNext()) {
work.putInt(iter.nextInt());
}
work.flip();
stateLength += outputChannel.write(work);
}
public long endRecord() {
@@ -47,6 +55,11 @@ public class ForwardIndexSpansWriter implements AutoCloseable {
@Override
public void close() throws IOException {
ByteBuffer footer = SpansCodec.createSpanFilesFooter(SpansCodec.SpansCodecVersion.PLAIN, (int) (4096 - (outputChannel.position() & 4095)));
outputChannel.position(outputChannel.size());
while (footer.hasRemaining()) {
outputChannel.write(footer, outputChannel.size());
}
outputChannel.close();
}
}

View File

@@ -1,6 +1,21 @@
package nu.marginalia.index.forward.spans;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
public class SpansCodec {
public static int MAGIC_INT = 0xF000F000;
public static int FOOTER_SIZE = 8;
public enum SpansCodecVersion {
@Deprecated
COMPRESSED,
PLAIN
}
public static long encode(long startOffset, long size) {
assert size < 0x1000_0000L : "Size must be less than 2^28";
@@ -11,7 +26,39 @@ public class SpansCodec {
return encoded >>> 28;
}
public static int decodeSize(long encoded) {
return (int) (encoded & 0x0FFF_FFFFL);
}
public static ByteBuffer createSpanFilesFooter(SpansCodecVersion version, int padSize) {
if (padSize < FOOTER_SIZE) {
padSize += 4096;
}
ByteBuffer footer = ByteBuffer.allocate(padSize);
footer.position(padSize - FOOTER_SIZE);
footer.putInt(SpansCodec.MAGIC_INT);
footer.put((byte) version.ordinal());
footer.put((byte) 0);
footer.put((byte) 0);
footer.put((byte) 0);
footer.flip();
return footer;
}
public static int parseSpanFilesFooter(Path spansFile) throws IOException {
ByteBuffer buffer = ByteBuffer.allocate(FOOTER_SIZE);
try (var fc = FileChannel.open(spansFile, StandardOpenOption.READ)) {
if (fc.size() < FOOTER_SIZE) return 0;
fc.read(buffer, fc.size() - buffer.capacity());
buffer.flip();
int magic = buffer.getInt();
if (magic != MAGIC_INT) {
return 0;
}
return buffer.get();
}
}
}
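A round-trip sketch of the offset/size packing; the encode() body is elided above, but the decoders imply the start offset occupies the upper bits and the size the low 28:
long enc = SpansCodec.encode(32768, 123); // assumed to pack as (startOffset << 28) | size
assert SpansCodec.decodeStartOffset(enc) == 32768;
assert SpansCodec.decodeSize(enc) == 123;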

View File

@@ -1,8 +1,9 @@
package nu.marginalia.index.forward;
import it.unimi.dsi.fastutil.ints.IntList;
import nu.marginalia.index.forward.spans.IndexSpansReader;
import nu.marginalia.index.forward.spans.IndexSpansReaderPlain;
import nu.marginalia.index.forward.spans.IndexSpansWriter;
import nu.marginalia.language.sentence.tag.HtmlTag;
import nu.marginalia.sequence.VarintCodedSequence;
import org.junit.jupiter.api.AfterEach;
@@ -17,10 +18,10 @@ import java.nio.file.Path;
import static org.junit.jupiter.api.Assertions.*;
class IndexSpansReaderTest {
Path testFile = Files.createTempFile("test", ".idx");
IndexSpansReaderTest() throws IOException {
}
@AfterEach
@@ -34,7 +35,7 @@ class ForwardIndexSpansReaderTest {
long offset1;
long offset2;
try (var writer = new IndexSpansWriter(testFile)) {
writer.beginRecord(1);
writer.writeSpan(HtmlTag.HEADING.code, VarintCodedSequence.generate(1, 3, 5, 8).buffer());
offset1 = writer.endRecord();
@@ -46,7 +47,7 @@ class ForwardIndexSpansReaderTest {
offset2 = writer.endRecord();
}
try (var reader = IndexSpansReader.open(testFile);
var arena = Arena.ofConfined()
) {
var spans1 = reader.readSpans(arena, offset1);
@@ -77,13 +78,13 @@ class ForwardIndexSpansReaderTest {
@Test
void testContainsRange() throws IOException {
long offset1;
try (var writer = new IndexSpansWriter(testFile)) {
writer.beginRecord(1);
writer.writeSpan(HtmlTag.HEADING.code, VarintCodedSequence.generate( 1, 2, 10, 15, 20, 25).buffer());
offset1 = writer.endRecord();
}
try (var reader = new IndexSpansReaderPlain(testFile);
var arena = Arena.ofConfined()
) {
var spans1 = reader.readSpans(arena, offset1);
@@ -104,13 +105,13 @@ class ForwardIndexSpansReaderTest {
@Test
void testContainsRangeExact() throws IOException {
long offset1;
try (var writer = new IndexSpansWriter(testFile)) {
writer.beginRecord(1);
writer.writeSpan(HtmlTag.HEADING.code, VarintCodedSequence.generate( 1, 2, 10, 15, 20, 25).buffer());
offset1 = writer.endRecord();
}
try (var reader = new IndexSpansReaderPlain(testFile);
var arena = Arena.ofConfined()
) {
var spans1 = reader.readSpans(arena, offset1);
@@ -131,13 +132,13 @@ class ForwardIndexSpansReaderTest {
@Test
void testCountRangeMatches() throws IOException {
long offset1;
try (var writer = new IndexSpansWriter(testFile)) {
writer.beginRecord(1);
writer.writeSpan(HtmlTag.HEADING.code, VarintCodedSequence.generate( 1, 2, 10, 15, 20, 25).buffer());
offset1 = writer.endRecord();
}
try (var reader = new IndexSpansReaderPlain(testFile);
var arena = Arena.ofConfined()
) {
var spans1 = reader.readSpans(arena, offset1);

View File

@@ -0,0 +1,54 @@
plugins {
id 'java'
id 'application'
id 'jvm-test-suite'
}
java {
toolchain {
languageVersion.set(JavaLanguageVersion.of(rootProject.ext.jvmVersion))
}
}
application {
mainClass = 'nu.marginalia.index.perftest.PerfTestMain'
}
apply from: "$rootProject.projectDir/srcsets.gradle"
dependencies {
implementation project(':code:common:config')
implementation project(':code:common:db')
implementation project(':code:libraries:array')
implementation project(':code:libraries:native')
implementation project(':code:libraries:btree')
implementation project(':code:libraries:term-frequency-dict')
implementation project(':code:common:linkdb')
implementation project(':code:index')
implementation project(':code:index:query')
implementation project(':code:index:index-forward')
implementation project(':code:index:index-reverse')
implementation project(':third-party:commons-codec')
implementation project(':code:functions:search-query')
implementation project(':code:functions:search-query:api')
implementation libs.slop
implementation libs.roaringbitmap
implementation libs.bundles.slf4j
implementation libs.guava
libs.bundles.grpc.get().each {
implementation dependencies.create(it) {
exclude group: 'com.google.guava'
}
}
implementation libs.notnull
implementation libs.trove
implementation libs.fastutil
implementation libs.bundles.gson
implementation libs.bundles.mariadb
}

View File

@@ -0,0 +1,262 @@
package nu.marginalia.index.perftest;
import nu.marginalia.ffi.LinuxSystemCalls;
import nu.marginalia.uring.UringFileReader;
import java.io.IOException;
import java.lang.foreign.Arena;
import java.lang.foreign.MemorySegment;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.stream.LongStream;
public class IoPatternsMain {
static void testBuffered(int sz, int small, int large, int iters) {
try {
Path largeFile = Path.of("/home/vlofgren/largefile.dat");
long fileSize = Files.size(largeFile);
Random r = new Random();
List<MemorySegment> segments = new ArrayList<>();
for (int i = 0; i < sz; i++) {
if (small == large) {
segments.add(Arena.ofAuto().allocate(small));
}
else {
segments.add(Arena.ofAuto().allocate(r.nextInt(small, large)));
}
}
List<Long> offsets = new ArrayList<>();
long[] samples = new long[1000];
int si = 0;
try (UringFileReader reader = new UringFileReader(largeFile, false)) {
for (int iter = 0; iter < iters; ) {
if (si == samples.length) {
Arrays.sort(samples);
double p1 = samples[10] / 1_000.;
double p10 = samples[100] / 1_000.;
double p90 = samples[900] / 1_000.;
double p99 = samples[990] / 1_000.;
double avg = LongStream.of(samples).average().getAsDouble() / 1000.;
System.out.println("B"+"\t"+avg+"\t"+p1 + " " + p10 + " " + p90 + " " + p99);
si = 0;
iter++;
}
offsets.clear();
for (int i = 0; i < sz; i++) {
offsets.add(r.nextLong(0, fileSize - 256));
}
long st = System.nanoTime();
reader.read(segments, offsets);
long et = System.nanoTime();
samples[si++] = et - st;
}
}
}
catch (IOException e) {
e.printStackTrace();
}
}
static void testBufferedPread(int sz, int iters) {
try {
Path largeFile = Path.of("/home/vlofgren/largefile.dat");
long fileSize = Files.size(largeFile);
Random r = new Random();
List<MemorySegment> segments = new ArrayList<>();
for (int i = 0; i < sz; i++) {
segments.add(Arena.ofAuto().allocate(r.nextInt(24, 256)));
}
List<Long> offsets = new ArrayList<>();
long[] samples = new long[1000];
int si = 0;
int fd = -1;
try {
fd = LinuxSystemCalls.openBuffered(largeFile);
LinuxSystemCalls.fadviseRandom(fd);
for (int iter = 0; iter < iters; ) {
if (si == samples.length) {
Arrays.sort(samples);
double p1 = samples[10] / 1_000.;
double p10 = samples[100] / 1_000.;
double p90 = samples[900] / 1_000.;
double p99 = samples[990] / 1_000.;
double avg = LongStream.of(samples).average().getAsDouble() / 1000.;
System.out.println("BP"+"\t"+avg+"\t"+p1 + " " + p10 + " " + p90 + " " + p99);
si = 0;
iter++;
}
offsets.clear();
for (int i = 0; i < sz; i++) {
offsets.add(r.nextLong(0, fileSize - 256));
}
long st = System.nanoTime();
for (int i = 0; i < sz; i++) {
LinuxSystemCalls.readAt(fd, segments.get(i), offsets.get(i));
}
long et = System.nanoTime();
samples[si++] = et - st;
}
}
finally {
LinuxSystemCalls.closeFd(fd);
}
}
catch (IOException e) {
e.printStackTrace();
}
}
static void testDirect(int blockSize, int sz, int iters) {
try {
Path largeFile = Path.of("/home/vlofgren/largefile.dat");
int fileSizeBlocks = (int) ((Files.size(largeFile) & -blockSize) / blockSize);
Random r = new Random();
List<MemorySegment> segments = new ArrayList<>();
for (int i = 0; i < sz; i++) {
segments.add(Arena.ofAuto().allocate(blockSize, blockSize));
}
List<Long> offsets = new ArrayList<>();
long[] samples = new long[1000];
int si = 0;
try (UringFileReader reader = new UringFileReader(largeFile, true)) {
for (int iter = 0; iter < iters; ) {
if (si == samples.length) {
Arrays.sort(samples);
double p1 = samples[10] / 1_000.;
double p10 = samples[100] / 1_000.;
double p90 = samples[900] / 1_000.;
double p99 = samples[990] / 1_000.;
double avg = LongStream.of(samples).average().getAsDouble() / 1000.;
System.out.println("DN"+blockSize+"\t"+avg+"\t"+p1 + " " + p10 + " " + p90 + " " + p99);
si = 0;
iter++;
}
offsets.clear();
for (int i = 0; i < sz; i++) {
offsets.add(blockSize * r.nextLong(0, fileSizeBlocks));
}
long st = System.nanoTime();
reader.read(segments, offsets);
long et = System.nanoTime();
samples[si++] = et - st;
}
}
}
catch (IOException e) {
e.printStackTrace();
}
}
static void testDirect1(int blockSize, int iters) {
try {
Path largeFile = Path.of("/home/vlofgren/largefile.dat");
int fileSizeBlocks = (int) ((Files.size(largeFile) & -blockSize) / blockSize);
Random r = new Random();
MemorySegment segment = Arena.global().allocate(blockSize, blockSize);
long[] samples = new long[1000];
int si = 0;
int fd = LinuxSystemCalls.openDirect(largeFile);
if (fd < 0) {
throw new IOException("open failed");
}
try {
for (int iter = 0; iter < iters; ) {
if (si == samples.length) {
Arrays.sort(samples);
double p1 = samples[10] / 1_000.;
double p10 = samples[100] / 1_000.;
double p90 = samples[900] / 1_000.;
double p99 = samples[990] / 1_000.;
double avg = LongStream.of(samples).average().getAsDouble() / 1000.;
System.out.println("D1"+blockSize+"\t"+avg+"\t"+p1 + " " + p10 + " " + p90 + " " + p99);
si = 0;
iter++;
}
long st = System.nanoTime();
int ret;
long readOffset = blockSize * r.nextLong(0, fileSizeBlocks);
if (blockSize != (ret = LinuxSystemCalls.readAt(fd, segment, readOffset))) {
throw new IOException("pread failed: " + ret);
}
long et = System.nanoTime();
samples[si++] = et - st;
}
}
finally {
LinuxSystemCalls.closeFd(fd);
}
}
catch (IOException e) {
e.printStackTrace();
}
}
public static void main(String[] args) throws Exception {
// Thread.ofPlatform().start(() -> testBuffered(128, 32, 65536,1000));
Thread.ofPlatform().start(() -> testDirect(8192*4, 128,1000));
// Thread.ofPlatform().start(() -> testBuffered(128, 1000));
// Thread.ofPlatform().start(() -> testBuffered(128, 1000));
// Thread.ofPlatform().start(() -> testBuffered(128, 1000));
// Thread.ofPlatform().start(() -> testBufferedPread(128, 1000));
// Thread.ofPlatform().start(() -> testDirect1(1024, 1000));
// Thread.ofPlatform().start(() -> testDirect1(1024, 1000));
// Thread.ofPlatform().start(() -> testDirect1(1024, 1000));
// Thread.ofPlatform().start(() -> testDirect1(1024*1024, 1000));
// Thread.ofPlatform().start(() -> testDirect1(1024*1024, 1000));
// Thread.ofPlatform().start(() -> testDirect(512, 512,1000));
// Thread.ofPlatform().start(() -> testDirect(512, 512,1000));
// Thread.ofPlatform().start(() -> testDirect(512, 512,1000));
// Thread.ofPlatform().start(() -> testDirect(512, 100));
// Thread.ofPlatform().start(() -> testDirect(512, 100));
// Thread.ofPlatform().start(() -> testDirect(512, 100));
// Thread.ofPlatform().start(() -> testDirect(512, 100));
// Thread.ofPlatform().start(() -> testBuffered(512, 1000));
// Thread.ofPlatform().start(() -> testBuffered(512, 1000));
// Thread.ofPlatform().start(() -> testBuffered(512, 1000));
// Thread.ofPlatform().start(() -> testBuffered(512, 1000));
// Thread.ofPlatform().start(() -> testBuffered(100));
// Thread.ofPlatform().start(() -> testBuffered(100));
for (;;);
// testBuffered(100);
}
}

View File

@@ -0,0 +1,313 @@
package nu.marginalia.index.perftest;
import gnu.trove.list.array.TLongArrayList;
import nu.marginalia.api.searchquery.RpcQueryLimits;
import nu.marginalia.api.searchquery.model.query.NsfwFilterTier;
import nu.marginalia.api.searchquery.model.query.QueryParams;
import nu.marginalia.api.searchquery.model.query.SearchSpecification;
import nu.marginalia.api.searchquery.model.results.PrototypeRankingParameters;
import nu.marginalia.array.page.LongQueryBuffer;
import nu.marginalia.functions.searchquery.QueryFactory;
import nu.marginalia.functions.searchquery.query_parser.QueryExpansion;
import nu.marginalia.index.FullReverseIndexReader;
import nu.marginalia.index.IndexQueryExecution;
import nu.marginalia.index.PrioReverseIndexReader;
import nu.marginalia.index.forward.ForwardIndexReader;
import nu.marginalia.index.index.CombinedIndexReader;
import nu.marginalia.index.index.StatefulIndex;
import nu.marginalia.index.model.ResultRankingContext;
import nu.marginalia.index.model.SearchParameters;
import nu.marginalia.index.model.SearchTerms;
import nu.marginalia.index.positions.PositionsFileReader;
import nu.marginalia.index.query.IndexQuery;
import nu.marginalia.index.query.IndexSearchBudget;
import nu.marginalia.index.results.DomainRankingOverrides;
import nu.marginalia.index.results.IndexResultRankingService;
import nu.marginalia.index.results.model.ids.CombinedDocIdList;
import nu.marginalia.index.searchset.SearchSetAny;
import nu.marginalia.linkdb.docs.DocumentDbReader;
import nu.marginalia.segmentation.NgramLexicon;
import nu.marginalia.term_frequency_dict.TermFrequencyDict;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.sql.SQLException;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeoutException;
public class PerfTestMain {
static Duration warmupTime = Duration.ofMinutes(1);
static Duration runTime = Duration.ofMinutes(10);
public static void main(String[] args) {
if (args.length != 4) {
System.err.println("Arguments: home-dir index-dir scenario query");
System.exit(255);
}
try {
Path homeDir = Paths.get(args[0]);
Path indexDir = Paths.get(args[1]);
if (!Files.isDirectory(indexDir)) {
System.err.println("Index directory is not a directory");
System.exit(255);
}
String scenario = args[2];
String query = args[3];
switch (scenario) {
case "valuation" -> runValuation(homeDir, indexDir, query);
case "lookup" -> runLookup(homeDir, indexDir, query);
case "execution" -> runExecution(homeDir, indexDir, query);
}
System.exit(0);
}
catch (NumberFormatException e) {
System.err.println("Arguments: data-dir index-dir query");
System.exit(255);
}
catch (Exception ex) {
System.err.println("Error during testing");
ex.printStackTrace();
System.exit(255);
}
System.out.println(Arrays.toString(args));
}
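// Invocation sketch (classpath, paths and query are placeholders):
//   java -cp <classpath> nu.marginalia.index.perftest.PerfTestMain \
//       /path/to/home /path/to/index lookup "which laptop"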
private static CombinedIndexReader createCombinedIndexReader(Path indexDir) throws IOException {
return new CombinedIndexReader(
new ForwardIndexReader(
indexDir.resolve("ir/fwd-doc-id.dat"),
indexDir.resolve("ir/fwd-doc-data.dat"),
indexDir.resolve("ir/fwd-spans.dat")
),
new FullReverseIndexReader(
"full",
indexDir.resolve("ir/rev-words.dat"),
indexDir.resolve("ir/rev-docs.dat"),
new PositionsFileReader(indexDir.resolve("ir/rev-positions.dat"))
),
new PrioReverseIndexReader(
"prio",
indexDir.resolve("ir/rev-prio-words.dat"),
indexDir.resolve("ir/rev-prio-docs.dat")
)
);
}
private static IndexResultRankingService createIndexResultRankingService(Path indexDir, CombinedIndexReader combinedIndexReader) throws IOException, SQLException {
return new IndexResultRankingService(
new DocumentDbReader(indexDir.resolve("ldbr/documents.db")),
new StatefulIndex(combinedIndexReader),
new DomainRankingOverrides(null, Path.of("xxxx"))
);
}
static QueryFactory createQueryFactory(Path homeDir) throws IOException {
return new QueryFactory(
new QueryExpansion(
new TermFrequencyDict(homeDir.resolve("model/tfreq-new-algo3.bin")),
new NgramLexicon()
)
);
}
public static void runValuation(Path homeDir,
Path indexDir,
String rawQuery) throws IOException, SQLException, TimeoutException {
CombinedIndexReader indexReader = createCombinedIndexReader(indexDir);
QueryFactory queryFactory = createQueryFactory(homeDir);
IndexResultRankingService rankingService = createIndexResultRankingService(indexDir, indexReader);
var queryLimits = RpcQueryLimits.newBuilder()
.setTimeoutMs(10_000)
.setResultsTotal(1000)
.setResultsByDomain(10)
.setFetchSize(4096)
.build();
SearchSpecification parsedQuery = queryFactory.createQuery(new QueryParams(rawQuery, queryLimits, "NONE", NsfwFilterTier.OFF), PrototypeRankingParameters.sensibleDefaults()).specs;
System.out.println("Query compiled to: " + parsedQuery.query.compiledQuery);
SearchParameters searchParameters = new SearchParameters(parsedQuery, new SearchSetAny());
List<IndexQuery> queries = indexReader.createQueries(new SearchTerms(searchParameters.query, searchParameters.compiledQueryIds), searchParameters.queryParams, new IndexSearchBudget(10_000));
TLongArrayList allResults = new TLongArrayList();
LongQueryBuffer buffer = new LongQueryBuffer(512);
for (var query : queries) {
while (query.hasMore() && allResults.size() < 512 ) {
query.getMoreResults(buffer);
allResults.addAll(buffer.copyData());
}
if (allResults.size() >= 512)
break;
}
allResults.sort();
if (allResults.size() > 512) {
allResults.subList(512, allResults.size()).clear();
}
var rankingContext = ResultRankingContext.create(indexReader, searchParameters);
var rankingData = rankingService.prepareRankingData(rankingContext, new CombinedDocIdList(allResults.toArray()), null);
int sum = 0;
Instant runEndTime = Instant.now().plus(runTime);
Instant runStartTime = Instant.now();
int sum2 = 0;
List<Double> times = new ArrayList<>();
int iter;
for (iter = 0;; iter++) {
IndexSearchBudget budget = new IndexSearchBudget(10000);
long start = System.nanoTime();
sum2 += rankingService.rankResults(budget, rankingContext, rankingData, false).size();
long end = System.nanoTime();
times.add((end - start)/1_000_000.);
if ((iter % 100) == 0) {
if (Instant.now().isAfter(runEndTime)) {
break;
}
if (times.size() > 100) {
double[] timesSample = times.stream().mapToDouble(Double::doubleValue).skip(times.size() - 100).sorted().toArray();
System.out.format("P1: %f P10: %f, P90: %f, P99: %f\n", timesSample[1], timesSample[10], timesSample[90], timesSample[99]);
}
System.out.println(Duration.between(runStartTime, Instant.now()).toMillis() / 1000. + " best times: " + (allResults.size() / 512.) * times.stream().mapToDouble(Double::doubleValue).sorted().limit(3).average().orElse(-1));
}
}
System.out.println("Benchmark complete after " + iter + " iters!");
System.out.println("Best times: " + (allResults.size() / 512.) * times.stream().mapToDouble(Double::doubleValue).sorted().limit(3).average().orElse(-1));
System.out.println("Warmup sum: " + sum);
System.out.println("Main sum: " + sum2);
System.out.println(rankingData.size());
}
public static void runExecution(Path homeDir,
Path indexDir,
String rawQuery) throws IOException, SQLException, InterruptedException {
CombinedIndexReader indexReader = createCombinedIndexReader(indexDir);
QueryFactory queryFactory = createQueryFactory(homeDir);
IndexResultRankingService rankingService = createIndexResultRankingService(indexDir, indexReader);
var queryLimits = RpcQueryLimits.newBuilder()
.setTimeoutMs(50)
.setResultsTotal(1000)
.setResultsByDomain(10)
.setFetchSize(4096)
.build();
SearchSpecification parsedQuery = queryFactory.createQuery(new QueryParams(rawQuery, queryLimits, "NONE", NsfwFilterTier.OFF), PrototypeRankingParameters.sensibleDefaults()).specs;
System.out.println("Query compiled to: " + parsedQuery.query.compiledQuery);
System.out.println("Running warmup loop!");
int sum = 0;
Instant runEndTime = Instant.now().plus(runTime);
Instant runStartTime = Instant.now();
int sum2 = 0;
List<Double> rates = new ArrayList<>();
List<Double> times = new ArrayList<>();
int iter;
for (iter = 0;; iter++) {
SearchParameters searchParameters = new SearchParameters(parsedQuery, new SearchSetAny());
var execution = new IndexQueryExecution(searchParameters, rankingService, indexReader);
long start = System.nanoTime();
execution.run();
long end = System.nanoTime();
sum2 += execution.itemsProcessed();
rates.add(execution.itemsProcessed() / ((end - start)/1_000_000_000.));
times.add((end - start)/1_000_000.);
indexReader.reset();
if ((iter % 100) == 0) {
if (Instant.now().isAfter(runEndTime)) {
break;
}
if (times.size() > 100) {
double[] timesSample = times.stream().mapToDouble(Double::doubleValue).skip(times.size() - 100).sorted().toArray();
System.out.format("P1: %f P10: %f, P90: %f, P99: %f\n", timesSample[1], timesSample[10], timesSample[90], timesSample[99]);
}
System.out.println(Duration.between(runStartTime, Instant.now()).toMillis() / 1000. + " best rates: " + rates.stream().mapToDouble(Double::doubleValue).map(i -> -i).sorted().map(i -> -i).limit(3).average().orElse(-1));
}
}
System.out.println("Benchmark complete after " + iter + " iters!");
System.out.println("Best counts: " + rates.stream().mapToDouble(Double::doubleValue).map(i -> -i).sorted().map(i -> -i).limit(3).average().orElse(-1));
System.out.println("Warmup sum: " + sum);
System.out.println("Main sum: " + sum2);
}
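The map(i -> -i).sorted().map(i -> -i) chain above compensates for DoubleStream.sorted() having no descending variant: negating before and after an ascending sort surfaces the largest rates first. A boxed equivalent, shown only as a readability sketch (assumes java.util.Comparator is imported):

double bestRates = rates.stream()
        .sorted(Comparator.reverseOrder())   // descending: highest rates first
        .limit(3)                            // keep the three best runs
        .mapToDouble(Double::doubleValue)
        .average()
        .orElse(-1);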
public static void runLookup(Path homeDir,
Path indexDir,
String rawQuery) throws IOException, SQLException
{
CombinedIndexReader indexReader = createCombinedIndexReader(indexDir);
QueryFactory queryFactory = createQueryFactory(homeDir);
var queryLimits = RpcQueryLimits.newBuilder()
.setTimeoutMs(10_000)
.setResultsTotal(1000)
.setResultsByDomain(10)
.setFetchSize(4096)
.build();
SearchSpecification parsedQuery = queryFactory.createQuery(new QueryParams(rawQuery, queryLimits, "NONE", NsfwFilterTier.OFF), PrototypeRankingParameters.sensibleDefaults()).specs;
System.out.println("Query compiled to: " + parsedQuery.query.compiledQuery);
SearchParameters searchParameters = new SearchParameters(parsedQuery, new SearchSetAny());
Instant runEndTime = Instant.now().plus(runTime);
LongQueryBuffer buffer = new LongQueryBuffer(512);
int sum1 = 0;
int iter;
Instant runStartTime = Instant.now();
int sum2 = 0;
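// Note: sum2 is never updated in this driver; only sum1 accumulates results, so "Main sum" always prints 0.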
List<Double> times = new ArrayList<>();
for (iter = 0;; iter++) {
indexReader.reset();
List<IndexQuery> queries = indexReader.createQueries(new SearchTerms(searchParameters.query, searchParameters.compiledQueryIds), searchParameters.queryParams, new IndexSearchBudget(150));
long start = System.nanoTime();
for (var query : queries) {
while (query.hasMore()) {
query.getMoreResults(buffer);
sum1 += buffer.end;
buffer.reset();
}
}
long end = System.nanoTime();
times.add((end - start)/1_000_000_000.);
if ((iter % 10) == 0) {
if (Instant.now().isAfter(runEndTime)) {
break;
}
if (times.size() > 100) {
double[] timesSample = times.stream().mapToDouble(Double::doubleValue).skip(times.size() - 100).sorted().toArray();
System.out.format("P1: %f P10: %f, P90: %f, P99: %f\n", timesSample[1], timesSample[10], timesSample[90], timesSample[99]);
}
System.out.println(Duration.between(runStartTime, Instant.now()).toMillis() / 1000. + " best times: " + times.stream().mapToDouble(Double::doubleValue).sorted().limit(3).average().orElse(-1));
}
}
System.out.println("Benchmark complete after " + iter + " iters!");
System.out.println("Best times: " + times.stream().mapToDouble(Double::doubleValue).sorted().limit(3).average().orElse(-1));
System.out.println("Warmup sum: " + sum1);
System.out.println("Main sum: " + sum2);
}
}

View File

@@ -15,6 +15,7 @@ apply from: "$rootProject.projectDir/srcsets.gradle"
dependencies {
implementation project(':code:libraries:array')
implementation project(':code:libraries:native')
implementation project(':code:libraries:btree')
implementation project(':code:libraries:coded-sequence')
implementation project(':code:libraries:random-write-funnel')

View File

@@ -1,32 +1,26 @@
package nu.marginalia.index;
import nu.marginalia.array.page.LongQueryBuffer;
import nu.marginalia.btree.BTreeReader;
import nu.marginalia.index.query.EntrySource;
import static java.lang.Math.min;
import nu.marginalia.skiplist.SkipListReader;
public class FullIndexEntrySource implements EntrySource {
private final String name;
private final BTreeReader reader;
int pos;
int endOffset;
final int entrySize;
private final SkipListReader reader;
private final long wordId;
public FullIndexEntrySource(String name,
BTreeReader reader,
int entrySize,
SkipListReader reader,
long wordId) {
this.name = name;
this.reader = reader;
this.entrySize = entrySize;
this.wordId = wordId;
pos = 0;
endOffset = pos + entrySize * reader.numEntries();
}
@Override
@@ -36,32 +30,14 @@ public class FullIndexEntrySource implements EntrySource {
@Override
public void read(LongQueryBuffer buffer) {
buffer.reset();
buffer.end = min(buffer.end, endOffset - pos);
reader.readData(buffer.data, buffer.end, pos);
pos += buffer.end;
destagger(buffer);
buffer.uniq();
}
private void destagger(LongQueryBuffer buffer) {
if (entrySize == 1)
return;
for (int ri = entrySize, wi=1; ri < buffer.end ; ri+=entrySize, wi++) {
buffer.data.set(wi, buffer.data.get(ri));
}
buffer.end /= entrySize;
reader.getData(buffer);
}
@Override
public boolean hasMore() {
return pos < endOffset;
return !reader.atEnd();
}
@Override
public String indexName() {
return name + ":" + Long.toHexString(wordId);

View File

@@ -2,16 +2,17 @@ package nu.marginalia.index;
import nu.marginalia.array.LongArray;
import nu.marginalia.array.LongArrayFactory;
import nu.marginalia.array.pool.BufferPool;
import nu.marginalia.btree.BTreeReader;
import nu.marginalia.index.positions.TermData;
import nu.marginalia.ffi.LinuxSystemCalls;
import nu.marginalia.index.positions.PositionsFileReader;
import nu.marginalia.index.query.EmptyEntrySource;
import nu.marginalia.index.query.EntrySource;
import nu.marginalia.index.query.ReverseIndexRejectFilter;
import nu.marginalia.index.query.ReverseIndexRetainFilter;
import nu.marginalia.index.positions.TermData;
import nu.marginalia.index.query.*;
import nu.marginalia.index.query.filter.QueryFilterLetThrough;
import nu.marginalia.index.query.filter.QueryFilterNoPass;
import nu.marginalia.index.query.filter.QueryFilterStepIf;
import nu.marginalia.skiplist.SkipListConstants;
import nu.marginalia.skiplist.SkipListReader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -20,10 +21,12 @@ import java.lang.foreign.Arena;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.concurrent.Executors;
import java.util.function.Consumer;
public class FullReverseIndexReader {
private final LongArray words;
private final LongArray documents;
private final long wordsDataOffset;
private final Logger logger = LoggerFactory.getLogger(getClass());
private final BTreeReader wordsBTreeReader;
@@ -31,6 +34,8 @@ public class FullReverseIndexReader {
private final PositionsFileReader positionsFileReader;
private final BufferPool dataPool;
public FullReverseIndexReader(String name,
Path words,
Path documents,
@@ -44,6 +49,7 @@ public class FullReverseIndexReader {
this.documents = null;
this.wordsBTreeReader = null;
this.wordsDataOffset = -1;
this.dataPool = null;
return;
}
@@ -52,6 +58,11 @@ public class FullReverseIndexReader {
this.words = LongArrayFactory.mmapForReadingShared(words);
this.documents = LongArrayFactory.mmapForReadingShared(documents);
LinuxSystemCalls.madviseRandom(this.words.getMemorySegment());
LinuxSystemCalls.madviseRandom(this.documents.getMemorySegment());
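// Pool capacity: index.bufferPoolSize bytes (default 512 MiB) split into SkipListConstants.BLOCK_SIZE buffers.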
dataPool = new BufferPool(documents, SkipListConstants.BLOCK_SIZE, (int) (Long.getLong("index.bufferPoolSize", 512*1024*1024L) / SkipListConstants.BLOCK_SIZE));
wordsBTreeReader = new BTreeReader(this.words, ReverseIndexParameters.wordsBTreeContext, 0);
wordsDataOffset = wordsBTreeReader.getHeader().dataOffsetLongs();
@@ -62,6 +73,11 @@ public class FullReverseIndexReader {
}
}
public void reset() {
dataPool.reset();
}
private void selfTest() {
logger.info("Running self test program");
@@ -76,6 +92,15 @@ public class FullReverseIndexReader {
ReverseIndexSelfTest.runSelfTest6(wordsDataRange, documents);
}
public void eachDocRange(Consumer<LongArray> eachDocRange) {
long wordsDataSize = wordsBTreeReader.getHeader().numEntries() * 2L;
var wordsDataRange = words.range(wordsDataOffset, wordsDataOffset + wordsDataSize);
for (long i = 1; i < wordsDataRange.size(); i+=2) {
var docsBTreeReader = new BTreeReader(documents, ReverseIndexParameters.fullDocsBTreeContext, wordsDataRange.get(i));
eachDocRange.accept(docsBTreeReader.data());
}
}
/** Calculate the offset of the word in the documents.
* If the return-value is negative, the term does not exist
@@ -101,27 +126,27 @@ public class FullReverseIndexReader {
if (offset < 0) // No documents
return new EmptyEntrySource();
return new FullIndexEntrySource(name, createReaderNew(offset), 2, termId);
return new FullIndexEntrySource(name, getReader(offset), termId);
}
/** Create a filter step requiring the specified termId to exist in the documents */
public QueryFilterStepIf also(long termId) {
public QueryFilterStepIf also(long termId, IndexSearchBudget budget) {
long offset = wordOffset(termId);
if (offset < 0) // No documents
return new QueryFilterNoPass();
return new ReverseIndexRetainFilter(createReaderNew(offset), name, termId);
return new ReverseIndexRetainFilter(getReader(offset), name, termId, budget);
}
/** Create a filter step requiring the specified termId to be absent from the documents */
public QueryFilterStepIf not(long termId) {
public QueryFilterStepIf not(long termId, IndexSearchBudget budget) {
long offset = wordOffset(termId);
if (offset < 0) // No documents
return new QueryFilterLetThrough();
return new ReverseIndexRejectFilter(createReaderNew(offset));
return new ReverseIndexRejectFilter(getReader(offset), budget);
}
/** Return the number of documents with the termId in the index */
@@ -131,15 +156,39 @@ public class FullReverseIndexReader {
if (offset < 0)
return 0;
return createReaderNew(offset).numEntries();
return getReader(offset).estimateSize();
}
/** Create a BTreeReader for the document offset associated with a termId */
private BTreeReader createReaderNew(long offset) {
return new BTreeReader(
documents,
ReverseIndexParameters.fullDocsBTreeContext,
offset);
private SkipListReader getReader(long offset) {
return new SkipListReader(dataPool, offset);
}
public TermData[] getTermData(Arena arena,
long[] termIds,
long[] docIds)
{
long[] offsetsAll = new long[termIds.length * docIds.length];
for (int i = 0; i < termIds.length; i++) {
long termId = termIds[i];
long offset = wordOffset(termId);
if (offset < 0) {
// This is likely a bug in the code, but we can't throw an exception here
logger.debug("Missing offset for word {}", termId);
continue;
}
var reader = getReader(offset);
// Read the size and offset of the position data
var offsetsForTerm = reader.getValueOffsets(docIds);
System.arraycopy(offsetsForTerm, 0, offsetsAll, i * docIds.length, docIds.length);
}
return positionsFileReader.getTermData(arena, offsetsAll);
}
public TermData[] getTermData(Arena arena,
@@ -156,20 +205,22 @@ public class FullReverseIndexReader {
return ret;
}
var reader = createReaderNew(offset);
var reader = getReader(offset);
// Read the size and offset of the position data
var offsets = reader.queryData(docIds, 1);
var offsets = reader.getValueOffsets(docIds);
for (int i = 0; i < docIds.length; i++) {
if (offsets[i] == 0)
continue;
ret[i] = positionsFileReader.getTermData(arena, offsets[i]);
}
return ret;
return positionsFileReader.getTermData(arena, offsets);
}
public void close() {
try {
dataPool.close();
}
catch (Exception e) {
logger.warn("Error while closing bufferPool", e);
}
if (documents != null)
documents.close();
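The batched getTermData(Arena, long[], long[]) above flattens a terms-by-documents matrix of value offsets into a single array before handing it to the positions reader. The layout is row-major; a clarifying sketch under that reading, with names mirroring the diff:

// offsetsAll holds one row per term and one column per document:
// the offset for (termIds[t], docIds[d]) sits at offsetsAll[t * docIds.length + d].
static long offsetFor(long[] offsetsAll, int docCount, int t, int d) {
    return offsetsAll[t * docCount + d];
}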

View File

@@ -13,7 +13,7 @@ import java.nio.channels.FileChannel;
public class PrioIndexEntrySource implements EntrySource {
private final String name;
private final ByteBuffer readData = ByteBuffer.allocate(1024);
private final ByteBuffer readData = ByteBuffer.allocate(8*1024);
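// Larger read buffer: fewer fillReadBuffer round-trips per entry source.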
private final BitReader bitReader = new BitReader(readData, this::fillReadBuffer);
private final FileChannel docsFileChannel;

View File

@@ -3,6 +3,7 @@ package nu.marginalia.index;
import nu.marginalia.array.LongArray;
import nu.marginalia.array.LongArrayFactory;
import nu.marginalia.btree.BTreeReader;
import nu.marginalia.ffi.LinuxSystemCalls;
import nu.marginalia.index.query.EmptyEntrySource;
import nu.marginalia.index.query.EntrySource;
import org.slf4j.Logger;
@@ -40,6 +41,8 @@ public class PrioReverseIndexReader {
this.words = LongArrayFactory.mmapForReadingShared(words);
LinuxSystemCalls.madviseRandom(this.words.getMemorySegment());
wordsBTreeReader = new BTreeReader(this.words, ReverseIndexParameters.wordsBTreeContext, 0);
wordsDataOffset = wordsBTreeReader.getHeader().dataOffsetLongs();

View File

@@ -5,7 +5,7 @@ import nu.marginalia.btree.model.BTreeContext;
public class ReverseIndexParameters
{
public static final BTreeContext prioDocsBTreeContext = new BTreeContext(5, 1, BTreeBlockSize.BS_2048);
public static final BTreeContext fullDocsBTreeContext = new BTreeContext(5, 2, BTreeBlockSize.BS_2048);
public static final BTreeContext wordsBTreeContext = new BTreeContext(5, 2, BTreeBlockSize.BS_2048);
public static final BTreeContext prioDocsBTreeContext = new BTreeContext(5, 1, BTreeBlockSize.BS_512);
public static final BTreeContext fullDocsBTreeContext = new BTreeContext(5, 2, BTreeBlockSize.BS_512);
public static final BTreeContext wordsBTreeContext = new BTreeContext(5, 2, BTreeBlockSize.BS_512);
}

View File

@@ -14,62 +14,103 @@ import java.nio.file.StandardOpenOption;
*
* The positions data is concatenated in the file, with each term's metadata
* followed by its positions. The metadata is a single byte, and the positions
* are encoded using the Elias Gamma code, with zero padded bits at the end to
* get octet alignment.
*
* are encoded as varints.
* <p></p>
*
* It is the responsibility of the caller to keep track of the byte offset of
* each posting in the file.
*/
public class PositionsFileConstructor implements AutoCloseable {
private final ByteBuffer workBuffer = ByteBuffer.allocate(65536);
private final Path file;
private final FileChannel channel;
private long offset;
public PositionsFileConstructor(Path file) throws IOException {
this.file = file;
channel = FileChannel.open(file, StandardOpenOption.CREATE, StandardOpenOption.WRITE);
}
/** Represents a block of positions lists. Each writer thread should hold on to
* a block object to ensure the locality of its positions lists.
* When finished, commit() must be run.
* */
public class PositionsFileBlock {
private final ByteBuffer workBuffer = ByteBuffer.allocate(1024*1024*16);
private long position;
public PositionsFileBlock(long position) {
this.position = position;
}
public boolean fitsData(int size) {
return workBuffer.remaining() >= size;
}
public void commit() throws IOException {
workBuffer.position(0);
workBuffer.limit(workBuffer.capacity());
while (workBuffer.hasRemaining()) {
// write(buf, off) advances the buffer's position but not the channel's,
// so workBuffer.position() already tracks how many bytes of the block are written
channel.write(workBuffer, this.position + workBuffer.position());
}
}
private void relocate() throws IOException {
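// Reserve a fresh region at the end of the file for this block by advancing the channel position a full buffer width.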
workBuffer.clear();
position = channel.position();
while (workBuffer.hasRemaining()) {
channel.write(workBuffer);
}
workBuffer.clear();
}
public long position() {
return this.position + workBuffer.position();
}
public void put(byte b) {
workBuffer.put(b);
}
public void put(ByteBuffer buffer) {
workBuffer.put(buffer);
}
}
public PositionsFileBlock getBlock() throws IOException {
synchronized (this) {
var block = new PositionsFileBlock(channel.position());
block.relocate();
return block;
}
}
/** Add a term to the positions file
*
* @param block a block token to ensure data locality
* @param termMeta the term metadata
* @param positionsBuffer the positions of the term
*
* @return the offset of the term in the file, with the size of the data in the highest byte
*/
public long add(byte termMeta, ByteBuffer positionsBuffer) throws IOException {
synchronized (file) {
int size = 1 + positionsBuffer.remaining();
public long add(PositionsFileBlock block, byte termMeta, ByteBuffer positionsBuffer) throws IOException {
int size = 1 + positionsBuffer.remaining();
if (workBuffer.remaining() < size) {
workBuffer.flip();
channel.write(workBuffer);
workBuffer.clear();
if (!block.fitsData(size)) {
synchronized (this) {
block.commit();
block.relocate();
}
}
synchronized (file) {
long offset = block.position();
workBuffer.put(termMeta);
workBuffer.put(positionsBuffer);
block.put(termMeta);
block.put(positionsBuffer);
long ret = PositionCodec.encode(size, offset);
offset += size;
return ret;
return PositionCodec.encode(size, offset);
}
}
public void close() throws IOException {
if (workBuffer.hasRemaining()) {
workBuffer.flip();
while (workBuffer.hasRemaining())
channel.write(workBuffer);
}
channel.force(false);
channel.close();
}
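Taken together, a writer thread's life cycle under the new block scheme appears to be: reserve a block, append postings through it, commit. A sketch under that assumption (TermPosting is a hypothetical holder of the metadata byte and positions buffer; error handling omitted):

try (var constructor = new PositionsFileConstructor(Path.of("positions.dat"))) {
    var block = constructor.getBlock();      // reserve a thread-local region of the file
    for (TermPosting p : postings) {
        long sizeAndOffset = constructor.add(block, p.meta(), p.positions());
        // sizeAndOffset packs the posting's length and file offset via PositionCodec.encode
    }
    block.commit();                          // flush the block's buffer back to its region
}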

View File

@@ -1,46 +0,0 @@
package nu.marginalia.index.construction.full;
import nu.marginalia.array.LongArray;
import nu.marginalia.array.algo.LongArrayTransformations;
import nu.marginalia.btree.BTreeWriter;
import nu.marginalia.btree.model.BTreeContext;
import java.io.IOException;
/** Constructs the BTrees in a reverse index */
public class FullIndexBTreeTransformer implements LongArrayTransformations.LongIOTransformer {
private final BTreeWriter writer;
private final int entrySize;
private final LongArray documentsArray;
long start = 0;
long writeOffset = 0;
public FullIndexBTreeTransformer(LongArray urlsFileMap,
int entrySize,
BTreeContext bTreeContext,
LongArray documentsArray) {
this.documentsArray = documentsArray;
this.writer = new BTreeWriter(urlsFileMap, bTreeContext);
this.entrySize = entrySize;
}
@Override
public long transform(long pos, long end) throws IOException {
final int size = (int) ((end - start) / entrySize);
if (size == 0) {
return -1;
}
final long offsetForBlock = writeOffset;
writeOffset += writer.write(writeOffset, size,
mapRegion -> mapRegion.transferFrom(documentsArray, start, 0, end - start)
);
start = end;
return offsetForBlock;
}
}

View File

@@ -0,0 +1,40 @@
package nu.marginalia.index.construction.full;
import nu.marginalia.array.LongArray;
import nu.marginalia.array.algo.LongArrayTransformations;
import nu.marginalia.skiplist.SkipListWriter;
import java.io.IOException;
import java.nio.file.Path;
/** Constructs the skip lists in a reverse index */
public class FullIndexSkipListTransformer implements LongArrayTransformations.LongIOTransformer, AutoCloseable {
private final SkipListWriter writer;
private final LongArray documentsArray;
long start = 0;
public FullIndexSkipListTransformer(Path docsOutputFile,
LongArray documentsArray) throws IOException {
this.documentsArray = documentsArray;
this.writer = new SkipListWriter(docsOutputFile);
}
@Override
public long transform(long pos, long end) throws IOException {
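// Each record is two longs (rank-encoded doc id + encoded positions offset), hence the division by two.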
final int size = (int) ((end - start) / 2);
if (size == 0) {
return -1;
}
long offset = writer.writeList(documentsArray, start, size);
start = end;
return offset;
}
public void close() throws IOException {
writer.close();
}
}

View File

@@ -6,7 +6,6 @@ import nu.marginalia.btree.BTreeWriter;
import nu.marginalia.index.ReverseIndexParameters;
import nu.marginalia.index.construction.CountToOffsetTransformer;
import nu.marginalia.index.construction.DocIdRewriter;
import nu.marginalia.index.construction.IndexSizeEstimator;
import nu.marginalia.index.construction.PositionsFileConstructor;
import nu.marginalia.index.journal.IndexJournalPage;
import org.slf4j.Logger;
@@ -81,15 +80,11 @@ public class FullPreindex {
// Estimate the size of the docs index data
offsets.transformEach(0, offsets.size(), new CountToOffsetTransformer(2));
IndexSizeEstimator sizeEstimator = new IndexSizeEstimator(ReverseIndexParameters.fullDocsBTreeContext, 2);
offsets.fold(0, 0, offsets.size(), sizeEstimator);
// Write the docs file
LongArray finalDocs = LongArrayFactory.mmapForWritingConfined(outputFileDocs, sizeEstimator.size);
offsets.transformEachIO(0, offsets.size(),
new FullIndexBTreeTransformer(finalDocs, 2,
ReverseIndexParameters.fullDocsBTreeContext,
documents.documents));
try (var transformer = new FullIndexSkipListTransformer(outputFileDocs, documents.documents)) {
offsets.transformEachIO(0, offsets.size(), transformer);
}
LongArray wordIds = segments.wordIds;
@@ -102,7 +97,7 @@ public class FullPreindex {
// Estimate the size of the words index data
long wordsSize = ReverseIndexParameters.wordsBTreeContext.calculateSize((int) offsets.size());
// Construct the tree
// Construct the keywords tree
LongArray wordsArray = LongArrayFactory.mmapForWritingConfined(outputFileWords, wordsSize);
new BTreeWriter(wordsArray, ReverseIndexParameters.wordsBTreeContext)
@@ -113,8 +108,6 @@ public class FullPreindex {
}
});
finalDocs.force();
finalDocs.close();
wordsArray.force();
wordsArray.close();

View File

@@ -12,10 +12,8 @@ import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.List;
/** A LongArray with document data, segmented according to
@@ -52,11 +50,6 @@ public class FullPreindexDocuments {
return new FullPreindexDocuments(docsFileMap, docsFile);
}
public FileChannel createDocumentsFileChannel() throws IOException {
return (FileChannel) Files.newByteChannel(file, StandardOpenOption.READ);
}
public LongArray slice(long start, long end) {
return documents.range(start, end);
}
@@ -86,6 +79,8 @@ public class FullPreindexDocuments {
var offsetMap = segments.asMap(RECORD_SIZE_LONGS);
offsetMap.defaultReturnValue(0);
var positionsBlock = positionsFileConstructor.getBlock();
while (docIds.hasRemaining()) {
long docId = docIds.get();
long rankEncodedId = docIdRewriter.rewriteDocId(docId);
@@ -101,12 +96,13 @@ public class FullPreindexDocuments {
ByteBuffer pos = tPos.get(i);
long offset = offsetMap.addTo(termId, RECORD_SIZE_LONGS);
long encodedPosOffset = positionsFileConstructor.add(meta, pos);
long encodedPosOffset = positionsFileConstructor.add(positionsBlock, meta, pos);
assembly.put(offset + 0, rankEncodedId);
assembly.put(offset + 1, encodedPosOffset);
}
}
positionsBlock.commit();
assembly.write(docsFile);
}

Some files were not shown because too many files have changed in this diff.