mirror of https://github.com/MarginaliaSearch/MarginaliaSearch.git synced 2025-10-06 07:32:38 +02:00

Compare commits


99 Commits

Author SHA1 Message Date
Viktor
24022c5adc Merge pull request #203 from MarginaliaSearch/nsfw-domain-lists
Nsfw blocking via UT1 domain lists
2025-06-07 13:24:05 +02:00
Viktor Lofgren
1de9ecc0b6 (nsfw) Add metrics to the filtering so we can monitor it 2025-06-07 13:17:05 +02:00
Viktor Lofgren
9b80245ea0 (nsfw) Move filtering to the IndexApiClient, and add filtering options to the internal APIs and public API. 2025-06-07 12:54:20 +02:00
Viktor Lofgren
4e1595c1a6 (nsfw) Initial work on adding UT1-based domain filtering 2025-06-06 14:23:37 +02:00
Viktor Lofgren
0be8585fa5 Add tag format hint to deploy script 2025-06-06 10:03:18 +02:00
Viktor Lofgren
a0fe070fe7 Redeploy browserless and assistant. 2025-06-06 09:51:39 +02:00
Viktor Lofgren
abe9da0fc6 (search) Ensure the new search UI sets the correct content-type for opensearch.xml 2025-05-29 12:44:55 +02:00
Viktor Lofgren
56d0128b0a (dom-sample) Remove redundant code 2025-05-28 17:43:46 +02:00
Viktor Lofgren
840b68ac55 (dom-sample) Minor cleanups 2025-05-28 16:27:27 +02:00
Viktor Lofgren
c34ff6d6c3 (dom-sample) Use WAL journal for dom sample db 2025-05-28 16:16:28 +02:00
Viktor Lofgren
32780967d8 (dom-sample) Initialize dom sampler 2025-05-28 16:06:05 +02:00
Viktor Lofgren
7330bc489d (deploy) Correct deploy script for browserless 2025-05-28 15:58:12 +02:00
Viktor Lofgren
ea23f33738 (deploy) Correct deploy script for headlesschrome 2025-05-28 15:56:05 +02:00
Viktor Lofgren
4a8a028118 (deploy) Deploy assistant and browserless 2025-05-28 15:50:26 +02:00
Viktor
a25bc647be Merge pull request #201 from MarginaliaSearch/website-capture
Capture website snapshots
2025-05-28 15:49:03 +02:00
Viktor Lofgren
a720dba3a2 (deploy) Add browserless to deploy script 2025-05-28 15:48:32 +02:00
Viktor Lofgren
284f382867 (dom-sample) Fix initialization to work the same as screenshot capture 2025-05-28 15:40:09 +02:00
Viktor Lofgren
a80717f138 (dom-sample) Cleanup 2025-05-28 15:32:54 +02:00
Viktor Lofgren
d6da715fa4 (dom-sample) Add basic retrieval logic
First iteration is single threaded for simplicity
2025-05-28 15:18:15 +02:00
Viktor Lofgren
c1ec7aa491 (dom-sample) Add a boolean to the sample db when we've accepted a cookie dialogue 2025-05-28 14:45:19 +02:00
Viktor Lofgren
3daf37e283 (dom-sample) Improve storage of DOM sample data 2025-05-28 14:34:34 +02:00
Viktor Lofgren
44a774d3a8 (browserless) Add --pull option to Docker build command
This ensures we fetch the latest base image when we build.
2025-05-28 14:09:32 +02:00
Viktor Lofgren
597aeaf496 (website-capture) Correct manifest
run_at is set at the content_script level, not the root object.
2025-05-28 14:05:16 +02:00
Viktor Lofgren
06df7892c2 (website-capture) Clean up code 2025-05-27 15:56:59 +02:00
Viktor Lofgren
dc26854268 (website-capture) Add a marker to the network log when we've accepted a cookie dialog 2025-05-27 15:21:02 +02:00
Viktor Lofgren
9f16326cba (website-capture) Add logic that automatically identifies and agrees to cookie consent popovers
Oftentimes, ads don't load until after you've agreed to the popover.
2025-05-27 15:11:47 +02:00
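
The gist of that heuristic, as a minimal jsoup-style sketch in Java (the real logic lives in the browser extension; the selectors and keyword list here are illustrative assumptions, not the actual code):

import org.jsoup.Jsoup;
import org.jsoup.nodes.Element;

import java.util.List;
import java.util.Optional;

class ConsentHeuristicSketch {
    // Illustrative keyword list; the extension's real heuristics are not reproduced here
    private static final List<String> AGREE_WORDS =
            List.of("accept", "agree", "allow all", "got it", "ok");

    /** Find a button-like element that likely accepts a cookie consent popover. */
    static Optional<Element> findAgreeButton(String html) {
        var doc = Jsoup.parse(html);
        for (Element el : doc.select("button, a, [role=button]")) {
            String text = el.text().toLowerCase();
            if (AGREE_WORDS.stream().anyMatch(text::contains)) {
                return Optional.of(el);
            }
        }
        return Optional.empty();
    }
}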
Viktor Lofgren
ed66d0b3a7 (website-capture) Amend the extension to also capture web request information 2025-05-26 14:00:43 +02:00
Viktor Lofgren
c3afc82dad (website-capture) Rename scripts to be more consistent with extension terminology 2025-05-26 13:13:11 +02:00
Viktor Lofgren
08e25e539e (website-capture) Minor cleanups 2025-05-21 14:55:03 +02:00
Viktor Lofgren
4946044dd0 (website-capture) Update BrowserlessClient to use the new image 2025-05-21 14:14:18 +02:00
Viktor Lofgren
edf382e1c5 (website-capture) Add a custom docker image with a new custom extension for DOM capture
The original approach of injecting javascript into the page directly didn't work with pages that reloaded themselves.  To work around this, a chrome extension is used instead that does the same work, but subscribes to reload events and re-installs the change listener.
2025-05-21 14:13:54 +02:00
Viktor Lofgren
644cba32e4 (website-capture) Remove dead imports 2025-05-20 16:08:48 +02:00
Viktor Lofgren
34b76390b2 (website-capture) Add storage object for DOM samples 2025-05-20 16:05:54 +02:00
Viktor Lofgren
43cd507971 (crawler) Add a migration workaround so we can still open old slop crawl data with the new column added 2025-05-19 14:47:38 +02:00
Viktor Lofgren
cc40e99fdc (crawler) Add a migration workaround so we can still open old slop crawl data with the new column added 2025-05-19 14:37:59 +02:00
Viktor Lofgren
8a944cf4c6 (crawler) Add request time to crawl data
This is an interesting indicator of website quality.
2025-05-19 14:07:41 +02:00
Viktor Lofgren
1c128e6d82 (crawler) Add request time to crawl data
This is an interesting indicator of website quality.
2025-05-19 14:02:03 +02:00
Viktor Lofgren
be039d1a8c (live-capture) Add a new function for capturing the DOM of a website after rendering
The new code injects a javascript that attempts to trigger popovers, and then alters the DOM to add attributes capturing CSS properties such as position and visibility.
2025-05-19 13:26:07 +02:00
Viktor Lofgren
4edc0d3267 (converter) Increase work buffer for converter
Conversion on index node 7 in production is crashing, ostensibly because this buffer is too small.
2025-05-18 13:22:44 +02:00
Viktor Lofgren
890f521d0d (pdf) Fix crash for some bold lines 2025-05-18 13:05:05 +02:00
Viktor Lofgren
b1814a30f7 (deploy) Redeploy all services. 2025-05-17 13:11:51 +02:00
Viktor Lofgren
f59a9eb025 (legacy-search) Soften domain limit constraints in URL deduplication 2025-05-17 00:04:27 +02:00
Viktor Lofgren
599534806b (search) Soften domain limit constraints in URL deduplication 2025-05-17 00:00:42 +02:00
Viktor Lofgren
7e8253dac7 (search) Clean up debug logging 2025-05-17 00:00:28 +02:00
Viktor Lofgren
97a6780ea3 (search) Add debug logging for specific query 2025-05-16 23:41:35 +02:00
Viktor Lofgren
eb634beec8 (search) Add debug logging for specific query 2025-05-16 23:34:03 +02:00
Viktor Lofgren
269ebd1654 Revert "(query) Add debug logging for specific query"
This reverts commit 39ce40bfeb.
2025-05-16 23:29:06 +02:00
Viktor Lofgren
39ce40bfeb (query) Add debug logging for specific query 2025-05-16 23:23:53 +02:00
Viktor Lofgren
c187b2e1c1 (search) Re-enable clustering 2025-05-16 23:20:16 +02:00
Viktor Lofgren
42eaa4588b (search) Disable clustering for a moment 2025-05-16 23:17:01 +02:00
Viktor Lofgren
4f40a5fbeb (search) Reduce log spam 2025-05-16 23:15:07 +02:00
Viktor Lofgren
3f3d42bc01 (search) Re-enable deduplication 2025-05-16 23:14:54 +02:00
Viktor Lofgren
61c8d53e1b (search) Disable deduplication for a moment 2025-05-16 23:10:32 +02:00
Viktor Lofgren
a7a3d85be9 (search) Increase search timeout by 50ms 2025-05-16 22:54:12 +02:00
Viktor Lofgren
306232fb54 (pdf) Fix handling of a few corner cases
Deal better with documents which change font on blank spaces.
2025-05-13 18:44:28 +02:00
Viktor Lofgren
5aef844f0d (dependency) Increase slop version to 0.0.11
v0.0.11 uses atomic moves.  This ensures we don't encounter a race condition in the backup service with lingering .tmp-files that should have been renamed.
2025-05-12 14:09:16 +02:00
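
The race this avoids, sketched with plain java.nio (hypothetical file names; this is not slop's internal API, just the pattern it adopts):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

class AtomicRenameSketch {
    /** Write to a temp file, then publish it with a single atomic rename so a
     *  concurrent reader never observes a half-written or lingering .tmp file. */
    static void publish(Path dir, byte[] data) throws IOException {
        Path tmp = dir.resolve("data.bin.tmp");   // hypothetical names
        Path dst = dir.resolve("data.bin");
        Files.write(tmp, data);
        Files.move(tmp, dst, StandardCopyOption.ATOMIC_MOVE);
    }
}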
Viktor
d56b5c828a Merge pull request #198 from MarginaliaSearch/process-pdf-files
Add support for processing PDF files. The changeset adds a dependency on pdfbox, and vendors/modifies its PDFTextStripper to extract additional semantics from the documents.

Since PDF isn't a text-based format but a graphical one that may contain a stream of characters and positions (sometimes overlapping, rotated, or out of order), identifying something like a header or a paragraph is a non-trivial task, let alone extracting any text at all. A number of heuristics are used to accomplish this; they aren't perfect, but they're about as good as you're going to get without something like a vision-based LLM, which would be ridiculously expensive to apply at internet search engine scale.

The change also adds format information to the JSON API, as well as indicators in the GUI for PDF files.
2025-05-11 16:43:25 +02:00
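
A toy illustration of the font-size heuristic (thresholds and inputs are assumptions; the vendored PDFTextStripper's actual logic is considerably more involved):

class HeadingHeuristicSketch {
    /** Classify a text run as a heading if its font is clearly larger (or bolder)
     *  than the document's body text. The 1.25 multiplier is an assumption for illustration. */
    static boolean looksLikeHeading(float runFontSize, boolean bold, float medianBodyFontSize) {
        if (runFontSize > 1.25f * medianBodyFontSize)
            return true;
        return bold && runFontSize >= medianBodyFontSize;
    }
}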
Viktor Lofgren
ab58a4636f (pdf) Disable tests that require specific sample data that can't go in the repo 2025-05-11 16:42:23 +02:00
Viktor Lofgren
00be269238 (search) Add PDF indicator in "also from"-segment 2025-05-11 16:35:52 +02:00
Viktor Lofgren
879e6a9424 (pdf) Identify additional headings based on font weight 2025-05-11 16:35:52 +02:00
Viktor Lofgren
fba3455732 (pdf) Clean up code 2025-05-11 16:35:52 +02:00
Viktor Lofgren
14283da7f5 (pdf) Clean up generated DOM
Sometimes empty <p>-tags are inserted, which messes with the header joining process. This removes those nodes.
2025-05-11 15:12:09 +02:00
Viktor Lofgren
93df4d1fc0 (pdf) Improve summary extraction for PDFs 2025-05-11 14:33:11 +02:00
Viktor Lofgren
b12a0b998c (pdf) Use smarter heuristics for paragraph splitting
We look at the median line distance, with outliers removed, to figure out when to break lines, as the original approach works poorly with e.g. double line spaced documents.
2025-05-11 14:29:42 +02:00
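
The trimmed-median idea in isolation, as an assumed sketch rather than the vendored stripper's actual code:

import java.util.Arrays;

class LineSpacingSketch {
    /** Median line-to-line distance with the outlier tails removed,
     *  so double line-spaced documents don't break on every line. */
    static double typicalSpacing(double[] gaps) {
        double[] sorted = gaps.clone();
        Arrays.sort(sorted);
        int lo = sorted.length / 10, hi = sorted.length - sorted.length / 10; // trim 10% tails
        double[] trimmed = Arrays.copyOfRange(sorted, lo, Math.max(lo + 1, hi));
        return trimmed[trimmed.length / 2];
    }

    static boolean isParagraphBreak(double gap, double typicalSpacing) {
        return gap > 1.5 * typicalSpacing; // multiplier is an assumption for illustration
    }
}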
Viktor Lofgren
3b6f4e321b (search) Add red PDF indicator to search UI 2025-05-11 13:32:14 +02:00
Viktor Lofgren
8428111771 (pdf) Fix for exception when no text positions are available 2025-05-10 15:12:02 +02:00
Viktor Lofgren
e9fd4415ef (pdf) Merge consecutive headings.
Headings don't follow the same indentation rules as prose and tend to be cut off into multiple "paragraphs" by the text extractor.
2025-05-10 14:38:43 +02:00
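
Roughly, as an assumed jsoup-flavored sketch of the merging pass:

import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

class HeadingMergeSketch {
    /** Fold a heading into an immediately preceding heading of the same level,
     *  since PDF extraction tends to split one visual heading into several. */
    static void mergeConsecutiveHeadings(Document doc) {
        for (Element h : doc.select("h1, h2, h3")) {
            Element prev = h.previousElementSibling();
            if (prev != null && prev.tagName().equals(h.tagName())) {
                prev.appendText(" " + h.text());
                h.remove();
            }
        }
    }
}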
Viktor Lofgren
4c95c3dcad (pdf) Don't look for headings below 75% of the max y-position 2025-05-10 14:38:02 +02:00
Viktor Lofgren
c5281536fb (api) Add format field to JSON search results
API consumers might want to filter out PDF results, etc.
2025-05-10 13:56:22 +02:00
Viktor Lofgren
4431dae7ac (refac) Rename HtmlStandard -> DocumentFormat
The old model made some sense when we only supported HTML and to some extent plain text, but having PDF in an enum called HtmlFormat is a bit of a stretch.
2025-05-10 13:47:26 +02:00
Viktor Lofgren
4df4d0a7a8 (pdf) Increase line spacing tolerance for better paragraph handling 2025-05-10 13:34:04 +02:00
Viktor Lofgren
9f05083b94 (pdf) Add the capability to identify headings
This change vendors pdfbox'es PDFTextStripper and modifies it to be able to heuristically identify headings based on their font size, as this is a very useful relevance signal for the search engine, and helps identify the correct title of the article.
2025-05-09 14:04:04 +02:00
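
One plausible way to estimate the body font size that such a heading check compares against, purely as an assumed sketch:

import java.util.HashMap;
import java.util.Map;

class BodyFontSizeSketch {
    /** The body text size can be estimated as the most common font size
     *  among all text positions in the document. */
    static float dominantFontSize(float[] fontSizes) {
        Map<Float, Integer> counts = new HashMap<>();
        float best = 0; int bestCount = 0;
        for (float size : fontSizes) {
            int c = counts.merge(size, 1, Integer::sum);
            if (c > bestCount) { bestCount = c; best = size; }
        }
        return best;
    }
}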
Viktor Lofgren
fc92e9b9c0 (feeds) Correct link handling in atom feeds
This addresses issue #199
2025-05-09 13:00:07 +02:00
Viktor Lofgren
328fb5d927 (feeds) Correct link handling in atom feeds
This addresses issue #199
2025-05-09 12:55:28 +02:00
Viktor Lofgren
36889950e8 (pdf) Migrate to PDFBox 3.0.5 and suppress log spam
PDFBox 2.x uses commons logging, which does not route through SLF4J and is thus a hassle to configure; it is also extremely verbose in its default logging settings.

Migrating to PDFBox 3.x lets us use slf4j to address the log spam by filtering out the noisy methods.
2025-05-08 18:03:26 +02:00
Viktor Lofgren
c96a94878b (pdf) Add feature to make pdf-files searchable with format:pdf 2025-05-08 18:03:26 +02:00
Viktor Lofgren
1c57d7d73a (pdf) Clean up code 2025-05-08 18:03:26 +02:00
Viktor Lofgren
a443d22356 (pdf) Flag the file as a PDF file in the GUI 2025-05-08 18:03:26 +02:00
Viktor Lofgren
aa59d4afa4 (pdf) Somewhat improve title and summary extraction 2025-05-08 18:03:26 +02:00
Viktor Lofgren
df0f18d0e7 (pdf) Read title 2025-05-08 18:03:26 +02:00
Viktor Lofgren
0819d46f97 (pdf) Minimal prototype to get PDFs working 2025-05-08 18:03:26 +02:00
Viktor Lofgren
5e2b63473e (logging) Change to a terser log format
The old log format would often span several screen widths, especially when subprocesses logged. We switch to a terser format that should be much easier to read.
2025-05-08 18:02:22 +02:00
Viktor
f9590703f1 Merge pull request #197 from MarginaliaSearch/crawl-markdown
(markdown) Support crawling markdown
2025-05-08 13:35:00 +02:00
Viktor Lofgren
f12fc11337 (markdown) Support crawling markdown 2025-05-08 13:26:22 +02:00
Viktor Lofgren
c309030184 (sample) Ensure we finalize the slop.zip file creation when filtering 2025-05-06 14:52:48 +02:00
Viktor Lofgren
fd5af01629 (sample) Ensure we flush the log before adding it to the tar file 2025-05-06 14:43:47 +02:00
Viktor Lofgren
d4c43c7a79 (crawler) Test case for fetching PDFs 2025-05-06 13:45:16 +02:00
Viktor Lofgren
18700e1919 (sample) Fix bug where slop files would not be saved despite containing data 2025-05-06 13:38:21 +02:00
Viktor Lofgren
120b431998 (crawler) Fix outdated assumptions about content types, and about HTTP status codes always being 200 when good.
We now sometimes get 206 for good responses.
2025-05-06 13:18:30 +02:00
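
In effect the success check widens from requiring exactly 200 to accepting partial content too, along the lines of this assumed helper:

class HttpStatusSketch {
    /** A fetch is good if the server returned the full document (200)
     *  or a partial response (206), e.g. when range requests are in play. */
    static boolean isGoodResponse(int statusCode) {
        return statusCode == 200 || statusCode == 206;
    }
}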
Viktor Lofgren
71dad99326 (crawler) Revisitor should not demand a 200, but support a 206 as well 2025-05-06 13:11:52 +02:00
Viktor Lofgren
c1e8afdf86 (crawler) Remove domains from pending crawl tasks queue when retrying 2025-05-06 12:56:30 +02:00
Viktor Lofgren
fa32dddc24 (sample-actor) Make content type matching lenient with regard to ct parameters such as charset 2025-05-06 12:48:09 +02:00
Viktor Lofgren
a266fcbf30 (sample-actor) Clean up debris from previous runs to avoid errors on re-runs 2025-05-05 13:16:37 +02:00
Viktor Lofgren
6e47e58e0e (sample-actor) Add progress tracking to sample export actor 2025-05-05 13:04:14 +02:00
Viktor Lofgren
9dc43d8b4a (sample-actor) Update the sample export actor to not generate empty files when the filter is not applicable. 2025-05-05 12:56:12 +02:00
Viktor Lofgren
83967e3305 (sample-actor) Update the sample export actor to not generate empty files when the filter is not applicable. 2025-05-05 12:50:21 +02:00
Viktor Lofgren
4db980a291 (jooby-service) Set an upper limit on the number of worker threads 2025-05-05 12:40:31 +02:00
Viktor Lofgren
089b177868 (deploy) Executor partition 4. 2025-05-05 12:21:27 +02:00
Viktor Lofgren
9c8e9a68d5 (deploy) Executor partition 4. 2025-05-05 12:00:05 +02:00
115 changed files with 5370 additions and 347 deletions

View File

@@ -0,0 +1,5 @@
CREATE TABLE IF NOT EXISTS WMSA_prod.NSFW_DOMAINS (
ID INT NOT NULL AUTO_INCREMENT,
TIER INT NOT NULL,
PRIMARY KEY (ID)
);

View File

@@ -0,0 +1,24 @@
package nu.marginalia.model;
public enum DocumentFormat {
PLAIN(0, 1, "text"),
PDF(0, 1, "pdf"),
UNKNOWN(0, 1, "???"),
HTML123(0, 1, "html"),
HTML4(-0.1, 1.05, "html"),
XHTML(-0.1, 1.05, "html"),
HTML5(0.5, 1.1, "html");
/** Used to tune quality score */
public final double offset;
/** Used to tune quality score */
public final double scale;
public final String shortFormat;
DocumentFormat(double offset, double scale, String shortFormat) {
this.offset = offset;
this.scale = scale;
this.shortFormat = shortFormat;
}
}

View File

@@ -112,14 +112,6 @@ public class EdgeDomain implements Serializable {
         return topDomain;
     }

-    public String getDomainKey() {
-        int cutPoint = topDomain.indexOf('.');
-        if (cutPoint < 0) {
-            return topDomain;
-        }
-        return topDomain.substring(0, cutPoint).toLowerCase();
-    }
-
     /** If possible, try to provide an alias domain,
      * i.e. a domain name that is very likely to link to this one
      * */

View File

@@ -28,6 +28,8 @@ public enum HtmlFeature {
GA_SPAM("special:gaspam"), GA_SPAM("special:gaspam"),
PDF("format:pdf"),
/** For fingerprinting and ranking */ /** For fingerprinting and ranking */
OPENGRAPH("special:opengraph"), OPENGRAPH("special:opengraph"),
OPENGRAPH_IMAGE("special:opengraph:image"), OPENGRAPH_IMAGE("special:opengraph:image"),

View File

@@ -1,22 +0,0 @@
package nu.marginalia.model.html;
// This class really doesn't belong anywhere, but will squat here for now
public enum HtmlStandard {
PLAIN(0, 1),
UNKNOWN(0, 1),
HTML123(0, 1),
HTML4(-0.1, 1.05),
XHTML(-0.1, 1.05),
HTML5(0.5, 1.1);
/** Used to tune quality score */
public final double offset;
/** Used to tune quality score */
public final double scale;
HtmlStandard(double offset, double scale) {
this.offset = offset;
this.scale = scale;
}
}

View File

@@ -9,7 +9,7 @@ public enum DocumentFlags {
     GeneratorForum,
     GeneratorWiki,
     Sideloaded,
-    Unused7,
+    PdfFile,
     Unused8,
     ;

View File

@@ -8,14 +8,6 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
 class EdgeDomainTest {

-    @Test
-    public void testSkepdic() throws URISyntaxException {
-        var domain = new EdgeUrl("http://www.skepdic.com/astrology.html");
-        assertEquals("skepdic", domain.getDomain().getDomainKey());
-        var domain2 = new EdgeUrl("http://skepdic.com/astrology.html");
-        assertEquals("skepdic", domain2.getDomain().getDomainKey());
-    }
-
     @Test
     public void testHkDomain() throws URISyntaxException {
         var domain = new EdgeUrl("http://l7072i3.l7c.net");

View File

@@ -122,6 +122,11 @@ public class JoobyService {
         // single digit percentage difference since HTML already compresses very well with level = 1.
         options.setCompressionLevel(1);

+        // Set a cap on the number of worker threads, as Jooby's default value does not seem to consider
+        // multi-tenant servers with high thread counts, and spins up an exorbitant number of threads in that
+        // scenario
+        options.setWorkerThreads(Math.min(128, options.getWorkerThreads()));
+
         jooby.setServerOptions(options);

View File

@@ -3,11 +3,18 @@
<Console name="Console" target="SYSTEM_OUT"> <Console name="Console" target="SYSTEM_OUT">
<PatternLayout pattern="%d{HH:mm:ss,SSS} %style{%-8markerSimpleName}{FG_Cyan} %highlight{%-5level}{FATAL=red, ERROR=red, WARN=yellow} %-24t %-20c{1} -- %msg%n"/> <PatternLayout pattern="%d{HH:mm:ss,SSS} %style{%-8markerSimpleName}{FG_Cyan} %highlight{%-5level}{FATAL=red, ERROR=red, WARN=yellow} %-24t %-20c{1} -- %msg%n"/>
<Filters> <Filters>
<MarkerFilter marker="PROCESS" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="QUERY" onMatch="DENY" onMismatch="NEUTRAL" /> <MarkerFilter marker="QUERY" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="HTTP" onMatch="DENY" onMismatch="NEUTRAL" /> <MarkerFilter marker="HTTP" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="CRAWLER" onMatch="DENY" onMismatch="NEUTRAL" /> <MarkerFilter marker="CRAWLER" onMatch="DENY" onMismatch="NEUTRAL" />
</Filters> </Filters>
</Console> </Console>
<Console name="ProcessConsole" target="SYSTEM_OUT">
<PatternLayout pattern="%style{P}{FG_Cyan} %msg%n"/>
<Filters>
<MarkerFilter marker="PROCESS" onMatch="ALLOW" onMismatch="DENY" />
</Filters>
</Console>
<RollingFile name="LogToFile" fileName="${env:WMSA_LOG_DIR:-/var/log/wmsa}/wmsa-${sys:service-name}-${env:WMSA_SERVICE_NODE:-0}.log" filePattern="/var/log/wmsa/wmsa-${sys:service-name}-${env:WMSA_SERVICE_NODE:-0}-log-%d{MM-dd-yy-HH-mm-ss}-%i.log.gz" <RollingFile name="LogToFile" fileName="${env:WMSA_LOG_DIR:-/var/log/wmsa}/wmsa-${sys:service-name}-${env:WMSA_SERVICE_NODE:-0}.log" filePattern="/var/log/wmsa/wmsa-${sys:service-name}-${env:WMSA_SERVICE_NODE:-0}-log-%d{MM-dd-yy-HH-mm-ss}-%i.log.gz"
ignoreExceptions="false"> ignoreExceptions="false">
<JSONLayout compact="true" eventEol="true" properties="true" stacktraceAsString="true" includeTimeMillis="true"/> <JSONLayout compact="true" eventEol="true" properties="true" stacktraceAsString="true" includeTimeMillis="true"/>
@@ -15,6 +22,7 @@
<MarkerFilter marker="QUERY" onMatch="DENY" onMismatch="NEUTRAL" /> <MarkerFilter marker="QUERY" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="HTTP" onMatch="DENY" onMismatch="NEUTRAL" /> <MarkerFilter marker="HTTP" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="CRAWLER" onMatch="DENY" onMismatch="NEUTRAL" /> <MarkerFilter marker="CRAWLER" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="PROCESS" onMatch="DENY" onMismatch="NEUTRAL" />
</Filters> </Filters>
<SizeBasedTriggeringPolicy size="10MB" /> <SizeBasedTriggeringPolicy size="10MB" />
</RollingFile> </RollingFile>
@@ -31,9 +39,11 @@
</Appenders> </Appenders>
<Loggers> <Loggers>
<Logger name="org.apache.zookeeper" level="WARN" /> <Logger name="org.apache.zookeeper" level="WARN" />
<Logger name="org.apache.pdfbox" level="ERROR" />
<Logger name="org.apache.fontbox.ttf" level="ERROR" />
<Root level="info"> <Root level="info">
<AppenderRef ref="Console"/> <AppenderRef ref="Console"/>
<AppenderRef ref="ProcessConsole"/>
<AppenderRef ref="LogToFile"/> <AppenderRef ref="LogToFile"/>
</Root> </Root>
</Loggers> </Loggers>

View File

@@ -1,13 +1,51 @@
<Configuration xmlns="http://logging.apache.org/log4j/2.0/config" > <Configuration xmlns="http://logging.apache.org/log4j/2.0/config" >
<Appenders> <Appenders>
<Console name="Console" target="SYSTEM_OUT"> <Console name="ConsoleInfo" target="SYSTEM_OUT">
<PatternLayout pattern="%d{HH:mm:ss,SSS} %style{%-8markerSimpleName}{FG_Cyan} %highlight{%-5level}{FATAL=red, ERROR=red, WARN=yellow} %-24t %-20c{1} -- %msg%n"/> <PatternLayout pattern="- %d{HH:mm:ss,SSS} %-20c{1} -- %msg%n"/>
<Filters> <Filters>
<LevelMatchFilter level="INFO" onMatch="ALLOW" onMismatch="DENY"/>
<MarkerFilter marker="PROCESS" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="QUERY" onMatch="DENY" onMismatch="NEUTRAL" /> <MarkerFilter marker="QUERY" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="HTTP" onMatch="DENY" onMismatch="NEUTRAL" /> <MarkerFilter marker="HTTP" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="CRAWLER" onMatch="DENY" onMismatch="NEUTRAL" /> <MarkerFilter marker="CRAWLER" onMatch="DENY" onMismatch="NEUTRAL" />
</Filters> </Filters>
</Console> </Console>
<Console name="ConsoleWarn" target="SYSTEM_OUT">
<PatternLayout pattern="⚠ %d{HH:mm:ss,SSS} %-20c{1} -- %msg%n"/>
<Filters>
<LevelMatchFilter level="WARN" onMatch="ALLOW" onMismatch="DENY"/>
<MarkerFilter marker="PROCESS" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="QUERY" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="HTTP" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="CRAWLER" onMatch="DENY" onMismatch="NEUTRAL" />
</Filters>
</Console>
<Console name="ConsoleError" target="SYSTEM_OUT">
<PatternLayout pattern="🔥 %d{HH:mm:ss,SSS} %-20c{1} -- %msg%n"/>
<Filters>
<LevelMatchFilter level="ERROR" onMatch="ALLOW" onMismatch="DENY"/>
<MarkerFilter marker="PROCESS" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="QUERY" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="HTTP" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="CRAWLER" onMatch="DENY" onMismatch="NEUTRAL" />
</Filters>
</Console>
<Console name="ConsoleFatal" target="SYSTEM_OUT">
<PatternLayout pattern="💀 %d{HH:mm:ss,SSS} %-20c{1} -- %msg%n"/>
<Filters>
<LevelMatchFilter level="FATAL" onMatch="ALLOW" onMismatch="DENY"/>
<MarkerFilter marker="PROCESS" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="QUERY" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="HTTP" onMatch="DENY" onMismatch="NEUTRAL" />
<MarkerFilter marker="CRAWLER" onMatch="DENY" onMismatch="NEUTRAL" />
</Filters>
</Console>
<Console name="ProcessConsole" target="SYSTEM_OUT">
<PatternLayout pattern="%style{%msg%n}{FG_Cyan}"/>
<Filters>
<MarkerFilter marker="PROCESS" onMatch="ALLOW" onMismatch="DENY" />
</Filters>
</Console>
<RollingFile name="LogToFile" fileName="${env:WMSA_LOG_DIR:-/var/log/wmsa}/wmsa-${sys:service-name}-${env:WMSA_SERVICE_NODE:-0}.log" filePattern="/var/log/wmsa/wmsa-${sys:service-name}-${env:WMSA_SERVICE_NODE:-0}-log-%d{MM-dd-yy-HH-mm-ss}-%i.log.gz" <RollingFile name="LogToFile" fileName="${env:WMSA_LOG_DIR:-/var/log/wmsa}/wmsa-${sys:service-name}-${env:WMSA_SERVICE_NODE:-0}.log" filePattern="/var/log/wmsa/wmsa-${sys:service-name}-${env:WMSA_SERVICE_NODE:-0}-log-%d{MM-dd-yy-HH-mm-ss}-%i.log.gz"
ignoreExceptions="false"> ignoreExceptions="false">
<PatternLayout> <PatternLayout>
@@ -34,9 +72,14 @@
</Appenders> </Appenders>
<Loggers> <Loggers>
<Logger name="org.apache.zookeeper" level="WARN" /> <Logger name="org.apache.zookeeper" level="WARN" />
<Logger name="org.apache.pdfbox" level="ERROR" />
<Logger name="org.apache.fontbox.ttf" level="ERROR" />
<Root level="info"> <Root level="info">
<AppenderRef ref="Console"/> <AppenderRef ref="ConsoleInfo"/>
<AppenderRef ref="ConsoleWarn"/>
<AppenderRef ref="ConsoleError"/>
<AppenderRef ref="ConsoleFatal"/>
<AppenderRef ref="ProcessConsole"/>
<AppenderRef ref="LogToFile"/> <AppenderRef ref="LogToFile"/>
</Root> </Root>
</Loggers> </Loggers>

View File

@@ -1,15 +1,50 @@
<Configuration xmlns="http://logging.apache.org/log4j/2.0/config" > <Configuration xmlns="http://logging.apache.org/log4j/2.0/config" >
<Appenders> <Appenders>
<Console name="Console" target="SYSTEM_OUT"> <Console name="ConsoleInfo" target="SYSTEM_OUT">
<PatternLayout pattern="%d{HH:mm:ss,SSS} %style{%-8markerSimpleName}{FG_Cyan} %highlight{%-5level}{FATAL=red, ERROR=red, WARN=yellow} %-24t %-20c{1} -- %msg%n"/> <PatternLayout pattern="- %d{HH:mm:ss,SSS} %-20c{1} -- %msg%n"/>
<Filters>
<LevelMatchFilter level="INFO" onMatch="ALLOW" onMismatch="DENY"/>
<MarkerFilter marker="PROCESS" onMatch="DENY" onMismatch="NEUTRAL" />
</Filters>
</Console>
<Console name="ConsoleWarn" target="SYSTEM_OUT">
<PatternLayout pattern="⚠ %d{HH:mm:ss,SSS} %-20c{1} -- %msg%n"/>
<Filters>
<LevelMatchFilter level="WARN" onMatch="ALLOW" onMismatch="DENY"/>
<MarkerFilter marker="PROCESS" onMatch="DENY" onMismatch="NEUTRAL" />
</Filters>
</Console>
<Console name="ConsoleError" target="SYSTEM_OUT">
<PatternLayout pattern="🔥 %d{HH:mm:ss,SSS} %-20c{1} -- %msg%n"/>
<Filters>
<LevelMatchFilter level="ERROR" onMatch="ALLOW" onMismatch="DENY"/>
<MarkerFilter marker="PROCESS" onMatch="DENY" onMismatch="NEUTRAL" />
</Filters>
</Console>
<Console name="ConsoleFatal" target="SYSTEM_OUT">
<PatternLayout pattern="💀 %d{HH:mm:ss,SSS} %-20c{1} -- %msg%n"/>
<Filters>
<LevelMatchFilter level="FATAL" onMatch="ALLOW" onMismatch="DENY"/>
<MarkerFilter marker="PROCESS" onMatch="DENY" onMismatch="NEUTRAL" />
</Filters>
</Console>
<Console name="ProcessConsole" target="SYSTEM_OUT">
<PatternLayout pattern="%style{%msg%n}{FG_Cyan}"/>
<Filters>
<MarkerFilter marker="PROCESS" onMatch="ALLOW" onMismatch="DENY" />
</Filters>
</Console> </Console>
</Appenders> </Appenders>
<Loggers> <Loggers>
<Logger name="org.apache.zookeeper" level="WARN" /> <Logger name="org.apache.zookeeper" level="WARN" />
<Logger name="org.apache.pdfbox" level="ERROR" />
<Logger name="org.apache.fontbox.ttf" level="ERROR" />
<Root level="info"> <Root level="info">
<AppenderRef ref="Console"/> <AppenderRef ref="ConsoleInfo"/>
<AppenderRef ref="LogToFile"/> <AppenderRef ref="ConsoleWarn"/>
<AppenderRef ref="ConsoleError"/>
<AppenderRef ref="ConsoleFatal"/>
<AppenderRef ref="ProcessConsole"/>
</Root> </Root>
</Loggers> </Loggers>
</Configuration> </Configuration>

View File

@@ -37,6 +37,7 @@ dependencies {
     implementation project(':code:functions:link-graph:api')
     implementation project(':code:functions:live-capture:api')
     implementation project(':code:functions:search-query')
+    implementation project(':code:functions:nsfw-domain-filter')
     implementation project(':code:execution:api')
     implementation project(':code:processes:crawling-process:model')

View File

@@ -6,6 +6,7 @@ import java.util.Set;
 public enum ExecutorActor {
     PREC_EXPORT_ALL(NodeProfile.BATCH_CRAWL, NodeProfile.MIXED),
+    SYNC_NSFW_LISTS(NodeProfile.BATCH_CRAWL, NodeProfile.MIXED),
     CRAWL(NodeProfile.BATCH_CRAWL, NodeProfile.MIXED),
     RECRAWL(NodeProfile.BATCH_CRAWL, NodeProfile.MIXED),
@@ -35,7 +36,8 @@
     LIVE_CRAWL(NodeProfile.REALTIME),
     PROC_LIVE_CRAWL_SPAWNER(NodeProfile.REALTIME),
     SCRAPE_FEEDS(NodeProfile.REALTIME),
-    UPDATE_RSS(NodeProfile.REALTIME);
+    UPDATE_RSS(NodeProfile.REALTIME)
+    ;

     public String id() {
         return "fsm:" + name().toLowerCase();

View File

@@ -68,6 +68,7 @@ public class ExecutorActorControlService {
                                      ExecutorActorStateMachines stateMachines,
                                      MigrateCrawlDataActor migrateCrawlDataActor,
                                      ExportAllPrecessionActor exportAllPrecessionActor,
+                                     UpdateNsfwFiltersActor updateNsfwFiltersActor,
                                      UpdateRssActor updateRssActor) throws SQLException {
         this.messageQueueFactory = messageQueueFactory;
         this.eventLog = baseServiceParams.eventLog;
@@ -109,6 +110,7 @@
         register(ExecutorActor.UPDATE_RSS, updateRssActor);
         register(ExecutorActor.MIGRATE_CRAWL_DATA, migrateCrawlDataActor);
+        register(ExecutorActor.SYNC_NSFW_LISTS, updateNsfwFiltersActor);

         if (serviceConfiguration.node() == 1) {
             register(ExecutorActor.PREC_EXPORT_ALL, exportAllPrecessionActor);

View File

@@ -0,0 +1,53 @@
package nu.marginalia.actor.task;
import com.google.gson.Gson;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import nu.marginalia.actor.prototype.RecordActorPrototype;
import nu.marginalia.actor.state.ActorStep;
import nu.marginalia.nsfw.NsfwDomainFilter;
import nu.marginalia.service.module.ServiceConfiguration;
@Singleton
public class UpdateNsfwFiltersActor extends RecordActorPrototype {
private final ServiceConfiguration serviceConfiguration;
private final NsfwDomainFilter nsfwDomainFilter;
public record Initial() implements ActorStep {}
public record Run() implements ActorStep {}
@Override
public ActorStep transition(ActorStep self) throws Exception {
return switch(self) {
case Initial() -> {
if (serviceConfiguration.node() != 1) {
yield new Error("This actor can only run on node 1");
}
else {
yield new Run();
}
}
case Run() -> {
nsfwDomainFilter.fetchLists();
yield new End();
}
default -> new Error();
};
}
@Override
public String describe() {
return "Sync NSFW filters";
}
@Inject
public UpdateNsfwFiltersActor(Gson gson,
ServiceConfiguration serviceConfiguration,
NsfwDomainFilter nsfwDomainFilter)
{
super(gson);
this.serviceConfiguration = serviceConfiguration;
this.nsfwDomainFilter = nsfwDomainFilter;
}
}

View File

@@ -25,9 +25,9 @@ dependencies {
     implementation project(':code:execution:api')
     implementation project(':code:processes:crawling-process:ft-content-type')
-    implementation project(':third-party:rssreader')
     implementation libs.jsoup
+    implementation project(':third-party:rssreader')
     implementation libs.opencsv
     implementation libs.slop
     implementation libs.sqlite
@@ -57,8 +57,6 @@
     implementation libs.bundles.gson
     implementation libs.bundles.mariadb

     testImplementation libs.bundles.slf4j.test
     testImplementation libs.bundles.junit
     testImplementation libs.mockito

View File

@@ -0,0 +1,126 @@
package nu.marginalia.domsample;
import com.google.inject.Inject;
import com.zaxxer.hikari.HikariDataSource;
import jakarta.inject.Named;
import nu.marginalia.domsample.db.DomSampleDb;
import nu.marginalia.livecapture.BrowserlessClient;
import nu.marginalia.service.module.ServiceConfiguration;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.URI;
import java.net.URISyntaxException;
import java.time.Duration;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.TimeUnit;
public class DomSampleService {
private final DomSampleDb db;
private final HikariDataSource mariadbDataSource;
private final URI browserlessURI;
private static final Logger logger = LoggerFactory.getLogger(DomSampleService.class);
@Inject
public DomSampleService(DomSampleDb db,
HikariDataSource mariadbDataSource,
@Named("browserless-uri") String browserlessAddress,
ServiceConfiguration serviceConfiguration)
throws URISyntaxException
{
this.db = db;
this.mariadbDataSource = mariadbDataSource;
if (StringUtils.isEmpty(browserlessAddress) || serviceConfiguration.node() > 1) {
logger.warn("Live capture service will not run");
browserlessURI = null;
}
else {
browserlessURI = new URI(browserlessAddress);
}
}
public void start() {
if (browserlessURI == null) {
logger.warn("DomSampleService is not enabled due to missing browserless URI or multi-node configuration");
return;
}
Thread.ofPlatform().daemon().start(this::run);
}
public void syncDomains() {
Set<String> dbDomains = new HashSet<>();
logger.info("Fetching domains from database...");
try (var conn = mariadbDataSource.getConnection();
var stmt = conn.prepareStatement("""
SELECT DOMAIN_NAME
FROM EC_DOMAIN
WHERE NODE_AFFINITY>0
""")
) {
var rs = stmt.executeQuery();
while (rs.next()) {
dbDomains.add(rs.getString("DOMAIN_NAME"));
}
} catch (Exception e) {
throw new RuntimeException("Failed to sync domains", e);
}
logger.info("Found {} domains in database", dbDomains.size());
db.syncDomains(dbDomains);
logger.info("Synced domains to sqlite");
}
public void run() {
try (var client = new BrowserlessClient(browserlessURI)) {
while (!Thread.currentThread().isInterrupted()) {
try {
// Grace sleep in case we're operating on an empty domain list
TimeUnit.SECONDS.sleep(15);
syncDomains();
var domains = db.getScheduledDomains();
for (var domain : domains) {
updateDomain(client, domain);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
logger.info("DomSampleService interrupted, stopping...");
return;
} catch (Exception e) {
logger.error("Error in DomSampleService run loop", e);
}
}
}
}
private void updateDomain(BrowserlessClient client, String domain) {
var rootUrl = "https://" + domain + "/";
try {
var content = client.annotatedContent(rootUrl, new BrowserlessClient.GotoOptions("load", Duration.ofSeconds(10).toMillis()));
if (content.isPresent()) {
db.saveSample(domain, rootUrl, content.get());
}
} catch (Exception e) {
logger.error("Failed to process domain: " + domain, e);
}
finally {
db.flagDomainAsFetched(domain);
}
}
}

View File

@@ -0,0 +1,174 @@
package nu.marginalia.domsample.db;
import nu.marginalia.WmsaHome;
import org.jsoup.Jsoup;
import java.nio.file.Path;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.*;
public class DomSampleDb implements AutoCloseable {
private static final String dbFileName = "dom-sample.db";
private final Connection connection;
public DomSampleDb() throws SQLException{
this(WmsaHome.getDataPath().resolve(dbFileName));
}
public DomSampleDb(Path dbPath) throws SQLException {
String dbUrl = "jdbc:sqlite:" + dbPath.toAbsolutePath();
connection = DriverManager.getConnection(dbUrl);
try (var stmt = connection.createStatement()) {
stmt.executeUpdate("CREATE TABLE IF NOT EXISTS samples (url TEXT PRIMARY KEY, domain TEXT, sample BLOB, requests BLOB, accepted_popover BOOLEAN DEFAULT FALSE)");
stmt.executeUpdate("CREATE INDEX IF NOT EXISTS domain_index ON samples (domain)");
stmt.executeUpdate("CREATE TABLE IF NOT EXISTS schedule (domain TEXT PRIMARY KEY, last_fetch TIMESTAMP DEFAULT NULL)");
stmt.execute("PRAGMA journal_mode=WAL");
}
}
public void syncDomains(Set<String> domains) {
Set<String> currentDomains = new HashSet<>();
try (var stmt = connection.prepareStatement("SELECT domain FROM schedule")) {
var rs = stmt.executeQuery();
while (rs.next()) {
currentDomains.add(rs.getString("domain"));
}
} catch (SQLException e) {
throw new RuntimeException("Failed to sync domains", e);
}
Set<String> toRemove = new HashSet<>(currentDomains);
Set<String> toAdd = new HashSet<>(domains);
toRemove.removeAll(domains);
toAdd.removeAll(currentDomains);
try (var removeStmt = connection.prepareStatement("DELETE FROM schedule WHERE domain = ?");
var addStmt = connection.prepareStatement("INSERT OR IGNORE INTO schedule (domain) VALUES (?)")
) {
for (String domain : toRemove) {
removeStmt.setString(1, domain);
removeStmt.executeUpdate();
}
for (String domain : toAdd) {
addStmt.setString(1, domain);
addStmt.executeUpdate();
}
} catch (SQLException e) {
throw new RuntimeException("Failed to remove domains", e);
}
}
public List<String> getScheduledDomains() {
List<String> domains = new ArrayList<>();
try (var stmt = connection.prepareStatement("SELECT domain FROM schedule ORDER BY last_fetch IS NULL DESC, last_fetch ASC")) {
var rs = stmt.executeQuery();
while (rs.next()) {
domains.add(rs.getString("domain"));
}
} catch (SQLException e) {
throw new RuntimeException("Failed to get scheduled domains", e);
}
return domains;
}
public void flagDomainAsFetched(String domain) {
try (var stmt = connection.prepareStatement("INSERT OR REPLACE INTO schedule (domain, last_fetch) VALUES (?, CURRENT_TIMESTAMP)")) {
stmt.setString(1, domain);
stmt.executeUpdate();
} catch (SQLException e) {
throw new RuntimeException("Failed to flag domain as fetched", e);
}
}
public record Sample(String url, String domain, String sample, String requests, boolean acceptedPopover) {}
public List<Sample> getSamples(String domain) throws SQLException {
List<Sample> samples = new ArrayList<>();
try (var stmt = connection.prepareStatement("""
SELECT url, sample, requests, accepted_popover
FROM samples
WHERE domain = ?
"""))
{
stmt.setString(1, domain);
var rs = stmt.executeQuery();
while (rs.next()) {
samples.add(
new Sample(
rs.getString("url"),
domain,
rs.getString("sample"),
rs.getString("requests"),
rs.getBoolean("accepted_popover")
)
);
}
}
return samples;
}
public void saveSample(String domain, String url, String rawContent) throws SQLException {
var doc = Jsoup.parse(rawContent);
var networkRequests = doc.getElementById("marginalia-network-requests");
boolean acceptedPopover = false;
StringBuilder requestTsv = new StringBuilder();
if (networkRequests != null) {
acceptedPopover = !networkRequests.getElementsByClass("marginalia-agreed-cookies").isEmpty();
for (var request : networkRequests.getElementsByClass("network-request")) {
String method = request.attr("data-method");
String urlAttr = request.attr("data-url");
String timestamp = request.attr("data-timestamp");
requestTsv
.append(method)
.append('\t')
.append(timestamp)
.append('\t')
.append(urlAttr.replace('\n', ' '))
.append("\n");
}
networkRequests.remove();
}
doc.body().removeAttr("id");
String sample = doc.html();
saveSampleRaw(domain, url, sample, requestTsv.toString().trim(), acceptedPopover);
}
public void saveSampleRaw(String domain, String url, String sample, String requests, boolean acceptedPopover) throws SQLException {
try (var stmt = connection.prepareStatement("""
INSERT OR REPLACE
INTO samples (domain, url, sample, requests, accepted_popover)
VALUES (?, ?, ?, ?, ?)
""")) {
stmt.setString(1, domain);
stmt.setString(2, url);
stmt.setString(3, sample);
stmt.setString(4, requests);
stmt.setBoolean(5, acceptedPopover);
stmt.executeUpdate();
}
}
public void close() throws SQLException {
connection.close();
}
}

View File

@@ -8,10 +8,13 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.net.URI;
+import java.net.URLEncoder;
 import java.net.http.HttpClient;
 import java.net.http.HttpRequest;
 import java.net.http.HttpResponse;
+import java.nio.charset.StandardCharsets;
 import java.time.Duration;
+import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -60,6 +63,42 @@ public class BrowserlessClient implements AutoCloseable {
         return Optional.of(rsp.body());
     }

+    /** Fetches content with a marginalia hack extension loaded that decorates the DOM with attributes for
+     *  certain CSS attributes, to be able to easier identify popovers and other nuisance elements.
+     */
+    public Optional<String> annotatedContent(String url, GotoOptions gotoOptions) throws IOException, InterruptedException {
+        Map<String, Object> requestData = Map.of(
+                "url", url,
+                "userAgent", userAgent,
+                "gotoOptions", gotoOptions,
+                "waitForSelector", Map.of("selector", "#marginaliahack", "timeout", 15000)
+        );
+
+        // Launch parameters for the browserless instance to load the extension
+        Map<String, Object> launchParameters = Map.of(
+                "args", List.of("--load-extension=/dom-export")
+        );
+
+        String launchParametersStr = URLEncoder.encode(gson.toJson(launchParameters), StandardCharsets.UTF_8);
+
+        var request = HttpRequest.newBuilder()
+                .uri(browserlessURI.resolve("/content?token="+BROWSERLESS_TOKEN+"&launch="+launchParametersStr))
+                .method("POST", HttpRequest.BodyPublishers.ofString(
+                        gson.toJson(requestData)
+                ))
+                .header("Content-type", "application/json")
+                .build();
+
+        var rsp = httpClient.send(request, HttpResponse.BodyHandlers.ofString());
+
+        if (rsp.statusCode() >= 300) {
+            logger.info("Failed to fetch annotated content for {}, status {}", url, rsp.statusCode());
+            return Optional.empty();
+        }
+
+        return Optional.of(rsp.body());
+    }
+
     public byte[] screenshot(String url, GotoOptions gotoOptions, ScreenshotOptions screenshotOptions)
             throws IOException, InterruptedException {

View File

@@ -126,7 +126,6 @@ public class LiveCaptureGrpcService
             }
             else {
                 EdgeDomain domain = domainNameOpt.get();
-                String domainNameStr = domain.toString();

                 if (!isValidDomainForCapture(domain)) {
                     ScreenshotDbOperations.flagDomainAsFetched(conn, domain);

View File

@@ -79,9 +79,17 @@ public class SimpleFeedParser {
                 if (!link.isBlank())
                     break;

                 var tag = element.getElementsByTag(attr).first();

                 if (tag != null) {
-                    link = tag.text();
+                    String linkText = tag.text();
+
+                    if (linkText.isBlank()) {
+                        linkText = tag.attr("href");
+                    }
+
+                    link = linkText;
                 }
             }

             ret.add(new ItemData(title, description, link, pubDate));

View File

@@ -0,0 +1,113 @@
package nu.marginalia.domsample.db;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.testcontainers.shaded.org.apache.commons.io.FileUtils;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.*;
import static org.junit.jupiter.api.Assertions.*;
class DomSampleDbTest {
Path tempDir;
@BeforeEach
void setUp() throws Exception {
tempDir = Files.createTempDirectory("test");
}
@AfterEach
void tearDown() throws IOException {
FileUtils.deleteDirectory(tempDir.toFile());
}
@Test
public void testSetUp() {
var dbPath = tempDir.resolve("test.db");
try (var db = new DomSampleDb(dbPath)) {
}
catch (Exception e) {
fail("Failed to set up database: " + e.getMessage());
}
}
@Test
public void testSyncDomains() {
var dbPath = tempDir.resolve("test.db");
try (var db = new DomSampleDb(dbPath)) {
db.syncDomains(Set.of("example.com", "test.com", "foobar.com"));
assertEquals(Set.of("example.com", "test.com", "foobar.com"), new HashSet<>(db.getScheduledDomains()));
db.syncDomains(Set.of("example.com", "test.com"));
assertEquals(Set.of("example.com", "test.com"), new HashSet<>(db.getScheduledDomains()));
db.syncDomains(Set.of("foobar.com", "test.com"));
assertEquals(Set.of("foobar.com", "test.com"), new HashSet<>(db.getScheduledDomains()));
}
catch (Exception e) {
fail("Failed to sync domains: " + e.getMessage());
}
}
@Test
public void testFetchDomains() {
var dbPath = tempDir.resolve("test.db");
try (var db = new DomSampleDb(dbPath)) {
db.syncDomains(Set.of("example.com", "test.com", "foobar.com"));
db.flagDomainAsFetched("example.com");
db.flagDomainAsFetched("test.com");
db.flagDomainAsFetched("foobar.com");
assertEquals(List.of("example.com", "test.com", "foobar.com"), db.getScheduledDomains());
db.flagDomainAsFetched("test.com");
assertEquals(List.of("example.com", "foobar.com", "test.com"), db.getScheduledDomains());
}
catch (Exception e) {
fail("Failed to sync domains: " + e.getMessage());
}
}
@Test
public void saveLoadSingle() {
var dbPath = tempDir.resolve("test.db");
try (var db = new DomSampleDb(dbPath)) {
db.saveSampleRaw("example.com", "http://example.com/sample", "sample data", "requests data", true);
var samples = db.getSamples("example.com");
assertEquals(1, samples.size());
var sample = samples.getFirst();
assertEquals("example.com", sample.domain());
assertEquals("http://example.com/sample", sample.url());
assertEquals("sample data", sample.sample());
assertEquals("requests data", sample.requests());
assertTrue(sample.acceptedPopover());
}
catch (Exception e) {
fail("Failed to save/load sample: " + e.getMessage());
}
}
@Test
public void saveLoadTwo() {
var dbPath = tempDir.resolve("test.db");
try (var db = new DomSampleDb(dbPath)) {
db.saveSampleRaw("example.com", "http://example.com/sample", "sample data", "r1", true);
db.saveSampleRaw("example.com", "http://example.com/sample2", "sample data2", "r2", false);
var samples = db.getSamples("example.com");
assertEquals(2, samples.size());
Map<String, String> samplesByUrl = new HashMap<>();
for (var sample : samples) {
samplesByUrl.put(sample.url(), sample.sample());
}
assertEquals("sample data", samplesByUrl.get("http://example.com/sample"));
assertEquals("sample data2", samplesByUrl.get("http://example.com/sample2"));
}
catch (Exception e) {
fail("Failed to save/load sample: " + e.getMessage());
}
}
}

View File

@@ -3,17 +3,21 @@ package nu.marginalia.livecapture;
 import com.github.tomakehurst.wiremock.WireMockServer;
 import com.github.tomakehurst.wiremock.core.WireMockConfiguration;
 import nu.marginalia.WmsaHome;
+import nu.marginalia.domsample.db.DomSampleDb;
 import nu.marginalia.service.module.ServiceConfigurationModule;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Tag;
 import org.junit.jupiter.api.Test;
 import org.testcontainers.containers.GenericContainer;
+import org.testcontainers.images.PullPolicy;
 import org.testcontainers.junit.jupiter.Testcontainers;
 import org.testcontainers.utility.DockerImageName;

 import java.io.IOException;
 import java.net.URI;
+import java.nio.file.Files;
+import java.nio.file.Path;
 import java.util.Map;

 import static com.github.tomakehurst.wiremock.client.WireMock.*;
@@ -22,9 +26,14 @@
 @Testcontainers
 @Tag("slow")
 public class BrowserlessClientTest {
-    static GenericContainer<?> container = new GenericContainer<>(DockerImageName.parse("browserless/chrome"))
+    // Run gradle docker if this image is not available
+    static GenericContainer<?> container = new GenericContainer<>(DockerImageName.parse("marginalia-browserless"))
             .withEnv(Map.of("TOKEN", "BROWSERLESS_TOKEN"))
+            .withImagePullPolicy(PullPolicy.defaultPolicy())
             .withNetworkMode("bridge")
+            .withLogConsumer(frame -> {
+                System.out.print(frame.getUtf8String());
+            })
             .withExposedPorts(3000);

     static WireMockServer wireMockServer =
@@ -34,6 +43,7 @@
     static String localIp;

     static URI browserlessURI;
+    static URI browserlessWssURI;

     @BeforeAll
     public static void setup() throws IOException {
@@ -44,6 +54,12 @@
                 container.getMappedPort(3000))
         );

+        browserlessWssURI = URI.create(String.format("ws://%s:%d/?token=BROWSERLESS_TOKEN",
+                container.getHost(),
+                container.getMappedPort(3000))
+        );
+
         wireMockServer.start();
         wireMockServer.stubFor(get("/").willReturn(aResponse().withStatus(200).withBody("Ok")));
@@ -85,6 +101,30 @@
         }
     }

+    @Test
+    public void testAnnotatedContent() throws Exception {
+        try (var client = new BrowserlessClient(browserlessURI);
+             DomSampleDb dbop = new DomSampleDb(Path.of("/tmp/dom-sample.db"))
+        ) {
+            var content = client.annotatedContent("https://marginalia.nu/", BrowserlessClient.GotoOptions.defaultValues()).orElseThrow();
+            dbop.saveSample("marginalia.nu", "https://marginalia.nu/", content);
+            System.out.println(content);
+            Assertions.assertFalse(content.isBlank(), "Content should not be empty");
+
+            dbop.getSamples("marginalia.nu").forEach(sample -> {
+                System.out.println("Sample URL: " + sample.url());
+                System.out.println("Sample Content: " + sample.sample());
+                System.out.println("Sample Requests: " + sample.requests());
+                System.out.println("Accepted Popover: " + sample.acceptedPopover());
+            });
+        }
+        finally {
+            Files.deleteIfExists(Path.of("/tmp/dom-sample.db"));
+        }
+    }
+
     @Test
     public void testScreenshot() throws Exception {
         try (var client = new BrowserlessClient(browserlessURI)) {

View File

@@ -0,0 +1,43 @@
plugins {
id 'java'
id 'jvm-test-suite'
}
java {
toolchain {
languageVersion.set(JavaLanguageVersion.of(rootProject.ext.jvmVersion))
}
}
apply from: "$rootProject.projectDir/srcsets.gradle"
dependencies {
implementation project(':code:common:config')
implementation project(':code:common:model')
implementation project(':code:common:db')
implementation libs.bundles.slf4j
implementation libs.prometheus
implementation libs.guava
implementation libs.commons.lang3
implementation dependencies.create(libs.guice.get()) {
exclude group: 'com.google.guava'
}
implementation libs.notnull
implementation libs.fastutil
implementation libs.bundles.mariadb
testImplementation libs.bundles.slf4j.test
testImplementation libs.bundles.junit
testImplementation libs.mockito
testImplementation platform('org.testcontainers:testcontainers-bom:1.17.4')
testImplementation libs.commons.codec
testImplementation project(':code:common:service')
testImplementation 'org.testcontainers:mariadb:1.17.4'
testImplementation 'org.testcontainers:junit-jupiter:1.17.4'
testImplementation project(':code:libraries:test-helpers')
}

View File

@@ -0,0 +1,192 @@
package nu.marginalia.nsfw;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.google.inject.name.Named;
import com.zaxxer.hikari.HikariDataSource;
import it.unimi.dsi.fastutil.ints.IntOpenHashSet;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.InputStreamReader;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.zip.GZIPInputStream;
@Singleton
public class NsfwDomainFilter {
private final HikariDataSource dataSource;
private final List<String> dangerLists;
private final List<String> smutLists;
private volatile IntOpenHashSet blockedDomainIdsTier1 = new IntOpenHashSet();
private volatile IntOpenHashSet blockedDomainIdsTier2 = new IntOpenHashSet();
private static final Logger logger = LoggerFactory.getLogger(NsfwDomainFilter.class);
public static final int NSFW_DISABLE = 0;
public static final int NSFW_BLOCK_DANGER = 1;
public static final int NSFW_BLOCK_SMUT = 2;
@Inject
public NsfwDomainFilter(HikariDataSource dataSource,
@Named("nsfw.dangerLists") List<String> dangerLists,
@Named("nsfw.smutLists") List<String> smutLists
) {
this.dataSource = dataSource;
this.dangerLists = dangerLists;
this.smutLists = smutLists;
Thread.ofPlatform().daemon().name("NsfwDomainFilterSync").start(() -> {
while (true) {
sync();
try {
TimeUnit.HOURS.sleep(1);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
break; // Exit the loop if interrupted
}
}
});
}
public boolean isBlocked(int domainId, int tier) {
if (tier == 0)
return false;
if (tier >= 1 && blockedDomainIdsTier1.contains(domainId))
return true;
if (tier >= 2 && blockedDomainIdsTier2.contains(domainId))
return true;
return false;
}
private synchronized void sync() {
try (var conn = dataSource.getConnection();
var stmt = conn.prepareStatement("SELECT ID, TIER FROM NSFW_DOMAINS")
) {
var rs = stmt.executeQuery();
IntOpenHashSet tier1 = new IntOpenHashSet();
IntOpenHashSet tier2 = new IntOpenHashSet();
while (rs.next()) {
int domainId = rs.getInt("ID");
int tier = rs.getInt("TIER");
switch (tier) {
case 1 -> tier1.add(domainId);
case 2 -> tier2.add(domainId);
}
}
this.blockedDomainIdsTier1 = tier1;
this.blockedDomainIdsTier2 = tier2;
logger.info("NSFW domain filter synced: {} tier 1, {} tier 2", tier1.size(), tier2.size());
}
catch (SQLException ex) {
logger.error("Failed to sync NSFW domain filter", ex);
}
}
public synchronized void fetchLists() {
try (var conn = dataSource.getConnection();
HttpClient client = HttpClient.newBuilder()
.followRedirects(HttpClient.Redirect.ALWAYS)
.build();
var stmt = conn.createStatement();
var insertStmt = conn.prepareStatement("INSERT INTO NSFW_DOMAINS_TMP (ID, TIER) SELECT ID, ? FROM EC_DOMAIN WHERE DOMAIN_NAME = ?")) {
stmt.execute("DROP TABLE IF EXISTS NSFW_DOMAINS_TMP");
stmt.execute("CREATE TABLE NSFW_DOMAINS_TMP LIKE NSFW_DOMAINS");
List<String> combinedDangerList = new ArrayList<>(10_000);
for (var dangerListUrl : dangerLists) {
combinedDangerList.addAll(fetchList(client, dangerListUrl));
}
for (String domain : combinedDangerList) {
insertStmt.setInt(1, NSFW_BLOCK_DANGER);
insertStmt.setString(2, domain);
insertStmt.execute();
}
List<String> combinedSmutList = new ArrayList<>(10_000);
for (var smutListUrl : smutLists) {
combinedSmutList.addAll(fetchList(client, smutListUrl));
}
for (String domain : combinedSmutList) {
insertStmt.setInt(1, NSFW_BLOCK_SMUT);
insertStmt.setString(2, domain);
insertStmt.execute();
}
stmt.execute("""
DROP TABLE IF EXISTS NSFW_DOMAINS
""");
stmt.execute("""
RENAME TABLE NSFW_DOMAINS_TMP TO NSFW_DOMAINS
""");
sync();
}
catch (SQLException ex) {
logger.error("Failed to fetch NSFW domain lists", ex);
}
}
public List<String> fetchList(HttpClient client, String url) {
logger.info("Fetching NSFW domain list from {}", url);
var request = HttpRequest.newBuilder()
.uri(java.net.URI.create(url))
.build();
try {
if (url.endsWith(".gz")) {
var response = client.send(request, HttpResponse.BodyHandlers.ofByteArray());
byte[] body = response.body();
try (var reader = new BufferedReader(new InputStreamReader(new GZIPInputStream(new ByteArrayInputStream(body))))) {
return reader.lines()
.filter(StringUtils::isNotEmpty)
.toList();
} catch (Exception e) {
logger.error("Error reading GZIP response from {}", url, e);
}
} else {
var response = client.send(request, HttpResponse.BodyHandlers.ofString());
if (response.statusCode() == 200) {
return Arrays.stream(StringUtils.split(response.body(), "\n"))
.filter(StringUtils::isNotEmpty)
.toList();
} else {
logger.warn("Failed to fetch list from {}: HTTP {}", url, response.statusCode());
}
}
}
catch (Exception e) {
logger.error("Error fetching NSFW domain list from {}", url, e);
}
return List.of();
}
}
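
Not part of the changeset, but as a usage sketch: the tier argument to isBlocked is the caller's requested filtering level rather than a property of the domain, so tier 1 consults only the danger set while tier 2 consults both. Assuming an injector that also binds HikariDataSource (as the test further down does), wiring looks roughly like this; dataSourceModule is a hypothetical placeholder:

import com.google.inject.Guice;
import nu.marginalia.nsfw.NsfwDomainFilter;
import nu.marginalia.nsfw.NsfwFilterModule;

public class NsfwFilterUsageSketch {
    public static void main(String[] args) {
        var injector = Guice.createInjector(new NsfwFilterModule(), dataSourceModule());
        var filter = injector.getInstance(NsfwDomainFilter.class);

        filter.fetchLists(); // populate NSFW_DOMAINS from the UT1 lists, then sync()

        int domainId = 12345; // hypothetical EC_DOMAIN id
        boolean hiddenAtDanger = filter.isBlocked(domainId, NsfwDomainFilter.NSFW_BLOCK_DANGER);
        boolean hiddenAtSmut = filter.isBlocked(domainId, NsfwDomainFilter.NSFW_BLOCK_SMUT);
        System.out.println(hiddenAtDanger + " " + hiddenAtSmut);
    }

    // Placeholder: supply a module that binds HikariDataSource to a real database.
    static com.google.inject.Module dataSourceModule() {
        throw new UnsupportedOperationException("bind HikariDataSource here");
    }
}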


@@ -0,0 +1,30 @@
package nu.marginalia.nsfw;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
import jakarta.inject.Named;
import java.util.List;
public class NsfwFilterModule extends AbstractModule {
@Provides
@Named("nsfw.dangerLists")
public List<String> nsfwDomainLists1() {
return List.of(
"https://raw.githubusercontent.com/olbat/ut1-blacklists/refs/heads/master/blacklists/cryptojacking/domains",
"https://raw.githubusercontent.com/olbat/ut1-blacklists/refs/heads/master/blacklists/malware/domains",
"https://raw.githubusercontent.com/olbat/ut1-blacklists/refs/heads/master/blacklists/phishing/domains"
);
}
@Provides
@Named("nsfw.smutLists")
public List<String> nsfwDomainLists2() {
return List.of(
"https://github.com/olbat/ut1-blacklists/raw/refs/heads/master/blacklists/adult/domains.gz",
"https://raw.githubusercontent.com/olbat/ut1-blacklists/refs/heads/master/blacklists/gambling/domains"
);
}
public void configure() {}
}


@@ -0,0 +1,108 @@
package nu.marginalia.nsfw;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Provides;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import jakarta.inject.Named;
import nu.marginalia.test.TestMigrationLoader;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.testcontainers.containers.MariaDBContainer;
import org.testcontainers.junit.jupiter.Container;
import org.testcontainers.junit.jupiter.Testcontainers;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
@Tag("slow")
@Testcontainers
class NsfwDomainFilterTest extends AbstractModule {
@Container
static MariaDBContainer<?> mariaDBContainer = new MariaDBContainer<>("mariadb")
.withDatabaseName("WMSA_prod")
.withUsername("wmsa")
.withPassword("wmsa")
.withNetworkAliases("mariadb");
static HikariDataSource dataSource;
static Path tempDir;
@BeforeAll
public static void setUpDb() throws IOException {
tempDir = Files.createTempDirectory(NsfwDomainFilterTest.class.getSimpleName());
System.setProperty("system.homePath", tempDir.toString());
HikariConfig config = new HikariConfig();
config.setJdbcUrl(mariaDBContainer.getJdbcUrl());
config.setUsername("wmsa");
config.setPassword("wmsa");
dataSource = new HikariDataSource(config);
TestMigrationLoader.flywayMigration(dataSource);
try (var conn = dataSource.getConnection();
var stmt = conn.prepareStatement("INSERT INTO EC_DOMAIN (DOMAIN_NAME, DOMAIN_TOP, NODE_AFFINITY) VALUES (?, ?, 1)")
) {
// Ensure the database is ready
conn.createStatement().execute("SELECT 1");
stmt.setString(1, "www.google.com");
stmt.setString(2, "google.com");
stmt.executeUpdate();
stmt.setString(1, "www.bing.com");
stmt.setString(2, "bing.com");
stmt.executeUpdate();
} catch (Exception e) {
throw new RuntimeException("Failed to connect to the database", e);
}
}
@Provides
@Named("nsfw.dangerLists")
public List<String> nsfwDomainLists1() {
return List.of(
"https://downloads.marginalia.nu/test/list1"
);
}
@Provides
@Named("nsfw.smutLists")
public List<String> nsfwDomainLists2() {
return List.of(
"https://downloads.marginalia.nu/test/list2.gz"
);
}
public void configure() {
bind(HikariDataSource.class).toInstance(dataSource);
}
@Test
public void test() {
var filter = Guice
.createInjector(this)
.getInstance(NsfwDomainFilter.class);
filter.fetchLists();
assertTrue(filter.isBlocked(1, NsfwDomainFilter.NSFW_BLOCK_DANGER));
assertTrue(filter.isBlocked(1, NsfwDomainFilter.NSFW_BLOCK_SMUT));
assertFalse(filter.isBlocked(2, NsfwDomainFilter.NSFW_BLOCK_DANGER));
assertTrue(filter.isBlocked(2, NsfwDomainFilter.NSFW_BLOCK_SMUT));
}
}


@@ -1,9 +1,6 @@
 package nu.marginalia.api.searchquery;
-import nu.marginalia.api.searchquery.model.query.ProcessedQuery;
-import nu.marginalia.api.searchquery.model.query.QueryParams;
-import nu.marginalia.api.searchquery.model.query.QueryResponse;
-import nu.marginalia.api.searchquery.model.query.SearchSpecification;
+import nu.marginalia.api.searchquery.model.query.*;
 import nu.marginalia.api.searchquery.model.results.DecoratedSearchResultItem;
 import nu.marginalia.api.searchquery.model.results.PrototypeRankingParameters;
 import nu.marginalia.api.searchquery.model.results.SearchResultItem;
@@ -32,6 +29,8 @@ public class QueryProtobufCodec {
         builder.setSearchSetIdentifier(query.specs.searchSetIdentifier);
         builder.setHumanQuery(request.getHumanQuery());
+        builder.setNsfwFilterTierValue(request.getNsfwFilterTierValue());
         builder.setQuality(IndexProtobufCodec.convertSpecLimit(query.specs.quality));
         builder.setYear(IndexProtobufCodec.convertSpecLimit(query.specs.year));
         builder.setSize(IndexProtobufCodec.convertSpecLimit(query.specs.size));
@@ -78,6 +77,8 @@ public class QueryProtobufCodec {
         builder.setSearchSetIdentifier(query.specs.searchSetIdentifier);
         builder.setHumanQuery(humanQuery);
+        builder.setNsfwFilterTier(RpcIndexQuery.NSFW_FILTER_TIER.DANGER);
         builder.setQuality(IndexProtobufCodec.convertSpecLimit(query.specs.quality));
         builder.setYear(IndexProtobufCodec.convertSpecLimit(query.specs.year));
         builder.setSize(IndexProtobufCodec.convertSpecLimit(query.specs.size));
@@ -112,6 +113,7 @@ public class QueryProtobufCodec {
                 request.getSearchSetIdentifier(),
                 QueryStrategy.valueOf(request.getQueryStrategy()),
                 RpcTemporalBias.Bias.valueOf(request.getTemporalBias().getBias().name()),
+                NsfwFilterTier.fromCodedValue(request.getNsfwFilterTierValue()),
                 request.getPagination().getPage()
         );
     }
@@ -327,6 +329,7 @@ public class QueryProtobufCodec {
                 .setRank(IndexProtobufCodec.convertSpecLimit(params.rank()))
                 .setSearchSetIdentifier(params.identifier())
                 .setQueryStrategy(params.queryStrategy().name())
+                .setNsfwFilterTierValue(params.filterTier().getCodedValue())
                 .setTemporalBias(RpcTemporalBias.newBuilder()
                         .setBias(RpcTemporalBias.Bias.valueOf(params.temporalBias().name()))
                         .build())


@@ -0,0 +1,26 @@
package nu.marginalia.api.searchquery.model.query;
public enum NsfwFilterTier {
OFF(0),
DANGER(1),
PORN_AND_GAMBLING(2);
private final int codedValue; // same as ordinal() for now, but can be changed later if needed
NsfwFilterTier(int codedValue) {
this.codedValue = codedValue;
}
public static NsfwFilterTier fromCodedValue(int codedValue) {
for (NsfwFilterTier tier : NsfwFilterTier.values()) {
if (tier.codedValue == codedValue) {
return tier;
}
}
throw new IllegalArgumentException("Invalid coded value for NsfwFilterTier: " + codedValue);
}
public int getCodedValue() {
return codedValue;
}
}
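
For reference, only the coded value crosses the wire; the enum is reconstructed on the receiving side. A minimal round-trip sketch:

import nu.marginalia.api.searchquery.model.query.NsfwFilterTier;

public class FilterTierSketch {
    public static void main(String[] args) {
        NsfwFilterTier tier = NsfwFilterTier.PORN_AND_GAMBLING;
        int wire = tier.getCodedValue();                       // 2
        NsfwFilterTier back = NsfwFilterTier.fromCodedValue(wire);
        System.out.println(back == tier);                      // true
        // Unknown values fail loudly rather than defaulting to OFF:
        // NsfwFilterTier.fromCodedValue(9) throws IllegalArgumentException.
    }
}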


@@ -25,10 +25,11 @@ public record QueryParams(
         String identifier,
         QueryStrategy queryStrategy,
         RpcTemporalBias.Bias temporalBias,
+        NsfwFilterTier filterTier,
         int page
 )
 {
-    public QueryParams(String query, RpcQueryLimits limits, String identifier) {
+    public QueryParams(String query, RpcQueryLimits limits, String identifier, NsfwFilterTier filterTier) {
         this(query, null,
                 List.of(),
                 List.of(),
@@ -43,6 +44,7 @@ public record QueryParams(
                 identifier,
                 QueryStrategy.AUTO,
                 RpcTemporalBias.Bias.NONE,
+                filterTier,
                 1 // page
         );
     }


@@ -1,6 +1,7 @@
 package nu.marginalia.api.searchquery.model.results;
 import nu.marginalia.api.searchquery.model.results.debug.ResultRankingDetails;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeUrl;
 import org.jetbrains.annotations.NotNull;
@@ -161,4 +162,14 @@ public class DecoratedSearchResultItem implements Comparable<DecoratedSearchResu
     public String toString() {
         return "DecoratedSearchResultItem(rawIndexResult=" + this.getRawIndexResult() + ", url=" + this.getUrl() + ", title=" + this.getTitle() + ", description=" + this.getDescription() + ", urlQuality=" + this.getUrlQuality() + ", format=" + this.getFormat() + ", features=" + this.getFeatures() + ", pubYear=" + this.getPubYear() + ", dataHash=" + this.getDataHash() + ", wordsTotal=" + this.getWordsTotal() + ", bestPositions=" + this.getBestPositions() + ", rankingScore=" + this.getRankingScore() + ", resultsFromDomain=" + this.getResultsFromDomain() + ", rankingDetails=" + this.getRankingDetails() + ")";
     }
+
+    public String getShortFormat() {
+        try {
+            var df = DocumentFormat.valueOf(format);
+            return df.shortFormat;
+        }
+        catch (IllegalArgumentException e) {
+            return DocumentFormat.UNKNOWN.shortFormat;
+        }
+    }
 }


@@ -32,6 +32,14 @@ message RpcQsQuery {
     RpcTemporalBias temporalBias = 16;
     RpcQsQueryPagination pagination = 17;
+    NSFW_FILTER_TIER nsfwFilterTier = 18;
+
+    enum NSFW_FILTER_TIER {
+        NONE = 0;
+        DANGER = 1;
+        PORN_AND_GAMBLING = 2;
+    };
 }
 /* Query service query response */
@@ -78,8 +86,17 @@ message RpcIndexQuery {
     RpcQueryLimits queryLimits = 10;
     string queryStrategy = 11; // Named query configuration
     RpcResultRankingParameters parameters = 12;
+    NSFW_FILTER_TIER nsfwFilterTier = 13;
+
+    enum NSFW_FILTER_TIER {
+        NONE = 0;
+        DANGER = 1;
+        PORN_AND_GAMBLING = 2;
+    };
 }
 /* A tagged union encoding some limit on a field */
 message RpcSpecLimit {
     int32 value = 1;
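
Given the generated protobuf classes, a client would select a tier on the request builder roughly as in the sketch below; the query text is a placeholder, and the builder method names follow the usual protoc conventions (the codec changes above already rely on setNsfwFilterTierValue and the nested NSFW_FILTER_TIER enum):

import nu.marginalia.api.searchquery.RpcQsQuery;

public class QsQueryTierSketch {
    public static void main(String[] args) {
        RpcQsQuery request = RpcQsQuery.newBuilder()
                .setHumanQuery("example query")                        // placeholder
                .setNsfwFilterTier(RpcQsQuery.NSFW_FILTER_TIER.DANGER)
                .build();
        System.out.println(request.getNsfwFilterTierValue());         // 1 for DANGER
    }
}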


@@ -19,6 +19,7 @@ dependencies {
     implementation project(':code:common:model')
     implementation project(':code:common:service')
+    implementation project(':code:functions:nsfw-domain-filter')
     implementation project(':code:functions:search-query:api')
     implementation project(':code:index:query')


@@ -11,6 +11,7 @@ import nu.marginalia.api.searchquery.model.query.QueryParams;
 import nu.marginalia.api.searchquery.model.results.DecoratedSearchResultItem;
 import nu.marginalia.api.searchquery.model.results.PrototypeRankingParameters;
 import nu.marginalia.index.api.IndexClient;
+import nu.marginalia.nsfw.NsfwDomainFilter;
 import nu.marginalia.service.server.DiscoverableService;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -34,13 +35,16 @@ public class QueryGRPCService
     private final QueryFactory queryFactory;
+    private final NsfwDomainFilter nsfwDomainFilter;
     private final IndexClient indexClient;

     @Inject
     public QueryGRPCService(QueryFactory queryFactory,
+                            NsfwDomainFilter nsfwDomainFilter,
                             IndexClient indexClient)
     {
         this.queryFactory = queryFactory;
+        this.nsfwDomainFilter = nsfwDomainFilter;
         this.indexClient = indexClient;
     }


@@ -3,6 +3,7 @@ package nu.marginalia.query.svc;
 import nu.marginalia.WmsaHome;
 import nu.marginalia.api.searchquery.RpcQueryLimits;
 import nu.marginalia.api.searchquery.RpcTemporalBias;
+import nu.marginalia.api.searchquery.model.query.NsfwFilterTier;
 import nu.marginalia.api.searchquery.model.query.QueryParams;
 import nu.marginalia.api.searchquery.model.query.SearchSpecification;
 import nu.marginalia.functions.searchquery.QueryFactory;
@@ -58,6 +59,7 @@ public class QueryFactoryTest {
                 "NONE",
                 QueryStrategy.AUTO,
                 RpcTemporalBias.Bias.NONE,
+                NsfwFilterTier.OFF,
                 0), null).specs;
     }


@@ -17,6 +17,7 @@ dependencies {
     implementation project(':code:common:service')
     implementation project(':code:common:db')
     implementation project(':code:libraries:message-queue')
+    implementation project(':code:functions:nsfw-domain-filter')
     implementation project(':code:functions:search-query:api')
     implementation libs.bundles.slf4j


@@ -2,11 +2,13 @@ package nu.marginalia.index.api;
 import com.google.inject.Inject;
 import com.google.inject.Singleton;
+import io.prometheus.client.Counter;
 import nu.marginalia.api.searchquery.IndexApiGrpc;
 import nu.marginalia.api.searchquery.RpcDecoratedResultItem;
 import nu.marginalia.api.searchquery.RpcIndexQuery;
 import nu.marginalia.db.DomainBlacklistImpl;
 import nu.marginalia.model.id.UrlIdCodec;
+import nu.marginalia.nsfw.NsfwDomainFilter;
 import nu.marginalia.service.client.GrpcChannelPoolFactory;
 import nu.marginalia.service.client.GrpcMultiNodeChannelPool;
 import nu.marginalia.service.discovery.property.ServiceKey;
@@ -28,14 +30,26 @@ public class IndexClient {
     private static final Logger logger = LoggerFactory.getLogger(IndexClient.class);
     private final GrpcMultiNodeChannelPool<IndexApiGrpc.IndexApiBlockingStub> channelPool;
     private final DomainBlacklistImpl blacklist;
+    private final NsfwDomainFilter nsfwDomainFilter;
+
+    Counter wmsa_index_query_count = Counter.build()
+            .name("wmsa_nsfw_filter_result_count")
+            .labelNames("tier")
+            .help("Count of results filtered by NSFW tier")
+            .register();
+
     private static final ExecutorService executor = Executors.newCachedThreadPool();

     @Inject
-    public IndexClient(GrpcChannelPoolFactory channelPoolFactory, DomainBlacklistImpl blacklist) {
+    public IndexClient(GrpcChannelPoolFactory channelPoolFactory,
+                       DomainBlacklistImpl blacklist,
+                       NsfwDomainFilter nsfwDomainFilter
+    ) {
         this.channelPool = channelPoolFactory.createMulti(
                 ServiceKey.forGrpcApi(IndexApiGrpc.class, ServicePartition.multi()),
                 IndexApiGrpc::newBlockingStub);
         this.blacklist = blacklist;
+        this.nsfwDomainFilter = nsfwDomainFilter;
     }
     private static final Comparator<RpcDecoratedResultItem> comparator =
@@ -52,7 +66,7 @@ public class IndexClient {
     public AggregateQueryResponse executeQueries(RpcIndexQuery indexRequest, Pagination pagination) {
         final int requestedMaxResults = indexRequest.getQueryLimits().getResultsTotal();
+        int filterTier = indexRequest.getNsfwFilterTierValue();
         AtomicInteger totalNumResults = new AtomicInteger(0);
         List<RpcDecoratedResultItem> results =
@@ -74,7 +88,7 @@ public class IndexClient {
                     }
                 })
                 .flatMap(List::stream)
-                .filter(item -> !isBlacklisted(item))
+                .filter(item -> !isBlacklisted(item, filterTier))
                 .sorted(comparator)
                 .skip(Math.max(0, (pagination.page - 1) * pagination.pageSize))
                 .limit(pagination.pageSize)
@@ -83,8 +97,23 @@ public class IndexClient {
         return new AggregateQueryResponse(results, pagination.page(), totalNumResults.get());
     }
-    private boolean isBlacklisted(RpcDecoratedResultItem item) {
-        return blacklist.isBlacklisted(UrlIdCodec.getDomainId(item.getRawItem().getCombinedId()));
+    static String[] tierNames = {
+            "OFF",
+            "DANGER",
+            "NSFW"
+    };
+
+    private boolean isBlacklisted(RpcDecoratedResultItem item, int filterTier) {
+        int domainId = UrlIdCodec.getDomainId(item.getRawItem().getCombinedId());
+
+        if (blacklist.isBlacklisted(domainId)) {
+            return true;
+        }
+        if (nsfwDomainFilter.isBlocked(domainId, filterTier)) {
+            wmsa_index_query_count.labels(tierNames[filterTier]).inc();
+            return true;
+        }
+        return false;
     }
 }
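
A side note on the accounting: tierNames is indexed by the request's tier, so a result dropped under tier 2 increments wmsa_nsfw_filter_result_count{tier="NSFW"}. A stand-alone sketch of the same Prometheus pattern (the demo metric name and values are hypothetical):

import io.prometheus.client.Counter;

public class FilterCounterSketch {
    static final Counter filtered = Counter.build()
            .name("wmsa_nsfw_filter_result_count_demo") // demo name to avoid a registry clash
            .labelNames("tier")
            .help("Count of results filtered by NSFW tier")
            .register();

    public static void main(String[] args) {
        String[] tierNames = {"OFF", "DANGER", "NSFW"};
        int filterTier = 2;                                // as read off the incoming request
        filtered.labels(tierNames[filterTier]).inc();
        System.out.println(filtered.labels("NSFW").get()); // 1.0
    }
}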


@@ -84,7 +84,7 @@ public class ForwardIndexConverter {
         LongArray docFileData = LongArrayFactory.mmapForWritingConfined(outputFileDocsData, ForwardIndexParameters.ENTRY_SIZE * docsFileId.size());
-        ByteBuffer workArea = ByteBuffer.allocate(65536);
+        ByteBuffer workArea = ByteBuffer.allocate(1024*1024*100);
         for (var instance : journal.pages()) {
             try (var slopTable = new SlopTable(instance.baseDir(), instance.page()))
             {


@@ -62,6 +62,7 @@ dependencies {
     implementation libs.jwarc
     implementation libs.jsoup
+    implementation libs.pdfbox
     implementation libs.guava
     implementation dependencies.create(libs.guice.get()) {


@@ -1,8 +1,8 @@
 package nu.marginalia.converting.model;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeUrl;
 import nu.marginalia.model.crawl.HtmlFeature;
-import nu.marginalia.model.html.HtmlStandard;
 import nu.marginalia.model.idx.DocumentMetadata;
 import javax.annotation.Nullable;
@@ -21,7 +21,7 @@ public class ProcessedDocumentDetails {
     public long hashCode;
     public Set<HtmlFeature> features;
-    public HtmlStandard standard;
+    public DocumentFormat format;
     public List<EdgeUrl> linksInternal;
     public List<EdgeUrl> linksExternal;
@@ -30,6 +30,6 @@ public class ProcessedDocumentDetails {
     public GeneratorType generator;
     public String toString() {
-        return "ProcessedDocumentDetails(title=" + this.title + ", description=" + this.description + ", pubYear=" + this.pubYear + ", length=" + this.length + ", quality=" + this.quality + ", hashCode=" + this.hashCode + ", features=" + this.features + ", standard=" + this.standard + ", linksInternal=" + this.linksInternal + ", linksExternal=" + this.linksExternal + ", metadata=" + this.metadata + ", generator=" + this.generator + ")";
+        return "ProcessedDocumentDetails(title=" + this.title + ", description=" + this.description + ", pubYear=" + this.pubYear + ", length=" + this.length + ", quality=" + this.quality + ", hashCode=" + this.hashCode + ", features=" + this.features + ", standard=" + this.format + ", linksInternal=" + this.linksInternal + ", linksExternal=" + this.linksExternal + ", metadata=" + this.metadata + ", generator=" + this.generator + ")";
     }
 }


@@ -7,6 +7,7 @@ import nu.marginalia.converting.model.DisqualifiedException;
 import nu.marginalia.converting.model.ProcessedDocument;
 import nu.marginalia.converting.processor.plugin.AbstractDocumentProcessorPlugin;
 import nu.marginalia.converting.processor.plugin.HtmlDocumentProcessorPlugin;
+import nu.marginalia.converting.processor.plugin.PdfDocumentProcessorPlugin;
 import nu.marginalia.converting.processor.plugin.PlainTextDocumentProcessorPlugin;
 import nu.marginalia.keyword.LinkTexts;
 import nu.marginalia.model.EdgeDomain;
@@ -33,7 +34,8 @@ public class DocumentProcessor {
     private static final Set<String> acceptedContentTypes = Set.of("application/xhtml+xml",
             "application/xhtml",
             "text/html",
-            "text/plain");
+            "text/plain",
+            "application/pdf");
     private final List<AbstractDocumentProcessorPlugin> processorPlugins = new ArrayList<>();
@@ -42,12 +44,14 @@ public class DocumentProcessor {
     @Inject
     public DocumentProcessor(HtmlDocumentProcessorPlugin htmlDocumentProcessorPlugin,
                              PlainTextDocumentProcessorPlugin plainTextDocumentProcessorPlugin,
+                             PdfDocumentProcessorPlugin pdfDocumentProcessorPlugin,
                              AnchorTextKeywords anchorTextKeywords)
     {
         this.anchorTextKeywords = anchorTextKeywords;
         processorPlugins.add(htmlDocumentProcessorPlugin);
         processorPlugins.add(plainTextDocumentProcessorPlugin);
+        processorPlugins.add(pdfDocumentProcessorPlugin);
     }
     public ProcessedDocument process(CrawledDocument crawledDocument,


@@ -2,9 +2,9 @@ package nu.marginalia.converting.processor.logic;
 import crawlercommons.utils.Strings;
 import nu.marginalia.converting.model.DisqualifiedException;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.crawl.HtmlFeature;
 import nu.marginalia.model.crawldata.CrawledDocument;
-import nu.marginalia.model.html.HtmlStandard;
 import org.jetbrains.annotations.NotNull;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
@@ -17,7 +17,7 @@ import java.util.Set;
 public class DocumentValuator {
     public double getQuality(CrawledDocument crawledDocument,
-                             HtmlStandard htmlStandard,
+                             DocumentFormat htmlStandard,
                              Document parsedDocument,
                              int textLength) throws DisqualifiedException {


@@ -1,7 +1,7 @@
 package nu.marginalia.converting.processor.logic;
 import com.google.common.base.Strings;
-import nu.marginalia.model.html.HtmlStandard;
+import nu.marginalia.model.DocumentFormat;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.DocumentType;
 import org.slf4j.Logger;
@@ -12,54 +12,54 @@ public class HtmlStandardExtractor {
     private static final Logger logger = LoggerFactory.getLogger(HtmlStandardExtractor.class);

-    public static HtmlStandard parseDocType(DocumentType docType) {
+    public static DocumentFormat parseDocType(DocumentType docType) {
         if (null == docType) {
-            return HtmlStandard.UNKNOWN;
+            return DocumentFormat.UNKNOWN;
         }
         String publicId = docType.publicId();
         if (Strings.isNullOrEmpty(publicId))
-            return HtmlStandard.HTML5;
+            return DocumentFormat.HTML5;
         publicId = publicId.toUpperCase();
         if (publicId.startsWith("-//SOFTQUAD SOFTWARE//DTD") && publicId.contains("HTML 4")) {
-            return HtmlStandard.HTML4;
+            return DocumentFormat.HTML4;
         }
         if (publicId.startsWith("-//SOFTQUAD SOFTWARE//DTD") && publicId.contains("HTML 3")) {
-            return HtmlStandard.HTML123;
+            return DocumentFormat.HTML123;
         }
         if (publicId.startsWith("-//INTERNET/RFC XXXX//EN"))
-            return HtmlStandard.HTML123;
+            return DocumentFormat.HTML123;
         if (publicId.startsWith("-//NETSCAPE COMM. CORP"))
-            return HtmlStandard.HTML123;
+            return DocumentFormat.HTML123;
         if (publicId.startsWith("-//SQ//DTD HTML 2"))
-            return HtmlStandard.HTML123;
+            return DocumentFormat.HTML123;
         if (publicId.startsWith("-//SOFTQUAD//DTD HTML 2"))
-            return HtmlStandard.HTML123;
+            return DocumentFormat.HTML123;
         if (publicId.startsWith("-//W3O//DTD W3 HTML 2"))
-            return HtmlStandard.HTML123;
+            return DocumentFormat.HTML123;
         if (publicId.startsWith("-//IETF//DTD HTML 2"))
-            return HtmlStandard.HTML123;
+            return DocumentFormat.HTML123;
         if (publicId.startsWith("-//IETF//DTD HTML//EN"))
-            return HtmlStandard.HTML123;
+            return DocumentFormat.HTML123;
         if (publicId.startsWith("-/W3C//DTD HTML 3"))
-            return HtmlStandard.HTML123;
+            return DocumentFormat.HTML123;
         if (publicId.startsWith("-/W3C/DTD HTML 3"))
-            return HtmlStandard.HTML123;
+            return DocumentFormat.HTML123;
         if (publicId.startsWith("-//IETF//DTD HTML 3"))
-            return HtmlStandard.HTML123;
+            return DocumentFormat.HTML123;
         if (publicId.startsWith("-//W3C//DTD XHTML"))
-            return HtmlStandard.XHTML;
+            return DocumentFormat.XHTML;
         if (publicId.startsWith("ISO/IEC 15445:2000//DTD"))
-            return HtmlStandard.XHTML;
+            return DocumentFormat.XHTML;
         if (publicId.startsWith("-//W3C//DTD HTML"))
-            return HtmlStandard.HTML4;
+            return DocumentFormat.HTML4;
         logger.debug("Unknown publicID standard {}", publicId);
-        return HtmlStandard.UNKNOWN;
+        return DocumentFormat.UNKNOWN;
     }

-    public static HtmlStandard sniffHtmlStandard(Document parsed) {
+    public static DocumentFormat sniffHtmlStandard(Document parsed) {
         int html4Attributes = 0;
         int html5Attributes = 0;
@@ -73,11 +73,11 @@ public class HtmlStandardExtractor {
             html4Attributes++;
         }
         if (html5Attributes > 0) {
-            return HtmlStandard.HTML5;
+            return DocumentFormat.HTML5;
         }
         if (html4Attributes > 0) {
-            return HtmlStandard.HTML4;
+            return DocumentFormat.HTML4;
         }
-        return HtmlStandard.HTML123;
+        return DocumentFormat.HTML123;
     }
 }
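
The rename is mechanical, but the detection flow deserves a worked example: doctype first, attribute sniffing as a fallback, mirroring getDocumentFormat above. A small stand-alone sketch using jsoup directly:

import nu.marginalia.converting.processor.logic.HtmlStandardExtractor;
import nu.marginalia.model.DocumentFormat;
import org.jsoup.Jsoup;

public class FormatDetectionSketch {
    public static void main(String[] args) {
        var html4 = Jsoup.parse(
                "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01//EN\"><html><body></body></html>");
        // Matched by the -//W3C//DTD HTML prefix rule after the XHTML checks fail
        System.out.println(HtmlStandardExtractor.parseDocType(html4.documentType())); // HTML4

        var bare = Jsoup.parse("<html><body><p>no doctype</p></body></html>");
        if (HtmlStandardExtractor.parseDocType(bare.documentType()) == DocumentFormat.UNKNOWN) {
            // No doctype at all: fall back to attribute sniffing
            System.out.println(HtmlStandardExtractor.sniffHtmlStandard(bare));
        }
    }
}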


@@ -7,11 +7,11 @@ import nu.marginalia.keyword.LinkTexts;
 import nu.marginalia.keyword.model.DocumentKeywordsBuilder;
 import nu.marginalia.language.filter.LanguageFilter;
 import nu.marginalia.language.model.DocumentLanguageData;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeUrl;
 import nu.marginalia.model.crawl.HtmlFeature;
 import nu.marginalia.model.crawl.PubDate;
 import nu.marginalia.model.crawldata.CrawledDocument;
-import nu.marginalia.model.html.HtmlStandard;
 import javax.annotation.Nullable;
 import java.io.IOException;
@@ -73,7 +73,7 @@ public abstract class AbstractDocumentProcessorPlugin {
             return this;
         }
-        public MetaTagsBuilder addFormat(HtmlStandard standard) {
+        public MetaTagsBuilder addFormat(DocumentFormat standard) {
             add("format", standard);


@@ -25,12 +25,12 @@ import nu.marginalia.language.model.DocumentLanguageData;
 import nu.marginalia.language.sentence.ThreadLocalSentenceExtractorProvider;
 import nu.marginalia.link_parser.FeedExtractor;
 import nu.marginalia.link_parser.LinkParser;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeDomain;
 import nu.marginalia.model.EdgeUrl;
 import nu.marginalia.model.crawl.HtmlFeature;
 import nu.marginalia.model.crawl.PubDate;
 import nu.marginalia.model.crawldata.CrawledDocument;
-import nu.marginalia.model.html.HtmlStandard;
 import nu.marginalia.model.idx.DocumentFlags;
 import nu.marginalia.model.idx.DocumentMetadata;
 import org.jsoup.nodes.Document;
@@ -137,8 +137,8 @@ public class HtmlDocumentProcessorPlugin extends AbstractDocumentProcessorPlugin
         final int length = getLength(doc);
-        final HtmlStandard standard = getHtmlStandard(doc);
-        final double quality = documentValuator.getQuality(crawledDocument, standard, doc, length);
+        final DocumentFormat format = getDocumentFormat(doc);
+        final double quality = documentValuator.getQuality(crawledDocument, format, doc, length);
         if (isDisqualified(documentClass, url, quality, doc.title())) {
             throw new DisqualifiedException(DisqualificationReason.QUALITY);
@@ -152,7 +152,7 @@ public class HtmlDocumentProcessorPlugin extends AbstractDocumentProcessorPlugin
         var ret = new ProcessedDocumentDetails();
         ret.length = length;
-        ret.standard = standard;
+        ret.format = format;
         ret.title = specialization.getTitle(doc, dld, crawledDocument.url);
         final Set<HtmlFeature> features = featureExtractor.getFeatures(url, doc, documentHeaders, dld);
@@ -161,7 +161,7 @@ public class HtmlDocumentProcessorPlugin extends AbstractDocumentProcessorPlugin
         ret.quality = documentValuator.adjustQuality(quality, features);
         ret.hashCode = dld.localitySensitiveHashCode();
-        PubDate pubDate = pubDateSniffer.getPubDate(documentHeaders, url, doc, standard, true);
+        PubDate pubDate = pubDateSniffer.getPubDate(documentHeaders, url, doc, format, true);
         EnumSet<DocumentFlags> documentFlags = documentFlags(features, generatorParts.type());
@@ -180,7 +180,7 @@ public class HtmlDocumentProcessorPlugin extends AbstractDocumentProcessorPlugin
                 .addPubDate(pubDate)
                 .addUrl(url)
                 .addFeatures(features)
-                .addFormat(standard)
+                .addFormat(format)
                 .addGenerator(generatorParts.keywords())
                 .build();
@@ -316,12 +316,12 @@ public class HtmlDocumentProcessorPlugin extends AbstractDocumentProcessorPlugin
         return linkTerms;
     }
-    private HtmlStandard getHtmlStandard(Document doc) {
-        HtmlStandard htmlStandard = HtmlStandardExtractor.parseDocType(doc.documentType());
-        if (HtmlStandard.UNKNOWN.equals(htmlStandard)) {
+    private DocumentFormat getDocumentFormat(Document doc) {
+        DocumentFormat format = HtmlStandardExtractor.parseDocType(doc.documentType());
+        if (DocumentFormat.UNKNOWN.equals(format)) {
             return HtmlStandardExtractor.sniffHtmlStandard(doc);
         }
-        return htmlStandard;
+        return format;
     }
     private int getLength(Document doc) {


@@ -0,0 +1,286 @@
package nu.marginalia.converting.processor.plugin;
import com.google.inject.Inject;
import com.google.inject.name.Named;
import nu.marginalia.converting.model.DisqualifiedException;
import nu.marginalia.converting.model.ProcessedDocumentDetails;
import nu.marginalia.converting.processor.DocumentClass;
import nu.marginalia.converting.processor.logic.DocumentLengthLogic;
import nu.marginalia.converting.processor.plugin.specialization.DefaultSpecialization;
import nu.marginalia.keyword.DocumentKeywordExtractor;
import nu.marginalia.keyword.LinkTexts;
import nu.marginalia.keyword.model.DocumentKeywordsBuilder;
import nu.marginalia.language.filter.LanguageFilter;
import nu.marginalia.language.model.DocumentLanguageData;
import nu.marginalia.language.sentence.ThreadLocalSentenceExtractorProvider;
import nu.marginalia.model.DocumentFormat;
import nu.marginalia.model.EdgeUrl;
import nu.marginalia.model.crawl.HtmlFeature;
import nu.marginalia.model.crawl.PubDate;
import nu.marginalia.model.crawldata.CrawledDocument;
import nu.marginalia.model.idx.DocumentFlags;
import nu.marginalia.model.idx.DocumentMetadata;
import org.apache.commons.lang3.StringUtils;
import org.apache.pdfbox.Loader;
import org.apache.pdfbox.text.HeadingAwarePDFTextStripper;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.URISyntaxException;
import java.time.LocalDate;
import java.util.*;
public class PdfDocumentProcessorPlugin extends AbstractDocumentProcessorPlugin {
private final int maxTitleLength;
private final DocumentKeywordExtractor keywordExtractor;
private final ThreadLocalSentenceExtractorProvider sentenceExtractorProvider;
private final DocumentLengthLogic documentLengthLogic;
private final DefaultSpecialization defaultSpecialization;
private static final Logger logger = LoggerFactory.getLogger(PdfDocumentProcessorPlugin.class);
@Inject
public PdfDocumentProcessorPlugin(@Named("max-title-length") Integer maxTitleLength,
LanguageFilter languageFilter,
ThreadLocalSentenceExtractorProvider sentenceExtractorProvider,
DocumentKeywordExtractor keywordExtractor,
DocumentLengthLogic documentLengthLogic,
DefaultSpecialization defaultSpecialization)
{
super(languageFilter);
this.sentenceExtractorProvider = sentenceExtractorProvider;
this.documentLengthLogic = documentLengthLogic;
this.maxTitleLength = maxTitleLength;
this.keywordExtractor = keywordExtractor;
this.defaultSpecialization = defaultSpecialization;
}
@Override
public boolean isApplicable(CrawledDocument doc) {
String contentType = doc.contentType.toLowerCase();
if (contentType.equals("application/pdf"))
return true;
if (contentType.startsWith("application/pdf;")) // charset=blabla
return true;
return false;
}
@Override
public DetailsWithWords createDetails(CrawledDocument crawledDocument,
LinkTexts linkTexts,
DocumentClass documentClass)
throws DisqualifiedException, URISyntaxException, IOException {
String documentBody = crawledDocument.documentBody();
if (languageFilter.isBlockedUnicodeRange(documentBody)) {
throw new DisqualifiedException(DisqualifiedException.DisqualificationReason.LANGUAGE);
}
final EdgeUrl url = new EdgeUrl(crawledDocument.url);
Document doc;
try {
doc = convertPdfToHtml(crawledDocument.documentBodyBytes);
} catch (IOException e) {
logger.error("Failed to convert PDF file {} - {}", url, e.getMessage());
throw new DisqualifiedException(DisqualifiedException.DisqualificationReason.ERROR);
}
DocumentLanguageData dld = sentenceExtractorProvider.get().extractSentences(doc);
checkDocumentLanguage(dld);
documentLengthLogic.validateLength(dld, 1.0);
var ret = new ProcessedDocumentDetails();
ret.length = documentBody.length();
ret.format = DocumentFormat.PDF;
ret.title = StringUtils.truncate(defaultSpecialization.getTitle(doc, dld, url.toString()), maxTitleLength);
ret.quality = -5;
ret.features = Set.of(HtmlFeature.PDF);
ret.description = getDescription(doc);
ret.hashCode = dld.localitySensitiveHashCode();
final PubDate pubDate = new PubDate(LocalDate.ofYearDay(1993, 1));
EnumSet<DocumentFlags> documentFlags = EnumSet.of(DocumentFlags.PdfFile);
ret.metadata = new DocumentMetadata(
documentLengthLogic.getEncodedAverageLength(dld),
pubDate.yearByte(),
(int) -ret.quality,
documentFlags);
DocumentKeywordsBuilder words = keywordExtractor.extractKeywords(dld, linkTexts, url);
var tagWords = new MetaTagsBuilder()
.addPubDate(pubDate)
.addUrl(url)
.addFeatures(ret.features)
.addFormat(ret.format)
.build();
words.addAllSyntheticTerms(tagWords);
if (pubDate.hasYear()) {
ret.pubYear = pubDate.year();
}
/* These are assumed to be populated */
ret.linksInternal = new ArrayList<>();
ret.linksExternal = new ArrayList<>();
return new DetailsWithWords(ret, words);
}
private String getDescription(Document doc) {
int cnt = 0;
boolean useNext = false;
for (var ptag : doc.getElementsByTag("p")) {
String text = ptag.text();
// Many academic documents have an abstract at the start of the document,
// which makes a nice summary. It tends to bleed into the body text,
// so we check for the word "Abstract" at the start of the paragraph.
if (text.startsWith("Abstract ")) {
return StringUtils.abbreviate(text.substring("Abstract ".length()), "...", 255);
}
else if (text.equals("Abstract")) {
useNext = true;
}
else if (useNext) {
return StringUtils.abbreviate(text, "...", 255);
}
if (++cnt > 15) { // Don't scan the entire document
break;
}
}
// Fall back to the default specialization
return defaultSpecialization.getSummary(doc, Set.of());
}
/** Convert the provided PDF bytes into a HTML rendering that can be fed
* to the HTML processor.
*/
Document convertPdfToHtml(byte[] pdfBytes) throws IOException {
try (var doc = Loader.loadPDF(pdfBytes)) {
String docMetaTitle = Objects.requireNonNullElse(doc.getDocumentInformation().getTitle(), "");
var stripper = new HeadingAwarePDFTextStripper();
stripper.setStartPage(1);
stripper.setSortByPosition(true);
stripper.setWordSeparator(" ");
// Increase the tolerance for line spacing to deal better with paragraphs.
stripper.setDropThreshold(5f);
stripper.setPageStart("<div>");
stripper.setParagraphStart("<p>");
stripper.setParagraphEnd("</p>\n");
stripper.setPageEnd("</div>\n");
stripper.setHeadingStart("<h1>");
stripper.setHeadingEnd("</h1>\n");
stripper.setLineSeparator("\n");
String text = stripper.getText(doc);
StringBuilder htmlBuilder = new StringBuilder(text.length() + 1024);
htmlBuilder.append("<html><body>")
.append(text)
.append("</body></html>");
var parsed = Jsoup.parse(htmlBuilder.toString());
repairDOM(parsed);
for (var heading : parsed.getElementsByTag("h1")) {
String headingText = heading.text();
if (headingText.length() > 2) {
parsed.title(headingText);
break;
}
}
if (parsed.title().isEmpty()) {
// Prefer setting the title to the first paragraph in the
// document, as this is almost always correct. Otherwise,
// we fall back on the metadata title, which is almost always
// useless
var firstP = parsed.getElementsByTag("p").first();
if (firstP != null) parsed.title(firstP.text());
else parsed.title(docMetaTitle);
}
return parsed;
}
}
/** Repair the DOM to remove some common issues with PDF conversion,
* including empty paragraphs, and multiline headings that are split into
* multiple consecutive h1 tags.
*/
private void repairDOM(Document parsed) {
// <p><h1>...</h1></p> -> <h1>...</h1>
parsed.getElementsByTag("h1").forEach(h1 -> {
var parent = h1.parent();
if (parent == null || !"p".equals(parent.tagName())) {
return;
}
if (parent.childrenSize() == 1) {
parent.replaceWith(h1);
}
});
// Remove empty <p> tags
parsed.getElementsByTag("p").forEach(p -> {
if (p.childrenSize() == 0 && !p.hasText()) {
p.remove();
}
});
// <h1>...</h1><h1>...</h1> -> <h1>...</h1>
parsed.getElementsByTag("h1").forEach(h1 -> {
var nextSibling = h1.nextElementSibling();
if (nextSibling == null || !"h1".equals(nextSibling.tagName())) {
return; // Short-circuit to avoid unnecessary work
}
StringJoiner joiner = new StringJoiner(" ");
joiner.add(h1.text());
for (var sibling : h1.nextElementSiblings()) {
if (!"h1".equals(sibling.tagName()))
break;
joiner.add(sibling.text());
sibling.remove();
}
h1.text(joiner.toString());
});
}
}
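
Not part of the commit, but the conversion path is easy to exercise in isolation. A sketch using stock PDFBox, with the plain PDFTextStripper standing in for the repo's HeadingAwarePDFTextStripper and a placeholder input path:

import org.apache.pdfbox.Loader;
import org.apache.pdfbox.text.PDFTextStripper;
import org.jsoup.Jsoup;

import java.nio.file.Files;
import java.nio.file.Path;

public class PdfToHtmlSketch {
    public static void main(String[] args) throws Exception {
        byte[] pdfBytes = Files.readAllBytes(Path.of("/tmp/sample.pdf")); // placeholder path

        try (var pdf = Loader.loadPDF(pdfBytes)) {
            var stripper = new PDFTextStripper();
            stripper.setStartPage(1);
            stripper.setSortByPosition(true);
            stripper.setParagraphStart("<p>");
            stripper.setParagraphEnd("</p>\n");

            // Wrap the stripped text in a minimal HTML shell, as the plugin does
            var doc = Jsoup.parse("<html><body>" + stripper.getText(pdf) + "</body></html>");
            System.out.println(doc.getElementsByTag("p").size() + " paragraphs extracted");
        }
    }
}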


@@ -13,10 +13,10 @@ import nu.marginalia.keyword.LinkTexts;
 import nu.marginalia.keyword.model.DocumentKeywordsBuilder;
 import nu.marginalia.language.filter.LanguageFilter;
 import nu.marginalia.language.sentence.ThreadLocalSentenceExtractorProvider;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeUrl;
 import nu.marginalia.model.crawl.PubDate;
 import nu.marginalia.model.crawldata.CrawledDocument;
-import nu.marginalia.model.html.HtmlStandard;
 import nu.marginalia.model.idx.DocumentFlags;
 import nu.marginalia.model.idx.DocumentMetadata;
 import org.apache.commons.lang3.StringUtils;
@@ -91,7 +91,7 @@ public class PlainTextDocumentProcessorPlugin extends AbstractDocumentProcessorP
         ret.length = documentBody.length();
-        ret.standard = HtmlStandard.PLAIN;
+        ret.format = DocumentFormat.PLAIN;
         ret.title = StringUtils.truncate(plainTextLogic.getTitle(url, firstFewLines), maxTitleLength);
         ret.quality = -1;
@@ -113,7 +113,7 @@ public class PlainTextDocumentProcessorPlugin extends AbstractDocumentProcessorP
                 .addPubDate(pubDate)
                 .addUrl(url)
                 .addFeatures(ret.features)
-                .addFormat(ret.standard)
+                .addFormat(ret.format)
                 .build();
         words.addAllSyntheticTerms(tagWords);


@@ -1,12 +1,13 @@
 package nu.marginalia.converting.processor.pubdate;
-import nu.marginalia.model.html.HtmlStandard;
+import nu.marginalia.model.DocumentFormat;
 public class PubDateFromHtmlStandard {
     /** Used to bias pub date heuristics */
-    public static int blindGuess(HtmlStandard standard) {
-        return switch (standard) {
+    public static int blindGuess(DocumentFormat format) {
+        return switch (format) {
             case PLAIN -> 1993;
+            case PDF -> 2010;
             case HTML123 -> 1997;
             case HTML4, XHTML -> 2006;
             case HTML5 -> 2018;
@@ -21,8 +22,8 @@ public class PubDateFromHtmlStandard {
      * Discovering publication year involves a lot of guesswork, this helps
      * keep the guesses relatively sane.
      */
-    public static boolean isGuessPlausible(HtmlStandard standard, int year) {
-        switch (standard) {
+    public static boolean isGuessPlausible(DocumentFormat format, int year) {
+        switch (format) {
             case HTML123:
                 return year <= 2000;
             case XHTML:


@@ -1,14 +1,14 @@
 package nu.marginalia.converting.processor.pubdate;
 import nu.marginalia.converting.model.DocumentHeaders;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeUrl;
 import nu.marginalia.model.crawl.PubDate;
-import nu.marginalia.model.html.HtmlStandard;
 import org.jsoup.nodes.Document;
 import java.util.Optional;
 public interface PubDateHeuristic {
-    Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, HtmlStandard htmlStandard);
+    Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, DocumentFormat htmlStandard);
 }


@@ -1,7 +1,7 @@
 package nu.marginalia.converting.processor.pubdate;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.crawl.PubDate;
-import nu.marginalia.model.html.HtmlStandard;
 import java.time.DateTimeException;
 import java.time.LocalDate;
@@ -26,7 +26,7 @@ public class PubDateParser {
                 .filter(PubDateParser::validateDate);
     }
-    public static Optional<PubDate> attemptParseDate(String date, HtmlStandard standard) {
+    public static Optional<PubDate> attemptParseDate(String date, DocumentFormat standard) {
         return Optional.ofNullable(date)
                 .filter(str -> str.length() >= 4 && str.length() < 32)
                 .flatMap(str ->
@@ -81,7 +81,7 @@ public class PubDateParser {
     }
-    public static Optional<PubDate> dateFromHighestYearLookingSubstringWithGuess(String maybe, HtmlStandard standard) {
+    public static Optional<PubDate> dateFromHighestYearLookingSubstringWithGuess(String maybe, DocumentFormat standard) {
         int guess = PubDateFromHtmlStandard.blindGuess(standard);
         var matcher = yearPattern.matcher(maybe);
@@ -135,7 +135,7 @@ public class PubDateParser {
         return (max + min) / 2;
     }
-    public static int guessYear(HtmlStandard standard) {
+    public static int guessYear(DocumentFormat standard) {
         // Create some jitter to avoid having documents piling up in the same four years
         // as this would make searching in those years disproportionately useless


@@ -2,9 +2,9 @@ package nu.marginalia.converting.processor.pubdate;
 import nu.marginalia.converting.model.DocumentHeaders;
 import nu.marginalia.converting.processor.pubdate.heuristic.*;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeUrl;
 import nu.marginalia.model.crawl.PubDate;
-import nu.marginalia.model.html.HtmlStandard;
 import org.jsoup.nodes.Document;
 import java.util.ArrayList;
@@ -38,7 +38,7 @@ public class PubDateSniffer {
         heuristics.add(new PubDateHeuristicGuessFromHtmlStandard());
     }
-    public PubDate getPubDate(DocumentHeaders headers, EdgeUrl url, Document document, HtmlStandard htmlStandard, boolean runExpensive) {
+    public PubDate getPubDate(DocumentHeaders headers, EdgeUrl url, Document document, DocumentFormat htmlStandard, boolean runExpensive) {
         final PubDateEffortLevel effortLevel = runExpensive ? PubDateEffortLevel.HIGH : PubDateEffortLevel.LOW;
         for (var heuristic : heuristics) {


@@ -4,9 +4,9 @@ import nu.marginalia.converting.model.DocumentHeaders;
 import nu.marginalia.converting.processor.pubdate.PubDateEffortLevel;
 import nu.marginalia.converting.processor.pubdate.PubDateHeuristic;
 import nu.marginalia.converting.processor.pubdate.PubDateParser;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeUrl;
 import nu.marginalia.model.crawl.PubDate;
-import nu.marginalia.model.html.HtmlStandard;
 import org.jetbrains.annotations.NotNull;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
@@ -19,7 +19,7 @@ import java.util.Optional;
 public class PubDateHeuristicDOMParsingPass1 implements PubDateHeuristic {
     @Override
-    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, HtmlStandard htmlStandard) {
+    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, DocumentFormat htmlStandard) {
         if (effortLevel == PubDateEffortLevel.LOW)
             return Optional.empty();
@@ -33,9 +33,9 @@ public class PubDateHeuristicDOMParsingPass1 implements PubDateHeuristic {
     private static class DateExtractingNodeVisitorPass implements NodeFilter {
         public PubDate pubDate;
-        private final HtmlStandard htmlStandard;
+        private final DocumentFormat htmlStandard;
-        private DateExtractingNodeVisitorPass(HtmlStandard htmlStandard) {
+        private DateExtractingNodeVisitorPass(DocumentFormat htmlStandard) {
             this.htmlStandard = htmlStandard;
         }
@@ -135,7 +135,7 @@ public class PubDateHeuristicDOMParsingPass1 implements PubDateHeuristic {
         }
         private void parse(String text) {
-            if (htmlStandard == HtmlStandard.UNKNOWN) {
+            if (htmlStandard == DocumentFormat.UNKNOWN) {
                 PubDateParser
                         .dateFromHighestYearLookingSubstring(text)
                         .ifPresent(this::setPubDate);


@@ -5,9 +5,9 @@ import nu.marginalia.converting.processor.pubdate.PubDateEffortLevel;
 import nu.marginalia.converting.processor.pubdate.PubDateFromHtmlStandard;
 import nu.marginalia.converting.processor.pubdate.PubDateHeuristic;
 import nu.marginalia.converting.processor.pubdate.PubDateParser;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeUrl;
 import nu.marginalia.model.crawl.PubDate;
-import nu.marginalia.model.html.HtmlStandard;
 import org.jetbrains.annotations.NotNull;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Node;
@@ -19,7 +19,7 @@ import java.util.Optional;
 public class PubDateHeuristicDOMParsingPass2 implements PubDateHeuristic {
     @Override
-    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, HtmlStandard htmlStandard) {
+    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, DocumentFormat htmlStandard) {
         if (effortLevel == PubDateEffortLevel.LOW)
             return Optional.empty();
@@ -33,9 +33,9 @@ public class PubDateHeuristicDOMParsingPass2 implements PubDateHeuristic {
     private static class DateExtractingNodeVisitor implements NodeFilter {
         public PubDate pubDate;
-        private final HtmlStandard htmlStandard;
+        private final DocumentFormat htmlStandard;
-        private DateExtractingNodeVisitor(HtmlStandard htmlStandard) {
+        private DateExtractingNodeVisitor(DocumentFormat htmlStandard) {
             this.htmlStandard = htmlStandard;
         }
@@ -73,7 +73,7 @@ public class PubDateHeuristicDOMParsingPass2 implements PubDateHeuristic {
         }
         private void parse(String text) {
-            if (htmlStandard == HtmlStandard.UNKNOWN) {
+            if (htmlStandard == DocumentFormat.UNKNOWN) {
                 PubDateParser
                         .dateFromHighestYearLookingSubstring(text)
                         .ifPresent(this::setPubDate);

View File

@@ -4,9 +4,9 @@ import nu.marginalia.converting.model.DocumentHeaders;
 import nu.marginalia.converting.processor.pubdate.PubDateEffortLevel;
 import nu.marginalia.converting.processor.pubdate.PubDateHeuristic;
 import nu.marginalia.converting.processor.pubdate.PubDateParser;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeUrl;
 import nu.marginalia.model.crawl.PubDate;
-import nu.marginalia.model.html.HtmlStandard;
 import org.jsoup.nodes.Document;
 
 import java.util.Optional;
@@ -14,8 +14,8 @@ import java.util.Optional;
 public class PubDateHeuristicGuessFromHtmlStandard implements PubDateHeuristic {
     @Override
-    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, HtmlStandard htmlStandard) {
-        if (htmlStandard == HtmlStandard.UNKNOWN)
+    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, DocumentFormat htmlStandard) {
+        if (htmlStandard == DocumentFormat.UNKNOWN)
             return Optional.empty();
 
         return Optional.of(new PubDate(null, PubDateParser.guessYear(htmlStandard)));

View File

@@ -4,9 +4,9 @@ import nu.marginalia.converting.model.DocumentHeaders;
 import nu.marginalia.converting.processor.pubdate.PubDateEffortLevel;
 import nu.marginalia.converting.processor.pubdate.PubDateHeuristic;
 import nu.marginalia.converting.processor.pubdate.PubDateParser;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeUrl;
 import nu.marginalia.model.crawl.PubDate;
-import nu.marginalia.model.html.HtmlStandard;
 import org.jsoup.nodes.Document;
 
 import java.util.Optional;
@@ -14,7 +14,7 @@ import java.util.Optional;
 public class PubDateHeuristicHtml5AnyTimeTag implements PubDateHeuristic {
     @Override
-    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, HtmlStandard htmlStandard) {
+    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, DocumentFormat htmlStandard) {
         // HTML5, alternative approach
         for (var tag : document.select("time")) {
             var maybeDate = PubDateParser.attemptParseDate(tag.attr("datetime"));

View File

@@ -4,9 +4,9 @@ import nu.marginalia.converting.model.DocumentHeaders;
 import nu.marginalia.converting.processor.pubdate.PubDateEffortLevel;
 import nu.marginalia.converting.processor.pubdate.PubDateHeuristic;
 import nu.marginalia.converting.processor.pubdate.PubDateParser;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeUrl;
 import nu.marginalia.model.crawl.PubDate;
-import nu.marginalia.model.html.HtmlStandard;
 import org.jsoup.nodes.Document;
 
 import java.util.Optional;
@@ -14,7 +14,7 @@ import java.util.Optional;
 public class PubDateHeuristicHtml5ArticleDateTag implements PubDateHeuristic {
     @Override
-    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, HtmlStandard htmlStandard) {
+    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, DocumentFormat htmlStandard) {
         // HTML5
         for (var tag : document.select("time[pubdate=\"pubdate\"]")) {
             var maybeDate = PubDateParser.attemptParseDate(tag.attr("datetime"));

View File

@@ -4,9 +4,9 @@ import nu.marginalia.converting.model.DocumentHeaders;
 import nu.marginalia.converting.processor.pubdate.PubDateEffortLevel;
 import nu.marginalia.converting.processor.pubdate.PubDateHeuristic;
 import nu.marginalia.converting.processor.pubdate.PubDateParser;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeUrl;
 import nu.marginalia.model.crawl.PubDate;
-import nu.marginalia.model.html.HtmlStandard;
 import org.jsoup.nodes.Document;
 
 import java.util.Optional;
@@ -14,7 +14,7 @@ import java.util.Optional;
 public class PubDateHeuristicHtml5ItempropDateTag implements PubDateHeuristic {
     @Override
-    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, HtmlStandard htmlStandard) {
+    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, DocumentFormat htmlStandard) {
         for (var tag : document.select("time[itemprop=\"datePublished\"]")) {
             var maybeDate = PubDateParser.attemptParseDate(tag.attr("content"));
             if (maybeDate.isPresent()) {

View File

@@ -8,9 +8,9 @@ import nu.marginalia.converting.model.DocumentHeaders;
 import nu.marginalia.converting.processor.pubdate.PubDateEffortLevel;
 import nu.marginalia.converting.processor.pubdate.PubDateHeuristic;
 import nu.marginalia.converting.processor.pubdate.PubDateParser;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeUrl;
 import nu.marginalia.model.crawl.PubDate;
-import nu.marginalia.model.html.HtmlStandard;
 import org.jsoup.nodes.Document;
 
 import java.util.Collections;
@@ -21,7 +21,7 @@ import java.util.Optional;
 public class PubDateHeuristicJSONLD implements PubDateHeuristic {
     @Override
-    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, HtmlStandard htmlStandard) {
+    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, DocumentFormat htmlStandard) {
         for (var tag : document.select("script[type=\"application/ld+json\"]")) {
             var maybeDate = parseLdJson(tag.data())
                     .flatMap(PubDateParser::attemptParseDate);

View File

@@ -4,9 +4,9 @@ import nu.marginalia.converting.model.DocumentHeaders;
 import nu.marginalia.converting.processor.pubdate.PubDateEffortLevel;
 import nu.marginalia.converting.processor.pubdate.PubDateHeuristic;
 import nu.marginalia.converting.processor.pubdate.PubDateParser;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeUrl;
 import nu.marginalia.model.crawl.PubDate;
-import nu.marginalia.model.html.HtmlStandard;
 import org.jsoup.nodes.Document;
 
 import java.util.List;
@@ -15,7 +15,7 @@ import java.util.Optional;
 public class PubDateHeuristicLastModified implements PubDateHeuristic {
     @Override
-    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, HtmlStandard htmlStandard) {
+    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, DocumentFormat htmlStandard) {
         List<String> lastModified = headers.get("last-modified");
         if (lastModified.isEmpty())
             return Optional.empty();

View File

@@ -4,9 +4,9 @@ import nu.marginalia.converting.model.DocumentHeaders;
 import nu.marginalia.converting.processor.pubdate.PubDateEffortLevel;
 import nu.marginalia.converting.processor.pubdate.PubDateHeuristic;
 import nu.marginalia.converting.processor.pubdate.PubDateParser;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeUrl;
 import nu.marginalia.model.crawl.PubDate;
-import nu.marginalia.model.html.HtmlStandard;
 import org.jsoup.nodes.Document;
 
 import java.util.Optional;
@@ -14,7 +14,7 @@ import java.util.Optional;
 public class PubDateHeuristicMicrodata implements PubDateHeuristic {
     @Override
-    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, HtmlStandard htmlStandard) {
+    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, DocumentFormat htmlStandard) {
         for (var tag : document.select("meta[itemprop=\"datePublished\"]")) {
             var maybeDate = PubDateParser.attemptParseDate(tag.attr("content"));

View File

@@ -4,9 +4,9 @@ import nu.marginalia.converting.model.DocumentHeaders;
 import nu.marginalia.converting.processor.pubdate.PubDateEffortLevel;
 import nu.marginalia.converting.processor.pubdate.PubDateHeuristic;
 import nu.marginalia.converting.processor.pubdate.PubDateParser;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeUrl;
 import nu.marginalia.model.crawl.PubDate;
-import nu.marginalia.model.html.HtmlStandard;
 import org.jsoup.nodes.Document;
 
 import java.util.Optional;
@@ -14,7 +14,7 @@ import java.util.Optional;
 public class PubDateHeuristicOpenGraph implements PubDateHeuristic {
     @Override
-    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, HtmlStandard htmlStandard) {
+    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, DocumentFormat htmlStandard) {
         // OG
         for (var tag : document.select("meta[property=\"article:published_time\"]")) {
             var maybeDate = PubDateParser.attemptParseDate(tag.attr("content"));

View File

@@ -4,9 +4,9 @@ import nu.marginalia.converting.model.DocumentHeaders;
 import nu.marginalia.converting.processor.pubdate.PubDateEffortLevel;
 import nu.marginalia.converting.processor.pubdate.PubDateHeuristic;
 import nu.marginalia.converting.processor.pubdate.PubDateParser;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeUrl;
 import nu.marginalia.model.crawl.PubDate;
-import nu.marginalia.model.html.HtmlStandard;
 import org.jsoup.nodes.Document;
 
 import java.util.Optional;
@@ -14,7 +14,7 @@ import java.util.Optional;
 public class PubDateHeuristicRDFaTag implements PubDateHeuristic {
     @Override
-    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, HtmlStandard htmlStandard) {
+    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, DocumentFormat htmlStandard) {
         for (var tag : document.select("meta[property=\"datePublished\"]")) {
             var maybeDate = PubDateParser.attemptParseDate(tag.attr("content"));
             if (maybeDate.isPresent()) {

View File

@@ -4,9 +4,9 @@ import nu.marginalia.converting.model.DocumentHeaders;
 import nu.marginalia.converting.processor.pubdate.PubDateEffortLevel;
 import nu.marginalia.converting.processor.pubdate.PubDateHeuristic;
 import nu.marginalia.converting.processor.pubdate.PubDateParser;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeUrl;
 import nu.marginalia.model.crawl.PubDate;
-import nu.marginalia.model.html.HtmlStandard;
 import org.jsoup.nodes.Document;
 
 import java.util.Optional;
@@ -21,7 +21,7 @@ public class PubDateHeuristicUrlPatternPass1 implements PubDateHeuristic {
     private static final int MIN_URL_PATTERN_YEAR = 2000;
 
     @Override
-    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, HtmlStandard htmlStandard) {
+    public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url, Document document, DocumentFormat htmlStandard) {
         final String urlString = url.path;
 
         var matcher = yearUrlPattern.matcher(urlString);

View File

@@ -4,9 +4,9 @@ import nu.marginalia.converting.model.DocumentHeaders;
 import nu.marginalia.converting.processor.pubdate.PubDateEffortLevel;
 import nu.marginalia.converting.processor.pubdate.PubDateHeuristic;
 import nu.marginalia.converting.processor.pubdate.PubDateParser;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeUrl;
 import nu.marginalia.model.crawl.PubDate;
-import nu.marginalia.model.html.HtmlStandard;
 import org.jsoup.nodes.Document;
 
 import java.util.Optional;
@@ -19,7 +19,7 @@ public class PubDateHeuristicUrlPatternPass2 implements PubDateHeuristic {
     @Override
     public Optional<PubDate> apply(PubDateEffortLevel effortLevel, DocumentHeaders headers, EdgeUrl url,
-                                   Document document, HtmlStandard htmlStandard) {
+                                   Document document, DocumentFormat htmlStandard) {
         final String urlString = url.path;
 
         var matcher = yearUrlPattern.matcher(urlString);

View File

@@ -8,12 +8,12 @@ import nu.marginalia.converting.model.ProcessedDocument;
 import nu.marginalia.converting.processor.DocumentClass;
 import nu.marginalia.converting.processor.plugin.HtmlDocumentProcessorPlugin;
 import nu.marginalia.keyword.LinkTexts;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeUrl;
 import nu.marginalia.model.crawl.HtmlFeature;
 import nu.marginalia.model.crawl.PubDate;
 import nu.marginalia.model.crawl.UrlIndexingState;
 import nu.marginalia.model.crawldata.CrawledDocument;
-import nu.marginalia.model.html.HtmlStandard;
 import nu.marginalia.model.idx.DocumentFlags;
 import nu.marginalia.model.idx.DocumentMetadata;
 import nu.marginalia.model.idx.WordFlags;
@@ -53,6 +53,7 @@ public class SideloaderProcessing {
                 "",
                 body.getBytes(StandardCharsets.UTF_8),
                 false,
+                -1,
                 null,
                 null
         );
@@ -83,7 +84,7 @@ public class SideloaderProcessing {
         // that we can't get from the sideloaded data since it's
         // so stripped down
-        ret.details.standard = HtmlStandard.HTML5;
+        ret.details.format = DocumentFormat.HTML5;
         ret.details.pubYear = pubYear;
         ret.details.features.add(HtmlFeature.JS);
         ret.details.features.add(HtmlFeature.TRACKING);

View File

@@ -9,13 +9,13 @@ import nu.marginalia.integration.stackexchange.sqlite.StackExchangePostsDb;
 import nu.marginalia.keyword.DocumentKeywordExtractor;
 import nu.marginalia.keyword.LinkTexts;
 import nu.marginalia.language.sentence.ThreadLocalSentenceExtractorProvider;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeDomain;
 import nu.marginalia.model.EdgeUrl;
 import nu.marginalia.model.crawl.DomainIndexingState;
 import nu.marginalia.model.crawl.HtmlFeature;
 import nu.marginalia.model.crawl.PubDate;
 import nu.marginalia.model.crawl.UrlIndexingState;
-import nu.marginalia.model.html.HtmlStandard;
 import nu.marginalia.model.idx.DocumentFlags;
 import nu.marginalia.model.idx.DocumentMetadata;
 import nu.marginalia.model.idx.WordFlags;
@@ -165,7 +165,7 @@ public class StackexchangeSideloader implements SideloadSource {
         ret.details.description = StringUtils.truncate(doc.body().text(), 255);
         ret.details.length = 128;
-        ret.details.standard = HtmlStandard.HTML5;
+        ret.details.format = DocumentFormat.HTML5;
         ret.details.linksExternal = List.of();
         ret.details.linksInternal = List.of();
         ret.state = UrlIndexingState.OK;

View File

@@ -124,7 +124,7 @@ public class ConverterBatchWriter implements AutoCloseable, ConverterBatchWriter
                 document.details.title,
                 document.details.description,
                 HtmlFeature.encode(document.details.features),
-                document.details.standard.name(),
+                document.details.format.name(),
                 document.details.length,
                 document.details.hashCode,
                 (float) document.details.quality,

View File

@@ -6,6 +6,7 @@ import com.google.inject.Injector;
 import nu.marginalia.converting.model.ProcessedDocument;
 import nu.marginalia.converting.processor.DomainProcessor;
 import nu.marginalia.io.SerializableCrawlDataStream;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeDomain;
 import nu.marginalia.model.crawl.DomainIndexingState;
 import nu.marginalia.model.crawl.PubDate;
@@ -13,7 +14,6 @@ import nu.marginalia.model.crawl.UrlIndexingState;
 import nu.marginalia.model.crawldata.CrawledDocument;
 import nu.marginalia.model.crawldata.CrawledDomain;
 import nu.marginalia.model.crawldata.SerializableCrawlData;
-import nu.marginalia.model.html.HtmlStandard;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Tag;
 import org.junit.jupiter.api.Test;
@@ -91,7 +91,7 @@ public class ConvertingIntegrationTest {
             assertTrue(details.title.length() > 4);
             assertTrue(details.description.length() > 4);
-            assertEquals(HtmlStandard.HTML5, details.standard);
+            assertEquals(DocumentFormat.HTML5, details.format);
         }
     }
@@ -125,7 +125,7 @@ public class ConvertingIntegrationTest {
             assertTrue(details.metadata.size() > 0);
             assertTrue(details.title.length() > 4);
             assertTrue(details.description.length() > 4);
-            assertEquals(HtmlStandard.HTML5, details.standard);
+            assertEquals(DocumentFormat.HTML5, details.format);
         }
     }
@@ -148,6 +148,7 @@ public class ConvertingIntegrationTest {
                 "",
                 readClassPathFile(p.toString()).getBytes(),
                 false,
+                -1,
                 null,
                 null
         );

View File

@@ -0,0 +1,95 @@
+package nu.marginalia.converting.processor.plugin;
+
+import nu.marginalia.WmsaHome;
+import nu.marginalia.converting.processor.DocumentClass;
+import nu.marginalia.converting.processor.logic.DocumentLengthLogic;
+import nu.marginalia.converting.processor.logic.TitleExtractor;
+import nu.marginalia.converting.processor.plugin.specialization.DefaultSpecialization;
+import nu.marginalia.converting.processor.summary.SummaryExtractor;
+import nu.marginalia.converting.processor.summary.heuristic.*;
+import nu.marginalia.keyword.DocumentKeywordExtractor;
+import nu.marginalia.keyword.LinkTexts;
+import nu.marginalia.language.filter.LanguageFilter;
+import nu.marginalia.language.sentence.ThreadLocalSentenceExtractorProvider;
+import nu.marginalia.model.crawldata.CrawledDocument;
+import nu.marginalia.term_frequency_dict.TermFrequencyDict;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.net.HttpURLConnection;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.time.Instant;
+
+@Tag("flaky")
+class PdfDocumentProcessorPluginTest {
+    static PdfDocumentProcessorPlugin plugin;
+
+    @BeforeAll
+    static void setUpBeforeClass() throws Exception {
+        var lm = WmsaHome.getLanguageModels();
+        plugin = new PdfDocumentProcessorPlugin(255,
+                new LanguageFilter(lm),
+                new ThreadLocalSentenceExtractorProvider(lm),
+                new DocumentKeywordExtractor(new TermFrequencyDict(lm)),
+                new DocumentLengthLogic(100),
+                new DefaultSpecialization(new SummaryExtractor(
+                        255,
+                        new DomFilterHeuristic(255),
+                        new TagDensityHeuristic(255),
+                        new OpenGraphDescriptionHeuristic(),
+                        new MetaDescriptionHeuristic(),
+                        new FallbackHeuristic()
+                ),
+                new TitleExtractor(255)
+                ));
+    }
+
+    public AbstractDocumentProcessorPlugin.DetailsWithWords testPdfFile(byte[] pdfBytes) throws Exception {
+        var doc = new CrawledDocument("test", "https://www.example.com/sample.pdf", "application/pdf", Instant.now().toString(), 200, "OK", "OK", "", pdfBytes, false, -1, null, null);
+        return plugin.createDetails(doc, new LinkTexts(), DocumentClass.NORMAL);
+    }
+
+    public AbstractDocumentProcessorPlugin.DetailsWithWords testPdfFile(Path file) throws Exception {
+        return testPdfFile(Files.readAllBytes(file));
+    }
+
+    private byte[] downloadPDF(String url) throws IOException, URISyntaxException {
+        HttpURLConnection conn = (HttpURLConnection) new URI(url).toURL().openConnection();
+        try {
+            return conn.getInputStream().readAllBytes();
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        } finally {
+            conn.disconnect();
+        }
+    }
+
+    @Disabled
+    @Test
+    void testingTool() throws Exception {
+        System.out.println(testPdfFile(Path.of("/home/st_work/Work/sample.pdf")).details().title);
+        System.out.println(testPdfFile(Path.of("/home/st_work/Work/sample2.pdf")).details().title);
+        System.out.println(testPdfFile(Path.of("/home/st_work/Work/sample3.pdf")).details().title);
+        System.out.println(testPdfFile(Path.of("/home/st_work/Work/sample4.pdf")).details().title);
+        System.out.println(testPdfFile(Path.of("/home/st_work/Work/sample5.pdf")).details().title);
+        System.out.println(testPdfFile(Path.of("/home/st_work/Work/sample6.pdf")).details().title);
+    }
+
+    @Disabled
+    @Test
+    void testingTool2() throws Exception {
+        System.out.println(plugin.convertPdfToHtml(Files.readAllBytes(Path.of("/home/st_work/Work/sample6.pdf"))));
+    }
+
+    @Test
+    void testMarginaliaSample() throws Exception {
+        var doc = plugin.convertPdfToHtml(downloadPDF("https://www.marginalia.nu/junk/test.pdf"));
+        System.out.println(doc.html());
+    }
+}

View File

@@ -3,8 +3,8 @@ package nu.marginalia.converting.processor.pubdate;
 import nu.marginalia.WmsaHome;
 import nu.marginalia.converting.model.DocumentHeaders;
 import nu.marginalia.converting.processor.pubdate.heuristic.PubDateHeuristicDOMParsingPass2;
+import nu.marginalia.model.DocumentFormat;
 import nu.marginalia.model.EdgeUrl;
-import nu.marginalia.model.html.HtmlStandard;
 import org.jsoup.Jsoup;
 import org.junit.jupiter.api.Test;
@@ -74,7 +74,7 @@ class PubDateSnifferTest {
                 <time pubdate="pubdate" datetime="2022-08-24">time</time>
                 Wow, sure lor 'em boss
                 </article>
-                """), HtmlStandard.UNKNOWN, true);
+                """), DocumentFormat.UNKNOWN, true);
 
         assertFalse(ret.isEmpty());
         assertEquals("2022-08-24", ret.dateIso8601());
@@ -90,7 +90,7 @@ class PubDateSnifferTest {
                 <time>2022-08-24</time>
                 Wow, sure lor 'em boss
                 </article>
-                """), HtmlStandard.UNKNOWN, true);
+                """), DocumentFormat.UNKNOWN, true);
 
         assertFalse(ret.isEmpty());
         assertEquals("2022-08-24", ret.dateIso8601());
@@ -106,7 +106,7 @@ class PubDateSnifferTest {
                 <time class="published" datetime="July 13, 2006">July 13, 2006</time>
                 Wow, sure lor 'em boss
                 </article>
-                """), HtmlStandard.UNKNOWN, true);
+                """), DocumentFormat.UNKNOWN, true);
 
         assertFalse(ret.isEmpty());
         assertEquals(2006, ret.year());
@@ -116,14 +116,14 @@ class PubDateSnifferTest {
     public void testProblemCases() throws IOException, URISyntaxException {
         var ret = dateSniffer.getPubDate(new DocumentHeaders(""),
                 new EdgeUrl("https://www.example.com/"),
-                Jsoup.parse(Files.readString(WmsaHome.getHomePath().resolve("test-data/The Switch to Linux Begins .html"))), HtmlStandard.HTML5, true);
+                Jsoup.parse(Files.readString(WmsaHome.getHomePath().resolve("test-data/The Switch to Linux Begins .html"))), DocumentFormat.HTML5, true);
 
         assertFalse(ret.isEmpty());
         assertEquals(2006, ret.year());
 
         ret = dateSniffer.getPubDate(new DocumentHeaders(""),
                 new EdgeUrl("https://www.example.com/"),
-                Jsoup.parse(Files.readString(WmsaHome.getHomePath().resolve("test-data/Black Hat USA 2010 Understanding and Deploying DNSSEC by Paul Wouters and Patrick Nauber.html"))), HtmlStandard.XHTML, true);
+                Jsoup.parse(Files.readString(WmsaHome.getHomePath().resolve("test-data/Black Hat USA 2010 Understanding and Deploying DNSSEC by Paul Wouters and Patrick Nauber.html"))), DocumentFormat.XHTML, true);
 
         assertFalse(ret.isEmpty());
         assertEquals(2010, ret.year());
@@ -146,7 +146,7 @@ class PubDateSnifferTest {
                 <!doctype html>
                 <html>
                 <meta itemprop="datePublished" content="2022-08-24" />
-                """), HtmlStandard.UNKNOWN, true);
+                """), DocumentFormat.UNKNOWN, true);
 
         assertFalse(ret.isEmpty());
         assertEquals("2022-08-24", ret.dateIso8601());
@@ -160,7 +160,7 @@ class PubDateSnifferTest {
                 <!doctype html>
                 <html>
                 <meta property="datePublished" content="2022-08-24" />
-                """), HtmlStandard.UNKNOWN, true);
+                """), DocumentFormat.UNKNOWN, true);
 
         assertFalse(ret.isEmpty());
         assertEquals("2022-08-24", ret.dateIso8601());
@@ -174,7 +174,7 @@ class PubDateSnifferTest {
                 <!doctype html>
                 <html>
                 <script type="application/ld+json">{"@context":"https:\\/\\/schema.org","@type":"Article","name":"In the Year 2525","url":"https:\\/\\/en.wikipedia.org\\/wiki\\/In_the_Year_2525","sameAs":"http:\\/\\/www.wikidata.org\\/entity\\/Q145269","mainEntity":"http:\\/\\/www.wikidata.org\\/entity\\/Q145269","author":{"@type":"Organization","name":"Contributors to Wikimedia projects"},"publisher":{"@type":"Organization","name":"Wikimedia Foundation, Inc.","logo":{"@type":"ImageObject","url":"https:\\/\\/www.wikimedia.org\\/static\\/images\\/wmf-hor-googpub.png"}},"datePublished":"2004-08-24T14:39:14Z","dateModified":"2022-10-20T11:54:37Z","image":"https:\\/\\/upload.wikimedia.org\\/wikipedia\\/commons\\/4\\/4a\\/In_the_Year_2525_by_Zager_and_Evans_US_vinyl_Side-A_RCA_release.png","headline":"song written and compsoed by Rick Evans, originally recorded by Zager and Evans and released in 1969"}</script>
-                """), HtmlStandard.UNKNOWN, true);
+                """), DocumentFormat.UNKNOWN, true);
 
         assertFalse(ret.isEmpty());
         assertEquals("2004-08-24", ret.dateIso8601());
@@ -188,7 +188,7 @@ class PubDateSnifferTest {
                 <!doctype html>
                 <html>
                 <script type="application/ld+json" class="aioseop-schema">{"@context":"https://schema.org","@graph":[{"@type":"Organization","@id":"https://socialnomics.net/#organization","url":"https://socialnomics.net/","name":"Socialnomics","sameAs":[]},{"@type":"WebSite","@id":"https://socialnomics.net/#website","url":"https://socialnomics.net/","name":"Socialnomics","publisher":{"@id":"https://socialnomics.net/#organization"}},{"@type":"WebPage","@id":"https://socialnomics.net/2016/12/27/3-reasons-why-you-should-adopt-java-based-technology-for-your-business/#webpage","url":"https://socialnomics.net/2016/12/27/3-reasons-why-you-should-adopt-java-based-technology-for-your-business/","inLanguage":"en-US","name":"3 Reasons Why You Should Adopt Java-based Technology For Your Business","isPartOf":{"@id":"https://socialnomics.net/#website"},"breadcrumb":{"@id":"https://socialnomics.net/2016/12/27/3-reasons-why-you-should-adopt-java-based-technology-for-your-business/#breadcrumblist"},"datePublished":"2016-12-27T21:01:36-06:00","dateModified":"2016-12-22T21:02:32-06:00"},{"@type":"Article","@id":"https://socialnomics.net/2016/12/27/3-reasons-why-you-should-adopt-java-based-technology-for-your-business/#article","isPartOf":{"@id":"https://socialnomics.net/2016/12/27/3-reasons-why-you-should-adopt-java-based-technology-for-your-business/#webpage"},"author":{"@id":"https://socialnomics.net/author/rahis-saifi/#author"},"headline":"3 Reasons Why You Should Adopt Java-based Technology For Your Business","datePublished":"2016-12-27T21:01:36-06:00","dateModified":"2016-12-22T21:02:32-06:00","commentCount":0,"mainEntityOfPage":{"@id":"https://socialnomics.net/2016/12/27/3-reasons-why-you-should-adopt-java-based-technology-for-your-business/#webpage"},"publisher":{"@id":"https://socialnomics.net/#organization"},"articleSection":"Business, business, java, Java Developers, programming languages"},{"@type":"Person","@id":"https://socialnomics.net/author/rahis-saifi/#author","name":"Rahis Saifi","sameAs":["https://www.facebook.com/RahisSaifiOfficial","https://www.twitter.com/57rahis"],"image":{"@type":"ImageObject","@id":"https://socialnomics.net/#personlogo","url":"https://secure.gravatar.com/avatar/e67f630f0b8bc87e59e111d5e955961d?s=96&d=mm&r=g","width":96,"height":96,"caption":"Rahis Saifi"}},{"@type":"BreadcrumbList","@id":"https://socialnomics.net/2016/12/27/3-reasons-why-you-should-adopt-java-based-technology-for-your-business/#breadcrumblist","itemListElement":[{"@type":"ListItem","position":1,"item":{"@type":"WebPage","@id":"https://socialnomics.net/","url":"https://socialnomics.net/","name":"Socialnomics Blog"}},{"@type":"ListItem","position":2,"item":{"@type":"WebPage","@id":"https://socialnomics.net/2016/12/27/3-reasons-why-you-should-adopt-java-based-technology-for-your-business/","url":"https://socialnomics.net/2016/12/27/3-reasons-why-you-should-adopt-java-based-technology-for-your-business/","name":"3 Reasons Why You Should Adopt Java-based Technology For Your Business"}}]}]}</script>
-                """), HtmlStandard.UNKNOWN, true);
+                """), DocumentFormat.UNKNOWN, true);
 
         assertFalse(ret.isEmpty());
         assertEquals("2016-12-27", ret.dateIso8601());
@@ -202,7 +202,7 @@ class PubDateSnifferTest {
                 <!doctype html>
                 <html>
                 <title>No date in the HTML</title>
-                """), HtmlStandard.UNKNOWN, true);
+                """), DocumentFormat.UNKNOWN, true);
 
         assertFalse(ret.isEmpty());
         assertNull(ret.dateIso8601());
@@ -217,7 +217,7 @@ class PubDateSnifferTest {
                 <!doctype html>
                 <html>
                 <title>No date in the HTML</title>
-                """), HtmlStandard.UNKNOWN, true);
+                """), DocumentFormat.UNKNOWN, true);
 
         assertFalse(ret.isEmpty());
         assertEquals("2022-02-03", ret.dateIso8601());
@@ -232,7 +232,7 @@ class PubDateSnifferTest {
                 <!doctype html>
                 <html>
                 <p>Published 2003, updated 2022</p>
-                """), HtmlStandard.HTML5, true);
+                """), DocumentFormat.HTML5, true);
 
         assertFalse(ret.isEmpty());
         assertNull(ret.dateIso8601());
@@ -258,7 +258,7 @@ class PubDateSnifferTest {
                 <!doctype html>
                 <html>
                 <div style="float: left;">&nbsp;<b>Post subject:</b> Keyboards.</div><div style="float: right;"><span class="postdetails"><b><img src="./styles/subsilver2/imageset/icon_post_target.gif" width="12" height="9" alt="Post" title="Post" /> <a href="./viewtopic.php?p=34580&amp;sid=cf0c13dedebb4fea1f03fa73e510cd9f#p34580">#1</a></b></span>&nbsp;<b>Posted:</b> Sun Oct 03, 2010 5:37 pm&nbsp;</div>
-                """), HtmlStandard.UNKNOWN, true);
+                """), DocumentFormat.UNKNOWN, true);
 
         assertFalse(ret.isEmpty());
         assertNull(ret.dateIso8601());

View File

@@ -67,8 +67,6 @@ dependencies {
     testImplementation libs.mockito
    testImplementation libs.wiremock
 
     testImplementation project(':code:processes:test-data')
 }

View File

@@ -448,13 +448,7 @@ public class CrawlerMain extends ProcessMainClass {
                 // We don't have a lock, so we can't run this task
                 // we return to avoid blocking the pool for too long
                 if (lock.isEmpty()) {
-                    if (retryQueue.remainingCapacity() > 0) {
-                        // Sleep a moment to avoid busy looping via the retry queue
-                        // in the case when few tasks remain and almost all are ineligible for
-                        // immediate restart
-                        Thread.sleep(5);
-                    }
+                    pendingCrawlTasks.remove(domain);
                     retryQueue.put(this);
                     return;
                 }

View File

@@ -10,6 +10,7 @@ import java.net.http.HttpClient;
 import java.net.http.HttpHeaders;
 import java.net.http.HttpResponse;
 import java.nio.charset.StandardCharsets;
+import java.time.Duration;
 import java.util.*;
 import java.util.stream.Collectors;
@@ -90,8 +91,8 @@ public class WarcProtocolReconstructor {
         return "HTTP/" + version + " " + statusCode + " " + statusMessage + "\r\n" + headerString + "\r\n\r\n";
     }
 
-    static String getResponseHeader(ClassicHttpResponse response, long size) {
-        String headerString = getHeadersAsString(response.getHeaders(), size);
+    static String getResponseHeader(ClassicHttpResponse response, Duration responseDuration, long size) {
+        String headerString = getHeadersAsString(response.getHeaders(), responseDuration, size);
 
         return response.getVersion().format() + " " + response.getCode() + " " + response.getReasonPhrase() + "\r\n" + headerString + "\r\n\r\n";
     }
@@ -160,7 +161,7 @@ public class WarcProtocolReconstructor {
-    static private String getHeadersAsString(Header[] headers, long responseSize) {
+    static private String getHeadersAsString(Header[] headers, Duration responseDuration, long responseSize) {
         StringJoiner joiner = new StringJoiner("\r\n");
 
         for (var header : headers) {
@@ -176,6 +177,7 @@ public class WarcProtocolReconstructor {
             if (headerCapitalized.equals("Content-Encoding"))
                 continue;
 
             // Since we're transparently decoding gzip, we need to update the Content-Length header
             // to reflect the actual size of the response body. We'll do this at the end.
             if (headerCapitalized.equals("Content-Length"))
@@ -184,6 +186,7 @@ public class WarcProtocolReconstructor {
             joiner.add(headerCapitalized + ": " + header.getValue());
         }
 
+        joiner.add("X-Marginalia-Response-Time: " + responseDuration.toMillis());
         joiner.add("Content-Length: " + responseSize);
 
         return joiner.toString();

View File

@@ -93,7 +93,7 @@ public class WarcRecorder implements AutoCloseable {
         WarcDigestBuilder responseDigestBuilder = new WarcDigestBuilder();
         WarcDigestBuilder payloadDigestBuilder = new WarcDigestBuilder();
 
-        Instant date = Instant.now();
+        Instant requestDate = Instant.now();
 
         // Not entirely sure why we need to do this, but keeping it due to Chesterton's Fence
         Map<String, List<String>> extraHeaders = new HashMap<>(request.getHeaders().length);
@@ -108,6 +108,8 @@ public class WarcRecorder implements AutoCloseable {
         try (WarcInputBuffer inputBuffer = WarcInputBuffer.forResponse(response, request, timeout);
              InputStream inputStream = inputBuffer.read()) {
 
+            Instant responseDate = Instant.now();
+
             cookies.updateCookieStore(response);
 
             // Build and write the request
@@ -126,7 +128,7 @@ public class WarcRecorder implements AutoCloseable {
             WarcRequest warcRequest = new WarcRequest.Builder(requestUri)
                     .blockDigest(requestDigestBuilder.build())
-                    .date(date)
+                    .date(requestDate)
                     .body(MediaType.HTTP_REQUEST, httpRequestString)
                     .build();
@@ -138,7 +140,9 @@ public class WarcRecorder implements AutoCloseable {
                 response.addHeader("X-Has-Cookies", 1);
             }
 
-            byte[] responseHeaders = WarcProtocolReconstructor.getResponseHeader(response, inputBuffer.size()).getBytes(StandardCharsets.UTF_8);
+            byte[] responseHeaders = WarcProtocolReconstructor.getResponseHeader(response,
+                    Duration.between(requestDate, responseDate),
+                    inputBuffer.size()).getBytes(StandardCharsets.UTF_8);
 
             ResponseDataBuffer responseDataBuffer = new ResponseDataBuffer(inputBuffer.size() + responseHeaders.length);
@@ -169,7 +173,7 @@ public class WarcRecorder implements AutoCloseable {
             WarcResponse.Builder responseBuilder = new WarcResponse.Builder(responseUri)
                     .blockDigest(responseDigestBuilder.build())
-                    .date(date)
+                    .date(responseDate)
                     .concurrentTo(warcRequest.id())
                     .body(MediaType.HTTP_RESPONSE, responseDataBuffer.copyBytes());
@@ -184,7 +188,7 @@ public class WarcRecorder implements AutoCloseable {
             warcResponse.http(); // force HTTP header to be parsed before body is consumed so that caller can use it
             writer.write(warcResponse);
 
-            if (Duration.between(date, Instant.now()).compareTo(Duration.ofSeconds(9)) > 0
+            if (Duration.between(requestDate, Instant.now()).compareTo(Duration.ofSeconds(9)) > 0
                     && inputBuffer.size() < 2048
                     && !requestUri.getPath().endsWith("robots.txt")) // don't bail on robots.txt
             {
@@ -196,7 +200,7 @@ public class WarcRecorder implements AutoCloseable {
                 logger.warn("URL {} took too long to fetch ({}s) and was too small for the effort ({}b)",
                         requestUri,
-                        Duration.between(date, Instant.now()).getSeconds(),
+                        Duration.between(requestDate, Instant.now()).getSeconds(),
                         inputBuffer.size()
                 );

View File

@@ -74,7 +74,7 @@ public class CrawlerRevisitor {
             // If the reference document is empty or the HTTP status is not 200, we'll skip it since it's
             // unlikely to produce anything meaningful for us.
-            if (doc.httpStatus != 200)
+            if (doc.httpStatus != 200 && doc.httpStatus != 206)
                 continue;
             if (!doc.hasBody())
                 continue;

View File

@@ -58,7 +58,7 @@ public record DocumentWithReference(
         if (null == doc)
             return ContentTags.empty();
-        if (doc.documentBodyBytes.length == 0 || doc.httpStatus != 200)
+        if (doc.documentBodyBytes.length == 0 || (doc.httpStatus != 200 && doc.httpStatus != 206))
             return ContentTags.empty();
 
         String lastmod = doc.getLastModified();

View File

@@ -1,19 +1,23 @@
 package nu.marginalia;
+import org.apache.commons.lang3.StringUtils;
 import java.util.Set;
 public class ContentTypes {
     public static final Set<String> acceptedContentTypes = Set.of("application/xhtml+xml",
             "application/xhtml",
             "text/html",
+            "text/markdown",
+            "text/x-markdown",
             "application/pdf",
             "image/x-icon",
             "text/plain");
     public static boolean isAccepted(String contentTypeHeader) {
-        String lcHeader = contentTypeHeader.toLowerCase();
+        String lcHeader = StringUtils.substringBefore(contentTypeHeader.toLowerCase(), ';');
         for (var type : acceptedContentTypes) {
-            if (lcHeader.startsWith(type)) {
+            if (lcHeader.equals(type)) {
                 return true;
             }
         }
@@ -21,7 +25,7 @@ public class ContentTypes {
     }
     public static boolean isBinary(String contentTypeHeader) {
-        String lcHeader = contentTypeHeader.toLowerCase();
+        String lcHeader = StringUtils.substringBefore(contentTypeHeader.toLowerCase(), ';');
         return lcHeader.startsWith("application/pdf");
     }
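
Two behavioural changes hide in this small diff: MIME parameters such as charset are stripped before matching, and the match is now exact instead of prefix-based. A minimal demo of the consequences (my example, assuming the class above is on the classpath):

    class ContentTypesDemo {
        public static void main(String[] args) {
            System.out.println(ContentTypes.isAccepted("text/html; charset=utf-8")); // true: params stripped, exact match
            System.out.println(ContentTypes.isAccepted("text/html-templates"));      // false: the old startsWith() would have accepted this
            System.out.println(ContentTypes.isBinary("application/pdf;foo=bar"));    // true: params stripped before the prefix test
        }
    }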

View File

@@ -148,6 +148,7 @@ public class ParquetSerializableCrawlDataStream implements AutoCloseable, Serial
     nextRecord.body,
     // this field isn't actually used, maybe we can skip calculating it?
     nextRecord.cookies,
+    -1,
     lastModified,
     etag));
 }

View File

@@ -166,6 +166,7 @@ public class SlopSerializableCrawlDataStream implements AutoCloseable, Serializa
     nextRecord.body(),
     // this field isn't actually used, maybe we can skip calculating it?
     nextRecord.cookies(),
+    nextRecord.requestTimeMs(),
     null,
     null));
 }

View File

@@ -23,6 +23,7 @@ public final class CrawledDocument implements SerializableCrawlData {
     public String crawlerStatus;
     public String crawlerStatusDesc;
+    public int requestTimeMs;
     @Nullable
     public String headers;
@@ -82,7 +83,7 @@ public final class CrawledDocument implements SerializableCrawlData {
     public String lastModifiedMaybe;
     public String etagMaybe;
-    public CrawledDocument(String crawlId, String url, String contentType, String timestamp, int httpStatus, String crawlerStatus, String crawlerStatusDesc, @Nullable String headers, byte[] documentBodyBytes, Boolean hasCookies, String lastModifiedMaybe, String etagMaybe) {
+    public CrawledDocument(String crawlId, String url, String contentType, String timestamp, int httpStatus, String crawlerStatus, String crawlerStatusDesc, @Nullable String headers, byte[] documentBodyBytes, Boolean hasCookies, int requestTimeMs, String lastModifiedMaybe, String etagMaybe) {
         this.crawlId = crawlId;
         this.url = url;
         this.contentType = contentType;
@@ -94,6 +95,7 @@ public final class CrawledDocument implements SerializableCrawlData {
         this.documentBodyBytes = Objects.requireNonNullElse(documentBodyBytes, new byte[] {});
         this.hasCookies = hasCookies;
         this.lastModifiedMaybe = lastModifiedMaybe;
+        this.requestTimeMs = requestTimeMs;
         this.etagMaybe = etagMaybe;
     }
@@ -173,6 +175,7 @@ public final class CrawledDocument implements SerializableCrawlData {
     private byte[] documentBodyBytes = new byte[0];
     private String recrawlState;
     private Boolean hasCookies;
+    private int requestTimeMs;
     private String lastModifiedMaybe;
     private String etagMaybe;
@@ -248,8 +251,13 @@ public final class CrawledDocument implements SerializableCrawlData {
         return this;
     }
+    public CrawledDocumentBuilder requestTimeMs(int requestTimeMs) {
+        this.requestTimeMs = requestTimeMs;
+        return this;
+    }
     public CrawledDocument build() {
-        return new CrawledDocument(this.crawlId, this.url, this.contentType, this.timestamp, this.httpStatus, this.crawlerStatus, this.crawlerStatusDesc, this.headers, this.documentBodyBytes, this.hasCookies, this.lastModifiedMaybe, this.etagMaybe);
+        return new CrawledDocument(this.crawlId, this.url, this.contentType, this.timestamp, this.httpStatus, this.crawlerStatus, this.crawlerStatusDesc, this.headers, this.documentBodyBytes, this.hasCookies, this.requestTimeMs, this.lastModifiedMaybe, this.etagMaybe);
     }
     public String toString() {
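
A quick sketch of how the new field travels through the builder (hypothetical values; the static builder() accessor and the other setters aren't shown in this excerpt and are assumed):

    CrawledDocument doc = CrawledDocument.builder()
            .url("https://www.marginalia.nu/")   // assumed setter
            .httpStatus(200)                     // assumed setter
            .requestTimeMs(142)                  // new: fetch latency in milliseconds, -1 where unknown
            .build();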

View File

@@ -9,6 +9,7 @@ import nu.marginalia.parquet.crawldata.CrawledDocumentParquetRecord;
 import nu.marginalia.parquet.crawldata.CrawledDocumentParquetRecordFileReader;
 import nu.marginalia.slop.column.array.ByteArrayColumn;
 import nu.marginalia.slop.column.primitive.ByteColumn;
+import nu.marginalia.slop.column.primitive.IntColumn;
 import nu.marginalia.slop.column.primitive.LongColumn;
 import nu.marginalia.slop.column.primitive.ShortColumn;
 import nu.marginalia.slop.column.string.EnumColumn;
@@ -39,6 +40,7 @@ public record SlopCrawlDataRecord(String domain,
     long timestamp,
     String contentType,
     byte[] body,
+    int requestTimeMs,
     String headers)
 {
 private static final EnumColumn domainColumn = new EnumColumn("domain", StandardCharsets.UTF_8, StorageType.ZSTD);
@@ -49,6 +51,7 @@ public record SlopCrawlDataRecord(String domain,
 private static final LongColumn timestampColumn = new LongColumn("timestamp");
 private static final EnumColumn contentTypeColumn = new EnumColumn("contentType", StandardCharsets.UTF_8);
 private static final ByteArrayColumn bodyColumn = new ByteArrayColumn("body", StorageType.ZSTD);
+private static final ShortColumn requestTimeColumn = new ShortColumn("requestTimeMs");
 private static final StringColumn headerColumn = new StringColumn("header", StandardCharsets.UTF_8, StorageType.ZSTD);
 public SlopCrawlDataRecord(CrawledDocumentParquetRecord parquetRecord) {
@@ -60,6 +63,7 @@ public record SlopCrawlDataRecord(String domain,
     parquetRecord.timestamp.toEpochMilli(),
     parquetRecord.contentType,
     parquetRecord.body,
+    -1,
     parquetRecord.headers
     );
 }
@@ -74,6 +78,7 @@ public record SlopCrawlDataRecord(String domain,
     date.toEpochMilli(),
     "x-marginalia/advisory;state=redirect",
     new byte[0],
+    -1,
     ""
     );
 }
@@ -87,6 +92,7 @@ public record SlopCrawlDataRecord(String domain,
     date.toEpochMilli(),
     "x-marginalia/advisory;state=error",
     errorStatus.getBytes(),
+    -1,
     ""
     );
 }
@@ -100,6 +106,7 @@ public record SlopCrawlDataRecord(String domain,
     date.toEpochMilli(),
     errorStatus,
     new byte[0],
+    -1,
     ""
     );
 }
@@ -158,11 +165,12 @@ public record SlopCrawlDataRecord(String domain,
     // and is used to store old responses from previous crawls; in this part of the logic
     // we treat them the same as a normal response
-    if (!filterResponse(uaString, response)) {
+    var filterStatus = filterResponse(uaString, response);
+    if (filterStatus.isRejected()) {
         continue;
     }
-    slopWriter.write(domain, response);
+    slopWriter.write(domain, filterStatus, response);
 } else if (record instanceof WarcXEntityRefused refused) {
     slopWriter.write(domain, refused);
 } else if (record instanceof Warcinfo warcinfo) {
@@ -187,25 +195,35 @@ public record SlopCrawlDataRecord(String domain,
     }
 }
+sealed interface ResponseFilterResult {
+    default boolean isRejected() { return false; }
+    record Accept() implements ResponseFilterResult {}
+    record AcceptWithContentType(String contentType) implements ResponseFilterResult {}
+    record AcceptIfPlainText(String contentType) implements ResponseFilterResult {}
+    record Reject() implements ResponseFilterResult {
+        @Override
+        public boolean isRejected() { return true; }
+    }
+}
 /** Return true if the WarcResponse should be excluded from conversion */
-private static boolean filterResponse(String uaString, WarcResponse response) throws IOException {
+private static ResponseFilterResult filterResponse(String uaString, WarcResponse response) throws IOException {
     // We don't want to store robots.txt files, as they are not
     // interesting for the analysis we want to do. This is important
     // since txt-files in general are interesting, and we don't want to
     // exclude them as a class.
-    if (response.targetURI().getPath().equals("/robots.txt")) {
-        return false;
+    String uriPath = response.targetURI().getPath();
+    if (uriPath.equals("/robots.txt")) {
+        return new ResponseFilterResult.Reject();
     }
     var headers = response.http().headers();
     var robotsTags = headers.all("X-Robots-Tag");
     if (!isXRobotsTagsPermitted(robotsTags, uaString)) {
-        return false;
+        return new ResponseFilterResult.Reject();
     }
     // Strip out responses with content types we aren't interested in
@@ -213,15 +231,29 @@ public record SlopCrawlDataRecord(String domain,
     String contentType = headers.first("Content-Type").orElse("text/plain").toLowerCase();
     if (!ContentTypes.isAccepted(contentType)) {
-        return false;
+        String contentTypeWithoutParams = StringUtils.substringBefore(contentType, ";");
+        // Some servers don't understand what a markdown file is
+        if (contentTypeWithoutParams.equals("application/octet-stream")) {
+            if (uriPath.endsWith(".md")) {
+                // This is a markdown file, which we want to keep
+                return new ResponseFilterResult.AcceptIfPlainText("text/markdown");
+            }
+            else if (uriPath.endsWith(".pdf")) {
+                // This is a text file, which we want to keep
+                return new ResponseFilterResult.AcceptWithContentType("application/pdf");
+            }
+        }
+        return new ResponseFilterResult.Reject();
     }
     // If the format is binary, we don't want to translate it if the response is truncated
     if (response.truncated() != WarcTruncationReason.NOT_TRUNCATED && ContentTypes.isBinary(contentType)) {
-        return false;
+        return new ResponseFilterResult.Reject();
     }
-    return true;
+    return new ResponseFilterResult.Accept();
 }
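
Since ResponseFilterResult is sealed, consumers can pattern-match over it exhaustively, which is what the Writer further down does. A minimal sketch of that pattern (mine, assuming the Java 21 record patterns already used elsewhere in this diff):

    static String resolveContentType(ResponseFilterResult status, String original) {
        return switch (status) {
            case ResponseFilterResult.Accept() -> original;
            case ResponseFilterResult.AcceptWithContentType(String ct) -> ct;
            case ResponseFilterResult.AcceptIfPlainText(String ct) -> ct; // caller still validates the body as UTF-8
            case ResponseFilterResult.Reject() -> throw new IllegalStateException("rejected records are skipped earlier");
        };
    }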
 /** Check X-Robots-Tag header tag to see if we are allowed to index this page.
@@ -277,7 +309,8 @@ public record SlopCrawlDataRecord(String domain,
     try (var table = new SlopTable(path)) {
         ShortColumn.Reader statusReader = statusColumn.open(table);
         while (statusReader.hasRemaining()) {
-            if (statusReader.get() == 200) {
+            int status = statusReader.get();
+            if (status == 200 || status == 206) {
                 cnt++;
             }
         }
@@ -295,6 +328,7 @@ public record SlopCrawlDataRecord(String domain,
     private final LongColumn.Writer timestampColumnWriter;
     private final EnumColumn.Writer contentTypeColumnWriter;
     private final ByteArrayColumn.Writer bodyColumnWriter;
+    private final ShortColumn.Writer requestTimeColumnWriter;
     private final StringColumn.Writer headerColumnWriter;
     public Writer(Path path) throws IOException {
@@ -308,6 +342,7 @@ public record SlopCrawlDataRecord(String domain,
         timestampColumnWriter = timestampColumn.create(this);
         contentTypeColumnWriter = contentTypeColumn.create(this);
         bodyColumnWriter = bodyColumn.create(this);
+        requestTimeColumnWriter = requestTimeColumn.create(this);
         headerColumnWriter = headerColumn.create(this);
     }
@@ -320,10 +355,11 @@ public record SlopCrawlDataRecord(String domain,
         timestampColumnWriter.put(record.timestamp);
         contentTypeColumnWriter.put(record.contentType);
         bodyColumnWriter.put(record.body);
+        requestTimeColumnWriter.put((short) record.requestTimeMs);
         headerColumnWriter.put(record.headers);
     }
-    public void write(String domain, WarcResponse response) throws IOException {
+    public void write(String domain, ResponseFilterResult filterStatus, WarcResponse response) throws IOException {
         HttpFetchResult result = HttpFetchResult.importWarc(response);
         if (!(result instanceof HttpFetchResult.ResultOk fetchOk)) {
@@ -346,14 +382,39 @@ public record SlopCrawlDataRecord(String domain,
         contentType = "";
     }
+    switch (filterStatus) {
+        case ResponseFilterResult.AcceptWithContentType(String ct) -> contentType = ct;
+        case ResponseFilterResult.AcceptIfPlainText(String ct) -> {
+            try {
+                // Validate the body as UTF-8; this needs a strict decoder, since
+                // new String(bytes, UTF_8) replaces malformed input instead of throwing
+                StandardCharsets.UTF_8.newDecoder().decode(java.nio.ByteBuffer.wrap(bodyBytes));
+                contentType = ct;
+            }
+            catch (java.nio.charset.CharacterCodingException ex) { // UTF-8 decoding failed
+                return;
+            }
+        }
+        default -> {}
+    }
     boolean hasCookies = false;
     String headersStr;
     StringJoiner headersStrBuilder = new StringJoiner("\n");
+    int requestTimeMs = -1;
     for (var header : headers) {
         if (header.getName().equalsIgnoreCase("X-Cookies") && "1".equals(header.getValue())) {
             hasCookies = true;
         }
+        if (header.getName().equals("X-Marginalia-Response-Time")) {
+            try {
+                requestTimeMs = Integer.parseInt(header.getValue());
+            }
+            catch (NumberFormatException ex) {
+                logger.warn("Failed to parse X-Marginalia-Response-Time header: {}", header.getValue());
+            }
+            continue;
+        }
         headersStrBuilder.add(header.getName() + ": " + header.getValue());
     }
     headersStr = headersStrBuilder.toString();
@@ -368,6 +429,7 @@ public record SlopCrawlDataRecord(String domain,
         response.date().toEpochMilli(),
         contentType,
         bodyBytes,
+        requestTimeMs,
         headersStr
     )
 );
@@ -420,6 +482,7 @@ public record SlopCrawlDataRecord(String domain,
     private final LongColumn.Reader timestampColumnReader;
     private final EnumColumn.Reader contentTypeColumnReader;
     private final ByteArrayColumn.Reader bodyColumnReader;
+    private final ShortColumn.Reader requestTimeColumnReader;
     private final StringColumn.Reader headerColumnReader;
     public Reader(Path path) throws IOException {
@@ -434,6 +497,17 @@ public record SlopCrawlDataRecord(String domain,
         contentTypeColumnReader = contentTypeColumn.open(this);
         bodyColumnReader = bodyColumn.open(this);
         headerColumnReader = headerColumn.open(this);
+        // FIXME: After 2025-06-XX, we can remove this migration workaround
+        ShortColumn.Reader timeColumnReader;
+        try {
+            timeColumnReader = requestTimeColumn.open(this);
+        }
+        catch (Exception ex) {
+            // Migration workaround
+            timeColumnReader = null;
+        }
+        requestTimeColumnReader = timeColumnReader;
     }
     public SlopCrawlDataRecord get() throws IOException {
@@ -446,6 +520,7 @@ public record SlopCrawlDataRecord(String domain,
         timestampColumnReader.get(),
         contentTypeColumnReader.get(),
         bodyColumnReader.get(),
+        requestTimeColumnReader != null ? requestTimeColumnReader.get() : -1,
         headerColumnReader.get()
     );
 }
@@ -465,6 +540,7 @@ public record SlopCrawlDataRecord(String domain,
     private final LongColumn.Reader timestampColumnReader;
     private final EnumColumn.Reader contentTypeColumnReader;
     private final ByteArrayColumn.Reader bodyColumnReader;
+    private final ShortColumn.Reader requestTimeColumnReader;
     private final StringColumn.Reader headerColumnReader;
     private SlopCrawlDataRecord next = null;
@@ -481,6 +557,17 @@ public record SlopCrawlDataRecord(String domain,
         contentTypeColumnReader = contentTypeColumn.open(this);
         bodyColumnReader = bodyColumn.open(this);
         headerColumnReader = headerColumn.open(this);
+        // FIXME: After 2025-06-XX, we can remove this migration workaround
+        ShortColumn.Reader timeColumnReader;
+        try {
+            timeColumnReader = requestTimeColumn.open(this);
+        }
+        catch (Exception ex) {
+            // Migration workaround
+            timeColumnReader = null;
+        }
+        requestTimeColumnReader = timeColumnReader;
     }
     public abstract boolean filter(String url, int status, String contentType);
@@ -507,6 +594,7 @@ public record SlopCrawlDataRecord(String domain,
     boolean cookies = cookiesColumnReader.get() == 1;
     int status = statusColumnReader.get();
     long timestamp = timestampColumnReader.get();
+    int requestTimeMs = requestTimeColumnReader != null ? requestTimeColumnReader.get() : -1;
     String contentType = contentTypeColumnReader.get();
     LargeItem<byte[]> body = bodyColumnReader.getLarge();
@@ -514,7 +602,7 @@ public record SlopCrawlDataRecord(String domain,
     if (filter(url, status, contentType)) {
         next = new SlopCrawlDataRecord(
-            domain, url, ip, cookies, status, timestamp, contentType, body.get(), headers.get()
+            domain, url, ip, cookies, status, timestamp, contentType, body.get(), requestTimeMs, headers.get()
         );
         return true;
     }
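
One storage detail worth spelling out (my observation, not commit text): requestTimeMs is an int on the record but is persisted through a ShortColumn, so the writer's (short) cast silently wraps any fetch time above Short.MAX_VALUE milliseconds, and readers of pre-migration files substitute -1:

    int requestTimeMs = 40_000;            // a 40-second fetch...
    short stored = (short) requestTimeMs;  // ...wraps to -25536 when written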

View File

@@ -117,6 +117,100 @@ class CrawlerRetreiverTest {
         }
     }
+    @Test
+    public void verifyFileFormatSupport() throws IOException {
+        List<String> urls = List.of(
+                "https://www.marginalia.nu/junk/test.pdf",
+                "https://www.marginalia.nu/junk/test.md"
+        );
+        var specs = CrawlerMain.CrawlSpecRecord
+                .builder()
+                .crawlDepth(5)
+                .domain("www.marginalia.nu")
+                .urls(urls)
+                .build();
+        Path tempFile = null;
+        Path slopFile = null;
+        try {
+            tempFile = Files.createTempFile("crawling-process", "warc");
+            slopFile = Files.createTempFile("crawling-process", ".slop.zip");
+            doCrawl(tempFile, specs);
+            Set<String> requests = new HashSet<>();
+            Set<String> responses = new HashSet<>();
+            // Inspect the WARC file
+            try (var reader = new WarcReader(tempFile)) {
+                reader.forEach(record -> {
+                    if (record instanceof WarcRequest req) {
+                        requests.add(req.target());
+                        System.out.println(req.type() + ":" + req.target());
+                    }
+                    else if (record instanceof WarcResponse rsp) {
+                        responses.add(rsp.target());
+                        try {
+                            System.out.println(rsp.type() + ":" + rsp.target() + ":" + rsp.http().contentType());
+                        } catch (IOException e) {
+                            throw new RuntimeException(e);
+                        }
+                    }
+                    else {
+                        System.out.println(record.type());
+                    }
+                });
+            }
+            for (var url : urls) {
+                assertTrue(requests.contains(url), "Should have requested " + url);
+            }
+            assertEquals(requests, responses);
+            // Convert the WARC file to a Slop file
+            SlopCrawlDataRecord
+                    .convertWarc("www.marginalia.nu", new UserAgent("test.marginalia.nu", "test.marginalia.nu"), tempFile, slopFile);
+            CrawledDomain domain = null;
+            Map<String, CrawledDocument> documents = new HashMap<>();
+            // Extract the contents of the Slop file
+            try (var stream = SerializableCrawlDataStream.openDataStream(slopFile)) {
+                while (stream.hasNext()) {
+                    var doc = stream.next();
+                    if (doc instanceof CrawledDomain dr) {
+                        assertNull(domain);
+                        domain = dr;
+                    }
+                    else if (doc instanceof CrawledDocument dc) {
+                        System.out.println(dc.url + "\t" + dc.crawlerStatus + "\t" + dc.httpStatus);
+                        documents.put(dc.url, dc);
+                    }
+                }
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+            for (var url : urls) {
+                // Verify we have the downloaded files in the Slop file
+                assertNotNull(domain);
+                var fetchedDoc = documents.get(url);
+                assertNotNull(fetchedDoc, "Should have a document for " + url);
+                assertEquals(url, fetchedDoc.url);
+                assertTrue(fetchedDoc.httpStatus == 200 || fetchedDoc.httpStatus == 206, "Should be 200 or 206 for " + url);
+                assertTrue(fetchedDoc.documentBodyBytes.length > 32, "Should have a body for " + url);
+            }
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        } finally {
+            if (tempFile != null)
+                Files.deleteIfExists(tempFile);
+            if (slopFile != null)
+                Files.deleteIfExists(slopFile);
+        }
+    }
     @Test
     public void testWarcOutputNoKnownUrls() throws IOException {
         var specs = CrawlerMain.CrawlSpecRecord

View File

@@ -1,6 +1,7 @@
 package nu.marginalia.extractor;
 import com.google.inject.Inject;
+import nu.marginalia.process.control.ProcessHeartbeat;
 import nu.marginalia.process.log.WorkLog;
 import nu.marginalia.process.log.WorkLogEntry;
 import nu.marginalia.slop.SlopCrawlDataRecord;
@@ -20,17 +21,18 @@ import java.nio.file.StandardCopyOption;
 import java.nio.file.StandardOpenOption;
 import java.nio.file.attribute.PosixFilePermissions;
 import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
+import java.util.*;
 public class SampleDataExporter {
     private final FileStorageService storageService;
+    private final ProcessHeartbeat processHeartbeat;
     @Inject
-    public SampleDataExporter(FileStorageService storageService) {
+    public SampleDataExporter(FileStorageService storageService, ProcessHeartbeat processHeartbeat) {
         this.storageService = storageService;
+        this.processHeartbeat = processHeartbeat;
     }
     public void export(FileStorageId crawlId, FileStorageId destId, int size, String ctFilter, String name) throws SQLException, IOException {
         FileStorage destStorage = storageService.getStorage(destId);
         Path inputDir = storageService.getStorage(crawlId).asPath();
@@ -59,12 +61,6 @@ public class SampleDataExporter {
     Path newCrawlerLogFile = Files.createTempFile(destStorage.asPath(), "crawler", ".log",
             PosixFilePermissions.asFileAttribute(PosixFilePermissions.fromString("rw-r--r--")));
-    try (var bw = Files.newBufferedWriter(newCrawlerLogFile, StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING)) {
-        for (var item : entriesAll) {
-            bw.write(item.id() + " " + item.ts() + " " + item.relPath() + " " + item.cnt() + "\n");
-        }
-    }
     Path newManifestJsonFile = Files.createTempFile(destStorage.asPath(), "manifest", ".json",
             PosixFilePermissions.asFileAttribute(PosixFilePermissions.fromString("rw-r--r--")));
     Files.writeString(newManifestJsonFile, " { \"description\": \"" + name.replace("[\"\\]", "_") + "\",\n \"type\": \"CRAWL_DATA\" }\n");
@@ -72,29 +68,38 @@ public class SampleDataExporter {
     var tmpTarFile = Files.createTempFile(destStorage.asPath(), "data", ".tar",
             PosixFilePermissions.asFileAttribute(PosixFilePermissions.fromString("rw-r--r--")));
-    try (var stream = new TarArchiveOutputStream(Files.newOutputStream(tmpTarFile, StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING))) {
-        for (var item : entriesAll) {
+    try (var stream = new TarArchiveOutputStream(Files.newOutputStream(tmpTarFile, StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING));
+         var logWriter = Files.newBufferedWriter(newCrawlerLogFile, StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
+         var hb = processHeartbeat.createAdHocTaskHeartbeat("Generating Sample")
+    ) {
+        for (var item : hb.wrap("Scanning", entriesAll)) {
             Path crawlDataPath = inputDir.resolve(item.relPath());
             if (!Files.exists(crawlDataPath)) continue;
             if (StringUtils.isBlank(ctFilter)) {
                 addFileToTar(stream, crawlDataPath, item.relPath());
+                logWriter.write(item.id() + " " + item.ts() + " " + item.relPath() + " " + item.cnt() + "\n");
             }
             else /* filter != null */ {
-                boolean didFilterData = false;
+                Path filteredData = null;
                 try {
-                    crawlDataPath = filterEntries(crawlDataPath, ctFilter);
-                    didFilterData = true;
-                    addFileToTar(stream, crawlDataPath, item.relPath());
+                    filteredData = filterEntries(crawlDataPath, ctFilter);
+                    addFileToTar(stream, filteredData, item.relPath());
+                    logWriter.write(item.id() + " " + item.ts() + " " + item.relPath() + " " + item.cnt() + "\n");
+                }
+                catch (NoSuchElementException ex) {
+                    // Ignore
                 }
                 finally {
-                    if (didFilterData) {
-                        Files.deleteIfExists(crawlDataPath);
+                    if (filteredData != null) {
+                        Files.deleteIfExists(filteredData);
                     }
                 }
             }
        }
+        logWriter.flush();
        addFileToTar(stream, newCrawlerLogFile, "crawler.log");
        addFileToTar(stream, newManifestJsonFile, "marginalia-manifest.json");
    }
@@ -106,34 +111,44 @@ public class SampleDataExporter {
     Files.move(tmpTarFile, destStorage.asPath().resolve("crawl-data.tar"), StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
 }
-/** Filters the entries in the crawl data file based on the content type.
- * @param crawlDataPath The path to the crawl data file.
- * @param contentTypeFilter The content type to filter by.
- * @return The path to the filtered crawl data file, or null if an error occurred.
- */
-private Path filterEntries(Path crawlDataPath, String contentTypeFilter) throws IOException {
+/** Filters the entries in the crawl data file based on the content type. */
+private Path filterEntries(Path crawlDataPath, String contentTypeFilter) throws IOException, NoSuchElementException {
     Path tempDir = crawlDataPath.resolveSibling(crawlDataPath.getFileName() + ".filtered");
     Path tempFile = crawlDataPath.resolveSibling(crawlDataPath.getFileName() + ".filtered.slop.zip");
+    // We may have debris from a previous run, so let's clean it up
+    if (Files.isDirectory(tempDir)) {
+        FileUtils.deleteDirectory(tempDir.toFile());
+    }
     Files.createDirectory(tempDir);
+    boolean wroteEntry = false;
     try (var writer = new SlopCrawlDataRecord.Writer(tempDir);
         var reader = new SlopCrawlDataRecord.FilteringReader(crawlDataPath) {
             @Override
             public boolean filter(String url, int status, String contentType) {
-                if (contentTypeFilter.equals(contentType))
-                    return true;
-                else if (contentType.startsWith("x-marginalia/"))
-                    // This is a metadata entry, typically domain or redirect information
-                    // let's keep those to not confuse the consumer of the data, which might
-                    // expect at least the domain summary
-                    return true;
-                return false;
+                return Objects.equals(StringUtils.substringBefore(contentType, ';'), contentTypeFilter)
+                        || contentType.startsWith("x-marginalia/"); // metadata records
            }
        }
    ) {
        while (reader.hasRemaining()) {
-           writer.write(reader.get());
+           var entry = reader.get();
+           writer.write(entry);
+           wroteEntry = wroteEntry || Objects.equals(StringUtils.substringBefore(entry.contentType(), ';'), contentTypeFilter);
        }
    }
+   catch (Exception ex) {
+       FileUtils.deleteDirectory(tempDir.toFile());
+       throw ex;
+   }
+   try {
+       if (!wroteEntry) {
+           throw new NoSuchElementException("No relevant entries");
+       }
        SlopTablePacker.packToSlopZip(tempDir, tempFile);

View File

@@ -195,6 +195,7 @@ public class LiveCrawlDataSet implements AutoCloseable {
     headers,
     body,
     false,
+    -1,
     "",
     ""
 ));

View File

@@ -7,6 +7,7 @@ import nu.marginalia.api.model.ApiSearchResultQueryDetails;
 import nu.marginalia.api.model.ApiSearchResults;
 import nu.marginalia.api.searchquery.QueryClient;
 import nu.marginalia.api.searchquery.RpcQueryLimits;
+import nu.marginalia.api.searchquery.model.query.NsfwFilterTier;
 import nu.marginalia.api.searchquery.model.query.QueryParams;
 import nu.marginalia.api.searchquery.model.query.SearchSetIdentifier;
 import nu.marginalia.api.searchquery.model.results.DecoratedSearchResultItem;
@@ -29,9 +30,10 @@ public class ApiSearchOperator {
 public ApiSearchResults query(String query,
                               int count,
-                              int index)
+                              int index,
+                              NsfwFilterTier filterTier)
 {
-    var rsp = queryClient.search(createParams(query, count, index));
+    var rsp = queryClient.search(createParams(query, count, index, filterTier));
     return new ApiSearchResults("RESTRICTED", query,
             rsp.results()
@@ -42,7 +44,7 @@ public class ApiSearchOperator {
             .collect(Collectors.toList()));
 }
-private QueryParams createParams(String query, int count, int index) {
+private QueryParams createParams(String query, int count, int index, NsfwFilterTier filterTier) {
     SearchSetIdentifier searchSet = selectSearchSet(index);
     return new QueryParams(
@@ -53,7 +55,8 @@ public class ApiSearchOperator {
             .setTimeoutMs(150)
             .setFetchSize(8192)
             .build(),
-            searchSet.name());
+            searchSet.name(),
+            filterTier);
 }
 private SearchSetIdentifier selectSearchSet(int index) {
@@ -90,6 +93,7 @@ public class ApiSearchOperator {
     url.getTitle(),
     url.getDescription(),
     sanitizeNaN(url.rankingScore, -100),
+    url.getShortFormat(),
     details
 );
 }

View File

@@ -6,6 +6,7 @@ import io.prometheus.client.Counter;
 import io.prometheus.client.Histogram;
 import nu.marginalia.api.model.ApiLicense;
 import nu.marginalia.api.model.ApiSearchResults;
+import nu.marginalia.api.searchquery.model.query.NsfwFilterTier;
 import nu.marginalia.api.svc.LicenseService;
 import nu.marginalia.api.svc.RateLimiterService;
 import nu.marginalia.api.svc.ResponseCache;
@@ -119,6 +120,16 @@ public class ApiService extends SparkService {
     int count = intParam(request, "count", 20);
     int index = intParam(request, "index", 3);
+    int nsfw = intParam(request, "nsfw", 1);
+    NsfwFilterTier nsfwFilterTier;
+    try {
+        nsfwFilterTier = NsfwFilterTier.fromCodedValue(nsfw);
+    }
+    catch (IllegalArgumentException e) {
+        Spark.halt(400, "Invalid nsfw parameter value");
+        return null; // Unreachable, but required to satisfy the compiler
+    }
     logger.info(queryMarker, "{} Search {}", license.key, query);
@@ -126,7 +137,7 @@ public class ApiService extends SparkService {
     .labels(license.key)
     .time(() ->
         searchOperator
-            .query(query, count, index)
+            .query(query, count, index, nsfwFilterTier)
             .withLicense(license.getLicense())
     );
 }
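
NsfwFilterTier itself isn't shown in this diff. A hypothetical reconstruction consistent with the call sites (an int-coded factory that throws IllegalArgumentException on unknown values; only the OFF and DANGER constants are visible in these hunks, and the coded values below are assumptions):

    public enum NsfwFilterTier {
        OFF(0),
        DANGER(1);

        private final int codedValue;

        NsfwFilterTier(int codedValue) {
            this.codedValue = codedValue;
        }

        public static NsfwFilterTier fromCodedValue(int code) {
            for (NsfwFilterTier tier : values()) {
                if (tier.codedValue == code) return tier; // hypothetical mapping
            }
            throw new IllegalArgumentException("Unknown nsfw filter tier: " + code);
        }
    }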

View File

@@ -8,14 +8,16 @@ public class ApiSearchResult {
     public String title;
     public String description;
     public double quality;
+    public String format; // "pdf", "html", "text", etc.
     public List<List<ApiSearchResultQueryDetails>> details = new ArrayList<>();
-    public ApiSearchResult(String url, String title, String description, double quality, List<List<ApiSearchResultQueryDetails>> details) {
+    public ApiSearchResult(String url, String title, String description, double quality, String format, List<List<ApiSearchResultQueryDetails>> details) {
         this.url = url;
         this.title = title;
         this.description = description;
         this.quality = quality;
+        this.format = format;
         this.details = details;
     }

View File

@@ -2,6 +2,7 @@ package nu.marginalia.search;
 import nu.marginalia.api.searchquery.RpcQueryLimits;
 import nu.marginalia.api.searchquery.RpcTemporalBias;
+import nu.marginalia.api.searchquery.model.query.NsfwFilterTier;
 import nu.marginalia.api.searchquery.model.query.QueryParams;
 import nu.marginalia.api.searchquery.model.query.SearchQuery;
 import nu.marginalia.api.searchquery.model.query.SearchSetIdentifier;
@@ -52,6 +53,7 @@ public class SearchQueryParamFactory {
     profile.searchSetIdentifier.name(),
     userParams.strategy(),
     userParams.temporalBias(),
+    userParams.filterTier(),
     userParams.page()
 );
@@ -78,6 +80,7 @@ public class SearchQueryParamFactory {
     SearchSetIdentifier.NONE.name(),
     QueryStrategy.AUTO,
     RpcTemporalBias.Bias.NONE,
+    NsfwFilterTier.OFF,
     1
 );
 }
@@ -98,6 +101,7 @@ public class SearchQueryParamFactory {
     SearchSetIdentifier.NONE.name(),
     QueryStrategy.AUTO,
     RpcTemporalBias.Bias.NONE,
+    NsfwFilterTier.DANGER,
     1
 );
 }
@@ -118,6 +122,7 @@ public class SearchQueryParamFactory {
     SearchSetIdentifier.NONE.name(),
     QueryStrategy.AUTO,
     RpcTemporalBias.Bias.NONE,
+    NsfwFilterTier.DANGER,
     1
 );
 }

View File

@@ -2,6 +2,7 @@ package nu.marginalia.search.command;
 import nu.marginalia.WebsiteUrl;
 import nu.marginalia.api.searchquery.RpcTemporalBias;
+import nu.marginalia.api.searchquery.model.query.NsfwFilterTier;
 import nu.marginalia.index.query.limit.QueryStrategy;
 import nu.marginalia.index.query.limit.SpecificationLimit;
 import nu.marginalia.search.model.SearchProfile;
@@ -23,6 +24,10 @@ public record SearchParameters(String query,
     int page
 ) {
+    public NsfwFilterTier filterTier() {
+        return NsfwFilterTier.DANGER;
+    }
     public SearchParameters(String queryString, Request request) {
         this(
             queryString,

View File

@@ -73,6 +73,8 @@ public class UrlDetails implements Comparable<UrlDetails> {
         return "HTML 5";
     case "PLAIN":
         return "Plain Text";
+    case "PDF":
+        return "PDF";
     default:
         return "?";
 }

View File

@@ -61,7 +61,7 @@ public class UrlDeduplicator {
 private boolean limitResultsPerDomain(DecoratedSearchResultItem details) {
     final var domain = details.getUrl().getDomain();
-    final String key = domain.getDomainKey();
+    final String key = domain.toString();
     return keyCount.adjustOrPutValue(key, 1, 1) <= resultsPerKey;
 }

View File

@@ -2,6 +2,7 @@ package nu.marginalia.search;
 import nu.marginalia.api.searchquery.RpcQueryLimits;
 import nu.marginalia.api.searchquery.RpcTemporalBias;
+import nu.marginalia.api.searchquery.model.query.NsfwFilterTier;
 import nu.marginalia.api.searchquery.model.query.QueryParams;
 import nu.marginalia.api.searchquery.model.query.SearchQuery;
 import nu.marginalia.api.searchquery.model.query.SearchSetIdentifier;
@@ -17,7 +18,7 @@ public class SearchQueryParamFactory {
 static final RpcQueryLimits defaultLimits = RpcQueryLimits.newBuilder()
     .setResultsTotal(100)
     .setResultsByDomain(5)
-    .setTimeoutMs(200)
+    .setTimeoutMs(250)
     .setFetchSize(8192)
     .build();
@@ -53,6 +54,7 @@ public class SearchQueryParamFactory {
     profile.searchSetIdentifier.name(),
     userParams.strategy(),
     userParams.temporalBias(),
+    userParams.filterTier(),
     userParams.page()
 );
@@ -79,6 +81,7 @@ public class SearchQueryParamFactory {
     SearchSetIdentifier.NONE.name(),
     QueryStrategy.AUTO,
     RpcTemporalBias.Bias.NONE,
+    NsfwFilterTier.OFF,
     page
 );
 }
@@ -99,6 +102,7 @@ public class SearchQueryParamFactory {
     SearchSetIdentifier.NONE.name(),
     QueryStrategy.AUTO,
     RpcTemporalBias.Bias.NONE,
+    NsfwFilterTier.DANGER,
     page
 );
 }
@@ -119,6 +123,7 @@ public class SearchQueryParamFactory {
     SearchSetIdentifier.NONE.name(),
     QueryStrategy.AUTO,
     RpcTemporalBias.Bias.NONE,
+    NsfwFilterTier.DANGER,
     1
 );
 }

View File

@@ -23,7 +23,7 @@ public class SearchResultClusterer {
 }
 /** No clustering, just return the results as is */
-private static List<ClusteredUrlDetails> noOp(List<UrlDetails> results, int total) {
+public static List<ClusteredUrlDetails> noOp(List<UrlDetails> results, int total) {
     if (results.isEmpty())
         return List.of();

View File

@@ -18,6 +18,7 @@ import nu.marginalia.service.server.JoobyService;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import java.nio.charset.StandardCharsets;
 import java.util.List;
 import java.util.NoSuchElementException;
@@ -41,6 +42,8 @@ public class SearchService extends JoobyService {
     .help("Search service error count")
     .register();
+private final String openSearchXML;
 @Inject
 public SearchService(BaseServiceParams params,
                      WebsiteUrl websiteUrl,
@@ -69,6 +72,13 @@ public class SearchService extends JoobyService {
     this.siteSubscriptionService = siteSubscriptionService;
     this.faviconClient = faviconClient;
     this.domainQueries = domainQueries;
+    try (var is = ClassLoader.getSystemResourceAsStream("static/opensearch.xml")) {
+        openSearchXML = new String(is.readAllBytes(), StandardCharsets.UTF_8);
+    }
+    catch (Exception e) {
+        throw new RuntimeException("Failed to load OpenSearch XML", e);
+    }
 }
 @Override
@@ -82,10 +92,14 @@ public class SearchService extends JoobyService {
     jooby.get("/site/https://*", this::handleSiteUrlRedirect);
     jooby.get("/site/http://*", this::handleSiteUrlRedirect);
+    jooby.get("/opensearch.xml", ctx -> {
+        ctx.setResponseType(MediaType.valueOf("application/opensearchdescription+xml"));
+        return openSearchXML;
+    });
     String emptySvg = "<svg xmlns=\"http://www.w3.org/2000/svg\"></svg>";
     jooby.get("/site/{domain}/favicon", ctx -> {
         String domain = ctx.path("domain").value();
-        logger.info("Finding icon for domain {}", domain);
         try {
             DbDomainQueries.DomainIdWithNode domainIdWithNode = domainQueries.getDomainIdWithNode(new EdgeDomain(domain));
             var faviconMaybe = faviconClient.getFavicon(domain, domainIdWithNode.nodeAffinity());

View File

@@ -2,6 +2,7 @@ package nu.marginalia.search.command;
 import nu.marginalia.WebsiteUrl;
 import nu.marginalia.api.searchquery.RpcTemporalBias;
+import nu.marginalia.api.searchquery.model.query.NsfwFilterTier;
 import nu.marginalia.index.query.limit.QueryStrategy;
 import nu.marginalia.index.query.limit.SpecificationLimit;
 import nu.marginalia.model.EdgeDomain;
@@ -24,6 +25,10 @@ public record SearchParameters(WebsiteUrl url,
     int page
 ) {
+    public NsfwFilterTier filterTier() {
+        return NsfwFilterTier.DANGER;
+    }
     public static SearchParameters defaultsForQuery(WebsiteUrl url, String query, int page) {
         return new SearchParameters(
             url,

View File

@@ -78,6 +78,8 @@ public class UrlDetails implements Comparable<UrlDetails> {
         return "HTML 5";
     case "PLAIN":
         return "Plain Text";
+    case "PDF":
+        return "PDF";
     default:
         return "?";
 }
@@ -92,13 +94,24 @@ public class UrlDetails implements Comparable<UrlDetails> {
 public String displayTitle() {
     StringBuilder sb = new StringBuilder();
+    buildDisplayTitle(sb, title);
+    if (sb.isEmpty()) {
+        buildDisplayTitle(sb, url.toDisplayString());
+    }
+    return sb.toString();
+}
+private void buildDisplayTitle(StringBuilder sb, String str) {
     int distSinceBreak = 0;
     char c = ' ';
     int prevC = ' ';
-    for (int i = 0; i < title.length(); i++) {
+    for (int i = 0; i < str.length(); i++) {
         prevC = c;
-        c = title.charAt(i);
+        c = str.charAt(i);
         if (Character.isSpaceChar(c)) {
             distSinceBreak = 0;
@@ -135,8 +148,6 @@ public class UrlDetails implements Comparable<UrlDetails> {
         sb.append(c);
     }
 }
-    return sb.toString();
 }
/** Helper that inserts hyphenation hints and escapes /** Helper that inserts hyphenation hints and escapes

View File

@@ -25,6 +25,7 @@ public class UrlDeduplicator {
 }
 public boolean shouldRemove(DecoratedSearchResultItem details) {
     if (!deduplicateOnSuperficialHash(details))
         return true;
     if (!deduplicateOnLSH(details))
@@ -61,7 +62,7 @@ public class UrlDeduplicator {
 private boolean limitResultsPerDomain(DecoratedSearchResultItem details) {
     final var domain = details.getUrl().getDomain();
-    final String key = domain.getDomainKey();
+    final String key = domain.toString();
     return keyCount.adjustOrPutValue(key, 1, 1) <= resultsPerKey;
 }

View File

@@ -21,6 +21,9 @@
     </h2>
     <div class="text-sm mt-1">
+        @if ("PDF".equals(result.first.format))
+            <i title="PDF" class="fas fa-file-pdf text-red-500"></i>
+        @endif
         <a class="text-liteblue dark:text-blue-200 underline break-all" href="${result.first.url.toString()}"
            rel="noopener noreferrer" tabindex="-1">$unsafe{result.first.displayUrl()}</a>
     </div>
@@ -53,10 +56,13 @@
     <div class="flex mt-2 text-sm flex flex-col space-y-2">
         <p class="text-black dark:text-white ${result.colorScheme.backgroundColor} p-1 rounded break-words hyphens-auto">Also from ${result.getDomain().toString()}:</p>
-        <ul class="pl-2 mt-2 underline text-liteblue dark:text-blue-200">
+        <ul class="pl-2 mt-2 text-liteblue dark:text-blue-200">
             @for(UrlDetails item : result.rest)
                 <li class="-indent-4 pl-4 mb-1 break-words hyphens-auto">
-                    <a href="${item.url.toString()}" rel="noopener noreferrer">$unsafe{item.displayTitle()}</a>
+                    @if ("PDF".equals(item.format))
+                        <i title="PDF" class="fas fa-file-pdf text-red-500"></i>
+                    @endif
+                    <a href="${item.url.toString()}" class="underline" rel="noopener noreferrer">$unsafe{item.displayTitle()}</a>
                 </li>
             @endfor
         </ul>
@@ -74,6 +80,9 @@
     @if (DocumentFlags.PlainText.isPresent(result.getFirst().resultItem.encodedDocMetadata))
         <span class="px-1 bg-blue-100 text-blue-700 dark:border dark:border-blue-600 dark:text-blue-400 dark:bg-black rounded">Plain text</span>
     @endif
+    @if (DocumentFlags.PdfFile.isPresent(result.getFirst().resultItem.encodedDocMetadata))
+        <span class="px-1 bg-blue-100 text-blue-700 dark:border dark:border-blue-600 dark:text-blue-400 dark:bg-black rounded">PDF File</span>
+    @endif
     @if (DocumentFlags.GeneratorForum.isPresent(result.getFirst().resultItem.encodedDocMetadata))
         <span class="px-1 bg-blue-100 text-blue-700 dark:border dark:border-blue-600 dark:text-blue-400 dark:bg-black rounded">Forum</span>
     @endif

View File

@@ -5,6 +5,7 @@ import com.google.inject.Inject;
 import io.jooby.Context;
 import io.jooby.Jooby;
 import nu.marginalia.assistant.suggest.Suggestions;
+import nu.marginalia.domsample.DomSampleService;
 import nu.marginalia.functions.domains.DomainInfoGrpcService;
 import nu.marginalia.functions.math.MathGrpcService;
 import nu.marginalia.livecapture.LiveCaptureGrpcService;
@@ -30,6 +31,7 @@ public class AssistantService extends JoobyService {
     ScreenshotService screenshotService,
     DomainInfoGrpcService domainInfoGrpcService,
     LiveCaptureGrpcService liveCaptureGrpcService,
+    DomSampleService domSampleService,
     FeedsGrpcService feedsGrpcService,
     MathGrpcService mathGrpcService,
     Suggestions suggestions)
@@ -41,10 +43,11 @@ public class AssistantService extends JoobyService {
         liveCaptureGrpcService,
         feedsGrpcService),
     List.of());
-    this.screenshotService = screenshotService;
+    this.screenshotService = screenshotService;
     this.suggestions = suggestions;
+    domSampleService.start();
 }
 public void startJooby(Jooby jooby) {

Some files were not shown because too many files have changed in this diff.