mirror of https://github.com/MarginaliaSearch/MarginaliaSearch.git synced 2025-10-06 17:32:39 +02:00

Compare commits


6 Commits

Author SHA1 Message Date
Viktor Lofgren
fd5af01629 (sample) Ensure we flush the log before adding it to the tar file 2025-05-06 14:43:47 +02:00
Viktor Lofgren
d4c43c7a79 (crawler) Test case for fetching PDFs 2025-05-06 13:45:16 +02:00
Viktor Lofgren
18700e1919 (sample) Fix bug where slop files would not be saved despite containing data 2025-05-06 13:38:21 +02:00
Viktor Lofgren
120b431998 (crawler) Fix outdated assumptions about content types and http status codes always being 200 when good.
We now sometimes get 206 when good.
2025-05-06 13:18:30 +02:00
Viktor Lofgren
71dad99326 (crawler) Revisitor should not demand a 200, but support a 206 as well 2025-05-06 13:11:52 +02:00
Viktor Lofgren
c1e8afdf86 (crawler) Remove domains from pending crawl tasks queue when retrying 2025-05-06 12:56:30 +02:00
8 changed files with 95 additions and 36 deletions

View File

@@ -67,8 +67,6 @@ dependencies {
    testImplementation libs.mockito
    testImplementation libs.wiremock
    testImplementation project(':code:processes:test-data')
}

View File

@@ -448,13 +448,7 @@ public class CrawlerMain extends ProcessMainClass {
// We don't have a lock, so we can't run this task
// we return to avoid blocking the pool for too long
if (lock.isEmpty()) {
-     if (retryQueue.remainingCapacity() > 0) {
-         // Sleep a moment to avoid busy looping via the retry queue
-         // in the case when few tasks remain and almost all are ineligible for
-         // immediate restart
-         Thread.sleep(5);
-     }
+     pendingCrawlTasks.remove(domain);
      retryQueue.put(this);
      return;
}
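This hunk does two things: it drops the speculative 5 ms sleep that tried to dampen busy-looping through the retry queue, and it removes the domain from pendingCrawlTasks before the task is re-queued, so a retried domain is no longer counted as pending. A minimal sketch of the resulting pattern, with illustrative class and field names rather than Marginalia's actual types:

import java.util.Set;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;

// Hedged sketch of the re-queue pattern; CrawlTask here is a stand-in.
class RetrySketch {
    record CrawlTask(String domain) {}

    private final Set<String> pendingCrawlTasks = ConcurrentHashMap.newKeySet();
    private final BlockingQueue<CrawlTask> retryQueue = new ArrayBlockingQueue<>(1024);

    void defer(CrawlTask task) throws InterruptedException {
        // Un-mark the domain as pending *before* parking the task on the
        // retry queue, so the bookkeeping reflects that it awaits a retry.
        pendingCrawlTasks.remove(task.domain());
        retryQueue.put(task);
    }
}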

View File

@@ -74,7 +74,7 @@ public class CrawlerRevisitor {
// If the reference document is empty or the HTTP status is not 200, we'll skip it since it's
// unlikely to produce anything meaningful for us.
- if (doc.httpStatus != 200)
+ if (doc.httpStatus != 200 && doc.httpStatus != 206)
      continue;
if (!doc.hasBody())
    continue;
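HTTP 206 Partial Content is what a server returns when it honors a Range request, which can happen when the crawler fetches a large document such as a PDF; for revisit purposes it is as "good" as a 200. The condition repeated across these hunks amounts to a predicate like the following sketch (the helper name is ours; the project inlines the check):

// Illustrative predicate, not an actual Marginalia method.
static boolean isGoodStatus(int httpStatus) {
    return httpStatus == 200      // OK
        || httpStatus == 206;     // Partial Content, e.g. a honored Range request
}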

View File

@@ -58,7 +58,7 @@ public record DocumentWithReference(
if (null == doc)
    return ContentTags.empty();
- if (doc.documentBodyBytes.length == 0 || doc.httpStatus != 200)
+ if (doc.documentBodyBytes.length == 0 || (doc.httpStatus != 200 && doc.httpStatus != 206))
      return ContentTags.empty();

String lastmod = doc.getLastModified();

View File

@@ -1,5 +1,7 @@
package nu.marginalia;

+ import org.apache.commons.lang3.StringUtils;
import java.util.Set;

public class ContentTypes {
@@ -11,9 +13,9 @@ public class ContentTypes {
"text/plain");
public static boolean isAccepted(String contentTypeHeader) {
-     String lcHeader = contentTypeHeader.toLowerCase();
+     String lcHeader = StringUtils.substringBefore(contentTypeHeader.toLowerCase(), ';');
    for (var type : acceptedContentTypes) {
-         if (lcHeader.startsWith(type)) {
+         if (lcHeader.equals(type)) {
            return true;
        }
    }
@@ -21,7 +23,7 @@ public class ContentTypes {
}
public static boolean isBinary(String contentTypeHeader) {
-     String lcHeader = contentTypeHeader.toLowerCase();
+     String lcHeader = StringUtils.substringBefore(contentTypeHeader.toLowerCase(), ';');
    return lcHeader.startsWith("application/pdf");
}
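A Content-Type header frequently carries parameters after a semicolon, e.g. text/html; charset=utf-8, so comparing the raw header against a bare MIME type fails. commons-lang3's StringUtils.substringBefore truncates at the first separator, after which an exact equals comparison is safe, and stricter than the earlier startsWith, which could also match unintended subtypes. A small self-contained illustration (the example header value is ours):

import org.apache.commons.lang3.StringUtils;

public class ContentTypeNormalizationDemo {
    public static void main(String[] args) {
        String header = "Application/PDF; charset=UTF-8";
        // Lowercase, then cut off everything from the first semicolon.
        String normalized = StringUtils.substringBefore(header.toLowerCase(), ";");
        System.out.println(normalized);                            // application/pdf
        System.out.println(normalized.equals("application/pdf"));  // true
    }
}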

View File

@@ -277,7 +277,8 @@ public record SlopCrawlDataRecord(String domain,
try (var table = new SlopTable(path)) {
    ShortColumn.Reader statusReader = statusColumn.open(table);
    while (statusReader.hasRemaining()) {
-         if (statusReader.get() == 200) {
+         int status = statusReader.get();
+         if (status == 200 || status == 206) {
            cnt++;
        }
    }
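This counter appears to feed a has-any-good-records decision about the crawl data file; counting only 200s meant a file whose documents were all fetched as 206 Partial Content responses looked empty and could be discarded despite containing data. A toy illustration of the failure mode (the statuses array stands in for the slop file's status column):

import java.util.Arrays;

public class StatusCountDemo {
    public static void main(String[] args) {
        int[] statuses = {206, 206, 206}; // e.g. PDFs fetched via Range requests

        long before = Arrays.stream(statuses).filter(s -> s == 200).count();
        long after  = Arrays.stream(statuses).filter(s -> s == 200 || s == 206).count();

        System.out.println(before); // 0 -> file wrongly looks empty
        System.out.println(after);  // 3 -> file is kept
    }
}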

View File

@@ -117,6 +117,86 @@ class CrawlerRetreiverTest {
        }
    }

    @Test
    public void testWarcOutputPDF() throws IOException {
        var specs = CrawlerMain.CrawlSpecRecord
                .builder()
                .crawlDepth(5)
                .domain("www.marginalia.nu")
                .urls(List.of("https://www.marginalia.nu/junk/test.pdf"))
                .build();

        Path tempFile = null;
        Path slopFile = null;
        try {
            tempFile = Files.createTempFile("crawling-process", "warc");
            slopFile = Files.createTempFile("crawling-process", ".slop.zip");

            doCrawl(tempFile, specs);

            Set<String> requests = new HashSet<>();
            Set<String> responses = new HashSet<>();

            // Inspect the WARC file
            try (var reader = new WarcReader(tempFile)) {
                reader.forEach(record -> {
                    if (record instanceof WarcRequest req) {
                        requests.add(req.target());
                        System.out.println(req.type() + ":" + req.target());
                    }
                    else if (record instanceof WarcResponse rsp) {
                        responses.add(rsp.target());
                        System.out.println(rsp.type() + ":" + rsp.target());
                    }
                    else {
                        System.out.println(record.type());
                    }
                });
            }

            assertTrue(requests.contains("https://www.marginalia.nu/junk/test.pdf"));
            assertEquals(requests, responses);

            // Convert the WARC file to a Slop file
            SlopCrawlDataRecord
                    .convertWarc("www.marginalia.nu", new UserAgent("test.marginalia.nu", "test.marginalia.nu"), tempFile, slopFile);

            CrawledDomain domain = null;
            Map<String, CrawledDocument> documents = new HashMap<>();

            // Extract the contents of the Slop file
            try (var stream = SerializableCrawlDataStream.openDataStream(slopFile)) {
                while (stream.hasNext()) {
                    var doc = stream.next();
                    if (doc instanceof CrawledDomain dr) {
                        assertNull(domain);
                        domain = dr;
                    }
                    else if (doc instanceof CrawledDocument dc) {
                        System.out.println(dc.url + "\t" + dc.crawlerStatus + "\t" + dc.httpStatus);
                        documents.put(dc.url, dc);
                    }
                }
            } catch (Exception e) {
                throw new RuntimeException(e);
            }

            // Verify we have a PDF in the Slop file
            assertNotNull(domain);
            var pdfDoc = documents.get("https://www.marginalia.nu/junk/test.pdf");
            assertNotNull(pdfDoc);
            assertEquals("https://www.marginalia.nu/junk/test.pdf", pdfDoc.url);
            assertEquals(206, pdfDoc.httpStatus);
            assertTrue(pdfDoc.documentBodyBytes.length > 100);
        }
        finally {
            if (tempFile != null)
                Files.deleteIfExists(tempFile);
            if (slopFile != null)
                Files.deleteIfExists(slopFile);
        }
    }

    @Test
    public void testWarcOutputNoKnownUrls() throws IOException {
        var specs = CrawlerMain.CrawlSpecRecord

View File

import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.PosixFilePermissions;
import java.sql.SQLException;
- import java.util.ArrayList;
- import java.util.Collections;
- import java.util.List;
- import java.util.NoSuchElementException;
+ import java.util.*;

public class SampleDataExporter {
    private final FileStorageService storageService;
@@ -101,6 +98,8 @@ public class SampleDataExporter {
        }
    }

+     logWriter.flush();

      addFileToTar(stream, newCrawlerLogFile, "crawler.log");
      addFileToTar(stream, newManifestJsonFile, "marginalia-manifest.json");
}
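The added flush matters because addFileToTar, visible at the end of this diff, sizes the tar entry from the file on disk via entry.setSize(Files.size(file)); if buffered log lines have not yet reached the file, the entry is created too small and data goes missing from the archive. A sketch of the ordering constraint, assuming logWriter is a buffered writer over newCrawlerLogFile and with logLines and stream as placeholders:

// Assumed setup; the surrounding exporter code is not shown in full here.
try (var logWriter = Files.newBufferedWriter(newCrawlerLogFile)) {
    for (String line : logLines) {
        logWriter.write(line);
        logWriter.newLine();
    }
    logWriter.flush(); // ensure Files.size() sees every byte before archiving
    addFileToTar(stream, newCrawlerLogFile, "crawler.log");
}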
@@ -127,7 +126,7 @@ public class SampleDataExporter {
var reader = new SlopCrawlDataRecord.FilteringReader(crawlDataPath) {
    @Override
    public boolean filter(String url, int status, String contentType) {
-         return matchContentTypeHeaderWithMime(contentType, contentTypeFilter)
+         return Objects.equals(StringUtils.substringBefore(contentType, ';'), contentTypeFilter)
                || contentType.startsWith("x-marginalia/"); // metadata records
    }
}
@@ -137,7 +136,7 @@ public class SampleDataExporter {
var entry = reader.get();
writer.write(entry);

- wroteEntry = wroteEntry || contentTypeFilter.equals(entry.contentType());
+ wroteEntry = wroteEntry || Objects.equals(StringUtils.substringBefore(entry.contentType(), ';'), contentTypeFilter);
}
if (!wroteEntry) {
@@ -154,21 +153,6 @@ public class SampleDataExporter {
    return tempFile;
}

- private boolean matchContentTypeHeaderWithMime(String contentType, String mime) {
-     if (null == contentType) {
-         return false;
-     }
-     /* The content type header may have a charset or other parameters, so we need to
-      * check if the mime type is a prefix of the content type. */
-     int semicolonIndex = contentType.indexOf(';');
-     if (semicolonIndex >= 0) {
-         return contentType.substring(0, semicolonIndex).equals(mime);
-     }
-     return contentType.equals(mime);
- }

private void addFileToTar(TarArchiveOutputStream outputStream, Path file, String fileName) throws IOException {
    var entry = outputStream.createArchiveEntry(file.toFile(), fileName);
    entry.setSize(Files.size(file));