Mirror of https://github.com/MarginaliaSearch/MarginaliaSearch.git, synced 2025-10-06 07:32:38 +02:00

Compare commits: deploy-000...deploy-001 (11 commits)
895cee7004
4bb71b8439
e4a41f7dd1
69ad6287b1
41a59dcf45
94d4d2edb7
7ae19a92ba
56d14e56d7
a557c7ae7f
b66879ccb1
f1b7157ca2
ROADMAP.md (10 lines changed)

@@ -21,7 +21,7 @@ word n-grams known beforehand. This limits the ability to interpret longer quer
The positions mask should be supplemented or replaced with a more accurate (e.g.) gamma coded positions
list, as is the civilized way of doing this.

Completed with PR https://github.com/MarginaliaSearch/MarginaliaSearch/pull/99
Completed with PR [#99](https://github.com/MarginaliaSearch/MarginaliaSearch/pull/99)

## Hybridize crawler w/ Common Crawl data

@@ -41,6 +41,12 @@ The search engine has a bit of a problem showing spicy content mixed in with the
to have a way to filter this out. It's likely something like a URL blacklist (e.g. [UT1](https://dsi.ut-capitole.fr/blacklists/index_en.php) )
combined with naive bayesian filter would go a long way, or something more sophisticated...?

## Web Design Overhaul

The design is kinda clunky and hard to maintain, and needlessly outdated-looking.

In progress: PR [#127](https://github.com/MarginaliaSearch/MarginaliaSearch/pull/127) -- demo available at https://test.marginalia.nu/

## Additional Language Support

It would be desirable if the search engine supported more languages than English. This is partially about
@@ -56,7 +62,7 @@ it should be extended to all domains. It would also be interesting to offer sea
RSS data itself, or use the RSS set to feed a special live index that updates faster than the
main dataset.

Completed with PR [#122](https://github.com/MarginaliaSearch/MarginaliaSearch/pull/122)
Completed with PR [#122](https://github.com/MarginaliaSearch/MarginaliaSearch/pull/122) and PR [#125](https://github.com/MarginaliaSearch/MarginaliaSearch/pull/125)

## Support for binary formats like PDF
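A rough illustration of the "gamma coded positions list" idea from the first ROADMAP hunk above: term positions are stored as deltas between successive positions, and each delta is written with an Elias gamma code (a unary length prefix followed by the value's binary digits). This is only a sketch of the technique for readers unfamiliar with it -- the class, the method names, and the string-of-bits representation are invented for readability and are not the encoding the linked PR actually implements.

```java
import java.util.ArrayList;
import java.util.List;

/** Toy Elias gamma coding of a sorted term position list, stored as deltas.
 *  Bits are kept in a String for readability; a real index would pack them into words. */
public class GammaPositionsSketch {

    // Gamma code for n >= 1: (bitLength - 1) zeroes, then n in binary
    static void gammaEncode(int n, StringBuilder out) {
        int bitLength = 32 - Integer.numberOfLeadingZeros(n);
        out.append("0".repeat(bitLength - 1));
        out.append(Integer.toBinaryString(n));
    }

    /** Encode a strictly increasing positions list as gamma-coded deltas. */
    static String encodePositions(int[] positions) {
        StringBuilder out = new StringBuilder();
        int prev = 0;
        for (int pos : positions) {
            gammaEncode(pos - prev, out); // deltas are always >= 1
            prev = pos;
        }
        return out.toString();
    }

    /** Decode the bit string back into the original positions. */
    static List<Integer> decodePositions(String bits) {
        List<Integer> positions = new ArrayList<>();
        int i = 0, prev = 0;
        while (i < bits.length()) {
            int zeroes = 0;
            while (bits.charAt(i) == '0') { zeroes++; i++; }
            int delta = Integer.parseInt(bits.substring(i, i + zeroes + 1), 2);
            i += zeroes + 1;
            prev += delta;
            positions.add(prev);
        }
        return positions;
    }

    public static void main(String[] args) {
        String encoded = encodePositions(new int[] {3, 7, 8, 21});
        System.out.println(encoded + " -> " + decodePositions(encoded)); // prints ... -> [3, 7, 8, 21]
    }
}
```

Because small gaps dominate in natural text, most deltas cost only a few bits, which is the appeal over a coarser fixed-size positions mask.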
@@ -50,12 +50,18 @@ public class LiveCrawlActor extends RecordActorPrototype {
yield new Monitor("-");
}
case Monitor(String feedsHash) -> {
// Sleep initially in case this is during start-up
for (;;) {
String currentHash = feedsClient.getFeedDataHash();
if (!Objects.equals(currentHash, feedsHash)) {
yield new LiveCrawl(currentHash);
try {
Thread.sleep(Duration.ofMinutes(15));
String currentHash = feedsClient.getFeedDataHash();
if (!Objects.equals(currentHash, feedsHash)) {
yield new LiveCrawl(currentHash);
}
}
catch (RuntimeException ex) {
logger.error("Failed to fetch feed data hash");
}
Thread.sleep(Duration.ofMinutes(15));
}
}
case LiveCrawl(String feedsHash, long msgId) when msgId < 0 -> {
@@ -59,12 +59,6 @@ public class FeedsClient {
.forEachRemaining(rsp -> consumer.accept(rsp.getDomain(), new ArrayList<>(rsp.getUrlList())));
}

public record UpdatedDomain(String domain, List<String> urls) {
public UpdatedDomain(RpcUpdatedLinksResponse rsp) {
this(rsp.getDomain(), new ArrayList<>(rsp.getUrlList()));
}
}

/** Get the hash of the feed data, for identifying when the data has been updated */
public String getFeedDataHash() {
return channelPool.call(FeedApiGrpc.FeedApiBlockingStub::getFeedDataHash)
@@ -46,6 +46,7 @@ message RpcFeed {
string feedUrl = 3;
string updated = 4;
repeated RpcFeedItem items = 5;
int64 fetchTimestamp = 6;
}

message RpcFeedItem {
@@ -12,9 +12,11 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.BufferedInputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.attribute.PosixFileAttributes;
import java.security.MessageDigest;
import java.time.Instant;
import java.util.Base64;
@@ -209,4 +211,20 @@ public class FeedDb {

reader.getLinksUpdatedSince(since, consumer);
}

public Instant getFetchTime() {
if (!Files.exists(readerDbPath)) {
return Instant.ofEpochMilli(0);
}

try {
return Files.readAttributes(readerDbPath, PosixFileAttributes.class)
.creationTime()
.toInstant();
}
catch (IOException ex) {
logger.error("Failed to read the creation time of {}", readerDbPath);
return Instant.ofEpochMilli(0);
}
}
}
@@ -73,6 +73,17 @@ public class FeedFetcherService {
this.nodeConfigurationService = nodeConfigurationService;
this.serviceHeartbeat = serviceHeartbeat;
this.executorClient = executorClient;


// Add support for some alternate date tags for atom
rssReader.addItemExtension("issued", this::setDateFallback);
rssReader.addItemExtension("created", this::setDateFallback);
}

private void setDateFallback(Item item, String value) {
if (item.getPubDate().isEmpty()) {
item.setPubDate(value);
}
}

public enum UpdateMode {
@@ -305,6 +316,8 @@ public class FeedFetcherService {

public FeedItems parseFeed(String feedData, FeedDefinition definition) {
try {
feedData = sanitizeEntities(feedData);

List<Item> rawItems = rssReader.read(
// Massage the data to maximize the possibility of the flaky XML parser consuming it
new BOMInputStream(new ByteArrayInputStream(feedData.trim().getBytes(StandardCharsets.UTF_8)), false)
@@ -331,6 +344,32 @@
}
}

private static final Map<String, String> HTML_ENTITIES = Map.of(
"&raquo;", "»",
"&laquo;", "«",
"&mdash;", "--",
"&ndash;", "-",
"&rsquo;", "'",
"&lsquo;", "'",
"&nbsp;", ""
);

/** The XML parser will blow up if you insert HTML entities in the feed XML,
* which is unfortunately relatively common. Replace them as far as is possible
* with their corresponding characters
*/
static String sanitizeEntities(String feedData) {
String result = feedData;
for (Map.Entry<String, String> entry : HTML_ENTITIES.entrySet()) {
result = result.replace(entry.getKey(), entry.getValue());
}

// Handle lone ampersands not part of a recognized XML entity
result = result.replaceAll("&(?!(amp|lt|gt|apos|quot);)", "&amp;");

return result;
}

/** Decide whether to keep URI fragments in the feed items.
* <p></p>
* We keep fragments if there are multiple different fragments in the items.
@@ -366,7 +405,7 @@
return seenFragments.size() > 1;
}

private static class IsFeedItemDateValid implements Predicate<FeedItem> {
static class IsFeedItemDateValid implements Predicate<FeedItem> {
private final String today = ZonedDateTime.now().format(DateTimeFormatter.ISO_ZONED_DATE_TIME);

public boolean test(FeedItem item) {
@@ -107,8 +107,7 @@ public class FeedsGrpcService extends FeedApiGrpc.FeedApiImplBase implements Dis

@Override
public void getFeed(RpcDomainId request,
StreamObserver<RpcFeed> responseObserver)
{
StreamObserver<RpcFeed> responseObserver) {
if (!feedDb.isEnabled()) {
responseObserver.onError(new IllegalStateException("Feed database is disabled on this node"));
return;
@@ -126,7 +125,8 @@ public class FeedsGrpcService extends FeedApiGrpc.FeedApiImplBase implements Dis
.setDomainId(request.getDomainId())
.setDomain(domainName.get().toString())
.setFeedUrl(feedItems.feedUrl())
.setUpdated(feedItems.updated());
.setUpdated(feedItems.updated())
.setFetchTimestamp(feedDb.getFetchTime().toEpochMilli());

for (var item : feedItems.items()) {
retB.addItemsBuilder()
@@ -99,7 +99,9 @@ class FeedFetcherServiceTest extends AbstractModule {
feedFetcherService.setDeterministic();
feedFetcherService.updateFeeds(FeedFetcherService.UpdateMode.REFRESH);

Assertions.assertFalse(feedDb.getFeed(new EdgeDomain("www.marginalia.nu")).isEmpty());
var result = feedDb.getFeed(new EdgeDomain("www.marginalia.nu"));
System.out.println(result);
Assertions.assertFalse(result.isEmpty());
}

@Tag("flaky")
@@ -0,0 +1,26 @@
package nu.marginalia.rss.svc;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;

public class TestXmlSanitization {

@Test
public void testPreservedEntities() {
Assertions.assertEquals("&amp;", FeedFetcherService.sanitizeEntities("&amp;"));
Assertions.assertEquals("&lt;", FeedFetcherService.sanitizeEntities("&lt;"));
Assertions.assertEquals("&gt;", FeedFetcherService.sanitizeEntities("&gt;"));
Assertions.assertEquals("&quot;", FeedFetcherService.sanitizeEntities("&quot;"));
Assertions.assertEquals("&apos;", FeedFetcherService.sanitizeEntities("&apos;"));
}

@Test
public void testStrayAmpersand() {
Assertions.assertEquals("Bed &amp; Breakfast", FeedFetcherService.sanitizeEntities("Bed & Breakfast"));
}

@Test
public void testTranslatedHtmlEntity() {
Assertions.assertEquals("Foo -- Bar", FeedFetcherService.sanitizeEntities("Foo &mdash; Bar"));
}
}
@@ -85,7 +85,7 @@ class BTreeWriterTest {
public void testWriteEntrySize2() throws IOException {
BTreeContext ctx = new BTreeContext(4, 2, BTreeBlockSize.BS_64);

var tempFile = Files.createTempFile(Path.of("/tmp"), "tst", "dat");
var tempFile = Files.createTempFile("tst", "dat");

int[] data = generateItems32(64);

@@ -7,6 +7,7 @@ import nu.marginalia.WmsaHome;
|
||||
import nu.marginalia.converting.model.ProcessedDomain;
|
||||
import nu.marginalia.converting.processor.DomainProcessor;
|
||||
import nu.marginalia.crawl.CrawlerMain;
|
||||
import nu.marginalia.crawl.DomainStateDb;
|
||||
import nu.marginalia.crawl.fetcher.HttpFetcher;
|
||||
import nu.marginalia.crawl.fetcher.HttpFetcherImpl;
|
||||
import nu.marginalia.crawl.fetcher.warc.WarcRecorder;
|
||||
@@ -46,6 +47,7 @@ public class CrawlingThenConvertingIntegrationTest {
|
||||
|
||||
private Path fileName;
|
||||
private Path fileName2;
|
||||
private Path dbTempFile;
|
||||
|
||||
@BeforeAll
|
||||
public static void setUpAll() {
|
||||
@@ -63,16 +65,18 @@ public class CrawlingThenConvertingIntegrationTest {
|
||||
httpFetcher = new HttpFetcherImpl(WmsaHome.getUserAgent().uaString());
|
||||
this.fileName = Files.createTempFile("crawling-then-converting", ".warc.gz");
|
||||
this.fileName2 = Files.createTempFile("crawling-then-converting", ".warc.gz");
|
||||
this.dbTempFile = Files.createTempFile("domains", "db");
|
||||
}
|
||||
|
||||
@AfterEach
|
||||
public void tearDown() throws IOException {
|
||||
Files.deleteIfExists(fileName);
|
||||
Files.deleteIfExists(fileName2);
|
||||
Files.deleteIfExists(dbTempFile);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInvalidDomain() throws IOException {
|
||||
public void testInvalidDomain() throws Exception {
|
||||
// Attempt to fetch an invalid domain
|
||||
var specs = new CrawlerMain.CrawlSpecRecord("invalid.invalid.invalid", 10);
|
||||
|
||||
@@ -88,7 +92,7 @@ public class CrawlingThenConvertingIntegrationTest {
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRedirectingDomain() throws IOException {
|
||||
public void testRedirectingDomain() throws Exception {
|
||||
// Attempt to fetch an invalid domain
|
||||
var specs = new CrawlerMain.CrawlSpecRecord("memex.marginalia.nu", 10);
|
||||
|
||||
@@ -107,7 +111,7 @@ public class CrawlingThenConvertingIntegrationTest {
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBlockedDomain() throws IOException {
|
||||
public void testBlockedDomain() throws Exception {
|
||||
// Attempt to fetch an invalid domain
|
||||
var specs = new CrawlerMain.CrawlSpecRecord("search.marginalia.nu", 10);
|
||||
|
||||
@@ -124,7 +128,7 @@ public class CrawlingThenConvertingIntegrationTest {
|
||||
}
|
||||
|
||||
@Test
|
||||
public void crawlSunnyDay() throws IOException {
|
||||
public void crawlSunnyDay() throws Exception {
|
||||
var specs = new CrawlerMain.CrawlSpecRecord("www.marginalia.nu", 10);
|
||||
|
||||
CrawledDomain domain = crawl(specs);
|
||||
@@ -157,7 +161,7 @@ public class CrawlingThenConvertingIntegrationTest {
|
||||
|
||||
|
||||
@Test
|
||||
public void crawlContentTypes() throws IOException {
|
||||
public void crawlContentTypes() throws Exception {
|
||||
var specs = new CrawlerMain.CrawlSpecRecord("www.marginalia.nu", 10,
|
||||
List.of(
|
||||
"https://www.marginalia.nu/sanic.png",
|
||||
@@ -195,7 +199,7 @@ public class CrawlingThenConvertingIntegrationTest {
|
||||
|
||||
|
||||
@Test
|
||||
public void crawlRobotsTxt() throws IOException {
|
||||
public void crawlRobotsTxt() throws Exception {
|
||||
var specs = new CrawlerMain.CrawlSpecRecord("search.marginalia.nu", 5,
|
||||
List.of("https://search.marginalia.nu/search?q=hello+world")
|
||||
);
|
||||
@@ -235,15 +239,17 @@ public class CrawlingThenConvertingIntegrationTest {
|
||||
return null; // unreachable
|
||||
}
|
||||
}
|
||||
private CrawledDomain crawl(CrawlerMain.CrawlSpecRecord specs) throws IOException {
|
||||
private CrawledDomain crawl(CrawlerMain.CrawlSpecRecord specs) throws Exception {
|
||||
return crawl(specs, domain -> true);
|
||||
}
|
||||
|
||||
private CrawledDomain crawl(CrawlerMain.CrawlSpecRecord specs, Predicate<EdgeDomain> domainBlacklist) throws IOException {
|
||||
private CrawledDomain crawl(CrawlerMain.CrawlSpecRecord specs, Predicate<EdgeDomain> domainBlacklist) throws Exception {
|
||||
List<SerializableCrawlData> data = new ArrayList<>();
|
||||
|
||||
try (var recorder = new WarcRecorder(fileName)) {
|
||||
new CrawlerRetreiver(httpFetcher, new DomainProber(domainBlacklist), specs, recorder).crawlDomain();
|
||||
try (var recorder = new WarcRecorder(fileName);
|
||||
var db = new DomainStateDb(dbTempFile))
|
||||
{
|
||||
new CrawlerRetreiver(httpFetcher, new DomainProber(domainBlacklist), specs, db, recorder).crawlDomain();
|
||||
}
|
||||
|
||||
CrawledDocumentParquetRecordFileWriter.convertWarc(specs.domain(),
|
||||
|
@@ -46,6 +46,8 @@ dependencies {

implementation libs.notnull
implementation libs.guava
implementation libs.sqlite

implementation dependencies.create(libs.guice.get()) {
exclude group: 'com.google.guava'
}
@@ -241,6 +241,7 @@ public class CrawlerMain extends ProcessMainClass {
|
||||
|
||||
// Set up the work log and the warc archiver so we can keep track of what we've done
|
||||
try (WorkLog workLog = new WorkLog(outputDir.resolve("crawler.log"));
|
||||
DomainStateDb domainStateDb = new DomainStateDb(outputDir.resolve("domainstate.db"));
|
||||
WarcArchiverIf warcArchiver = warcArchiverFactory.get(outputDir);
|
||||
AnchorTagsSource anchorTagsSource = anchorTagsSourceFactory.create(domainsToCrawl)
|
||||
) {
|
||||
@@ -258,6 +259,7 @@ public class CrawlerMain extends ProcessMainClass {
|
||||
anchorTagsSource,
|
||||
outputDir,
|
||||
warcArchiver,
|
||||
domainStateDb,
|
||||
workLog);
|
||||
|
||||
if (pendingCrawlTasks.putIfAbsent(crawlSpec.domain(), task) == null) {
|
||||
@@ -299,11 +301,12 @@ public class CrawlerMain extends ProcessMainClass {
|
||||
heartbeat.start();
|
||||
|
||||
try (WorkLog workLog = new WorkLog(outputDir.resolve("crawler-" + targetDomainName.replace('/', '-') + ".log"));
|
||||
DomainStateDb domainStateDb = new DomainStateDb(outputDir.resolve("domainstate.db"));
|
||||
WarcArchiverIf warcArchiver = warcArchiverFactory.get(outputDir);
|
||||
AnchorTagsSource anchorTagsSource = anchorTagsSourceFactory.create(List.of(new EdgeDomain(targetDomainName)))
|
||||
) {
|
||||
var spec = new CrawlSpecRecord(targetDomainName, 1000, List.of());
|
||||
var task = new CrawlTask(spec, anchorTagsSource, outputDir, warcArchiver, workLog);
|
||||
var task = new CrawlTask(spec, anchorTagsSource, outputDir, warcArchiver, domainStateDb, workLog);
|
||||
task.run();
|
||||
}
|
||||
catch (Exception ex) {
|
||||
@@ -324,18 +327,21 @@ public class CrawlerMain extends ProcessMainClass {
|
||||
private final AnchorTagsSource anchorTagsSource;
|
||||
private final Path outputDir;
|
||||
private final WarcArchiverIf warcArchiver;
|
||||
private final DomainStateDb domainStateDb;
|
||||
private final WorkLog workLog;
|
||||
|
||||
CrawlTask(CrawlSpecRecord specification,
|
||||
AnchorTagsSource anchorTagsSource,
|
||||
Path outputDir,
|
||||
WarcArchiverIf warcArchiver,
|
||||
DomainStateDb domainStateDb,
|
||||
WorkLog workLog)
|
||||
{
|
||||
this.specification = specification;
|
||||
this.anchorTagsSource = anchorTagsSource;
|
||||
this.outputDir = outputDir;
|
||||
this.warcArchiver = warcArchiver;
|
||||
this.domainStateDb = domainStateDb;
|
||||
this.workLog = workLog;
|
||||
|
||||
this.domain = specification.domain();
|
||||
@@ -359,7 +365,7 @@ public class CrawlerMain extends ProcessMainClass {
|
||||
}
|
||||
|
||||
try (var warcRecorder = new WarcRecorder(newWarcFile); // write to a temp file for now
|
||||
var retriever = new CrawlerRetreiver(fetcher, domainProber, specification, warcRecorder);
|
||||
var retriever = new CrawlerRetreiver(fetcher, domainProber, specification, domainStateDb, warcRecorder);
|
||||
CrawlDataReference reference = getReference();
|
||||
)
|
||||
{
|
||||
|
@@ -0,0 +1,127 @@
package nu.marginalia.crawl;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.annotation.Nullable;
import java.nio.file.Path;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.time.Instant;
import java.util.Optional;

/** Supplemental sqlite database for storing the summary of a crawl.
* One database exists per crawl data set.
* */
public class DomainStateDb implements AutoCloseable {

private static final Logger logger = LoggerFactory.getLogger(DomainStateDb.class);

private final Connection connection;

public record SummaryRecord(
String domainName,
Instant lastUpdated,
String state,
@Nullable String stateDesc,
@Nullable String feedUrl
)
{
public static SummaryRecord forSuccess(String domainName) {
return new SummaryRecord(domainName, Instant.now(), "OK", null, null);
}

public static SummaryRecord forSuccess(String domainName, String feedUrl) {
return new SummaryRecord(domainName, Instant.now(), "OK", null, feedUrl);
}

public static SummaryRecord forError(String domainName, String state, String stateDesc) {
return new SummaryRecord(domainName, Instant.now(), state, stateDesc, null);
}

public boolean equals(Object other) {
if (other == this) {
return true;
}
if (!(other instanceof SummaryRecord(String name, Instant updated, String state1, String desc, String url))) {
return false;
}
return domainName.equals(name) &&
lastUpdated.toEpochMilli() == updated.toEpochMilli() &&
state.equals(state1) &&
(stateDesc == null ? desc == null : stateDesc.equals(desc)) &&
(feedUrl == null ? url == null : feedUrl.equals(url));
}

public int hashCode() {
return domainName.hashCode() + Long.hashCode(lastUpdated.toEpochMilli());
}

}

public DomainStateDb(Path filename) throws SQLException {
String sqliteDbString = "jdbc:sqlite:" + filename.toString();
connection = DriverManager.getConnection(sqliteDbString);

try (var stmt = connection.createStatement()) {
stmt.executeUpdate("""
CREATE TABLE IF NOT EXISTS summary (
domain TEXT PRIMARY KEY,
lastUpdatedEpochMs LONG NOT NULL,
state TEXT NOT NULL,
stateDesc TEXT,
feedUrl TEXT
)
""");

stmt.execute("PRAGMA journal_mode=WAL");
}
}

@Override
public void close() throws SQLException {
connection.close();
}


public void save(SummaryRecord record) {
try (var stmt = connection.prepareStatement("""
INSERT OR REPLACE INTO summary (domain, lastUpdatedEpochMs, state, stateDesc, feedUrl)
VALUES (?, ?, ?, ?, ?)
""")) {
stmt.setString(1, record.domainName());
stmt.setLong(2, record.lastUpdated().toEpochMilli());
stmt.setString(3, record.state());
stmt.setString(4, record.stateDesc());
stmt.setString(5, record.feedUrl());
stmt.executeUpdate();
} catch (SQLException e) {
logger.error("Failed to insert summary record", e);
}
}

public Optional<SummaryRecord> get(String domainName) {
try (var stmt = connection.prepareStatement("""
SELECT domain, lastUpdatedEpochMs, state, stateDesc, feedUrl
FROM summary
WHERE domain = ?
""")) {
stmt.setString(1, domainName);
var rs = stmt.executeQuery();
if (rs.next()) {
return Optional.of(new SummaryRecord(
rs.getString("domain"),
Instant.ofEpochMilli(rs.getLong("lastUpdatedEpochMs")),
rs.getString("state"),
rs.getString("stateDesc"),
rs.getString("feedUrl")
));
}
} catch (SQLException e) {
logger.error("Failed to get summary record", e);
}

return Optional.empty();
}
}
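A minimal usage sketch of the DomainStateDb added above, showing the intended lifecycle: open one database per crawl data set, record a summary per domain, and re-read it on a later run (the crawler uses a previously stored feedUrl to revalidate a feed before guessing new endpoints). The file path, domain names, feed URL and error text below are placeholders for illustration, not values from the changeset.

```java
import nu.marginalia.crawl.DomainStateDb;

import java.nio.file.Files;
import java.nio.file.Path;

public class DomainStateDbExample {
    public static void main(String[] args) throws Exception {
        // One sqlite file per crawl data set (placeholder temp file here)
        Path dbFile = Files.createTempFile("domainstate", ".db");

        try (var db = new DomainStateDb(dbFile)) {
            // Record the outcome of crawling a domain, including a discovered feed URL
            db.save(DomainStateDb.SummaryRecord.forSuccess("www.marginalia.nu",
                    "https://www.marginalia.nu/atom.xml"));

            // A failed domain gets an error state and a description instead
            db.save(DomainStateDb.SummaryRecord.forError("invalid.invalid.invalid",
                    "ERROR", "DNS lookup failure"));

            // On a later crawl, the stored feed URL can be revalidated before guessing new ones
            db.get("www.marginalia.nu")
              .map(DomainStateDb.SummaryRecord::feedUrl)
              .ifPresent(feedUrl -> System.out.println("Previously seen feed: " + feedUrl));
        }
    }
}
```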
@@ -139,7 +139,7 @@ public class HttpFetcherImpl implements HttpFetcher {
public ContentTypeProbeResult probeContentType(EdgeUrl url,
WarcRecorder warcRecorder,
ContentTags tags) throws RateLimitException {
if (tags.isEmpty()) {
if (tags.isEmpty() && contentTypeLogic.isUrlLikeBinary(url)) {
var headBuilder = new Request.Builder().head()
.addHeader("User-agent", userAgentString)
.addHeader("Accept-Encoding", "gzip")
@@ -4,6 +4,7 @@ import crawlercommons.robots.SimpleRobotRules;
|
||||
import nu.marginalia.atags.model.DomainLinks;
|
||||
import nu.marginalia.contenttype.ContentType;
|
||||
import nu.marginalia.crawl.CrawlerMain;
|
||||
import nu.marginalia.crawl.DomainStateDb;
|
||||
import nu.marginalia.crawl.fetcher.ContentTags;
|
||||
import nu.marginalia.crawl.fetcher.HttpFetcher;
|
||||
import nu.marginalia.crawl.fetcher.HttpFetcherImpl;
|
||||
@@ -16,7 +17,9 @@ import nu.marginalia.ip_blocklist.UrlBlocklist;
|
||||
import nu.marginalia.link_parser.LinkParser;
|
||||
import nu.marginalia.model.EdgeDomain;
|
||||
import nu.marginalia.model.EdgeUrl;
|
||||
import nu.marginalia.model.body.DocumentBodyExtractor;
|
||||
import nu.marginalia.model.body.HttpFetchResult;
|
||||
import nu.marginalia.model.crawldata.CrawlerDomainStatus;
|
||||
import org.jsoup.Jsoup;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
@@ -46,6 +49,7 @@ public class CrawlerRetreiver implements AutoCloseable {
|
||||
|
||||
private final DomainProber domainProber;
|
||||
private final DomainCrawlFrontier crawlFrontier;
|
||||
private final DomainStateDb domainStateDb;
|
||||
private final WarcRecorder warcRecorder;
|
||||
private final CrawlerRevisitor crawlerRevisitor;
|
||||
|
||||
@@ -55,8 +59,10 @@ public class CrawlerRetreiver implements AutoCloseable {
|
||||
public CrawlerRetreiver(HttpFetcher fetcher,
|
||||
DomainProber domainProber,
|
||||
CrawlerMain.CrawlSpecRecord specs,
|
||||
DomainStateDb domainStateDb,
|
||||
WarcRecorder warcRecorder)
|
||||
{
|
||||
this.domainStateDb = domainStateDb;
|
||||
this.warcRecorder = warcRecorder;
|
||||
this.fetcher = fetcher;
|
||||
this.domainProber = domainProber;
|
||||
@@ -90,8 +96,21 @@ public class CrawlerRetreiver implements AutoCloseable {
|
||||
try {
|
||||
// Do an initial domain probe to determine the root URL
|
||||
EdgeUrl rootUrl;
|
||||
if (probeRootUrl() instanceof HttpFetcher.DomainProbeResult.Ok ok) rootUrl = ok.probedUrl();
|
||||
else return 1;
|
||||
|
||||
var probeResult = probeRootUrl();
|
||||
switch (probeResult) {
|
||||
case HttpFetcher.DomainProbeResult.Ok(EdgeUrl probedUrl) -> {
|
||||
rootUrl = probedUrl; // Good track
|
||||
}
|
||||
case HttpFetcher.DomainProbeResult.Redirect(EdgeDomain domain1) -> {
|
||||
domainStateDb.save(DomainStateDb.SummaryRecord.forError(domain, "Redirect", domain1.toString()));
|
||||
return 1;
|
||||
}
|
||||
case HttpFetcher.DomainProbeResult.Error(CrawlerDomainStatus status, String desc) -> {
|
||||
domainStateDb.save(DomainStateDb.SummaryRecord.forError(domain, status.toString(), desc));
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
// Sleep after the initial probe, we don't have access to the robots.txt yet
|
||||
// so we don't know the crawl delay
|
||||
@@ -114,7 +133,8 @@ public class CrawlerRetreiver implements AutoCloseable {
|
||||
|
||||
delayTimer.waitFetchDelay(0); // initial delay after robots.txt
|
||||
|
||||
sniffRootDocument(rootUrl, delayTimer);
|
||||
DomainStateDb.SummaryRecord summaryRecord = sniffRootDocument(rootUrl, delayTimer);
|
||||
domainStateDb.save(summaryRecord);
|
||||
|
||||
// Play back the old crawl data (if present) and fetch the documents comparing etags and last-modified
|
||||
if (crawlerRevisitor.recrawl(oldCrawlData, robotsRules, delayTimer) > 0) {
|
||||
@@ -196,7 +216,9 @@ public class CrawlerRetreiver implements AutoCloseable {
|
||||
return domainProbeResult;
|
||||
}
|
||||
|
||||
private void sniffRootDocument(EdgeUrl rootUrl, CrawlDelayTimer timer) {
|
||||
private DomainStateDb.SummaryRecord sniffRootDocument(EdgeUrl rootUrl, CrawlDelayTimer timer) {
|
||||
Optional<String> feedLink = Optional.empty();
|
||||
|
||||
try {
|
||||
var url = rootUrl.withPathAndParam("/", null);
|
||||
|
||||
@@ -204,11 +226,11 @@ public class CrawlerRetreiver implements AutoCloseable {
|
||||
timer.waitFetchDelay(0);
|
||||
|
||||
if (!(result instanceof HttpFetchResult.ResultOk ok))
|
||||
return;
|
||||
return DomainStateDb.SummaryRecord.forSuccess(domain);
|
||||
|
||||
var optDoc = ok.parseDocument();
|
||||
if (optDoc.isEmpty())
|
||||
return;
|
||||
return DomainStateDb.SummaryRecord.forSuccess(domain);
|
||||
|
||||
// Sniff the software based on the sample document
|
||||
var doc = optDoc.get();
|
||||
@@ -216,7 +238,6 @@ public class CrawlerRetreiver implements AutoCloseable {
|
||||
crawlFrontier.enqueueLinksFromDocument(url, doc);
|
||||
|
||||
EdgeUrl faviconUrl = url.withPathAndParam("/favicon.ico", null);
|
||||
Optional<EdgeUrl> sitemapUrl = Optional.empty();
|
||||
|
||||
for (var link : doc.getElementsByTag("link")) {
|
||||
String rel = link.attr("rel");
|
||||
@@ -232,23 +253,33 @@ public class CrawlerRetreiver implements AutoCloseable {
|
||||
|
||||
// Grab the RSS/Atom as a sitemap if it exists
|
||||
if (rel.equalsIgnoreCase("alternate")
|
||||
&& (type.equalsIgnoreCase("application/atom+xml") || type.equalsIgnoreCase("application/atomsvc+xml"))) {
|
||||
&& (type.equalsIgnoreCase("application/atom+xml")
|
||||
|| type.equalsIgnoreCase("application/atomsvc+xml")
|
||||
|| type.equalsIgnoreCase("application/rss+xml")
|
||||
)) {
|
||||
String href = link.attr("href");
|
||||
|
||||
sitemapUrl = linkParser.parseLink(url, href)
|
||||
.filter(crawlFrontier::isSameDomain);
|
||||
feedLink = linkParser.parseLink(url, href)
|
||||
.filter(crawlFrontier::isSameDomain)
|
||||
.map(EdgeUrl::toString);
|
||||
}
|
||||
}
|
||||
|
||||
// Download the sitemap if available exists
|
||||
if (sitemapUrl.isPresent()) {
|
||||
sitemapFetcher.downloadSitemaps(List.of(sitemapUrl.get()));
|
||||
|
||||
if (feedLink.isEmpty()) {
|
||||
feedLink = guessFeedUrl(timer);
|
||||
}
|
||||
|
||||
// Download the sitemap if available
|
||||
if (feedLink.isPresent()) {
|
||||
sitemapFetcher.downloadSitemaps(List.of(feedLink.get()));
|
||||
timer.waitFetchDelay(0);
|
||||
}
|
||||
|
||||
// Grab the favicon if it exists
|
||||
fetchWithRetry(faviconUrl, timer, HttpFetcher.ProbeType.DISABLED, ContentTags.empty());
|
||||
timer.waitFetchDelay(0);
|
||||
|
||||
}
|
||||
catch (Exception ex) {
|
||||
logger.error("Error configuring link filter", ex);
|
||||
@@ -256,6 +287,74 @@ public class CrawlerRetreiver implements AutoCloseable {
|
||||
finally {
|
||||
crawlFrontier.addVisited(rootUrl);
|
||||
}
|
||||
|
||||
if (feedLink.isPresent()) {
|
||||
return DomainStateDb.SummaryRecord.forSuccess(domain, feedLink.get());
|
||||
}
|
||||
else {
|
||||
return DomainStateDb.SummaryRecord.forSuccess(domain);
|
||||
}
|
||||
}
|
||||
|
||||
private final List<String> likelyFeedEndpoints = List.of(
|
||||
"/rss.xml",
|
||||
"/atom.xml",
|
||||
"/feed.xml",
|
||||
"/index.xml",
|
||||
"/feed",
|
||||
"/rss",
|
||||
"/atom",
|
||||
"/feeds",
|
||||
"/blog/feed",
|
||||
"/blog/rss"
|
||||
);
|
||||
|
||||
private Optional<String> guessFeedUrl(CrawlDelayTimer timer) throws InterruptedException {
|
||||
var oldDomainStateRecord = domainStateDb.get(domain);
|
||||
|
||||
// If we are already aware of an old feed URL, then we can just revalidate it
|
||||
if (oldDomainStateRecord.isPresent()) {
|
||||
var oldRecord = oldDomainStateRecord.get();
|
||||
if (oldRecord.feedUrl() != null && validateFeedUrl(oldRecord.feedUrl(), timer)) {
|
||||
return Optional.of(oldRecord.feedUrl());
|
||||
}
|
||||
}
|
||||
|
||||
for (String endpoint : likelyFeedEndpoints) {
|
||||
String url = "https://" + domain + "/" + endpoint;
|
||||
if (validateFeedUrl(url, timer)) {
|
||||
return Optional.of(url);
|
||||
}
|
||||
}
|
||||
|
||||
return Optional.empty();
|
||||
}
|
||||
|
||||
private boolean validateFeedUrl(String url, CrawlDelayTimer timer) throws InterruptedException {
|
||||
var parsedOpt = EdgeUrl.parse(url);
|
||||
if (parsedOpt.isEmpty())
|
||||
return false;
|
||||
|
||||
HttpFetchResult result = fetchWithRetry(parsedOpt.get(), timer, HttpFetcher.ProbeType.DISABLED, ContentTags.empty());
|
||||
timer.waitFetchDelay(0);
|
||||
|
||||
if (!(result instanceof HttpFetchResult.ResultOk ok)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Extract the beginning of the response body and look for an RSS or Atom signature
|
||||
Optional<String> bodyOpt = DocumentBodyExtractor.asString(ok).getBody();
|
||||
if (bodyOpt.isEmpty())
|
||||
return false;
|
||||
String body = bodyOpt.get();
|
||||
body = body.substring(0, Math.min(128, body.length())).toLowerCase();
|
||||
|
||||
if (body.contains("<atom"))
|
||||
return true;
|
||||
if (body.contains("<rss"))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
public HttpFetchResult fetchContentWithReference(EdgeUrl top,
|
||||
|
@@ -7,9 +7,9 @@ import nu.marginalia.model.EdgeUrl;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
|
||||
public class SitemapFetcher {
|
||||
@@ -24,26 +24,27 @@ public class SitemapFetcher {
|
||||
}
|
||||
|
||||
public void downloadSitemaps(SimpleRobotRules robotsRules, EdgeUrl rootUrl) {
|
||||
List<String> sitemaps = robotsRules.getSitemaps();
|
||||
List<String> urls = robotsRules.getSitemaps();
|
||||
|
||||
List<EdgeUrl> urls = new ArrayList<>(sitemaps.size());
|
||||
if (!sitemaps.isEmpty()) {
|
||||
for (var url : sitemaps) {
|
||||
EdgeUrl.parse(url).ifPresent(urls::add);
|
||||
}
|
||||
}
|
||||
else {
|
||||
urls.add(rootUrl.withPathAndParam("/sitemap.xml", null));
|
||||
if (urls.isEmpty()) {
|
||||
urls = List.of(rootUrl.withPathAndParam("/sitemap.xml", null).toString());
|
||||
}
|
||||
|
||||
downloadSitemaps(urls);
|
||||
}
|
||||
|
||||
public void downloadSitemaps(List<EdgeUrl> urls) {
|
||||
public void downloadSitemaps(List<String> urls) {
|
||||
|
||||
Set<String> checkedSitemaps = new HashSet<>();
|
||||
|
||||
for (var url : urls) {
|
||||
for (var rawUrl : urls) {
|
||||
Optional<EdgeUrl> parsedUrl = EdgeUrl.parse(rawUrl);
|
||||
if (parsedUrl.isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
EdgeUrl url = parsedUrl.get();
|
||||
|
||||
// Let's not download sitemaps from other domains for now
|
||||
if (!crawlFrontier.isSameDomain(url)) {
|
||||
continue;
|
||||
|
@@ -18,6 +18,7 @@ public class ContentTypeLogic {
"application/xhtml",
"application/xml",
"application/atom+xml",
"application/atomsvc+xml",
"application/rss+xml",
"application/x-rss+xml",
"application/rdf+xml",
@@ -23,6 +23,10 @@ public sealed interface DocumentBodyResult<T> {
return mapper.apply(contentType, body);
}

public Optional<T> getBody() {
return Optional.of(body);
}

@Override
public void ifPresent(ExConsumer<T, Exception> consumer) throws Exception {
consumer.accept(contentType, body);
@@ -41,6 +45,11 @@
return (DocumentBodyResult<T2>) this;
}

@Override
public Optional<T> getBody() {
return Optional.empty();
}

@Override
public void ifPresent(ExConsumer<T, Exception> consumer) throws Exception {
}
@@ -49,6 +58,7 @@
<T2> Optional<T2> mapOpt(BiFunction<ContentType, T, T2> mapper);
<T2> Optional<T2> flatMapOpt(BiFunction<ContentType, T, Optional<T2>> mapper);
<T2> DocumentBodyResult<T2> flatMap(BiFunction<ContentType, T, DocumentBodyResult<T2>> mapper);
Optional<T> getBody();

void ifPresent(ExConsumer<T,Exception> consumer) throws Exception;

@@ -0,0 +1,66 @@
|
||||
package nu.marginalia.crawl;
|
||||
|
||||
import org.junit.jupiter.api.AfterEach;
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.sql.SQLException;
|
||||
import java.time.Instant;
|
||||
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
|
||||
class DomainStateDbTest {
|
||||
|
||||
Path tempFile;
|
||||
@BeforeEach
|
||||
void setUp() throws IOException {
|
||||
tempFile = Files.createTempFile(getClass().getSimpleName(), ".db");
|
||||
}
|
||||
|
||||
@AfterEach
|
||||
void tearDown() throws IOException {
|
||||
Files.deleteIfExists(tempFile);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSunnyDay() throws SQLException {
|
||||
try (var db = new DomainStateDb(tempFile)) {
|
||||
var allFields = new DomainStateDb.SummaryRecord(
|
||||
"all.marginalia.nu",
|
||||
Instant.now(),
|
||||
"OK",
|
||||
"Bad address",
|
||||
"https://www.marginalia.nu/atom.xml"
|
||||
);
|
||||
|
||||
var minFields = new DomainStateDb.SummaryRecord(
|
||||
"min.marginalia.nu",
|
||||
Instant.now(),
|
||||
"OK",
|
||||
null,
|
||||
null
|
||||
);
|
||||
|
||||
db.save(allFields);
|
||||
db.save(minFields);
|
||||
|
||||
assertEquals(allFields, db.get("all.marginalia.nu").orElseThrow());
|
||||
assertEquals(minFields, db.get("min.marginalia.nu").orElseThrow());
|
||||
|
||||
var updatedAllFields = new DomainStateDb.SummaryRecord(
|
||||
"all.marginalia.nu",
|
||||
Instant.now(),
|
||||
"BAD",
|
||||
null,
|
||||
null
|
||||
);
|
||||
|
||||
db.save(updatedAllFields);
|
||||
assertEquals(updatedAllFields, db.get("all.marginalia.nu").orElseThrow());
|
||||
}
|
||||
}
|
||||
|
||||
}
|
@@ -42,24 +42,24 @@ class ContentTypeProberTest {
|
||||
port = r.nextInt(10000) + 8000;
|
||||
server = HttpServer.create(new InetSocketAddress("127.0.0.1", port), 10);
|
||||
|
||||
server.createContext("/html", exchange -> {
|
||||
server.createContext("/html.gz", exchange -> {
|
||||
exchange.getResponseHeaders().add("Content-Type", "text/html");
|
||||
exchange.sendResponseHeaders(200, -1);
|
||||
exchange.close();
|
||||
});
|
||||
server.createContext("/redir", exchange -> {
|
||||
exchange.getResponseHeaders().add("Location", "/html");
|
||||
server.createContext("/redir.gz", exchange -> {
|
||||
exchange.getResponseHeaders().add("Location", "/html.gz");
|
||||
exchange.sendResponseHeaders(301, -1);
|
||||
exchange.close();
|
||||
});
|
||||
|
||||
server.createContext("/bin", exchange -> {
|
||||
server.createContext("/bin.gz", exchange -> {
|
||||
exchange.getResponseHeaders().add("Content-Type", "application/binary");
|
||||
exchange.sendResponseHeaders(200, -1);
|
||||
exchange.close();
|
||||
});
|
||||
|
||||
server.createContext("/timeout", exchange -> {
|
||||
server.createContext("/timeout.gz", exchange -> {
|
||||
try {
|
||||
Thread.sleep(15_000);
|
||||
} catch (InterruptedException e) {
|
||||
@@ -73,10 +73,10 @@ class ContentTypeProberTest {
|
||||
|
||||
server.start();
|
||||
|
||||
htmlEndpoint = EdgeUrl.parse("http://localhost:" + port + "/html").get();
|
||||
binaryEndpoint = EdgeUrl.parse("http://localhost:" + port + "/bin").get();
|
||||
timeoutEndpoint = EdgeUrl.parse("http://localhost:" + port + "/timeout").get();
|
||||
htmlRedirEndpoint = EdgeUrl.parse("http://localhost:" + port + "/redir").get();
|
||||
htmlEndpoint = EdgeUrl.parse("http://localhost:" + port + "/html.gz").get();
|
||||
binaryEndpoint = EdgeUrl.parse("http://localhost:" + port + "/bin.gz").get();
|
||||
timeoutEndpoint = EdgeUrl.parse("http://localhost:" + port + "/timeout.gz").get();
|
||||
htmlRedirEndpoint = EdgeUrl.parse("http://localhost:" + port + "/redir.gz").get();
|
||||
|
||||
fetcher = new HttpFetcherImpl("test");
|
||||
recorder = new WarcRecorder(warcFile);
|
||||
|
@@ -2,6 +2,7 @@ package nu.marginalia.crawling.retreival;
|
||||
|
||||
import crawlercommons.robots.SimpleRobotRules;
|
||||
import nu.marginalia.crawl.CrawlerMain;
|
||||
import nu.marginalia.crawl.DomainStateDb;
|
||||
import nu.marginalia.crawl.fetcher.ContentTags;
|
||||
import nu.marginalia.crawl.fetcher.HttpFetcher;
|
||||
import nu.marginalia.crawl.fetcher.HttpFetcherImpl;
|
||||
@@ -18,6 +19,7 @@ import nu.marginalia.model.crawldata.SerializableCrawlData;
|
||||
import nu.marginalia.test.CommonTestData;
|
||||
import okhttp3.Headers;
|
||||
import org.junit.jupiter.api.AfterEach;
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.mockito.Mockito;
|
||||
import org.slf4j.Logger;
|
||||
@@ -25,6 +27,9 @@ import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.URISyntaxException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.sql.SQLException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
@@ -36,9 +41,14 @@ public class CrawlerMockFetcherTest {
|
||||
|
||||
Map<EdgeUrl, CrawledDocument> mockData = new HashMap<>();
|
||||
HttpFetcher fetcherMock = new MockFetcher();
|
||||
|
||||
private Path dbTempFile;
|
||||
@BeforeEach
|
||||
public void setUp() throws IOException {
|
||||
dbTempFile = Files.createTempFile("domains","db");
|
||||
}
|
||||
@AfterEach
|
||||
public void tearDown() {
|
||||
public void tearDown() throws IOException {
|
||||
Files.deleteIfExists(dbTempFile);
|
||||
mockData.clear();
|
||||
}
|
||||
|
||||
@@ -66,15 +76,17 @@ public class CrawlerMockFetcherTest {
|
||||
|
||||
}
|
||||
|
||||
void crawl(CrawlerMain.CrawlSpecRecord spec) throws IOException {
|
||||
try (var recorder = new WarcRecorder()) {
|
||||
new CrawlerRetreiver(fetcherMock, new DomainProber(d -> true), spec, recorder)
|
||||
void crawl(CrawlerMain.CrawlSpecRecord spec) throws IOException, SQLException {
|
||||
try (var recorder = new WarcRecorder();
|
||||
var db = new DomainStateDb(dbTempFile)
|
||||
) {
|
||||
new CrawlerRetreiver(fetcherMock, new DomainProber(d -> true), spec, db, recorder)
|
||||
.crawlDomain();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testLemmy() throws URISyntaxException, IOException {
|
||||
public void testLemmy() throws Exception {
|
||||
List<SerializableCrawlData> out = new ArrayList<>();
|
||||
|
||||
registerUrlClasspathData(new EdgeUrl("https://startrek.website/"), "mock-crawl-data/lemmy/index.html");
|
||||
@@ -85,7 +97,7 @@ public class CrawlerMockFetcherTest {
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMediawiki() throws URISyntaxException, IOException {
|
||||
public void testMediawiki() throws Exception {
|
||||
List<SerializableCrawlData> out = new ArrayList<>();
|
||||
|
||||
registerUrlClasspathData(new EdgeUrl("https://en.wikipedia.org/"), "mock-crawl-data/mediawiki/index.html");
|
||||
@@ -94,7 +106,7 @@ public class CrawlerMockFetcherTest {
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDiscourse() throws URISyntaxException, IOException {
|
||||
public void testDiscourse() throws Exception {
|
||||
List<SerializableCrawlData> out = new ArrayList<>();
|
||||
|
||||
registerUrlClasspathData(new EdgeUrl("https://community.tt-rss.org/"), "mock-crawl-data/discourse/index.html");
|
||||
|
@@ -4,6 +4,7 @@ import nu.marginalia.UserAgent;
|
||||
import nu.marginalia.WmsaHome;
|
||||
import nu.marginalia.atags.model.DomainLinks;
|
||||
import nu.marginalia.crawl.CrawlerMain;
|
||||
import nu.marginalia.crawl.DomainStateDb;
|
||||
import nu.marginalia.crawl.fetcher.HttpFetcher;
|
||||
import nu.marginalia.crawl.fetcher.HttpFetcherImpl;
|
||||
import nu.marginalia.crawl.fetcher.warc.WarcRecorder;
|
||||
@@ -25,6 +26,7 @@ import java.io.RandomAccessFile;
|
||||
import java.net.URISyntaxException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.sql.SQLException;
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
@@ -39,11 +41,13 @@ class CrawlerRetreiverTest {
|
||||
Path tempFileWarc2;
|
||||
Path tempFileParquet2;
|
||||
Path tempFileWarc3;
|
||||
Path tempFileDb;
|
||||
@BeforeEach
|
||||
public void setUp() throws IOException {
|
||||
httpFetcher = new HttpFetcherImpl("search.marginalia.nu; testing a bit :D");
|
||||
tempFileParquet1 = Files.createTempFile("crawling-process", ".parquet");
|
||||
tempFileParquet2 = Files.createTempFile("crawling-process", ".parquet");
|
||||
tempFileDb = Files.createTempFile("crawling-process", ".db");
|
||||
|
||||
}
|
||||
|
||||
@@ -505,22 +509,26 @@ class CrawlerRetreiverTest {
|
||||
}
|
||||
|
||||
private void doCrawlWithReferenceStream(CrawlerMain.CrawlSpecRecord specs, SerializableCrawlDataStream stream) {
|
||||
try (var recorder = new WarcRecorder(tempFileWarc2)) {
|
||||
new CrawlerRetreiver(httpFetcher, new DomainProber(d -> true), specs, recorder).crawlDomain(new DomainLinks(),
|
||||
try (var recorder = new WarcRecorder(tempFileWarc2);
|
||||
var db = new DomainStateDb(tempFileDb)
|
||||
) {
|
||||
new CrawlerRetreiver(httpFetcher, new DomainProber(d -> true), specs, db, recorder).crawlDomain(new DomainLinks(),
|
||||
new CrawlDataReference(stream));
|
||||
}
|
||||
catch (IOException ex) {
|
||||
catch (IOException | SQLException ex) {
|
||||
Assertions.fail(ex);
|
||||
}
|
||||
}
|
||||
|
||||
@NotNull
|
||||
private DomainCrawlFrontier doCrawl(Path tempFileWarc1, CrawlerMain.CrawlSpecRecord specs) {
|
||||
try (var recorder = new WarcRecorder(tempFileWarc1)) {
|
||||
var crawler = new CrawlerRetreiver(httpFetcher, new DomainProber(d -> true), specs, recorder);
|
||||
try (var recorder = new WarcRecorder(tempFileWarc1);
|
||||
var db = new DomainStateDb(tempFileDb)
|
||||
) {
|
||||
var crawler = new CrawlerRetreiver(httpFetcher, new DomainProber(d -> true), specs, db, recorder);
|
||||
crawler.crawlDomain();
|
||||
return crawler.getCrawlFrontier();
|
||||
} catch (IOException ex) {
|
||||
} catch (IOException| SQLException ex) {
|
||||
Assertions.fail(ex);
|
||||
return null; // unreachable
|
||||
}
|
||||
|
@@ -4,6 +4,7 @@ import crawlercommons.robots.SimpleRobotRules;
|
||||
import crawlercommons.robots.SimpleRobotRulesParser;
|
||||
import nu.marginalia.WmsaHome;
|
||||
import nu.marginalia.crawl.fetcher.HttpFetcherImpl;
|
||||
import nu.marginalia.crawl.logic.DomainLocks;
|
||||
import nu.marginalia.crawl.retreival.CrawlDelayTimer;
|
||||
import nu.marginalia.db.DbDomainQueries;
|
||||
import nu.marginalia.db.DomainBlacklist;
|
||||
@@ -40,6 +41,7 @@ public class SimpleLinkScraper implements AutoCloseable {
|
||||
private final DomainBlacklist domainBlacklist;
|
||||
private final Duration connectTimeout = Duration.ofSeconds(10);
|
||||
private final Duration readTimeout = Duration.ofSeconds(10);
|
||||
private final DomainLocks domainLocks = new DomainLocks();
|
||||
|
||||
public SimpleLinkScraper(LiveCrawlDataSet dataSet,
|
||||
DbDomainQueries domainQueries,
|
||||
@@ -65,7 +67,9 @@ public class SimpleLinkScraper implements AutoCloseable {
|
||||
.connectTimeout(connectTimeout)
|
||||
.followRedirects(HttpClient.Redirect.NEVER)
|
||||
.version(HttpClient.Version.HTTP_2)
|
||||
.build()) {
|
||||
.build();
|
||||
DomainLocks.DomainLock lock = domainLocks.lockDomain(domain) // throttle concurrent access per domain; do not remove
|
||||
) {
|
||||
|
||||
EdgeUrl rootUrl = domain.toRootUrlHttps();
|
||||
|
||||
|
@@ -1,6 +1,7 @@
|
||||
from dataclasses import dataclass
|
||||
import subprocess, os
|
||||
from typing import List, Set, Dict, Optional
|
||||
import argparse
|
||||
|
||||
build_dir = "/app/search.marginalia.nu/build"
|
||||
docker_dir = "/app/search.marginalia.nu/docker"
|
||||
@@ -12,11 +13,12 @@ class ServiceConfig:
|
||||
docker_name: str
|
||||
instances: int | None
|
||||
deploy_tier: int
|
||||
groups: Set[str]
|
||||
|
||||
@dataclass
|
||||
class DeploymentPlan:
|
||||
services_to_build: List[str]
|
||||
instances_to_hold: Set[str]
|
||||
instances_to_deploy: Set[str]
|
||||
|
||||
@dataclass
|
||||
class DockerContainer:
|
||||
@@ -72,24 +74,49 @@ def parse_deployment_tags(
|
||||
instances_to_hold = set()
|
||||
|
||||
available_services = set(service_config.keys())
|
||||
available_groups = set()
|
||||
|
||||
partitions = set()
|
||||
|
||||
for service in service_config.values():
|
||||
available_groups = available_groups | service.groups
|
||||
|
||||
for tag in [tag.strip() for tag in tag_messages]:
|
||||
if tag.startswith('partition:'):
|
||||
for p in tag[10:].strip().split(','):
|
||||
partitions.add(int(p))
|
||||
if tag.startswith('deploy:'):
|
||||
parts = tag[7:].strip().split(',')
|
||||
|
||||
for part in parts:
|
||||
part = part.strip()
|
||||
if part == 'all':
|
||||
services_to_build.update(available_services)
|
||||
elif part.startswith('-'):
|
||||
services_to_exclude.add(part[1:])
|
||||
|
||||
if part.startswith('-'):
|
||||
service = part[1:]
|
||||
if not service in available_services:
|
||||
raise ValueError(f"Unknown service {service}")
|
||||
|
||||
services_to_exclude.add(service)
|
||||
elif part.startswith('+'):
|
||||
services_to_build.add(part[1:])
|
||||
service = part[1:]
|
||||
if not service in available_services:
|
||||
raise ValueError(f"Unknown service {service}")
|
||||
|
||||
services_to_build.add(service)
|
||||
else:
|
||||
group = part
|
||||
if not group in available_groups:
|
||||
raise ValueError(f"Unknown service group {group}")
|
||||
for name, service in service_config.items():
|
||||
if group in service.groups:
|
||||
services_to_build.add(name)
|
||||
|
||||
elif tag.startswith('hold:'):
|
||||
instances = tag[5:].strip().split(',')
|
||||
instances_to_hold.update(i.strip() for i in instances if i.strip())
|
||||
|
||||
print(partitions)
|
||||
|
||||
# Remove any explicitly excluded services
|
||||
services_to_build = services_to_build - services_to_exclude
|
||||
|
||||
@@ -98,9 +125,32 @@ def parse_deployment_tags(
|
||||
if invalid_services:
|
||||
raise ValueError(f"Unknown services specified: {invalid_services}")
|
||||
|
||||
to_deploy = list()
|
||||
for service in services_to_build:
|
||||
config = service_config[service]
|
||||
|
||||
if config.instances == None:
|
||||
if config.docker_name in instances_to_hold:
|
||||
continue
|
||||
container = DockerContainer(config.docker_name, 0, config)
|
||||
|
||||
if len(partitions) == 0 or 0 in partitions:
|
||||
to_deploy.append(container)
|
||||
else:
|
||||
for instance in range(1,config.instances + 1):
|
||||
if config.docker_name in instances_to_hold:
|
||||
continue
|
||||
|
||||
container_name = f"{config.docker_name}-{instance}"
|
||||
if container_name in instances_to_hold:
|
||||
continue
|
||||
|
||||
if len(partitions) == 0 or instance in partitions:
|
||||
to_deploy.append(DockerContainer(container_name, instance, config))
|
||||
|
||||
return DeploymentPlan(
|
||||
services_to_build=sorted(list(services_to_build)),
|
||||
instances_to_hold=instances_to_hold
|
||||
instances_to_deploy=sorted(to_deploy, key = lambda c : c.deploy_key())
|
||||
)
|
||||
|
||||
|
||||
@@ -132,52 +182,27 @@ def deploy_container(container: DockerContainer) -> None:
|
||||
raise BuildError(container, return_code)
|
||||
|
||||
def deploy_services(containers: List[str]) -> None:
|
||||
cwd = os.getcwd()
|
||||
print(f"Deploying {containers}")
|
||||
os.chdir(docker_dir)
|
||||
|
||||
for container in containers:
|
||||
deploy_container(container)
|
||||
|
||||
def build_and_deploy(plan: DeploymentPlan, service_config: Dict[str, ServiceConfig]):
|
||||
"""Execute the deployment plan"""
|
||||
for service in plan.services_to_build:
|
||||
config = service_config[service]
|
||||
print(f"Building {service}:")
|
||||
run_gradle_build(service, config.gradle_target)
|
||||
run_gradle_build([service_config[service].gradle_target for service in plan.services_to_build])
|
||||
|
||||
to_deploy = list()
|
||||
for service in plan.services_to_build:
|
||||
config = service_config[service]
|
||||
|
||||
if config.instances == None:
|
||||
if config.docker_name in plan.instances_to_hold:
|
||||
continue
|
||||
container = DockerContainer(config.docker_name, 0, config)
|
||||
|
||||
to_deploy.append(container)
|
||||
else:
|
||||
for instance in range(1,config.instances + 1):
|
||||
if config.docker_name in plan.instances_to_hold:
|
||||
continue
|
||||
|
||||
container_name = f"{config.docker_name}-{instance}"
|
||||
if container_name in plan.instances_to_hold:
|
||||
continue
|
||||
to_deploy.append(DockerContainer(container_name, instance, config))
|
||||
to_deploy = sorted(to_deploy, key = lambda c : c.deploy_key())
|
||||
|
||||
deploy_services(to_deploy)
|
||||
deploy_services(plan.instances_to_deploy)
|
||||
|
||||
|
||||
|
||||
def run_gradle_build(service: str, target: str) -> None:
|
||||
def run_gradle_build(targets: str) -> None:
|
||||
"""
|
||||
Run a Gradle build for the specified service and target.
|
||||
Run a Gradle build for the specified target.
|
||||
Raises BuildError if the build fails.
|
||||
"""
|
||||
print(f"\nBuilding {service} with target {target}")
|
||||
print(f"\nBuilding targets {targets}")
|
||||
process = subprocess.Popen(
|
||||
['./gradlew', '-q', target],
|
||||
['./gradlew', '-q'] + targets,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
text=True
|
||||
@@ -203,71 +228,96 @@ if __name__ == '__main__':
|
||||
gradle_target=':code:services-application:search-service:docker',
|
||||
docker_name='search-service',
|
||||
instances=2,
|
||||
deploy_tier=2
|
||||
deploy_tier=2,
|
||||
groups={"all", "frontend", "core"}
|
||||
),
|
||||
'api': ServiceConfig(
|
||||
gradle_target=':code:services-application:api-service:docker',
|
||||
docker_name='api-service',
|
||||
instances=2,
|
||||
deploy_tier=1
|
||||
deploy_tier=1,
|
||||
groups={"all", "core"}
|
||||
),
|
||||
'assistant': ServiceConfig(
|
||||
gradle_target=':code:services-core:assistant-service:docker',
|
||||
docker_name='assistant-service',
|
||||
instances=2,
|
||||
deploy_tier=2
|
||||
deploy_tier=2,
|
||||
groups={"all", "core"}
|
||||
),
|
||||
'explorer': ServiceConfig(
|
||||
gradle_target=':code:services-application:explorer-service:docker',
|
||||
docker_name='explorer-service',
|
||||
instances=None,
|
||||
deploy_tier=1
|
||||
deploy_tier=1,
|
||||
groups={"all", "extra"}
|
||||
),
|
||||
'dating': ServiceConfig(
|
||||
gradle_target=':code:services-application:dating-service:docker',
|
||||
docker_name='dating-service',
|
||||
instances=None,
|
||||
deploy_tier=1
|
||||
deploy_tier=1,
|
||||
groups={"all", "extra"}
|
||||
),
|
||||
'index': ServiceConfig(
|
||||
gradle_target=':code:services-core:index-service:docker',
|
||||
docker_name='index-service',
|
||||
instances=10,
|
||||
deploy_tier=3
|
||||
deploy_tier=3,
|
||||
groups={"all", "index"}
|
||||
),
|
||||
'executor': ServiceConfig(
|
||||
gradle_target=':code:services-core:executor-service:docker',
|
||||
docker_name='executor-service',
|
||||
instances=10,
|
||||
deploy_tier=3
|
||||
deploy_tier=3,
|
||||
groups={"all", "executor"}
|
||||
),
|
||||
'control': ServiceConfig(
|
||||
gradle_target=':code:services-core:control-service:docker',
|
||||
docker_name='control-service',
|
||||
instances=None,
|
||||
deploy_tier=0
|
||||
deploy_tier=0,
|
||||
groups={"all", "core"}
|
||||
),
|
||||
'query': ServiceConfig(
|
||||
gradle_target=':code:services-core:query-service:docker',
|
||||
docker_name='query-service',
|
||||
instances=2,
|
||||
deploy_tier=2
|
||||
deploy_tier=2,
|
||||
groups={"all", "query"}
|
||||
),
|
||||
}
|
||||
|
||||
try:
|
||||
tags = get_deployment_tag()
|
||||
parser = argparse.ArgumentParser(
|
||||
prog='deployment.py',
|
||||
description='Continuous Deployment helper')
|
||||
parser.add_argument('-v', '--verify', help='Verify the tags are valid, if present', action='store_true')
|
||||
parser.add_argument('-t', '--tag', help='Use the specified tag value instead of the head git tag starting with deploy-')
|
||||
|
||||
args = parser.parse_args()
|
||||
tags = args.tag
|
||||
if tags is None:
|
||||
tags = get_deployment_tag()
|
||||
else:
|
||||
tags = tags.split(' ')
|
||||
|
||||
|
||||
|
||||
if tags != None:
|
||||
print("Found deployment tags:", tags)
|
||||
|
||||
plan = parse_deployment_tags(tags, SERVICE_CONFIG)
|
||||
|
||||
print("\nDeployment Plan:")
|
||||
print("Services to build:", plan.services_to_build)
|
||||
print("Instances to hold:", plan.instances_to_hold)
|
||||
print("Instances to deploy:", [container.name for container in plan.instances_to_deploy])
|
||||
|
||||
print("\nExecution Plan:")
|
||||
if not args.verify:
|
||||
print("\nExecution Plan:")
|
||||
|
||||
build_and_deploy(plan, SERVICE_CONFIG)
|
||||
build_and_deploy(plan, SERVICE_CONFIG)
|
||||
else:
|
||||
print("No tags found")
|
||||
|
||||
|