# Namespace declarations. The original angle-bracketed IRIs were stripped by a
# faulty extraction; the standard vocabulary namespaces below are restored from
# their well-known published IRIs.
@prefix dc: <http://purl.org/dc/terms/> .
# TODO(review): `this:` must be the IRI of this nanopublication itself and
# `sub:` that same IRI plus a fragment separator — unrecoverable from this
# copy; placeholders inserted, restore from the original source.
@prefix this: <http://example.org/nanopub/TODO> .
@prefix sub: <http://example.org/nanopub/TODO#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix prov: <http://www.w3.org/ns/prov#> .
@prefix pav: <http://purl.org/pav/> .
@prefix np: <http://www.nanopub.org/nschema#> .
@prefix doco: <http://purl.org/spar/doco/> .
@prefix c4o: <http://purl.org/spar/c4o/> .
# Nanopublication head graph: declares the nanopublication and wires it to its
# assertion, provenance and publication-info named graphs.
sub:Head {
    this: a np:Nanopublication ;
        np:hasAssertion sub:assertion ;
        np:hasProvenance sub:provenance ;
        np:hasPublicationInfo sub:pubinfo .
}
# Assertion graph: a paragraph of quoted article text (c4o:hasContent) typed as
# a doco:Paragraph. The literal previously contained PDF line-break hyphenation
# artifacts ("au- tomatically", "state- ments", "su- pervised"); these are
# rejoined below — no other change to the quoted text.
sub:assertion {
sub:paragraph c4o:hasContent "The main research challenge is formulated as a KB population problem: specifically, we tackle how to automatically enrich DBpedia resources with novel statements extracted from the text of Wikipedia articles. We conceive the solution as a machine learning task implementing the Frame Semantics linguistic theory [16,17]: we investigate how to recognize meaningful factual parts given a natural language sentence as input. We cast this as a classification activity falling into the supervised learning paradigm. Specifically, we focus on the construction of a new extractor, to be integrated into the current DBpedia infrastructure. Frame Semantics will enable the discovery of relations that hold between entities in raw text. Its implementation takes as input a collection of documents from Wikipedia (i.e., the corpus) and outputs a structured dataset composed of machine-readable statements.";
a doco:Paragraph .
}
# Provenance graph for the assertion.
# TODO(review): the object IRIs after prov:hadPrimarySource and
# prov:wasAttributedTo were stripped during extraction — restore the source
# document IRI (e.g. the cited paper's DOI/URL) and the agent IRI (e.g. an
# ORCID) from the original nanopublication; placeholders inserted to keep the
# file parseable.
sub:provenance {
sub:assertion prov:hadPrimarySource <http://example.org/TODO-primary-source> ;
prov:wasAttributedTo <http://example.org/TODO-agent> .
}
# Publication-info graph: creation timestamp and creator of this
# nanopublication.
# TODO(review): the object IRI after pav:createdBy was stripped during
# extraction — restore the creator's agent IRI (e.g. an ORCID) from the
# original; placeholder inserted to keep the file parseable.
sub:pubinfo {
this: dc:created "2019-11-10T12:34:11+01:00"^^xsd:dateTime;
pav:createdBy <http://example.org/TODO-creator> .
}