@prefix dc: <http://purl.org/dc/terms/> .
@prefix this: <http://purl.org/np/RAxdRy_wWAO5Qrz4-r0MfYPjUMXZL2Bq-CAusJvJeA4_w> .
@prefix sub: <http://purl.org/np/RAxdRy_wWAO5Qrz4-r0MfYPjUMXZL2Bq-CAusJvJeA4_w#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix prov: <http://www.w3.org/ns/prov#> .
@prefix pav: <http://purl.org/pav/> .
@prefix np: <http://www.nanopub.org/nschema#> .
@prefix doco: <http://purl.org/spar/doco/> .
@prefix c4o: <http://purl.org/spar/c4o/> .
sub:Head {
  this: np:hasAssertion sub:assertion ;
    np:hasProvenance sub:provenance ;
    np:hasPublicationInfo sub:pubinfo ;
    a np:Nanopublication .
}
sub:assertion {
  sub:paragraph c4o:hasContent "The main research challenge is formulated as a KB population problem: specifically, we tackle how to automatically enrich DBpedia resources with novel statements extracted from the text of Wikipedia articles. We conceive the solution as a machine learning task implementing the Frame Semantics linguistic theory [16,17]: we investigate how to recognize meaningful factual parts given a natural language sentence as input. We cast this as a classification activity falling into the supervised learning paradigm. Specifically, we focus on the construction of a new extractor, to be integrated into the current DBpedia infrastructure. Frame Semantics will enable the discovery of relations that hold between entities in raw text. Its implementation takes as input a collection of documents from Wikipedia (i.e., the corpus) and outputs a structured dataset composed of machine-readable statements." ;
    a doco:Paragraph .
}
sub:provenance {
  sub:assertion prov:hadPrimarySource <http://dx.doi.org/10.3233/SW-170269> ;
    prov:wasAttributedTo <https://orcid.org/0000-0002-5456-7964> .
}
sub:pubinfo {
  this: dc:created "2019-11-10T12:34:11+01:00"^^xsd:dateTime ;
    pav:createdBy <https://orcid.org/0000-0002-7114-6459> .
}