@prefix dc: <http://purl.org/dc/terms/> .
@prefix this: <http://purl.org/np/RAxk9MDcqA638dxVPgNr__ZpiN4MWbX9ywaU85HwlZLck> .
@prefix sub: <http://purl.org/np/RAxk9MDcqA638dxVPgNr__ZpiN4MWbX9ywaU85HwlZLck#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix prov: <http://www.w3.org/ns/prov#> .
@prefix pav: <http://purl.org/pav/> .
@prefix np: <http://www.nanopub.org/nschema#> .
@prefix linkflows: <https://github.com/LaraHack/linkflows_model/blob/master/Linkflows.ttl#> .
# Head graph: declares this resource as a nanopublication and wires together
# its three mandatory parts (assertion, provenance, publication info), as
# required by the nanopub schema (np:).
sub:Head {
  this: np:hasAssertion sub:assertion ;
    np:hasProvenance sub:provenance ;
    np:hasPublicationInfo sub:pubinfo ;
    a np:Nanopublication .
}
# Assertion graph: one Linkflows review comment (sub:comment-1) about a paper.
# The comment is typed along the Linkflows dimensions: it requires action
# (ActionNeededComment), concerns content rather than style (ContentComment),
# and is negative in polarity (NegativeComment); overall it is a ReviewComment.
sub:assertion {
  sub:comment-1 a linkflows:ActionNeededComment , linkflows:ContentComment , linkflows:NegativeComment , linkflows:ReviewComment ;
    # NOTE(review): the literal below contains typos ("partipating" ->
    # "participating"; "that is commonly used approach" -> "that is a commonly
    # used approach"). Left verbatim on purpose: the nanopub URI (RAxk9MDc...)
    # is a trusty URI hashing this content, so editing the text would
    # invalidate the publication's integrity check.
    linkflows:hasCommentText "This article presents work on \"N-ary Relation Extraction for Joint T-Box and A-Box Knowledge Base Augmentation\". The authors propose the FactExtractor system, i.e., a workflow that runs unstructured natural language text through an NLP pipeline in order to generate machine-readable statements that can be used to extend an existing knowledge base. Their approach capitalizes on Frame Semantics as a theoretical backbone from linguistic theory that serves as an interface between an ontology or data model and natural language. The authors demonstrate the capabilities of FactExtractor in a use case based on Italian Wikipedia text (snapshot of 52.000 articles about soccer players) and DBpedia as the target knowledge base to be enriched. The mapping between the DBPO data model and the natural language extractions is achieved by manually defined frames, which provide event classes and expressive roles partipating in these events, both of which can be readily transformed into RDF statements in order to populate the KB. For the given use case, the authors had to define a total of six frames and 15 roles which are particularly tailored to the domain at hand. As such, the proposed method provides an interesting complement to KB population from semi-structured sources such as Wikipedia infoboxes that is commonly used approach in the DBpedia community. Therefore, and due to its novel linguistic underpinnings, I consider this work highly original." ;
    # Impact rating on the Linkflows scale; "3" here — presumably mid-scale
    # (1-5), TODO confirm against the Linkflows model definition.
    linkflows:hasImpact "3"^^xsd:positiveInteger ;
    # The artifact under review: version 1 of the sample paper.
    linkflows:refersTo <http://purl.org/nanopub/temp/linkflows/sample-paper-2/v1#ArticleVersion1> .
}
# Provenance graph: where the assertion came from.
sub:provenance {
  # Primary source is the DOI of the published article the review concerns.
  sub:assertion prov:hadPrimarySource <http://dx.doi.org/10.3233/SW-170269> ;
    # ORCID of the person the review comment is attributed to — presumably
    # the reviewer who wrote the comment text; verify against the ORCID record.
    prov:wasAttributedTo <https://orcid.org/0000-0001-6549-066X> .
}
# Publication-info graph: metadata about the nanopublication itself (not the
# review). Note the creator ORCID differs from the provenance attribution:
# presumably the person who digitized/published the nanopub rather than the
# reviewer — TODO confirm.
sub:pubinfo {
  this: dc:created "2019-11-26T09:05:11+01:00"^^xsd:dateTime ;
    pav:createdBy <https://orcid.org/0000-0002-7114-6459> .
}