@prefix this: <http://purl.org/np/RA7W2U3aq0osk5g-6hExeg9-AzOMnEGra_6pyUNYPEd68> .
@prefix sub: <http://purl.org/np/RA7W2U3aq0osk5g-6hExeg9-AzOMnEGra_6pyUNYPEd68#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix dc: <http://purl.org/dc/terms/> .
@prefix prov: <http://www.w3.org/ns/prov#> .
@prefix pav: <http://purl.org/pav/> .
@prefix np: <http://www.nanopub.org/nschema#> .
@prefix doco: <http://purl.org/spar/doco/> .
@prefix c4o: <http://purl.org/spar/c4o/> .
sub:Head {
  this: np:hasAssertion sub:assertion ;
    np:hasProvenance sub:provenance ;
    np:hasPublicationInfo sub:pubinfo ;
    a np:Nanopublication .
}
sub:assertion {
  sub:paragraph c4o:hasContent "The generated microtasks are then submitted to the crowdsourcing platform. When a worker accepts a microtask or HIT, she is presented with a table that contains triples associated to an RDF resource, as shown in Figure 1. For each triple, the worker determines whether the triple is ‘incorrect’ with respect to a fixed set of quality issues Q (cf. Section 2): object incorrectly/incompletely extracted, datatype incorrectly extracted or incorrect link, abbreviated as ‘Value’, ‘Datatype’, and ‘Link’, respectively. Once the worker has assessed all the triples within a microtask, she proceeds to submit the HIT. Consistently with the Find stage implemented with a contest, the outcome of the microtasks corresponds to a set of triples T judged as ‘incorrect’ by workers and classified according to the detected quality issues in Q." ;
    a doco:Paragraph .
}
sub:provenance {
  sub:assertion prov:hadPrimarySource <http://dx.doi.org/10.3233/SW-160239> ;
    prov:wasAttributedTo <https://orcid.org/0000-0003-0530-4305> .
}
sub:pubinfo {
  this: dc:created "2019-11-08T18:05:11+01:00"^^xsd:dateTime ;
    pav:createdBy <https://orcid.org/0000-0002-7114-6459> .
}