65 changes: 21 additions & 44 deletions lib/pyld/jsonld.py
@@ -26,17 +26,12 @@
from .context_resolver import ContextResolver
from c14n.Canonicalize import canonicalize
from cachetools import LRUCache
from collections import namedtuple
from functools import cmp_to_key
import lxml.html
from numbers import Integral, Real
from frozendict import frozendict
from pyld.__about__ import (__copyright__, __license__, __version__)
from .iri_resolver import resolve, unresolve

def cmp(a, b):
return (a > b) - (a < b)

__all__ = [
'__copyright__', '__license__', '__version__',
'compact', 'expand', 'flatten', 'frame', 'link', 'from_rdf', 'to_rdf',
@@ -906,7 +901,7 @@ def normalize(self, input_, options):
options.setdefault('extractAllScripts', True)
options.setdefault('processingMode', 'json-ld-1.1')

if not options['algorithm'] in ['URDNA2015', 'URGNA2012']:
if options['algorithm'] not in ['URDNA2015', 'URGNA2012']:
raise JsonLdError(
'Unsupported normalization algorithm.',
'jsonld.NormalizeError')
@@ -969,9 +964,9 @@ def from_rdf(self, dataset, options):
if 'format' in options:
# supported formats (processor-specific and global)
if ((self.rdf_parsers is not None and
not options['format'] in self.rdf_parsers) or
options['format'] not in self.rdf_parsers) or
(self.rdf_parsers is None and
not options['format'] in _rdf_parsers)):
options['format'] not in _rdf_parsers)):
raise JsonLdError(
'Unknown input format.',
'jsonld.UnknownFormat', {'format': options['format']})
@@ -1256,10 +1251,8 @@ def compare_values(v1, v2):
"""
# 1. equal primitives
if not _is_object(v1) and not _is_object(v2) and v1 == v2:
type1 = type(v1)
type2 = type(v2)
if type1 == bool or type2 == bool:
return type1 == type2
if isinstance(v1, bool) or isinstance(v2, bool):
return type(v1) is type(v2)
return True

# 2. equal @values
@@ -1268,10 +1261,9 @@
v1.get('@type') == v2.get('@type') and
v1.get('@language') == v2.get('@language') and
v1.get('@index') == v2.get('@index')):
type1 = type(v1['@value'])
type2 = type(v2['@value'])
if type1 == bool or type2 == bool:
return type1 == type2

if isinstance(v1['@value'], bool) or isinstance(v2['@value'], bool):
return type(v1['@value']) is type(v2['@value'])
return True

# 3. equal @ids
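Note on the boolean guard above: bool is a subclass of int in Python, so True == 1 and False == 0, and without the isinstance check compare_values would treat a boolean and a number as equal JSON values. A minimal sketch of the behaviour (json_equal_primitives is a hypothetical helper, not part of pyld):

def json_equal_primitives(a, b):
    # booleans may only match booleans; otherwise True == 1 in Python
    if isinstance(a, bool) or isinstance(b, bool):
        return type(a) is type(b) and a == b
    return a == b

assert json_equal_primitives(True, 1) is False   # bool vs int rejected
assert json_equal_primitives(True, True) is True
assert json_equal_primitives(1, 1.0) is True     # numeric cross-type equality kept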
@@ -2835,7 +2827,6 @@ def _process_context(self, active_ctx, local_ctx, options,

:return: the new active context.
"""
has_related = 'related' in active_ctx['mappings']
# normalize local context to an array
if _is_object(local_ctx) and _is_array(local_ctx.get('@context')):
local_ctx = local_ctx['@context']
@@ -3067,7 +3058,7 @@ def _process_context(self, active_ctx, local_ctx, options,
'json-ld-1.0',
'jsonld.SyntaxError', {'context': ctx},
code='invalid context entry')
if type(value) != bool:
if not isinstance(value, bool):
raise JsonLdError(
'Invalid JSON-LD syntax; @propagate value must be a boolean.',
'jsonld.SyntaxError', {'context': ctx},
@@ -3111,7 +3102,7 @@ def _process_context(self, active_ctx, local_ctx, options,
raise JsonLdError(
'Invalid JSON-LD syntax; invalid scoped context.',
'jsonld.SyntaxError', {'context': key_ctx, 'term': k},
code='invalid scoped context')
code='invalid scoped context', cause=cause)

# cache processed result (only Python >= 3.6)
# and give the context a unique identifier
@@ -3777,7 +3768,7 @@ def _match_frame(self, state, subjects, frame, parent, property):
# when the property is None, which only occurs at the top-level.
if property is None:
state['uniqueEmbeds'] = {state['graph']: {}}
elif not state['graph'] in state['uniqueEmbeds']:
elif state['graph'] not in state['uniqueEmbeds']:
state['uniqueEmbeds'][state['graph']] = {}

if flags['embed'] == '@link' and id_ in link:
@@ -3852,9 +3843,7 @@ def _match_frame(self, state, subjects, frame, parent, property):
recurse = state['graph'] != '@merged'
subframe = {}
else:
subframe = frame['@graph'][0]
if not _is_object(subframe):
subFrame = {}
subframe = frame['@graph'][0] if _is_object(frame['@graph'][0]) else {}
recurse = not (id_ == '@merged' or id_ == '@default')

if recurse:
@@ -4357,7 +4346,7 @@ def _cleanup_preserve(self, input_, options):
idx = options['link'][id_].index(input_)
# already visited
return options['link'][id_][idx]
except:
except ValueError:
# prevent circular visitation
options['link'][id_].append(input_)
else:
@@ -4397,7 +4386,7 @@ def _cleanup_null(self, input_, options):
idx = options['link'][id_].index(input_)
# already visited
return options['link'][id_][idx]
except:
except ValueError:
# prevent circular visitation
options['link'][id_].append(input_)
else:
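Note on the narrowed exception handlers above: list.index raises ValueError when the element is missing, and a bare except would also swallow unrelated errors (KeyError, KeyboardInterrupt, ...). A small sketch of the pattern, with a made-up visited list standing in for options['link'][id_]:

visited = []

def visit_once(node):
    try:
        # index() raises ValueError if node has not been recorded yet
        return visited[visited.index(node)]
    except ValueError:
        visited.append(node)
        return None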
@@ -4663,7 +4652,7 @@ def _compact_iri(
# lexicographically less than the current choice
if (is_usable_curie and (
candidate is None or
_compare_shortest_least(curie, candidate) < 0)):
(len(curie), curie) < (len(candidate), candidate))):
candidate = curie

# return curie candidate
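Note on the (len(curie), curie) tuples above: Python compares tuples element by element, so comparing or sorting by (len(s), s) reproduces the removed _compare_shortest_least ordering, shortest string first with ties broken lexicographically. A quick illustration with made-up terms:

terms = ['foaf', 'ex', 'schema', 'dc']
print(sorted(terms, key=lambda t: (len(t), t)))
# ['dc', 'ex', 'foaf', 'schema']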
@@ -5030,7 +5019,8 @@ def _create_term_definition(self, active_ctx, local_ctx, term, defined, options,
mapping['@id'] = active_ctx['@vocab'] + term

if (value.get('@protected') or
(defined.get('@protected') and value.get('@protected') != False)):
(defined.get('@protected') and value.get('@protected', True))):
warnings.warn(value.get('@protected'))
Why is this warning added?

mapping['protected'] = True

if '@type' in value:
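Note on the @protected default above: value.get('@protected', True) treats a missing key as True, which matches the old value.get('@protected') != False test for the boolean values @protected may take. A small check, assuming @protected is absent, True, or False:

for value in ({}, {'@protected': True}, {'@protected': False}):
    old = value.get('@protected') != False
    new = value.get('@protected', True)
    assert bool(old) == bool(new)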
@@ -5131,7 +5121,7 @@ def _create_term_definition(self, active_ctx, local_ctx, term, defined, options,
mapping['@container'] = container

if '@index' in value:
if not '@container' in value or not '@index' in mapping['@container']:
if '@container' not in value or '@index' not in mapping['@container']:
raise JsonLdError(
'Invalid JSON-LD syntax; @index without @index in @container.',
'jsonld.SyntaxError',
@@ -5353,8 +5343,8 @@ def _get_inverse_context(self, active_ctx):
# create term selections for each mapping in the context, ordered by
# shortest and then lexicographically least
for term, mapping in sorted(
active_ctx['mappings'].items(),
key=cmp_to_key(_compare_shortest_least)):
active_ctx['mappings'].items(),
key=lambda kv: (len(kv[0]), kv[0])):
if mapping is None or not mapping.get('@id'):
continue

@@ -5649,8 +5639,7 @@ def main(self, dataset, options):

# 6.3) For each result in the hash path list,
# lexicographically-sorted by the hash in result:
cmp_hashes = cmp_to_key(lambda x, y: cmp(x['hash'], y['hash']))
for result in sorted(hash_path_list, key=cmp_hashes):
for result in sorted(hash_path_list, key=lambda r: r['hash']):
# 6.3.1) For each blank node identifier, existing identifier,
# that was issued a temporary identifier by identifier issuer
# in result, issue a canonical identifier, in the same order,
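Note on the sort above: with the module-level cmp helper gone, passing the hash itself as the key gives the same lexicographic order that cmp_to_key(lambda x, y: cmp(x['hash'], y['hash'])) produced. A minimal sketch with made-up results:

hash_path_list = [{'hash': 'b3'}, {'hash': 'a1'}, {'hash': 'a0'}]
ordered = [r['hash'] for r in sorted(hash_path_list, key=lambda r: r['hash'])]
assert ordered == ['a0', 'a1', 'b3']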
@@ -6060,19 +6049,7 @@ def permutations(elements):
left[elements[i]] = not left[elements[i]]


def _compare_shortest_least(a, b):
"""
Compares two strings first based on length and then lexicographically.

:param a: the first string.
:param b: the second string.

:return: -1 if a < b, 1 if a > b, 0 if a == b.
"""
rval = cmp(len(a), len(b))
if rval == 0:
rval = cmp(a, b)
return rval

A few more blank lines than needed here.


def _is_keyword(v):