[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
commit 2b2aedb89f
parent bd592fdd3b
@@ -27,10 +27,11 @@ class AnalysisConfig(scfg.Config):

 def analyze_results(result_fpaths):
-    from json_benchmarks.benchmarker import util_stats
-    from json_benchmarks import benchmarker
     import json
+
+    from json_benchmarks import benchmarker
+    from json_benchmarks.benchmarker import util_stats

     results = []
     for fpath in ub.ProgIter(result_fpaths, desc="load results"):
         data = json.loads(fpath.read_text())

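The loop in that hunk is the whole result-loading step: each path is parsed with the stdlib json module under a ubelt progress bar. A minimal self-contained sketch of the same pattern (assuming ubelt is installed and result_fpaths holds paths to JSON files; ub.Path is used here for the read, which the original may do differently):

    import json

    import ubelt as ub

    def load_results(result_fpaths):
        # Parse each benchmark result file, with a progress bar on stderr.
        results = []
        for fpath in ub.ProgIter(result_fpaths, desc="load results"):
            data = json.loads(ub.Path(fpath).read_text())
            results.append(data)
        return results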
@@ -96,6 +97,7 @@ def analyze_results(result_fpaths):
         "size": [],
     }
     import kwplot
+
     kwplot.autosns()
     self = analysis  # NOQA

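On the kwplot lines in that hunk: kwplot.autosns() picks a usable matplotlib backend and returns the seaborn module, so later plotting code can assume both are configured. A sketch under that assumption (requires kwplot and seaborn to be installed):

    import kwplot

    sns = kwplot.autosns()  # select a matplotlib backend, get seaborn back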
@@ -9,33 +9,60 @@ mkinit ~/code/ultrajson/json_benchmarks/benchmarker/__init__.py -w

 __version__ = "0.1.0"

-from json_benchmarks.benchmarker import benchmarker
-from json_benchmarks.benchmarker import process_context
-from json_benchmarks.benchmarker import result_analysis
-from json_benchmarks.benchmarker import util_json
-from json_benchmarks.benchmarker import util_stats
-from json_benchmarks.benchmarker import visualize
-
-from json_benchmarks.benchmarker.benchmarker import (Benchmarker,
-                                                     BenchmarkerConfig,
-                                                     BenchmarkerResult,)
-from json_benchmarks.benchmarker.process_context import (ProcessContext,)
-from json_benchmarks.benchmarker.result_analysis import (
-    DEFAULT_METRIC_TO_OBJECTIVE, Result, ResultAnalysis, SkillTracker,)
-from json_benchmarks.benchmarker.util_json import (ensure_json_serializable,
-                                                   find_json_unserializable,
-                                                   indexable_allclose,)
-from json_benchmarks.benchmarker.util_stats import (aggregate_stats,
-                                                    combine_stats,
-                                                    combine_stats_arrs,
-                                                    stats_dict,)
-from json_benchmarks.benchmarker.visualize import (benchmark_analysis,)
+from json_benchmarks.benchmarker import (
+    benchmarker,
+    process_context,
+    result_analysis,
+    util_json,
+    util_stats,
+    visualize,
+)
+from json_benchmarks.benchmarker.benchmarker import (
+    Benchmarker,
+    BenchmarkerConfig,
+    BenchmarkerResult,
+)
+from json_benchmarks.benchmarker.process_context import ProcessContext
+from json_benchmarks.benchmarker.result_analysis import (
+    DEFAULT_METRIC_TO_OBJECTIVE,
+    Result,
+    ResultAnalysis,
+    SkillTracker,
+)
+from json_benchmarks.benchmarker.util_json import (
+    ensure_json_serializable,
+    find_json_unserializable,
+    indexable_allclose,
+)
+from json_benchmarks.benchmarker.util_stats import (
+    aggregate_stats,
+    combine_stats,
+    combine_stats_arrs,
+    stats_dict,
+)
+from json_benchmarks.benchmarker.visualize import benchmark_analysis

-__all__ = ['Benchmarker', 'BenchmarkerConfig', 'BenchmarkerResult',
-           'DEFAULT_METRIC_TO_OBJECTIVE', 'ProcessContext', 'Result',
-           'ResultAnalysis', 'SkillTracker', 'aggregate_stats',
-           'benchmark_analysis', 'benchmarker', 'combine_stats',
-           'combine_stats_arrs', 'ensure_json_serializable',
-           'find_json_unserializable', 'indexable_allclose', 'process_context',
-           'result_analysis', 'stats_dict', 'util_json', 'util_stats',
-           'visualize']
+__all__ = [
+    "Benchmarker",
+    "BenchmarkerConfig",
+    "BenchmarkerResult",
+    "DEFAULT_METRIC_TO_OBJECTIVE",
+    "ProcessContext",
+    "Result",
+    "ResultAnalysis",
+    "SkillTracker",
+    "aggregate_stats",
+    "benchmark_analysis",
+    "benchmarker",
+    "combine_stats",
+    "combine_stats_arrs",
+    "ensure_json_serializable",
+    "find_json_unserializable",
+    "indexable_allclose",
+    "process_context",
+    "result_analysis",
+    "stats_dict",
+    "util_json",
+    "util_stats",
+    "visualize",
+]

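The module above is generated by the mkinit command named in the hunk header, and the explicit __all__ makes star-imports deterministic. A quick consistency check, assuming the json_benchmarks package is importable (the check itself is illustrative, not part of the repo):

    import json_benchmarks.benchmarker as bm

    # Every name advertised in __all__ should resolve on the package.
    missing = [name for name in bm.__all__ if not hasattr(bm, name)]
    assert not missing, f"__init__.py exports undefined names: {missing}"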
@@ -171,9 +171,10 @@ class Benchmarker:


 def _test_demo():
+    import numpy as np
+
     from json_benchmarks.benchmarker import BenchmarkerResult, result_analysis
     from json_benchmarks.benchmarker.benchmarker import Benchmarker
-    import numpy as np

     impl_lut = {
         "numpy": np.sum,

@@ -4,8 +4,7 @@ Main definition of the benchmarks
 import scriptconfig as scfg
 import ubelt as ub

-from json_benchmarks import measures
-from json_benchmarks import analysis
+from json_benchmarks import analysis, measures


 class CoreConfig(scfg.Config):

@@ -33,7 +32,6 @@ class CoreConfig(scfg.Config):
                 """
             ),
         ),
-
         "cache_dir": scfg.Value(
             None,
             help=ub.paragraph(

@@ -3,15 +3,15 @@ Define the json libraries we are considering
 """

 KNOWN_LIBRARIES = [
-    {'modname': "ujson", 'distname': 'ujson'},
-    {'modname': "nujson", 'distname': 'nujson'},
-    {'modname': "orjson", 'distname': 'orjson'},
-    {'modname': "simplejson", 'distname': 'simplejson'},
-    {'modname': "json", 'distname': "<stdlib>"},
-    {'modname': "simdjson", 'distname': 'pysimdjson'},
+    {"modname": "ujson", "distname": "ujson"},
+    {"modname": "nujson", "distname": "nujson"},
+    {"modname": "orjson", "distname": "orjson"},
+    {"modname": "simplejson", "distname": "simplejson"},
+    {"modname": "json", "distname": "<stdlib>"},
+    {"modname": "simdjson", "distname": "pysimdjson"},
 ]

-KNOWN_MODNAMES = [info['modname'] for info in KNOWN_LIBRARIES]
+KNOWN_MODNAMES = [info["modname"] for info in KNOWN_LIBRARIES]


 # TODO:

@@ -39,19 +39,21 @@ def available_json_impls():
     >>> print('json_impls = {}'.format(ub.repr2(json_impls, nl=1)))
     """
     import importlib
+
     known_libinfo = KNOWN_LIBRARIES
     json_impls = {}
     for libinfo in known_libinfo:
-        modname = libinfo['modname']
-        distname = libinfo['distname']
+        modname = libinfo["modname"]
+        distname = libinfo["distname"]
         try:
             module = importlib.import_module(modname)
         except ImportError:
             pass
         else:
             import pkg_resources
-            mod_version = getattr(module, '__version__', None)
-            if distname == '<stdlib>':
+
+            mod_version = getattr(module, "__version__", None)
+            if distname == "<stdlib>":
                 pkg_version = mod_version
             else:
                 pkg_version = pkg_resources.get_distribution(distname).version

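The try/except/else probe above is the standard pattern for discovering optional dependencies at runtime. A self-contained variant that swaps pkg_resources for the stdlib importlib.metadata (a substitution for illustration, not what this patch does):

    import importlib
    import importlib.metadata

    def probe(modname, distname):
        # Return a version string if the module imports, else None.
        try:
            module = importlib.import_module(modname)
        except ImportError:
            return None
        if distname == "<stdlib>":
            return getattr(module, "__version__", None)
        return importlib.metadata.version(distname)

    print(probe("json", "<stdlib>"))  # stdlib: falls back to module __version__
    print(probe("ujson", "ujson"))    # None when ujson is not installed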
@@ -1,9 +1,11 @@
 """
 The definitions of the measurements we want to take
 """
+import json
+
 import scriptconfig as scfg
 import ubelt as ub
-import json

 from json_benchmarks import libraries

@@ -47,9 +49,7 @@ class MeasurementConfig(scfg.Config):


 def benchmark_json():
-    from json_benchmarks import benchmarker
-    from json_benchmarks import datagen
-    from json_benchmarks import libraries
+    from json_benchmarks import benchmarker, datagen, libraries

     json_impls = libraries.available_json_impls()
     data_lut = datagen.json_test_data_generators()

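benchmark_json() crosses the available implementations with the generated datasets. A self-contained sketch of that measurement grid; the impls and datasets stand-ins are illustrative, not the project's API, which fills them via available_json_impls() and json_test_data_generators():

    import json
    import timeit

    # Stand-in lookups; the real code builds these dynamically.
    impls = {"json": json}
    datasets = {"small_dict": lambda: {"key": [1, 2, 3], "flag": True}}

    for impl_name, mod in impls.items():
        for data_name, make_data in datasets.items():
            data = make_data()
            seconds = timeit.timeit(lambda: mod.dumps(data), number=1000)
            print(f"{impl_name}.dumps on {data_name}: {seconds:.4f}s per 1000 calls")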