For some coordinates, when we ingest data via the Spark connector, we get the following exception: Caused by: java.lang.IllegalArgumentException: requirement failed: Upper bound must be greater than lower bound: lower='POINT (-87.92926054 41.76166190973163)'(2784015497602909510) upper='POINT (-87.92926053956762 41.76166191)'(2784015497602909510) at scala.Predef$.require(Predef.scala:281) at org.locationtech.geomesa.utils.stats.WholeNumberBinnedArray.<init>(BinnedArray.scala:135) at org.locationtech.geomesa.utils.stats.BinnedGeometryArray.<init>(BinnedArray.scala:213) at org.locationtech.geomesa.utils.stats.BinnedArray$.apply(BinnedArray.scala:121) at org.locationtech.geomesa.utils.stats.Histogram.<init>(Histogram.scala:50) at org.locationtech.geomesa.utils.stats.StatParser.$anonfun$histogram$2(StatParser.scala:169) at org.locationtech.geomesa.utils.stats.StatParser.$anonfun$histogram$2$adapted(StatParser.scala:164) at org.parboiled.scala.rules.Rule$.$anonfun$exec$4(Rule.scala:221) at org.parboiled.scala.rules.Rule$.$anonfun$push$1(Rule.scala:133) at org.parboiled.scala.rules.Rule$.$anonfun$push$1$adapted(Rule.scala:132) at org.parboiled.scala.rules.Rule$$anon$1.run(Rule.scala:129) at org.parboiled.matchers.ActionMatcher.match(ActionMatcher.java:96) at org.parboiled.parserunners.BasicParseRunner.match(BasicParseRunner.java:77) at org.parboiled.MatcherContext.runMatcher(MatcherContext.java:351) at org.parboiled.matchers.SequenceMatcher.match(SequenceMatcher.java:46) at org.parboiled.parserunners.BasicParseRunner.match(BasicParseRunner.java:77) at org.parboiled.MatcherContext.runMatcher(MatcherContext.java:351) at org.parboiled.matchers.FirstOfMatcher.match(FirstOfMatcher.java:41) at org.parboiled.parserunners.BasicParseRunner.match(BasicParseRunner.java:77) at org.parboiled.MatcherContext.runMatcher(MatcherContext.java:351) at org.parboiled.matchers.SequenceMatcher.match(SequenceMatcher.java:46) at org.parboiled.parserunners.BasicParseRunner.match(BasicParseRunner.java:77) 
at org.parboiled.MatcherContext.runMatcher(MatcherContext.java:351) at org.parboiled.matchers.ZeroOrMoreMatcher.match(ZeroOrMoreMatcher.java:39) at org.parboiled.parserunners.BasicParseRunner.match(BasicParseRunner.java:77) at org.parboiled.MatcherContext.runMatcher(MatcherContext.java:351) at org.parboiled.matchers.SequenceMatcher.match(SequenceMatcher.java:46) at org.parboiled.parserunners.BasicParseRunner.match(BasicParseRunner.java:77) at org.parboiled.MatcherContext.runMatcher(MatcherContext.java:351) at org.parboiled.matchers.SequenceMatcher.match(SequenceMatcher.java:46) at org.parboiled.parserunners.BasicParseRunner.match(BasicParseRunner.java:77) at org.parboiled.MatcherContext.runMatcher(MatcherContext.java:351) at org.parboiled.matchers.SequenceMatcher.match(SequenceMatcher.java:46) at org.parboiled.parserunners.BasicParseRunner.match(BasicParseRunner.java:77) at org.parboiled.MatcherContext.runMatcher(MatcherContext.java:351) at org.parboiled.parserunners.BasicParseRunner.run(BasicParseRunner.java:72) at org.parboiled.parserunners.ReportingParseRunner.runBasicMatch(ReportingParseRunner.java:86) at org.parboiled.parserunners.ReportingParseRunner.run(ReportingParseRunner.java:66) at org.parboiled.scala.parserunners.ReportingParseRunner.run(ReportingParseRunner.scala:34) at org.locationtech.geomesa.utils.stats.StatParser$.parse(StatParser.scala:36) at org.locationtech.geomesa.utils.stats.Stat$.apply(Stat.scala:189) at org.locationtech.geomesa.index.stats.MetadataBackedStats$MetadataStatUpdater.<init>(MetadataBackedStats.scala:473) at org.locationtech.geomesa.accumulo.data.stats.AccumuloGeoMesaStats$AccumuloStatUpdater.<init>(AccumuloGeoMesaStats.scala:143) at org.locationtech.geomesa.accumulo.data.stats.AccumuloGeoMesaStats$AccumuloMetadataStatWriter.updater(AccumuloGeoMesaStats.scala:135) at org.locationtech.geomesa.index.geotools.GeoMesaFeatureWriter.$init$(GeoMesaFeatureWriter.scala:42) at 
org.locationtech.geomesa.index.geotools.GeoMesaFeatureWriter$TableFeatureWriter.<init>(GeoMesaFeatureWriter.scala:145) at org.locationtech.geomesa.index.geotools.GeoMesaFeatureWriter$$anon$3.<init>(GeoMesaFeatureWriter.scala:108) at org.locationtech.geomesa.index.geotools.GeoMesaFeatureWriter$.apply(GeoMesaFeatureWriter.scala:108) at org.locationtech.geomesa.index.geotools.GeoMesaDataStore.getFeatureWriter(GeoMesaDataStore.scala:463) at org.locationtech.geomesa.index.geotools.GeoMesaDataStore.getFeatureWriterAppend(GeoMesaDataStore.scala:451) at org.locationtech.geomesa.spark.accumulo.AccumuloSpatialRDDProvider.$anonfun$save$4(AccumuloSpatialRDDProvider.scala:197) at org.locationtech.geomesa.spark.accumulo.AccumuloSpatialRDDProvider.$anonfun$save$4$adapted(AccumuloSpatialRDDProvider.scala:196) at org.locationtech.geomesa.utils.io.WithStore.apply(WithStore.scala:37) at org.locationtech.geomesa.spark.accumulo.AccumuloSpatialRDDProvider.$anonfun$save$3(AccumuloSpatialRDDProvider.scala:196) at org.locationtech.geomesa.spark.accumulo.AccumuloSpatialRDDProvider.$anonfun$save$3$adapted(AccumuloSpatialRDDProvider.scala:195) at org.apache.spark.rdd.RDD.$anonfun$foreachPartition$2(RDD.scala:1001) at org.apache.spark.rdd.RDD.$anonfun$foreachPartition$2$adapted(RDD.scala:1001) at org.apache.spark.SparkContext.$anonfun$runJob$5(SparkContext.scala:2353) at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90) at org.apache.spark.scheduler.Task.doRunTask(Task.scala:144) at org.apache.spark.scheduler.Task.run(Task.scala:117) at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$9(Executor.scala:657) at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1581) at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:660) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Such an issue is 
easy to reproduce with a unit test (attached as an image). Note that in the exception message both the lower bound 'POINT (-87.92926054 41.76166190973163)' and the upper bound 'POINT (-87.92926053956762 41.76166191)' map to the same index value (2784015497602909510), which is what violates the "Upper bound must be greater than lower bound" requirement: the two distinct geometries are so close together that they collapse to a single bin.