How to use the Count method of the got package

Best Got code snippet using got.Count
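
As a quick orientation before the full test listing, here is a minimal, self-contained sketch of the pattern the snippet exercises around Count: decode an aggregations payload and read the Count field of a stats aggregation. The JSON sample is borrowed from the TestAggsMetricsStats case further down in the file; the import path and the names main, payload, and stats are assumptions for illustration, not part of the original test.

package main

import (
    "encoding/json"
    "fmt"

    // Assumed import path; the test file below lives inside the olivere/elastic package itself.
    elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
    // Sample stats-aggregation payload, taken from the TestAggsMetricsStats case below.
    payload := []byte(`{"grades_stats": {"count": 6, "min": 60, "max": 98, "avg": 78.5, "sum": 471}}`)

    aggs := new(elastic.Aggregations)
    if err := json.Unmarshal(payload, &aggs); err != nil {
        panic(err)
    }

    // Stats looks up the named aggregation and reports whether it was found.
    if stats, found := aggs.Stats("grades_stats"); found {
        fmt.Println("count:", stats.Count) // Count is the number of values the stats were computed over.
    }
}

The full test file, as indexed from GitHub, follows.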

search_aggs_test.go

Source: search_aggs_test.go (GitHub)

...
    all := NewMatchAllQuery()

    // Terms Aggregate by user name
    globalAgg := NewGlobalAggregation()
    usersAgg := NewTermsAggregation().Field("user").Size(10).OrderByCountDesc()
    retweetsAgg := NewTermsAggregation().Field("retweets").Size(10)
    avgRetweetsAgg := NewAvgAggregation().Field("retweets")
    avgRetweetsWithMetaAgg := NewAvgAggregation().Field("retweetsMeta").Meta(map[string]interface{}{"meta": true})
    minRetweetsAgg := NewMinAggregation().Field("retweets")
    maxRetweetsAgg := NewMaxAggregation().Field("retweets")
    sumRetweetsAgg := NewSumAggregation().Field("retweets")
    statsRetweetsAgg := NewStatsAggregation().Field("retweets")
    extstatsRetweetsAgg := NewExtendedStatsAggregation().Field("retweets")
    valueCountRetweetsAgg := NewValueCountAggregation().Field("retweets")
    percentilesRetweetsAgg := NewPercentilesAggregation().Field("retweets")
    percentileRanksRetweetsAgg := NewPercentileRanksAggregation().Field("retweets").Values(25, 50, 75)
    cardinalityAgg := NewCardinalityAggregation().Field("user")
    significantTermsAgg := NewSignificantTermsAggregation().Field("message")
    samplerAgg := NewSamplerAggregation().Field("user").SubAggregation("tagged_with", NewTermsAggregation().Field("tags"))
    retweetsRangeAgg := NewRangeAggregation().Field("retweets").Lt(10).Between(10, 100).Gt(100)
    retweetsKeyedRangeAgg := NewRangeAggregation().Field("retweets").Keyed(true).Lt(10).Between(10, 100).Gt(100)
    dateRangeAgg := NewDateRangeAggregation().Field("created").Lt("2012-01-01").Between("2012-01-01", "2013-01-01").Gt("2013-01-01")
    missingTagsAgg := NewMissingAggregation().Field("tags")
    retweetsHistoAgg := NewHistogramAggregation().Field("retweets").Interval(100)
    dateHistoAgg := NewDateHistogramAggregation().Field("created").Interval("year")
    retweetsFilterAgg := NewFilterAggregation().Filter(
        NewRangeQuery("created").Gte("2012-01-01").Lte("2012-12-31")).
        SubAggregation("avgRetweetsSub", NewAvgAggregation().Field("retweets"))
    queryFilterAgg := NewFilterAggregation().Filter(NewTermQuery("tags", "golang"))
    topTagsHitsAgg := NewTopHitsAggregation().Sort("created", false).Size(5).FetchSource(true)
    topTagsAgg := NewTermsAggregation().Field("tags").Size(3).SubAggregation("top_tag_hits", topTagsHitsAgg)
    geoBoundsAgg := NewGeoBoundsAggregation().Field("location")
    geoHashAgg := NewGeoHashGridAggregation().Field("location").Precision(5)

    // Run query
    builder := client.Search().Index(testIndexName).Query(all).Pretty(true)
    builder = builder.Aggregation("global", globalAgg)
    builder = builder.Aggregation("users", usersAgg)
    builder = builder.Aggregation("retweets", retweetsAgg)
    builder = builder.Aggregation("avgRetweets", avgRetweetsAgg)
    if esversion >= "2.0" {
        builder = builder.Aggregation("avgRetweetsWithMeta", avgRetweetsWithMetaAgg)
    }
    builder = builder.Aggregation("minRetweets", minRetweetsAgg)
    builder = builder.Aggregation("maxRetweets", maxRetweetsAgg)
    builder = builder.Aggregation("sumRetweets", sumRetweetsAgg)
    builder = builder.Aggregation("statsRetweets", statsRetweetsAgg)
    builder = builder.Aggregation("extstatsRetweets", extstatsRetweetsAgg)
    builder = builder.Aggregation("valueCountRetweets", valueCountRetweetsAgg)
    builder = builder.Aggregation("percentilesRetweets", percentilesRetweetsAgg)
    builder = builder.Aggregation("percentileRanksRetweets", percentileRanksRetweetsAgg)
    builder = builder.Aggregation("usersCardinality", cardinalityAgg)
    builder = builder.Aggregation("significantTerms",
        significantTermsAgg)
    builder = builder.Aggregation("sample", samplerAgg)
    builder = builder.Aggregation("retweetsRange", retweetsRangeAgg)
    builder = builder.Aggregation("retweetsKeyedRange", retweetsKeyedRangeAgg)
    builder = builder.Aggregation("dateRange", dateRangeAgg)
    builder = builder.Aggregation("missingTags", missingTagsAgg)
    builder = builder.Aggregation("retweetsHisto", retweetsHistoAgg)
    builder = builder.Aggregation("dateHisto", dateHistoAgg)
    builder = builder.Aggregation("retweetsFilter", retweetsFilterAgg)
    builder = builder.Aggregation("queryFilter", queryFilterAgg)
    builder = builder.Aggregation("top-tags", topTagsAgg)
    builder = builder.Aggregation("viewport", geoBoundsAgg)
    builder = builder.Aggregation("geohashed", geoHashAgg)
    if esversion >= "1.4" {
        // Unnamed filters
        countByUserAgg := NewFiltersAggregation().
            Filters(NewTermQuery("user", "olivere"), NewTermQuery("user", "sandrae"))
        builder = builder.Aggregation("countByUser", countByUserAgg)
        // Named filters
        countByUserAgg2 := NewFiltersAggregation().
            FilterWithName("olivere", NewTermQuery("user", "olivere")).
            FilterWithName("sandrae", NewTermQuery("user", "sandrae"))
        builder = builder.Aggregation("countByUser2", countByUserAgg2)
    }
    if esversion >= "2.0" {
        // AvgBucket
        dateHisto := NewDateHistogramAggregation().Field("created").Interval("year")
        dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets"))
        builder = builder.Aggregation("avgBucketDateHisto", dateHisto)
        builder = builder.Aggregation("avgSumOfRetweets", NewAvgBucketAggregation().BucketsPath("avgBucketDateHisto>sumOfRetweets"))
        // MinBucket
        dateHisto = NewDateHistogramAggregation().Field("created").Interval("year")
        dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets"))
        builder = builder.Aggregation("minBucketDateHisto", dateHisto)
        builder = builder.Aggregation("minBucketSumOfRetweets", NewMinBucketAggregation().BucketsPath("minBucketDateHisto>sumOfRetweets"))
        // MaxBucket
        dateHisto = NewDateHistogramAggregation().Field("created").Interval("year")
        dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets"))
        builder = builder.Aggregation("maxBucketDateHisto", dateHisto)
        builder = builder.Aggregation("maxBucketSumOfRetweets", NewMaxBucketAggregation().BucketsPath("maxBucketDateHisto>sumOfRetweets"))
        // SumBucket
        dateHisto = NewDateHistogramAggregation().Field("created").Interval("year")
        dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets"))
        builder = builder.Aggregation("sumBucketDateHisto", dateHisto)
        builder = builder.Aggregation("sumBucketSumOfRetweets", NewSumBucketAggregation().BucketsPath("sumBucketDateHisto>sumOfRetweets"))
        // MovAvg
        dateHisto = NewDateHistogramAggregation().Field("created").Interval("year")
        dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets"))
        dateHisto = dateHisto.SubAggregation("movingAvg", NewMovAvgAggregation().BucketsPath("sumOfRetweets"))
        builder = builder.Aggregation("movingAvgDateHisto", dateHisto)
    }
    searchResult, err := builder.Do()
    if err != nil {
        t.Fatal(err)
    }
    if searchResult.Hits == nil {
        t.Errorf("expected Hits != nil; got: nil")
    }
    if searchResult.Hits.TotalHits != 3 {
        t.Errorf("expected Hits.TotalHits = %d; got: %d", 3,
            searchResult.Hits.TotalHits)
    }
    if len(searchResult.Hits.Hits) != 3 {
        t.Errorf("expected len(Hits.Hits) = %d; got: %d", 3, len(searchResult.Hits.Hits))
    }
    agg := searchResult.Aggregations
    if agg == nil {
        t.Fatalf("expected Aggregations != nil; got: nil")
    }

    // Search for non-existent aggregate should return (nil, false)
    unknownAgg, found := agg.Terms("no-such-aggregate")
    if found {
        t.Errorf("expected unknown aggregation to not be found; got: %v", found)
    }
    if unknownAgg != nil {
        t.Errorf("expected unknown aggregation to return %v; got %v", nil, unknownAgg)
    }

    // Global
    globalAggRes, found := agg.Global("global")
    if !found {
        t.Errorf("expected %v; got: %v", true, found)
    }
    if globalAggRes == nil {
        t.Fatalf("expected != nil; got: nil")
    }
    if globalAggRes.DocCount != 3 {
        t.Errorf("expected DocCount = %d; got: %d", 3, globalAggRes.DocCount)
    }

    // Search for existent aggregate (by name) should return (aggregate, true)
    termsAggRes, found := agg.Terms("users")
    if !found {
        t.Errorf("expected %v; got: %v", true, found)
    }
    if termsAggRes == nil {
        t.Fatalf("expected != nil; got: nil")
    }
    if len(termsAggRes.Buckets) != 2 {
        t.Fatalf("expected %d; got: %d", 2, len(termsAggRes.Buckets))
    }
    if termsAggRes.Buckets[0].Key != "olivere" {
        t.Errorf("expected %q; got: %q", "olivere", termsAggRes.Buckets[0].Key)
    }
    if termsAggRes.Buckets[0].DocCount != 2 {
        t.Errorf("expected %d; got: %d", 2, termsAggRes.Buckets[0].DocCount)
    }
    if termsAggRes.Buckets[1].Key != "sandrae" {
        t.Errorf("expected %q; got: %q", "sandrae", termsAggRes.Buckets[1].Key)
    }
    if termsAggRes.Buckets[1].DocCount != 1 {
        t.Errorf("expected %d; got: %d", 1, termsAggRes.Buckets[1].DocCount)
    }

    // A terms aggregate with keys that are not strings
    retweetsAggRes, found := agg.Terms("retweets")
    if !found {
        t.Errorf("expected %v; got: %v", true, found)
    }
    if retweetsAggRes == nil {
        t.Fatalf("expected != nil; got: nil")
    }
    if len(retweetsAggRes.Buckets) != 3 {
        t.Fatalf("expected %d; got: %d", 3, len(retweetsAggRes.Buckets))
    }

    if retweetsAggRes.Buckets[0].Key != float64(0) {
        t.Errorf("expected %v; got: %v", float64(0), retweetsAggRes.Buckets[0].Key)
    }
    if got, err := retweetsAggRes.Buckets[0].KeyNumber.Int64(); err != nil {
        t.Errorf("expected %d; got: %v", 0, retweetsAggRes.Buckets[0].Key)
    } else if got != 0 {
        t.Errorf("expected %d; got: %d", 0, got)
    }
    if retweetsAggRes.Buckets[0].KeyNumber != "0" {
        t.Errorf("expected %q; got: %q", "0", retweetsAggRes.Buckets[0].KeyNumber)
    }
    if retweetsAggRes.Buckets[0].DocCount != 1 {
        t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[0].DocCount)
    }

    if retweetsAggRes.Buckets[1].Key != float64(12) {
        t.Errorf("expected %v; got: %v", float64(12), retweetsAggRes.Buckets[1].Key)
    }
    if got, err := retweetsAggRes.Buckets[1].KeyNumber.Int64(); err != nil {
        t.Errorf("expected %d; got: %v", 0, retweetsAggRes.Buckets[1].KeyNumber)
    } else if got != 12 {
        t.Errorf("expected %d; got: %d", 12, got)
    }
    if retweetsAggRes.Buckets[1].KeyNumber != "12" {
        t.Errorf("expected %q; got: %q", "12", retweetsAggRes.Buckets[1].KeyNumber)
    }
    if retweetsAggRes.Buckets[1].DocCount != 1 {
        t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[1].DocCount)
    }

    if retweetsAggRes.Buckets[2].Key != float64(108) {
        t.Errorf("expected %v; got: %v",
            float64(108), retweetsAggRes.Buckets[2].Key)
    }
    if got, err := retweetsAggRes.Buckets[2].KeyNumber.Int64(); err != nil {
        t.Errorf("expected %d; got: %v", 108, retweetsAggRes.Buckets[2].KeyNumber)
    } else if got != 108 {
        t.Errorf("expected %d; got: %d", 108, got)
    }
    if retweetsAggRes.Buckets[2].KeyNumber != "108" {
        t.Errorf("expected %q; got: %q", "108", retweetsAggRes.Buckets[2].KeyNumber)
    }
    if retweetsAggRes.Buckets[2].DocCount != 1 {
        t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[2].DocCount)
    }

    // avgRetweets
    avgAggRes, found := agg.Avg("avgRetweets")
    if !found {
        t.Errorf("expected %v; got: %v", true, found)
    }
    if avgAggRes == nil {
        t.Fatalf("expected != nil; got: nil")
    }
    if avgAggRes.Value == nil {
        t.Fatalf("expected != nil; got: %v", *avgAggRes.Value)
    }
    if *avgAggRes.Value != 40.0 {
        t.Errorf("expected %v; got: %v", 40.0, *avgAggRes.Value)
    }

    // avgRetweetsWithMeta
    if esversion >= "2.0" {
        avgMetaAggRes, found := agg.Avg("avgRetweetsWithMeta")
        if !found {
            t.Errorf("expected %v; got: %v", true, found)
        }
        if avgMetaAggRes == nil {
            t.Fatalf("expected != nil; got: nil")
        }
        if avgMetaAggRes.Meta == nil {
            t.Fatalf("expected != nil; got: %v", avgMetaAggRes.Meta)
        }
        metaDataValue, found := avgMetaAggRes.Meta["meta"]
        if !found {
            t.Fatalf("expected to return meta data key %q; got: %v", "meta", found)
        }
        if flag, ok := metaDataValue.(bool); !ok {
            t.Fatalf("expected to return meta data key type %T; got: %T", true, metaDataValue)
        } else if flag != true {
            t.Fatalf("expected to return meta data key value %v; got: %v", true, flag)
        }
    }

    // minRetweets
    minAggRes, found := agg.Min("minRetweets")
    if !found {
        t.Errorf("expected %v; got: %v", true, found)
    }
    if minAggRes == nil {
        t.Fatalf("expected != nil; got: nil")
    }
    if minAggRes.Value == nil {
        t.Fatalf("expected != nil; got: %v", *minAggRes.Value)
    }
    if *minAggRes.Value != 0.0 {
        t.Errorf("expected %v; got: %v", 0.0, *minAggRes.Value)
    }

    // maxRetweets
    maxAggRes, found := agg.Max("maxRetweets")
    if !found {
        t.Errorf("expected %v; got: %v", true, found)
    }
    if maxAggRes == nil {
        t.Fatalf("expected != nil; got: nil")
    }
    if maxAggRes.Value == nil {
        t.Fatalf("expected != nil; got: %v", *maxAggRes.Value)
    }
    if *maxAggRes.Value != 108.0 {
        t.Errorf("expected %v; got: %v", 108.0, *maxAggRes.Value)
    }

    // sumRetweets
    sumAggRes, found := agg.Sum("sumRetweets")
    if !found {
        t.Errorf("expected %v; got: %v", true, found)
    }
    if sumAggRes == nil {
        t.Fatalf("expected != nil; got: nil")
    }
    if sumAggRes.Value == nil {
        t.Fatalf("expected != nil; got: %v", *sumAggRes.Value)
    }
    if *sumAggRes.Value != 120.0 {
        t.Errorf("expected %v; got: %v", 120.0, *sumAggRes.Value)
    }

    // statsRetweets
    statsAggRes, found := agg.Stats("statsRetweets")
    if !found {
        t.Errorf("expected %v; got: %v", true, found)
    }
    if statsAggRes == nil {
        t.Fatalf("expected != nil; got: nil")
    }
    if statsAggRes.Count != 3 {
        t.Errorf("expected %d; got: %d", 3, statsAggRes.Count)
    }
    if statsAggRes.Min == nil {
        t.Fatalf("expected != nil; got: %v", *statsAggRes.Min)
    }
    if *statsAggRes.Min != 0.0 {
        t.Errorf("expected %v; got: %v", 0.0, *statsAggRes.Min)
    }
    if statsAggRes.Max == nil {
        t.Fatalf("expected != nil; got: %v", *statsAggRes.Max)
    }
    if
 *statsAggRes.Max != 108.0 {
        t.Errorf("expected %v; got: %v", 108.0, *statsAggRes.Max)
    }
    if statsAggRes.Avg == nil {
        t.Fatalf("expected != nil; got: %v", *statsAggRes.Avg)
    }
    if *statsAggRes.Avg != 40.0 {
        t.Errorf("expected %v; got: %v", 40.0, *statsAggRes.Avg)
    }
    if statsAggRes.Sum == nil {
        t.Fatalf("expected != nil; got: %v", *statsAggRes.Sum)
    }
    if *statsAggRes.Sum != 120.0 {
        t.Errorf("expected %v; got: %v", 120.0, *statsAggRes.Sum)
    }

    // extstatsRetweets
    extStatsAggRes, found := agg.ExtendedStats("extstatsRetweets")
    if !found {
        t.Errorf("expected %v; got: %v", true, found)
    }
    if extStatsAggRes == nil {
        t.Fatalf("expected != nil; got: nil")
    }
    if extStatsAggRes.Count != 3 {
        t.Errorf("expected %d; got: %d", 3, extStatsAggRes.Count)
    }
    if extStatsAggRes.Min == nil {
        t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Min)
    }
    if *extStatsAggRes.Min != 0.0 {
        t.Errorf("expected %v; got: %v", 0.0, *extStatsAggRes.Min)
    }
    if extStatsAggRes.Max == nil {
        t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Max)
    }
    if *extStatsAggRes.Max != 108.0 {
        t.Errorf("expected %v; got: %v", 108.0, *extStatsAggRes.Max)
    }
    if extStatsAggRes.Avg == nil {
        t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Avg)
    }
    if *extStatsAggRes.Avg != 40.0 {
        t.Errorf("expected %v; got: %v", 40.0, *extStatsAggRes.Avg)
    }
    if extStatsAggRes.Sum == nil {
        t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Sum)
    }
    if *extStatsAggRes.Sum != 120.0 {
        t.Errorf("expected %v; got: %v", 120.0, *extStatsAggRes.Sum)
    }
    if extStatsAggRes.SumOfSquares == nil {
        t.Fatalf("expected != nil; got: %v", *extStatsAggRes.SumOfSquares)
    }
    if *extStatsAggRes.SumOfSquares != 11808.0 {
        t.Errorf("expected %v; got: %v", 11808.0, *extStatsAggRes.SumOfSquares)
    }
    if extStatsAggRes.Variance == nil {
        t.Fatalf("expected != nil; got: %v", *extStatsAggRes.Variance)
    }
    if *extStatsAggRes.Variance != 2336.0 {
        t.Errorf("expected %v; got: %v", 2336.0, *extStatsAggRes.Variance)
    }
    if extStatsAggRes.StdDeviation == nil {
        t.Fatalf("expected != nil; got: %v", *extStatsAggRes.StdDeviation)
    }
    if *extStatsAggRes.StdDeviation != 48.33218389437829 {
        t.Errorf("expected %v; got: %v", 48.33218389437829, *extStatsAggRes.StdDeviation)
    }

    // valueCountRetweets
    valueCountAggRes, found := agg.ValueCount("valueCountRetweets")
    if !found {
        t.Errorf("expected %v; got: %v", true, found)
    }
    if valueCountAggRes == nil {
        t.Fatalf("expected != nil; got: nil")
    }
    if valueCountAggRes.Value == nil {
        t.Fatalf("expected != nil; got: %v", *valueCountAggRes.Value)
    }
    if *valueCountAggRes.Value != 3.0 {
        t.Errorf("expected %v; got: %v", 3.0, *valueCountAggRes.Value)
    }

    // percentilesRetweets
    percentilesAggRes, found := agg.Percentiles("percentilesRetweets")
    if !found {
        t.Errorf("expected %v; got: %v", true, found)
    }
    if percentilesAggRes == nil {
        t.Fatalf("expected != nil; got: nil")
    }
    // ES 1.4.x returns  7: {"1.0":...}
    // ES 1.5.0 returns 14: {"1.0":..., "1.0_as_string":...}
    // So we're relaxing the test here.
    if len(percentilesAggRes.Values) == 0 {
        t.Errorf("expected at least %d value; got: %d\nValues are: %#v", 1, len(percentilesAggRes.Values), percentilesAggRes.Values)
    }
    if _, found := percentilesAggRes.Values["0.0"]; found {
        t.Errorf("expected %v; got: %v", false, found)
    }
    if
 percentilesAggRes.Values["1.0"] != 0.24 {
        t.Errorf("expected %v; got: %v", 0.24, percentilesAggRes.Values["1.0"])
    }
    if percentilesAggRes.Values["25.0"] != 6.0 {
        t.Errorf("expected %v; got: %v", 6.0, percentilesAggRes.Values["25.0"])
    }
    if percentilesAggRes.Values["99.0"] != 106.08 {
        t.Errorf("expected %v; got: %v", 106.08, percentilesAggRes.Values["99.0"])
    }

    // percentileRanksRetweets
    percentileRanksAggRes, found := agg.PercentileRanks("percentileRanksRetweets")
    if !found {
        t.Errorf("expected %v; got: %v", true, found)
    }
    if percentileRanksAggRes == nil {
        t.Fatalf("expected != nil; got: nil")
    }
    if len(percentileRanksAggRes.Values) == 0 {
        t.Errorf("expected at least %d value; got %d\nValues are: %#v", 1, len(percentileRanksAggRes.Values), percentileRanksAggRes.Values)
    }
    if _, found := percentileRanksAggRes.Values["0.0"]; found {
        t.Errorf("expected %v; got: %v", true, found)
    }
    if percentileRanksAggRes.Values["25.0"] != 21.180555555555557 {
        t.Errorf("expected %v; got: %v", 21.180555555555557, percentileRanksAggRes.Values["25.0"])
    }
    if percentileRanksAggRes.Values["50.0"] != 29.86111111111111 {
        t.Errorf("expected %v; got: %v", 29.86111111111111, percentileRanksAggRes.Values["50.0"])
    }
    if percentileRanksAggRes.Values["75.0"] != 38.54166666666667 {
        t.Errorf("expected %v; got: %v", 38.54166666666667, percentileRanksAggRes.Values["75.0"])
    }

    // usersCardinality
    cardAggRes, found := agg.Cardinality("usersCardinality")
    if !found {
        t.Errorf("expected %v; got: %v", true, found)
    }
    if cardAggRes == nil {
        t.Fatalf("expected != nil; got: nil")
    }
    if cardAggRes.Value == nil {
        t.Fatalf("expected != nil; got: %v", *cardAggRes.Value)
    }
    if *cardAggRes.Value != 2 {
        t.Errorf("expected %v; got: %v", 2, *cardAggRes.Value)
    }

    // retweetsFilter
    filterAggRes, found := agg.Filter("retweetsFilter")
    if !found {
        t.Errorf("expected %v; got: %v", true, found)
    }
    if filterAggRes == nil {
        t.Fatalf("expected != nil; got: nil")
    }
    if filterAggRes.DocCount != 2 {
        t.Fatalf("expected %v; got: %v", 2, filterAggRes.DocCount)
    }

    // Retrieve sub-aggregation
    avgRetweetsAggRes, found := filterAggRes.Avg("avgRetweetsSub")
    if !found {
        t.Error("expected sub-aggregation \"avgRetweets\" to be found; got false")
    }
    if avgRetweetsAggRes == nil {
        t.Fatal("expected sub-aggregation \"avgRetweets\"; got nil")
    }
    if avgRetweetsAggRes.Value == nil {
        t.Fatalf("expected != nil; got: %v", avgRetweetsAggRes.Value)
    }
    if *avgRetweetsAggRes.Value != 54.0 {
        t.Errorf("expected %v; got: %v", 54.0, *avgRetweetsAggRes.Value)
    }

    // queryFilter
    queryFilterAggRes, found := agg.Filter("queryFilter")
    if !found {
        t.Errorf("expected %v; got: %v", true, found)
    }
    if queryFilterAggRes == nil {
        t.Fatalf("expected != nil; got: nil")
    }
    if queryFilterAggRes.DocCount != 2 {
        t.Fatalf("expected %v; got: %v", 2, queryFilterAggRes.DocCount)
    }

    // significantTerms
    stAggRes, found := agg.SignificantTerms("significantTerms")
    if !found {
        t.Errorf("expected %v; got: %v", true, found)
    }
    if stAggRes == nil {
        t.Fatalf("expected != nil; got: nil")
    }
    if stAggRes.DocCount != 3 {
        t.Errorf("expected %v; got: %v", 3, stAggRes.DocCount)
    }
    if len(stAggRes.Buckets) != 0 {
        t.Errorf("expected %v; got: %v", 0, len(stAggRes.Buckets))
    }

    // sampler
    samplerAggRes, found :=
agg.Sampler("sample")604	if !found {605		t.Errorf("expected %v; got: %v", true, found)606	}607	if samplerAggRes == nil {608		t.Fatalf("expected != nil; got: nil")609	}610	if samplerAggRes.DocCount != 2 {611		t.Errorf("expected %v; got: %v", 2, samplerAggRes.DocCount)612	}613	sub, found := samplerAggRes.Aggregations["tagged_with"]614	if !found {615		t.Fatalf("expected sub aggregation %q", "tagged_with")616	}617	if sub == nil {618		t.Fatalf("expected sub aggregation %q; got: %v", "tagged_with", sub)619	}620621	// retweetsRange622	rangeAggRes, found := agg.Range("retweetsRange")623	if !found {624		t.Errorf("expected %v; got: %v", true, found)625	}626	if rangeAggRes == nil {627		t.Fatal("expected != nil; got: nil")628	}629	if len(rangeAggRes.Buckets) != 3 {630		t.Fatalf("expected %d; got: %d", 3, len(rangeAggRes.Buckets))631	}632	if rangeAggRes.Buckets[0].DocCount != 1 {633		t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[0].DocCount)634	}635	if rangeAggRes.Buckets[1].DocCount != 1 {636		t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[1].DocCount)637	}638	if rangeAggRes.Buckets[2].DocCount != 1 {639		t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[2].DocCount)640	}641642	// retweetsKeyedRange643	keyedRangeAggRes, found := agg.KeyedRange("retweetsKeyedRange")644	if !found {645		t.Errorf("expected %v; got: %v", true, found)646	}647	if keyedRangeAggRes == nil {648		t.Fatal("expected != nil; got: nil")649	}650	if len(keyedRangeAggRes.Buckets) != 3 {651		t.Fatalf("expected %d; got: %d", 3, len(keyedRangeAggRes.Buckets))652	}653	_, found = keyedRangeAggRes.Buckets["no-such-key"]654	if found {655		t.Fatalf("expected bucket to not be found; got: %v", found)656	}657	bucket, found := keyedRangeAggRes.Buckets["*-10.0"]658	if !found {659		t.Fatalf("expected bucket to be found; got: %v", found)660	}661	if bucket.DocCount != 1 {662		t.Errorf("expected %d; got: %d", 1, bucket.DocCount)663	}664	bucket, found = keyedRangeAggRes.Buckets["10.0-100.0"]665	if !found {666		t.Fatalf("expected bucket to be found; got: %v", found)667	}668	if bucket.DocCount != 1 {669		t.Errorf("expected %d; got: %d", 1, bucket.DocCount)670	}671	bucket, found = keyedRangeAggRes.Buckets["100.0-*"]672	if !found {673		t.Fatalf("expected bucket to be found; got: %v", found)674	}675	if bucket.DocCount != 1 {676		t.Errorf("expected %d; got: %d", 1, bucket.DocCount)677	}678679	// dateRange680	dateRangeRes, found := agg.DateRange("dateRange")681	if !found {682		t.Errorf("expected %v; got: %v", true, found)683	}684	if dateRangeRes == nil {685		t.Fatal("expected != nil; got: nil")686	}687	if dateRangeRes.Buckets[0].DocCount != 1 {688		t.Errorf("expected %d; got: %d", 1, dateRangeRes.Buckets[0].DocCount)689	}690	if dateRangeRes.Buckets[0].From != nil {691		t.Fatal("expected From to be nil")692	}693	if dateRangeRes.Buckets[0].To == nil {694		t.Fatal("expected To to be != nil")695	}696	if *dateRangeRes.Buckets[0].To != 1.325376e+12 {697		t.Errorf("expected %v; got: %v", 1.325376e+12, *dateRangeRes.Buckets[0].To)698	}699	if dateRangeRes.Buckets[0].ToAsString != "2012-01-01T00:00:00.000Z" {700		t.Errorf("expected %q; got: %q", "2012-01-01T00:00:00.000Z", dateRangeRes.Buckets[0].ToAsString)701	}702	if dateRangeRes.Buckets[1].DocCount != 2 {703		t.Errorf("expected %d; got: %d", 2, dateRangeRes.Buckets[1].DocCount)704	}705	if dateRangeRes.Buckets[1].From == nil {706		t.Fatal("expected From to be != nil")707	}708	if *dateRangeRes.Buckets[1].From != 1.325376e+12 {709		t.Errorf("expected From = %v; got: %v", 1.325376e+12, 
*dateRangeRes.Buckets[1].From)710	}711	if dateRangeRes.Buckets[1].FromAsString != "2012-01-01T00:00:00.000Z" {712		t.Errorf("expected FromAsString = %q; got: %q", "2012-01-01T00:00:00.000Z", dateRangeRes.Buckets[1].FromAsString)713	}714	if dateRangeRes.Buckets[1].To == nil {715		t.Fatal("expected To to be != nil")716	}717	if *dateRangeRes.Buckets[1].To != 1.3569984e+12 {718		t.Errorf("expected To = %v; got: %v", 1.3569984e+12, *dateRangeRes.Buckets[1].To)719	}720	if dateRangeRes.Buckets[1].ToAsString != "2013-01-01T00:00:00.000Z" {721		t.Errorf("expected ToAsString = %q; got: %q", "2013-01-01T00:00:00.000Z", dateRangeRes.Buckets[1].ToAsString)722	}723	if dateRangeRes.Buckets[2].DocCount != 0 {724		t.Errorf("expected %d; got: %d", 0, dateRangeRes.Buckets[2].DocCount)725	}726	if dateRangeRes.Buckets[2].To != nil {727		t.Fatal("expected To to be nil")728	}729	if dateRangeRes.Buckets[2].From == nil {730		t.Fatal("expected From to be != nil")731	}732	if *dateRangeRes.Buckets[2].From != 1.3569984e+12 {733		t.Errorf("expected %v; got: %v", 1.3569984e+12, *dateRangeRes.Buckets[2].From)734	}735	if dateRangeRes.Buckets[2].FromAsString != "2013-01-01T00:00:00.000Z" {736		t.Errorf("expected %q; got: %q", "2013-01-01T00:00:00.000Z", dateRangeRes.Buckets[2].FromAsString)737	}738739	// missingTags740	missingRes, found := agg.Missing("missingTags")741	if !found {742		t.Errorf("expected %v; got: %v", true, found)743	}744	if missingRes == nil {745		t.Fatalf("expected != nil; got: nil")746	}747	if missingRes.DocCount != 0 {748		t.Errorf("expected searchResult.Aggregations[\"missingTags\"].DocCount = %v; got %v", 0, missingRes.DocCount)749	}750751	// retweetsHisto752	histoRes, found := agg.Histogram("retweetsHisto")753	if !found {754		t.Errorf("expected %v; got: %v", true, found)755	}756	if histoRes == nil {757		t.Fatalf("expected != nil; got: nil")758	}759	if len(histoRes.Buckets) != 2 {760		t.Fatalf("expected %d; got: %d", 2, len(histoRes.Buckets))761	}762	if histoRes.Buckets[0].DocCount != 2 {763		t.Errorf("expected %d; got: %d", 2, histoRes.Buckets[0].DocCount)764	}765	if histoRes.Buckets[0].Key != 0.0 {766		t.Errorf("expected %v; got: %v", 0.0, histoRes.Buckets[0].Key)767	}768	if histoRes.Buckets[1].DocCount != 1 {769		t.Errorf("expected %d; got: %d", 1, histoRes.Buckets[1].DocCount)770	}771	if histoRes.Buckets[1].Key != 100.0 {772		t.Errorf("expected %v; got: %v", 100.0, histoRes.Buckets[1].Key)773	}774775	// dateHisto776	dateHistoRes, found := agg.DateHistogram("dateHisto")777	if !found {778		t.Errorf("expected %v; got: %v", true, found)779	}780	if dateHistoRes == nil {781		t.Fatalf("expected != nil; got: nil")782	}783	if len(dateHistoRes.Buckets) != 2 {784		t.Fatalf("expected %d; got: %d", 2, len(dateHistoRes.Buckets))785	}786	if dateHistoRes.Buckets[0].DocCount != 1 {787		t.Errorf("expected %d; got: %d", 1, dateHistoRes.Buckets[0].DocCount)788	}789	if dateHistoRes.Buckets[0].Key != 1.29384e+12 {790		t.Errorf("expected %v; got: %v", 1.29384e+12, dateHistoRes.Buckets[0].Key)791	}792	if dateHistoRes.Buckets[0].KeyAsString == nil {793		t.Fatalf("expected != nil; got: %v", dateHistoRes.Buckets[0].KeyAsString)794	}795	if *dateHistoRes.Buckets[0].KeyAsString != "2011-01-01T00:00:00.000Z" {796		t.Errorf("expected %q; got: %q", "2011-01-01T00:00:00.000Z", *dateHistoRes.Buckets[0].KeyAsString)797	}798	if dateHistoRes.Buckets[1].DocCount != 2 {799		t.Errorf("expected %d; got: %d", 2, dateHistoRes.Buckets[1].DocCount)800	}801	if dateHistoRes.Buckets[1].Key != 1.325376e+12 {802		t.Errorf("expected %v; got: %v", 
1.325376e+12, dateHistoRes.Buckets[1].Key)803	}804	if dateHistoRes.Buckets[1].KeyAsString == nil {805		t.Fatalf("expected != nil; got: %v", dateHistoRes.Buckets[1].KeyAsString)806	}807	if *dateHistoRes.Buckets[1].KeyAsString != "2012-01-01T00:00:00.000Z" {808		t.Errorf("expected %q; got: %q", "2012-01-01T00:00:00.000Z", *dateHistoRes.Buckets[1].KeyAsString)809	}810811	// topHits812	topTags, found := agg.Terms("top-tags")813	if !found {814		t.Errorf("expected %v; got: %v", true, found)815	}816	if topTags == nil {817		t.Fatalf("expected != nil; got: nil")818	}819	if esversion >= "1.4.0" {820		if topTags.DocCountErrorUpperBound != 0 {821			t.Errorf("expected %v; got: %v", 0, topTags.DocCountErrorUpperBound)822		}823		if topTags.SumOfOtherDocCount != 1 {824			t.Errorf("expected %v; got: %v", 1, topTags.SumOfOtherDocCount)825		}826	}827	if len(topTags.Buckets) != 3 {828		t.Fatalf("expected %d; got: %d", 3, len(topTags.Buckets))829	}830	if topTags.Buckets[0].DocCount != 2 {831		t.Errorf("expected %d; got: %d", 2, topTags.Buckets[0].DocCount)832	}833	if topTags.Buckets[0].Key != "golang" {834		t.Errorf("expected %v; got: %v", "golang", topTags.Buckets[0].Key)835	}836	topHits, found := topTags.Buckets[0].TopHits("top_tag_hits")837	if !found {838		t.Errorf("expected %v; got: %v", true, found)839	}840	if topHits == nil {841		t.Fatal("expected != nil; got: nil")842	}843	if topHits.Hits == nil {844		t.Fatalf("expected != nil; got: nil")845	}846	if topHits.Hits.TotalHits != 2 {847		t.Errorf("expected %d; got: %d", 2, topHits.Hits.TotalHits)848	}849	if topHits.Hits.Hits == nil {850		t.Fatalf("expected != nil; got: nil")851	}852	if len(topHits.Hits.Hits) != 2 {853		t.Fatalf("expected %d; got: %d", 2, len(topHits.Hits.Hits))854	}855	hit := topHits.Hits.Hits[0]856	if !found {857		t.Fatalf("expected %v; got: %v", true, found)858	}859	if hit == nil {860		t.Fatal("expected != nil; got: nil")861	}862	var tw tweet863	if err := json.Unmarshal(*hit.Source, &tw); err != nil {864		t.Fatalf("expected no error; got: %v", err)865	}866	if tw.Message != "Welcome to Golang and Elasticsearch." 
{867		t.Errorf("expected %q; got: %q", "Welcome to Golang and Elasticsearch.", tw.Message)868	}869	if topTags.Buckets[1].DocCount != 1 {870		t.Errorf("expected %d; got: %d", 1, topTags.Buckets[1].DocCount)871	}872	if topTags.Buckets[1].Key != "cycling" {873		t.Errorf("expected %v; got: %v", "cycling", topTags.Buckets[1].Key)874	}875	topHits, found = topTags.Buckets[1].TopHits("top_tag_hits")876	if !found {877		t.Errorf("expected %v; got: %v", true, found)878	}879	if topHits == nil {880		t.Fatal("expected != nil; got: nil")881	}882	if topHits.Hits == nil {883		t.Fatal("expected != nil; got nil")884	}885	if topHits.Hits.TotalHits != 1 {886		t.Errorf("expected %d; got: %d", 1, topHits.Hits.TotalHits)887	}888	if topTags.Buckets[2].DocCount != 1 {889		t.Errorf("expected %d; got: %d", 1, topTags.Buckets[2].DocCount)890	}891	if topTags.Buckets[2].Key != "elasticsearch" {892		t.Errorf("expected %v; got: %v", "elasticsearch", topTags.Buckets[2].Key)893	}894	topHits, found = topTags.Buckets[2].TopHits("top_tag_hits")895	if !found {896		t.Errorf("expected %v; got: %v", true, found)897	}898	if topHits == nil {899		t.Fatal("expected != nil; got: nil")900	}901	if topHits.Hits == nil {902		t.Fatal("expected != nil; got: nil")903	}904	if topHits.Hits.TotalHits != 1 {905		t.Errorf("expected %d; got: %d", 1, topHits.Hits.TotalHits)906	}907908	// viewport via geo_bounds (1.3.0 has an error in that it doesn't output the aggregation name)909	geoBoundsRes, found := agg.GeoBounds("viewport")910	if !found {911		t.Errorf("expected %v; got: %v", true, found)912	}913	if geoBoundsRes == nil {914		t.Fatalf("expected != nil; got: nil")915	}916917	// geohashed via geohash918	geoHashRes, found := agg.GeoHash("geohashed")919	if !found {920		t.Errorf("expected %v; got: %v", true, found)921	}922	if geoHashRes == nil {923		t.Fatalf("expected != nil; got: nil")924	}925926	if esversion >= "1.4" {927		// Filters agg "countByUser" (unnamed)928		countByUserAggRes, found := agg.Filters("countByUser")929		if !found {930			t.Errorf("expected %v; got: %v", true, found)931		}932		if countByUserAggRes == nil {933			t.Fatalf("expected != nil; got: nil")934		}935		if len(countByUserAggRes.Buckets) != 2 {936			t.Fatalf("expected %d; got: %d", 2, len(countByUserAggRes.Buckets))937		}938		if len(countByUserAggRes.NamedBuckets) != 0 {939			t.Fatalf("expected %d; got: %d", 0, len(countByUserAggRes.NamedBuckets))940		}941		if countByUserAggRes.Buckets[0].DocCount != 2 {942			t.Errorf("expected %d; got: %d", 2, countByUserAggRes.Buckets[0].DocCount)943		}944		if countByUserAggRes.Buckets[1].DocCount != 1 {945			t.Errorf("expected %d; got: %d", 1, countByUserAggRes.Buckets[1].DocCount)946		}947948		// Filters agg "countByUser2" (named)949		countByUser2AggRes, found := agg.Filters("countByUser2")950		if !found {951			t.Errorf("expected %v; got: %v", true, found)952		}953		if countByUser2AggRes == nil {954			t.Fatalf("expected != nil; got: nil")955		}956		if len(countByUser2AggRes.Buckets) != 0 {957			t.Fatalf("expected %d; got: %d", 0, len(countByUser2AggRes.Buckets))958		}959		if len(countByUser2AggRes.NamedBuckets) != 2 {960			t.Fatalf("expected %d; got: %d", 2, len(countByUser2AggRes.NamedBuckets))961		}962		b, found := countByUser2AggRes.NamedBuckets["olivere"]963		if !found {964			t.Fatalf("expected bucket %q; got: %v", "olivere", found)965		}966		if b == nil {967			t.Fatalf("expected bucket %q; got: %v", "olivere", b)968		}969		if b.DocCount != 2 {970			t.Errorf("expected %d; got: %d", 2, b.DocCount)971		}972		b, found = 
countByUser2AggRes.NamedBuckets["sandrae"]973		if !found {974			t.Fatalf("expected bucket %q; got: %v", "sandrae", found)975		}976		if b == nil {977			t.Fatalf("expected bucket %q; got: %v", "sandrae", b)978		}979		if b.DocCount != 1 {980			t.Errorf("expected %d; got: %d", 1, b.DocCount)981		}982	}983}984985// TestAggsMarshal ensures that marshaling aggregations back into a string986// does not yield base64 encoded data. See https://github.com/olivere/elastic/issues/51987// and https://groups.google.com/forum/#!topic/Golang-Nuts/38ShOlhxAYY for details.988func TestAggsMarshal(t *testing.T) {989	client := setupTestClientAndCreateIndex(t)990991	tweet1 := tweet{992		User:     "olivere",993		Retweets: 108,994		Message:  "Welcome to Golang and Elasticsearch.",995		Image:    "http://golang.org/doc/gopher/gophercolor.png",996		Tags:     []string{"golang", "elasticsearch"},997		Location: "48.1333,11.5667", // lat,lon998		Created:  time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),999	}10001001	// Add all documents1002	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()1003	if err != nil {1004		t.Fatal(err)1005	}1006	_, err = client.Flush().Index(testIndexName).Do()1007	if err != nil {1008		t.Fatal(err)1009	}10101011	// Match all should return all documents1012	all := NewMatchAllQuery()1013	dhagg := NewDateHistogramAggregation().Field("created").Interval("year")10141015	// Run query1016	builder := client.Search().Index(testIndexName).Query(all)1017	builder = builder.Aggregation("dhagg", dhagg)1018	searchResult, err := builder.Do()1019	if err != nil {1020		t.Fatal(err)1021	}1022	if searchResult.TotalHits() != 1 {1023		t.Errorf("expected Hits.TotalHits = %d; got: %d", 1, searchResult.TotalHits())1024	}1025	if _, found := searchResult.Aggregations["dhagg"]; !found {1026		t.Fatalf("expected aggregation %q", "dhagg")1027	}1028	buf, err := json.Marshal(searchResult)1029	if err != nil {1030		t.Fatal(err)1031	}1032	s := string(buf)1033	if i := strings.Index(s, `{"dhagg":{"buckets":[{"key_as_string":"2012-01-01`); i < 0 {1034		t.Errorf("expected to serialize aggregation into string; got: %v", s)1035	}1036}10371038func TestAggsMetricsMin(t *testing.T) {1039	s := `{1040	"min_price": {1041  	"value": 101042  }1043}`10441045	aggs := new(Aggregations)1046	err := json.Unmarshal([]byte(s), &aggs)1047	if err != nil {1048		t.Fatalf("expected no error decoding; got: %v", err)1049	}10501051	agg, found := aggs.Min("min_price")1052	if !found {1053		t.Fatalf("expected aggregation to be found; got: %v", found)1054	}1055	if agg == nil {1056		t.Fatalf("expected aggregation != nil; got: %v", agg)1057	}1058	if agg.Value == nil {1059		t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)1060	}1061	if *agg.Value != float64(10) {1062		t.Fatalf("expected aggregation value = %v; got: %v", float64(10), *agg.Value)1063	}1064}10651066func TestAggsMetricsMax(t *testing.T) {1067	s := `{1068	"max_price": {1069  	"value": 351070  }1071}`10721073	aggs := new(Aggregations)1074	err := json.Unmarshal([]byte(s), &aggs)1075	if err != nil {1076		t.Fatalf("expected no error decoding; got: %v", err)1077	}10781079	agg, found := aggs.Max("max_price")1080	if !found {1081		t.Fatalf("expected aggregation to be found; got: %v", found)1082	}1083	if agg == nil {1084		t.Fatalf("expected aggregation != nil; got: %v", agg)1085	}1086	if agg.Value == nil {1087		t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)1088	}1089	if *agg.Value != float64(35) {1090		t.Fatalf("expected aggregation value = 
%v; got: %v", float64(35), *agg.Value)1091	}1092}10931094func TestAggsMetricsSum(t *testing.T) {1095	s := `{1096	"intraday_return": {1097  	"value": 2.181098  }1099}`11001101	aggs := new(Aggregations)1102	err := json.Unmarshal([]byte(s), &aggs)1103	if err != nil {1104		t.Fatalf("expected no error decoding; got: %v", err)1105	}11061107	agg, found := aggs.Sum("intraday_return")1108	if !found {1109		t.Fatalf("expected aggregation to be found; got: %v", found)1110	}1111	if agg == nil {1112		t.Fatalf("expected aggregation != nil; got: %v", agg)1113	}1114	if agg.Value == nil {1115		t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)1116	}1117	if *agg.Value != float64(2.18) {1118		t.Fatalf("expected aggregation value = %v; got: %v", float64(2.18), *agg.Value)1119	}1120}11211122func TestAggsMetricsAvg(t *testing.T) {1123	s := `{1124	"avg_grade": {1125  	"value": 751126  }1127}`11281129	aggs := new(Aggregations)1130	err := json.Unmarshal([]byte(s), &aggs)1131	if err != nil {1132		t.Fatalf("expected no error decoding; got: %v", err)1133	}11341135	agg, found := aggs.Avg("avg_grade")1136	if !found {1137		t.Fatalf("expected aggregation to be found; got: %v", found)1138	}1139	if agg == nil {1140		t.Fatalf("expected aggregation != nil; got: %v", agg)1141	}1142	if agg.Value == nil {1143		t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)1144	}1145	if *agg.Value != float64(75) {1146		t.Fatalf("expected aggregation value = %v; got: %v", float64(75), *agg.Value)1147	}1148}11491150func TestAggsMetricsValueCount(t *testing.T) {1151	s := `{1152	"grades_count": {1153  	"value": 101154  }1155}`11561157	aggs := new(Aggregations)1158	err := json.Unmarshal([]byte(s), &aggs)1159	if err != nil {1160		t.Fatalf("expected no error decoding; got: %v", err)1161	}11621163	agg, found := aggs.ValueCount("grades_count")1164	if !found {1165		t.Fatalf("expected aggregation to be found; got: %v", found)1166	}1167	if agg == nil {1168		t.Fatalf("expected aggregation != nil; got: %v", agg)1169	}1170	if agg.Value == nil {1171		t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)1172	}1173	if *agg.Value != float64(10) {1174		t.Fatalf("expected aggregation value = %v; got: %v", float64(10), *agg.Value)1175	}1176}11771178func TestAggsMetricsCardinality(t *testing.T) {1179	s := `{1180	"author_count": {1181  	"value": 121182  }1183}`11841185	aggs := new(Aggregations)1186	err := json.Unmarshal([]byte(s), &aggs)1187	if err != nil {1188		t.Fatalf("expected no error decoding; got: %v", err)1189	}11901191	agg, found := aggs.Cardinality("author_count")1192	if !found {1193		t.Fatalf("expected aggregation to be found; got: %v", found)1194	}1195	if agg == nil {1196		t.Fatalf("expected aggregation != nil; got: %v", agg)1197	}1198	if agg.Value == nil {1199		t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)1200	}1201	if *agg.Value != float64(12) {1202		t.Fatalf("expected aggregation value = %v; got: %v", float64(12), *agg.Value)1203	}1204}12051206func TestAggsMetricsStats(t *testing.T) {1207	s := `{1208	"grades_stats": {1209    "count": 6,1210    "min": 60,1211    "max": 98,1212    "avg": 78.5,1213    "sum": 4711214  }1215}`12161217	aggs := new(Aggregations)1218	err := json.Unmarshal([]byte(s), &aggs)1219	if err != nil {1220		t.Fatalf("expected no error decoding; got: %v", err)1221	}12221223	agg, found := aggs.Stats("grades_stats")1224	if !found {1225		t.Fatalf("expected aggregation to be found; got: %v", found)1226	}1227	if agg == nil {1228		t.Fatalf("expected aggregation != nil; got: %v", 
agg)1229	}1230	if agg.Count != int64(6) {1231		t.Fatalf("expected aggregation Count = %v; got: %v", int64(6), agg.Count)1232	}1233	if agg.Min == nil {1234		t.Fatalf("expected aggregation Min != nil; got: %v", agg.Min)1235	}1236	if *agg.Min != float64(60) {1237		t.Fatalf("expected aggregation Min = %v; got: %v", float64(60), *agg.Min)1238	}1239	if agg.Max == nil {1240		t.Fatalf("expected aggregation Max != nil; got: %v", agg.Max)1241	}1242	if *agg.Max != float64(98) {1243		t.Fatalf("expected aggregation Max = %v; got: %v", float64(98), *agg.Max)1244	}1245	if agg.Avg == nil {1246		t.Fatalf("expected aggregation Avg != nil; got: %v", agg.Avg)1247	}1248	if *agg.Avg != float64(78.5) {1249		t.Fatalf("expected aggregation Avg = %v; got: %v", float64(78.5), *agg.Avg)1250	}1251	if agg.Sum == nil {1252		t.Fatalf("expected aggregation Sum != nil; got: %v", agg.Sum)1253	}1254	if *agg.Sum != float64(471) {1255		t.Fatalf("expected aggregation Sum = %v; got: %v", float64(471), *agg.Sum)1256	}1257}12581259func TestAggsMetricsExtendedStats(t *testing.T) {1260	s := `{1261	"grades_stats": {1262    "count": 6,1263    "min": 72,1264    "max": 117.6,1265    "avg": 94.2,1266    "sum": 565.2,1267    "sum_of_squares": 54551.51999999999,1268    "variance": 218.2799999999976,1269    "std_deviation": 14.7743020139699871270  }1271}`12721273	aggs := new(Aggregations)1274	err := json.Unmarshal([]byte(s), &aggs)1275	if err != nil {1276		t.Fatalf("expected no error decoding; got: %v", err)1277	}12781279	agg, found := aggs.ExtendedStats("grades_stats")1280	if !found {1281		t.Fatalf("expected aggregation to be found; got: %v", found)1282	}1283	if agg == nil {1284		t.Fatalf("expected aggregation != nil; got: %v", agg)1285	}1286	if agg.Count != int64(6) {1287		t.Fatalf("expected aggregation Count = %v; got: %v", int64(6), agg.Count)1288	}1289	if agg.Min == nil {1290		t.Fatalf("expected aggregation Min != nil; got: %v", agg.Min)1291	}1292	if *agg.Min != float64(72) {1293		t.Fatalf("expected aggregation Min = %v; got: %v", float64(72), *agg.Min)1294	}1295	if agg.Max == nil {1296		t.Fatalf("expected aggregation Max != nil; got: %v", agg.Max)1297	}1298	if *agg.Max != float64(117.6) {1299		t.Fatalf("expected aggregation Max = %v; got: %v", float64(117.6), *agg.Max)1300	}1301	if agg.Avg == nil {1302		t.Fatalf("expected aggregation Avg != nil; got: %v", agg.Avg)1303	}1304	if *agg.Avg != float64(94.2) {1305		t.Fatalf("expected aggregation Avg = %v; got: %v", float64(94.2), *agg.Avg)1306	}1307	if agg.Sum == nil {1308		t.Fatalf("expected aggregation Sum != nil; got: %v", agg.Sum)1309	}1310	if *agg.Sum != float64(565.2) {1311		t.Fatalf("expected aggregation Sum = %v; got: %v", float64(565.2), *agg.Sum)1312	}1313	if agg.SumOfSquares == nil {1314		t.Fatalf("expected aggregation sum_of_squares != nil; got: %v", agg.SumOfSquares)1315	}1316	if *agg.SumOfSquares != float64(54551.51999999999) {1317		t.Fatalf("expected aggregation sum_of_squares = %v; got: %v", float64(54551.51999999999), *agg.SumOfSquares)1318	}1319	if agg.Variance == nil {1320		t.Fatalf("expected aggregation Variance != nil; got: %v", agg.Variance)1321	}1322	if *agg.Variance != float64(218.2799999999976) {1323		t.Fatalf("expected aggregation Variance = %v; got: %v", float64(218.2799999999976), *agg.Variance)1324	}1325	if agg.StdDeviation == nil {1326		t.Fatalf("expected aggregation StdDeviation != nil; got: %v", agg.StdDeviation)1327	}1328	if *agg.StdDeviation != float64(14.774302013969987) {1329		t.Fatalf("expected aggregation StdDeviation = %v; got: %v", 
float64(14.774302013969987), *agg.StdDeviation)1330	}1331}13321333func TestAggsMetricsPercentiles(t *testing.T) {1334	s := `{1335  "load_time_outlier": {1336		"values" : {1337		  "1.0": 15,1338		  "5.0": 20,1339		  "25.0": 23,1340		  "50.0": 25,1341		  "75.0": 29,1342		  "95.0": 60,1343		  "99.0": 1501344		}1345  }1346}`13471348	aggs := new(Aggregations)1349	err := json.Unmarshal([]byte(s), &aggs)1350	if err != nil {1351		t.Fatalf("expected no error decoding; got: %v", err)1352	}13531354	agg, found := aggs.Percentiles("load_time_outlier")1355	if !found {1356		t.Fatalf("expected aggregation to be found; got: %v", found)1357	}1358	if agg == nil {1359		t.Fatalf("expected aggregation != nil; got: %v", agg)1360	}1361	if agg.Values == nil {1362		t.Fatalf("expected aggregation Values != nil; got: %v", agg.Values)1363	}1364	if len(agg.Values) != 7 {1365		t.Fatalf("expected %d aggregation Values; got: %d", 7, len(agg.Values))1366	}1367	if agg.Values["1.0"] != float64(15) {1368		t.Errorf("expected aggregation value for \"1.0\" = %v; got: %v", float64(15), agg.Values["1.0"])1369	}1370	if agg.Values["5.0"] != float64(20) {1371		t.Errorf("expected aggregation value for \"5.0\" = %v; got: %v", float64(20), agg.Values["5.0"])1372	}1373	if agg.Values["25.0"] != float64(23) {1374		t.Errorf("expected aggregation value for \"25.0\" = %v; got: %v", float64(23), agg.Values["25.0"])1375	}1376	if agg.Values["50.0"] != float64(25) {1377		t.Errorf("expected aggregation value for \"50.0\" = %v; got: %v", float64(25), agg.Values["50.0"])1378	}1379	if agg.Values["75.0"] != float64(29) {1380		t.Errorf("expected aggregation value for \"75.0\" = %v; got: %v", float64(29), agg.Values["75.0"])1381	}1382	if agg.Values["95.0"] != float64(60) {1383		t.Errorf("expected aggregation value for \"95.0\" = %v; got: %v", float64(60), agg.Values["95.0"])1384	}1385	if agg.Values["99.0"] != float64(150) {1386		t.Errorf("expected aggregation value for \"99.0\" = %v; got: %v", float64(150), agg.Values["99.0"])1387	}1388}13891390func TestAggsMetricsPercentileRanks(t *testing.T) {1391	s := `{1392  "load_time_outlier": {1393		"values" : {1394		  "15": 92,1395		  "30": 1001396		}1397  }1398}`13991400	aggs := new(Aggregations)1401	err := json.Unmarshal([]byte(s), &aggs)1402	if err != nil {1403		t.Fatalf("expected no error decoding; got: %v", err)1404	}14051406	agg, found := aggs.PercentileRanks("load_time_outlier")1407	if !found {1408		t.Fatalf("expected aggregation to be found; got: %v", found)1409	}1410	if agg == nil {1411		t.Fatalf("expected aggregation != nil; got: %v", agg)1412	}1413	if agg.Values == nil {1414		t.Fatalf("expected aggregation Values != nil; got: %v", agg.Values)1415	}1416	if len(agg.Values) != 2 {1417		t.Fatalf("expected %d aggregation Values; got: %d", 7, len(agg.Values))1418	}1419	if agg.Values["15"] != float64(92) {1420		t.Errorf("expected aggregation value for \"15\" = %v; got: %v", float64(92), agg.Values["15"])1421	}1422	if agg.Values["30"] != float64(100) {1423		t.Errorf("expected aggregation value for \"30\" = %v; got: %v", float64(100), agg.Values["30"])1424	}1425}14261427func TestAggsMetricsTopHits(t *testing.T) {1428	s := `{1429  "top-tags": {1430     "buckets": [1431        {1432           "key": "windows-7",1433           "doc_count": 25365,1434           "top_tags_hits": {1435              "hits": {1436                 "total": 25365,1437                 "max_score": 1,1438                 "hits": [1439                    {1440                       "_index": "stack",1441                       "_type": 
"question",1442                       "_id": "602679",1443                       "_score": 1,1444                       "_source": {1445                          "title": "Windows port opening"1446                       },1447                       "sort": [1448                          13701432311771449                       ]1450                    }1451                 ]1452              }1453           }1454        },1455        {1456           "key": "linux",1457           "doc_count": 18342,1458           "top_tags_hits": {1459              "hits": {1460                 "total": 18342,1461                 "max_score": 1,1462                 "hits": [1463                    {1464                       "_index": "stack",1465                       "_type": "question",1466                       "_id": "602672",1467                       "_score": 1,1468                       "_source": {1469                          "title": "Ubuntu RFID Screensaver lock-unlock"1470                       },1471                       "sort": [1472                          13701433797471473                       ]1474                    }1475                 ]1476              }1477           }1478        },1479        {1480           "key": "windows",1481           "doc_count": 18119,1482           "top_tags_hits": {1483              "hits": {1484                 "total": 18119,1485                 "max_score": 1,1486                 "hits": [1487                    {1488                       "_index": "stack",1489                       "_type": "question",1490                       "_id": "602678",1491                       "_score": 1,1492                       "_source": {1493                          "title": "If I change my computers date / time, what could be affected?"1494                       },1495                       "sort": [1496                          13701428682831497                       ]1498                    }1499                 ]1500              }1501           }1502        }1503     ]1504  }1505}`15061507	aggs := new(Aggregations)1508	err := json.Unmarshal([]byte(s), &aggs)1509	if err != nil {1510		t.Fatalf("expected no error decoding; got: %v", err)1511	}15121513	agg, found := aggs.Terms("top-tags")1514	if !found {1515		t.Fatalf("expected aggregation to be found; got: %v", found)1516	}1517	if agg == nil {1518		t.Fatalf("expected aggregation != nil; got: %v", agg)1519	}1520	if agg.Buckets == nil {1521		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)1522	}1523	if len(agg.Buckets) != 3 {1524		t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets))1525	}1526	if agg.Buckets[0].Key != "windows-7" {1527		t.Errorf("expected bucket key = %q; got: %q", "windows-7", agg.Buckets[0].Key)1528	}1529	if agg.Buckets[1].Key != "linux" {1530		t.Errorf("expected bucket key = %q; got: %q", "linux", agg.Buckets[1].Key)1531	}1532	if agg.Buckets[2].Key != "windows" {1533		t.Errorf("expected bucket key = %q; got: %q", "windows", agg.Buckets[2].Key)1534	}15351536	// Sub-aggregation of top-hits1537	subAgg, found := agg.Buckets[0].TopHits("top_tags_hits")1538	if !found {1539		t.Fatalf("expected sub aggregation to be found; got: %v", found)1540	}1541	if subAgg == nil {1542		t.Fatalf("expected sub aggregation != nil; got: %v", subAgg)1543	}1544	if subAgg.Hits == nil {1545		t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits)1546	}1547	if subAgg.Hits.TotalHits != 25365 {1548		t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 25365, 
subAgg.Hits.TotalHits)1549	}1550	if subAgg.Hits.MaxScore == nil {1551		t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, *subAgg.Hits.MaxScore)1552	}1553	if *subAgg.Hits.MaxScore != float64(1.0) {1554		t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore)1555	}15561557	subAgg, found = agg.Buckets[1].TopHits("top_tags_hits")1558	if !found {1559		t.Fatalf("expected sub aggregation to be found; got: %v", found)1560	}1561	if subAgg == nil {1562		t.Fatalf("expected sub aggregation != nil; got: %v", subAgg)1563	}1564	if subAgg.Hits == nil {1565		t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits)1566	}1567	if subAgg.Hits.TotalHits != 18342 {1568		t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 18342, subAgg.Hits.TotalHits)1569	}1570	if subAgg.Hits.MaxScore == nil {1571		t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, *subAgg.Hits.MaxScore)1572	}1573	if *subAgg.Hits.MaxScore != float64(1.0) {1574		t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore)1575	}15761577	subAgg, found = agg.Buckets[2].TopHits("top_tags_hits")1578	if !found {1579		t.Fatalf("expected sub aggregation to be found; got: %v", found)1580	}1581	if subAgg == nil {1582		t.Fatalf("expected sub aggregation != nil; got: %v", subAgg)1583	}1584	if subAgg.Hits == nil {1585		t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits)1586	}1587	if subAgg.Hits.TotalHits != 18119 {1588		t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 18119, subAgg.Hits.TotalHits)1589	}1590	if subAgg.Hits.MaxScore == nil {1591		t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, *subAgg.Hits.MaxScore)1592	}1593	if *subAgg.Hits.MaxScore != float64(1.0) {1594		t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore)1595	}1596}15971598func TestAggsBucketGlobal(t *testing.T) {1599	s := `{1600	"all_products" : {1601    "doc_count" : 100,1602		"avg_price" : {1603			"value" : 56.31604		}1605	}1606}`16071608	aggs := new(Aggregations)1609	err := json.Unmarshal([]byte(s), &aggs)1610	if err != nil {1611		t.Fatalf("expected no error decoding; got: %v", err)1612	}16131614	agg, found := aggs.Global("all_products")1615	if !found {1616		t.Fatalf("expected aggregation to be found; got: %v", found)1617	}1618	if agg == nil {1619		t.Fatalf("expected aggregation != nil; got: %v", agg)1620	}1621	if agg.DocCount != 100 {1622		t.Fatalf("expected aggregation DocCount = %d; got: %d", 100, agg.DocCount)1623	}16241625	// Sub-aggregation1626	subAgg, found := agg.Avg("avg_price")1627	if !found {1628		t.Fatalf("expected sub-aggregation to be found; got: %v", found)1629	}1630	if subAgg == nil {1631		t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg)1632	}1633	if subAgg.Value == nil {1634		t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value)1635	}1636	if *subAgg.Value != float64(56.3) {1637		t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(56.3), *subAgg.Value)1638	}1639}16401641func TestAggsBucketFilter(t *testing.T) {1642	s := `{1643	"in_stock_products" : {1644	  "doc_count" : 100,1645	  "avg_price" : { "value" : 56.3 }1646	}1647}`16481649	aggs := new(Aggregations)1650	err := json.Unmarshal([]byte(s), &aggs)1651	if err != nil {1652		t.Fatalf("expected no error decoding; got: %v", err)1653	}16541655	agg, found := aggs.Filter("in_stock_products")1656	if !found {1657		
t.Fatalf("expected aggregation to be found; got: %v", found)1658	}1659	if agg == nil {1660		t.Fatalf("expected aggregation != nil; got: %v", agg)1661	}1662	if agg.DocCount != 100 {1663		t.Fatalf("expected aggregation DocCount = %d; got: %d", 100, agg.DocCount)1664	}16651666	// Sub-aggregation1667	subAgg, found := agg.Avg("avg_price")1668	if !found {1669		t.Fatalf("expected sub-aggregation to be found; got: %v", found)1670	}1671	if subAgg == nil {1672		t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg)1673	}1674	if subAgg.Value == nil {1675		t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value)1676	}1677	if *subAgg.Value != float64(56.3) {1678		t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(56.3), *subAgg.Value)1679	}1680}16811682func TestAggsBucketFiltersWithBuckets(t *testing.T) {1683	s := `{1684  "messages" : {1685    "buckets" : [1686    	{1687        "doc_count" : 34,1688        "monthly" : {1689          "buckets" : []1690        }1691      },1692      {1693        "doc_count" : 439,1694        "monthly" : {1695          "buckets" : []1696        }1697      }1698    ]1699  }1700}`17011702	aggs := new(Aggregations)1703	err := json.Unmarshal([]byte(s), &aggs)1704	if err != nil {1705		t.Fatalf("expected no error decoding; got: %v", err)1706	}17071708	agg, found := aggs.Filters("messages")1709	if !found {1710		t.Fatalf("expected aggregation to be found; got: %v", found)1711	}1712	if agg == nil {1713		t.Fatalf("expected aggregation != nil; got: %v", agg)1714	}1715	if agg.Buckets == nil {1716		t.Fatalf("expected aggregation buckets != %v; got: %v", nil, agg.Buckets)1717	}1718	if len(agg.Buckets) != 2 {1719		t.Fatalf("expected %d buckets; got: %d", 2, len(agg.Buckets))1720	}17211722	if agg.Buckets[0].DocCount != 34 {1723		t.Fatalf("expected DocCount = %d; got: %d", 34, agg.Buckets[0].DocCount)1724	}1725	subAgg, found := agg.Buckets[0].Histogram("monthly")1726	if !found {1727		t.Fatalf("expected sub aggregation to be found; got: %v", found)1728	}1729	if subAgg == nil {1730		t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg)1731	}17321733	if agg.Buckets[1].DocCount != 439 {1734		t.Fatalf("expected DocCount = %d; got: %d", 439, agg.Buckets[1].DocCount)1735	}1736	subAgg, found = agg.Buckets[1].Histogram("monthly")1737	if !found {1738		t.Fatalf("expected sub aggregation to be found; got: %v", found)1739	}1740	if subAgg == nil {1741		t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg)1742	}1743}17441745func TestAggsBucketFiltersWithNamedBuckets(t *testing.T) {1746	s := `{1747  "messages" : {1748    "buckets" : {1749      "errors" : {1750        "doc_count" : 34,1751        "monthly" : {1752          "buckets" : []1753        }1754      },1755      "warnings" : {1756        "doc_count" : 439,1757        "monthly" : {1758          "buckets" : []1759        }1760      }1761    }1762  }1763}`17641765	aggs := new(Aggregations)1766	err := json.Unmarshal([]byte(s), &aggs)1767	if err != nil {1768		t.Fatalf("expected no error decoding; got: %v", err)1769	}17701771	agg, found := aggs.Filters("messages")1772	if !found {1773		t.Fatalf("expected aggregation to be found; got: %v", found)1774	}1775	if agg == nil {1776		t.Fatalf("expected aggregation != nil; got: %v", agg)1777	}1778	if agg.NamedBuckets == nil {1779		t.Fatalf("expected aggregation buckets != %v; got: %v", nil, agg.NamedBuckets)1780	}1781	if len(agg.NamedBuckets) != 2 {1782		t.Fatalf("expected %d buckets; got: %d", 2, len(agg.NamedBuckets))1783	}17841785	if 
agg.NamedBuckets["errors"].DocCount != 34 {1786		t.Fatalf("expected DocCount = %d; got: %d", 34, agg.NamedBuckets["errors"].DocCount)1787	}1788	subAgg, found := agg.NamedBuckets["errors"].Histogram("monthly")1789	if !found {1790		t.Fatalf("expected sub aggregation to be found; got: %v", found)1791	}1792	if subAgg == nil {1793		t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg)1794	}17951796	if agg.NamedBuckets["warnings"].DocCount != 439 {1797		t.Fatalf("expected DocCount = %d; got: %d", 439, agg.NamedBuckets["warnings"].DocCount)1798	}1799	subAgg, found = agg.NamedBuckets["warnings"].Histogram("monthly")1800	if !found {1801		t.Fatalf("expected sub aggregation to be found; got: %v", found)1802	}1803	if subAgg == nil {1804		t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg)1805	}1806}18071808func TestAggsBucketMissing(t *testing.T) {1809	s := `{1810	"products_without_a_price" : {1811		"doc_count" : 101812	}1813}`18141815	aggs := new(Aggregations)1816	err := json.Unmarshal([]byte(s), &aggs)1817	if err != nil {1818		t.Fatalf("expected no error decoding; got: %v", err)1819	}18201821	agg, found := aggs.Missing("products_without_a_price")1822	if !found {1823		t.Fatalf("expected aggregation to be found; got: %v", found)1824	}1825	if agg == nil {1826		t.Fatalf("expected aggregation != nil; got: %v", agg)1827	}1828	if agg.DocCount != 10 {1829		t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount)1830	}1831}18321833func TestAggsBucketNested(t *testing.T) {1834	s := `{1835	"resellers": {1836		"min_price": {1837			"value" : 3501838		}1839	}1840}`18411842	aggs := new(Aggregations)1843	err := json.Unmarshal([]byte(s), &aggs)1844	if err != nil {1845		t.Fatalf("expected no error decoding; got: %v", err)1846	}18471848	agg, found := aggs.Nested("resellers")1849	if !found {1850		t.Fatalf("expected aggregation to be found; got: %v", found)1851	}1852	if agg == nil {1853		t.Fatalf("expected aggregation != nil; got: %v", agg)1854	}1855	if agg.DocCount != 0 {1856		t.Fatalf("expected aggregation DocCount = %d; got: %d", 0, agg.DocCount)1857	}18581859	// Sub-aggregation1860	subAgg, found := agg.Avg("min_price")1861	if !found {1862		t.Fatalf("expected sub-aggregation to be found; got: %v", found)1863	}1864	if subAgg == nil {1865		t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg)1866	}1867	if subAgg.Value == nil {1868		t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value)1869	}1870	if *subAgg.Value != float64(350) {1871		t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(350), *subAgg.Value)1872	}1873}18741875func TestAggsBucketReverseNested(t *testing.T) {1876	s := `{1877	"comment_to_issue": {1878		"doc_count" : 101879	}1880}`18811882	aggs := new(Aggregations)1883	err := json.Unmarshal([]byte(s), &aggs)1884	if err != nil {1885		t.Fatalf("expected no error decoding; got: %v", err)1886	}18871888	agg, found := aggs.ReverseNested("comment_to_issue")1889	if !found {1890		t.Fatalf("expected aggregation to be found; got: %v", found)1891	}1892	if agg == nil {1893		t.Fatalf("expected aggregation != nil; got: %v", agg)1894	}1895	if agg.DocCount != 10 {1896		t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount)1897	}1898}18991900func TestAggsBucketChildren(t *testing.T) {1901	s := `{1902	"to-answers": {1903		"doc_count" : 101904	}1905}`19061907	aggs := new(Aggregations)1908	err := json.Unmarshal([]byte(s), &aggs)1909	if err != nil {1910		t.Fatalf("expected no error decoding; got: %v", err)1911	}19121913	agg, found := 
aggs.Children("to-answers")1914	if !found {1915		t.Fatalf("expected aggregation to be found; got: %v", found)1916	}1917	if agg == nil {1918		t.Fatalf("expected aggregation != nil; got: %v", agg)1919	}1920	if agg.DocCount != 10 {1921		t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount)1922	}1923}19241925func TestAggsBucketTerms(t *testing.T) {1926	s := `{1927	"users" : {1928	  "doc_count_error_upper_bound" : 1,1929	  "sum_other_doc_count" : 2,1930	  "buckets" : [ {1931	    "key" : "olivere",1932	    "doc_count" : 21933	  }, {1934	    "key" : "sandrae",1935	    "doc_count" : 11936	  } ]1937	}1938}`19391940	aggs := new(Aggregations)1941	err := json.Unmarshal([]byte(s), &aggs)1942	if err != nil {1943		t.Fatalf("expected no error decoding; got: %v", err)1944	}19451946	agg, found := aggs.Terms("users")1947	if !found {1948		t.Fatalf("expected aggregation to be found; got: %v", found)1949	}1950	if agg == nil {1951		t.Fatalf("expected aggregation != nil; got: %v", agg)1952	}1953	if agg.Buckets == nil {1954		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)1955	}1956	if len(agg.Buckets) != 2 {1957		t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))1958	}1959	if agg.Buckets[0].Key != "olivere" {1960		t.Errorf("expected key %q; got: %q", "olivere", agg.Buckets[0].Key)1961	}1962	if agg.Buckets[0].DocCount != 2 {1963		t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount)1964	}1965	if agg.Buckets[1].Key != "sandrae" {1966		t.Errorf("expected key %q; got: %q", "sandrae", agg.Buckets[1].Key)1967	}1968	if agg.Buckets[1].DocCount != 1 {1969		t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount)1970	}1971}19721973func TestAggsBucketTermsWithNumericKeys(t *testing.T) {1974	s := `{1975	"users" : {1976	  "doc_count_error_upper_bound" : 1,1977	  "sum_other_doc_count" : 2,1978	  "buckets" : [ {1979	    "key" : 17,1980	    "doc_count" : 21981	  }, {1982	    "key" : 21,1983	    "doc_count" : 11984	  } ]1985	}1986}`19871988	aggs := new(Aggregations)1989	err := json.Unmarshal([]byte(s), &aggs)1990	if err != nil {1991		t.Fatalf("expected no error decoding; got: %v", err)1992	}19931994	agg, found := aggs.Terms("users")1995	if !found {1996		t.Fatalf("expected aggregation to be found; got: %v", found)1997	}1998	if agg == nil {1999		t.Fatalf("expected aggregation != nil; got: %v", agg)2000	}2001	if agg.Buckets == nil {2002		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)2003	}2004	if len(agg.Buckets) != 2 {2005		t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))2006	}2007	if agg.Buckets[0].Key != float64(17) {2008		t.Errorf("expected key %v; got: %v", 17, agg.Buckets[0].Key)2009	}2010	if got, err := agg.Buckets[0].KeyNumber.Int64(); err != nil {2011		t.Errorf("expected to convert key to int64; got: %v", err)2012	} else if got != 17 {2013		t.Errorf("expected key %v; got: %v", 17, agg.Buckets[0].Key)2014	}2015	if agg.Buckets[0].DocCount != 2 {2016		t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount)2017	}2018	if agg.Buckets[1].Key != float64(21) {2019		t.Errorf("expected key %v; got: %v", 21, agg.Buckets[1].Key)2020	}2021	if got, err := agg.Buckets[1].KeyNumber.Int64(); err != nil {2022		t.Errorf("expected to convert key to int64; got: %v", err)2023	} else if got != 21 {2024		t.Errorf("expected key %v; got: %v", 21, agg.Buckets[1].Key)2025	}2026	if agg.Buckets[1].DocCount != 1 {2027		t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount)2028	
}2029}20302031func TestAggsBucketTermsWithBoolKeys(t *testing.T) {2032	s := `{2033	"users" : {2034	  "doc_count_error_upper_bound" : 1,2035	  "sum_other_doc_count" : 2,2036	  "buckets" : [ {2037	    "key" : true,2038	    "doc_count" : 22039	  }, {2040	    "key" : false,2041	    "doc_count" : 12042	  } ]2043	}2044}`20452046	aggs := new(Aggregations)2047	err := json.Unmarshal([]byte(s), &aggs)2048	if err != nil {2049		t.Fatalf("expected no error decoding; got: %v", err)2050	}20512052	agg, found := aggs.Terms("users")2053	if !found {2054		t.Fatalf("expected aggregation to be found; got: %v", found)2055	}2056	if agg == nil {2057		t.Fatalf("expected aggregation != nil; got: %v", agg)2058	}2059	if agg.Buckets == nil {2060		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)2061	}2062	if len(agg.Buckets) != 2 {2063		t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))2064	}2065	if agg.Buckets[0].Key != true {2066		t.Errorf("expected key %v; got: %v", true, agg.Buckets[0].Key)2067	}2068	if agg.Buckets[0].DocCount != 2 {2069		t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount)2070	}2071	if agg.Buckets[1].Key != false {2072		t.Errorf("expected key %v; got: %v", false, agg.Buckets[1].Key)2073	}2074	if agg.Buckets[1].DocCount != 1 {2075		t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount)2076	}2077}20782079func TestAggsBucketSignificantTerms(t *testing.T) {2080	s := `{2081	"significantCrimeTypes" : {2082    "doc_count": 47347,2083    "buckets" : [2084      {2085        "key": "Bicycle theft",2086        "doc_count": 3640,2087        "score": 0.371235374214817,2088        "bg_count": 667992089      }2090    ]2091	}2092}`20932094	aggs := new(Aggregations)2095	err := json.Unmarshal([]byte(s), &aggs)2096	if err != nil {2097		t.Fatalf("expected no error decoding; got: %v", err)2098	}20992100	agg, found := aggs.SignificantTerms("significantCrimeTypes")2101	if !found {2102		t.Fatalf("expected aggregation to be found; got: %v", found)2103	}2104	if agg == nil {2105		t.Fatalf("expected aggregation != nil; got: %v", agg)2106	}2107	if agg.DocCount != 47347 {2108		t.Fatalf("expected aggregation DocCount != %d; got: %d", 47347, agg.DocCount)2109	}2110	if agg.Buckets == nil {2111		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)2112	}2113	if len(agg.Buckets) != 1 {2114		t.Errorf("expected %d bucket entries; got: %d", 1, len(agg.Buckets))2115	}2116	if agg.Buckets[0].Key != "Bicycle theft" {2117		t.Errorf("expected key = %q; got: %q", "Bicycle theft", agg.Buckets[0].Key)2118	}2119	if agg.Buckets[0].DocCount != 3640 {2120		t.Errorf("expected doc count = %d; got: %d", 3640, agg.Buckets[0].DocCount)2121	}2122	if agg.Buckets[0].Score != float64(0.371235374214817) {2123		t.Errorf("expected score = %v; got: %v", float64(0.371235374214817), agg.Buckets[0].Score)2124	}2125	if agg.Buckets[0].BgCount != 66799 {2126		t.Errorf("expected BgCount = %d; got: %d", 66799, agg.Buckets[0].BgCount)2127	}2128}21292130func TestAggsBucketSampler(t *testing.T) {2131	s := `{2132	"sample" : {2133    "doc_count": 1000,2134    "keywords": {2135    	"doc_count": 1000,2136	    "buckets" : [2137	      {2138	        "key": "bend",2139	        "doc_count": 58,2140	        "score": 37.982536582524276,2141	        "bg_count": 1032142	      }2143	    ]2144    }2145	}2146}`21472148	aggs := new(Aggregations)2149	err := json.Unmarshal([]byte(s), &aggs)2150	if err != nil {2151		t.Fatalf("expected no error decoding; got: %v", err)2152	}21532154	agg, found := 
aggs.Sampler("sample")2155	if !found {2156		t.Fatalf("expected aggregation to be found; got: %v", found)2157	}2158	if agg == nil {2159		t.Fatalf("expected aggregation != nil; got: %v", agg)2160	}2161	if agg.DocCount != 1000 {2162		t.Fatalf("expected aggregation DocCount != %d; got: %d", 1000, agg.DocCount)2163	}2164	sub, found := agg.Aggregations["keywords"]2165	if !found {2166		t.Fatalf("expected sub aggregation %q", "keywords")2167	}2168	if sub == nil {2169		t.Fatalf("expected sub aggregation %q; got: %v", "keywords", sub)2170	}2171}21722173func TestAggsBucketRange(t *testing.T) {2174	s := `{2175	"price_ranges" : {2176		"buckets": [2177			{2178				"to": 50,2179				"doc_count": 22180			},2181			{2182				"from": 50,2183				"to": 100,2184				"doc_count": 42185			},2186			{2187				"from": 100,2188				"doc_count": 42189			}2190		]2191	}2192}`21932194	aggs := new(Aggregations)2195	err := json.Unmarshal([]byte(s), &aggs)2196	if err != nil {2197		t.Fatalf("expected no error decoding; got: %v", err)2198	}21992200	agg, found := aggs.Range("price_ranges")2201	if !found {2202		t.Fatalf("expected aggregation to be found; got: %v", found)2203	}2204	if agg == nil {2205		t.Fatalf("expected aggregation != nil; got: %v", agg)2206	}2207	if agg.Buckets == nil {2208		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)2209	}2210	if len(agg.Buckets) != 3 {2211		t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets))2212	}2213	if agg.Buckets[0].From != nil {2214		t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)2215	}2216	if agg.Buckets[0].To == nil {2217		t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)2218	}2219	if *agg.Buckets[0].To != float64(50) {2220		t.Errorf("expected To = %v; got: %v", float64(50), *agg.Buckets[0].To)2221	}2222	if agg.Buckets[0].DocCount != 2 {2223		t.Errorf("expected DocCount = %d; got: %d", 2, agg.Buckets[0].DocCount)2224	}2225	if agg.Buckets[1].From == nil {2226		t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From)2227	}2228	if *agg.Buckets[1].From != float64(50) {2229		t.Errorf("expected From = %v; got: %v", float64(50), *agg.Buckets[1].From)2230	}2231	if agg.Buckets[1].To == nil {2232		t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[1].To)2233	}2234	if *agg.Buckets[1].To != float64(100) {2235		t.Errorf("expected To = %v; got: %v", float64(100), *agg.Buckets[1].To)2236	}2237	if agg.Buckets[1].DocCount != 4 {2238		t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[1].DocCount)2239	}2240	if agg.Buckets[2].From == nil {2241		t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[2].From)2242	}2243	if *agg.Buckets[2].From != float64(100) {2244		t.Errorf("expected From = %v; got: %v", float64(100), *agg.Buckets[2].From)2245	}2246	if agg.Buckets[2].To != nil {2247		t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[2].To)2248	}2249	if agg.Buckets[2].DocCount != 4 {2250		t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[2].DocCount)2251	}2252}22532254func TestAggsBucketDateRange(t *testing.T) {2255	s := `{2256	"range": {2257		"buckets": [2258			{2259				"to": 1.3437792E+12,2260				"to_as_string": "08-2012",2261				"doc_count": 72262			},2263			{2264				"from": 1.3437792E+12,2265				"from_as_string": "08-2012",2266				"doc_count": 22267			}2268		]2269	}2270}`22712272	aggs := new(Aggregations)2273	err := json.Unmarshal([]byte(s), &aggs)2274	if err != nil {2275		t.Fatalf("expected no error decoding; got: %v", err)2276	}22772278	agg, found := aggs.DateRange("range")2279	if !found {2280		
t.Fatalf("expected aggregation to be found; got: %v", found)2281	}2282	if agg == nil {2283		t.Fatalf("expected aggregation != nil; got: %v", agg)2284	}2285	if agg.Buckets == nil {2286		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)2287	}2288	if len(agg.Buckets) != 2 {2289		t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))2290	}2291	if agg.Buckets[0].From != nil {2292		t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)2293	}2294	if agg.Buckets[0].To == nil {2295		t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)2296	}2297	if *agg.Buckets[0].To != float64(1.3437792E+12) {2298		t.Errorf("expected To = %v; got: %v", float64(1.3437792E+12), *agg.Buckets[0].To)2299	}2300	if agg.Buckets[0].ToAsString != "08-2012" {2301		t.Errorf("expected ToAsString = %q; got: %q", "08-2012", agg.Buckets[0].ToAsString)2302	}2303	if agg.Buckets[0].DocCount != 7 {2304		t.Errorf("expected DocCount = %d; got: %d", 7, agg.Buckets[0].DocCount)2305	}2306	if agg.Buckets[1].From == nil {2307		t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From)2308	}2309	if *agg.Buckets[1].From != float64(1.3437792E+12) {2310		t.Errorf("expected From = %v; got: %v", float64(1.3437792E+12), *agg.Buckets[1].From)2311	}2312	if agg.Buckets[1].FromAsString != "08-2012" {2313		t.Errorf("expected FromAsString = %q; got: %q", "08-2012", agg.Buckets[1].FromAsString)2314	}2315	if agg.Buckets[1].To != nil {2316		t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[1].To)2317	}2318	if agg.Buckets[1].DocCount != 2 {2319		t.Errorf("expected DocCount = %d; got: %d", 2, agg.Buckets[1].DocCount)2320	}2321}23222323func TestAggsBucketIPv4Range(t *testing.T) {2324	s := `{2325	"ip_ranges": {2326		"buckets" : [2327			{2328				"to": 167772165,2329				"to_as_string": "10.0.0.5",2330				"doc_count": 42331			},2332			{2333				"from": 167772165,2334				"from_as_string": "10.0.0.5",2335				"doc_count": 62336			}2337		]2338	}2339}`23402341	aggs := new(Aggregations)2342	err := json.Unmarshal([]byte(s), &aggs)2343	if err != nil {2344		t.Fatalf("expected no error decoding; got: %v", err)2345	}23462347	agg, found := aggs.IPv4Range("ip_ranges")2348	if !found {2349		t.Fatalf("expected aggregation to be found; got: %v", found)2350	}2351	if agg == nil {2352		t.Fatalf("expected aggregation != nil; got: %v", agg)2353	}2354	if agg.Buckets == nil {2355		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)2356	}2357	if len(agg.Buckets) != 2 {2358		t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))2359	}2360	if agg.Buckets[0].From != nil {2361		t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)2362	}2363	if agg.Buckets[0].To == nil {2364		t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)2365	}2366	if *agg.Buckets[0].To != float64(167772165) {2367		t.Errorf("expected To = %v; got: %v", float64(167772165), *agg.Buckets[0].To)2368	}2369	if agg.Buckets[0].ToAsString != "10.0.0.5" {2370		t.Errorf("expected ToAsString = %q; got: %q", "10.0.0.5", agg.Buckets[0].ToAsString)2371	}2372	if agg.Buckets[0].DocCount != 4 {2373		t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[0].DocCount)2374	}2375	if agg.Buckets[1].From == nil {2376		t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From)2377	}2378	if *agg.Buckets[1].From != float64(167772165) {2379		t.Errorf("expected From = %v; got: %v", float64(167772165), *agg.Buckets[1].From)2380	}2381	if agg.Buckets[1].FromAsString != "10.0.0.5" {2382		t.Errorf("expected FromAsString = 
%q; got: %q", "10.0.0.5", agg.Buckets[1].FromAsString)2383	}2384	if agg.Buckets[1].To != nil {2385		t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[1].To)2386	}2387	if agg.Buckets[1].DocCount != 6 {2388		t.Errorf("expected DocCount = %d; got: %d", 6, agg.Buckets[1].DocCount)2389	}2390}23912392func TestAggsBucketHistogram(t *testing.T) {2393	s := `{2394	"prices" : {2395		"buckets": [2396			{2397				"key": 0,2398				"doc_count": 22399			},2400			{2401				"key": 50,2402				"doc_count": 42403			},2404			{2405				"key": 150,2406				"doc_count": 32407			}2408		]2409	}2410}`24112412	aggs := new(Aggregations)2413	err := json.Unmarshal([]byte(s), &aggs)2414	if err != nil {2415		t.Fatalf("expected no error decoding; got: %v", err)2416	}24172418	agg, found := aggs.Histogram("prices")2419	if !found {2420		t.Fatalf("expected aggregation to be found; got: %v", found)2421	}2422	if agg == nil {2423		t.Fatalf("expected aggregation != nil; got: %v", agg)2424	}2425	if agg.Buckets == nil {2426		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)2427	}2428	if len(agg.Buckets) != 3 {2429		t.Errorf("expected %d buckets; got: %d", 3, len(agg.Buckets))2430	}2431	if agg.Buckets[0].Key != 0 {2432		t.Errorf("expected key = %v; got: %v", 0, agg.Buckets[0].Key)2433	}2434	if agg.Buckets[0].KeyAsString != nil {2435		t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[0].KeyAsString)2436	}2437	if agg.Buckets[0].DocCount != 2 {2438		t.Errorf("expected doc count = %d; got: %d", 2, agg.Buckets[0].DocCount)2439	}2440	if agg.Buckets[1].Key != 50 {2441		t.Errorf("expected key = %v; got: %v", 50, agg.Buckets[1].Key)2442	}2443	if agg.Buckets[1].KeyAsString != nil {2444		t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[1].KeyAsString)2445	}2446	if agg.Buckets[1].DocCount != 4 {2447		t.Errorf("expected doc count = %d; got: %d", 4, agg.Buckets[1].DocCount)2448	}2449	if agg.Buckets[2].Key != 150 {2450		t.Errorf("expected key = %v; got: %v", 150, agg.Buckets[2].Key)2451	}2452	if agg.Buckets[2].KeyAsString != nil {2453		t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[2].KeyAsString)2454	}2455	if agg.Buckets[2].DocCount != 3 {2456		t.Errorf("expected doc count = %d; got: %d", 3, agg.Buckets[2].DocCount)2457	}2458}24592460func TestAggsBucketDateHistogram(t *testing.T) {2461	s := `{2462	"articles_over_time": {2463	  "buckets": [2464	      {2465	          "key_as_string": "2013-02-02",2466	          "key": 1328140800000,2467	          "doc_count": 12468	      },2469	      {2470	          "key_as_string": "2013-03-02",2471	          "key": 1330646400000,2472	          "doc_count": 22473	      }2474	  ]2475	}2476}`24772478	aggs := new(Aggregations)2479	err := json.Unmarshal([]byte(s), &aggs)2480	if err != nil {2481		t.Fatalf("expected no error decoding; got: %v", err)2482	}24832484	agg, found := aggs.DateHistogram("articles_over_time")2485	if !found {2486		t.Fatalf("expected aggregation to be found; got: %v", found)2487	}2488	if agg == nil {2489		t.Fatalf("expected aggregation != nil; got: %v", agg)2490	}2491	if agg.Buckets == nil {2492		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)2493	}2494	if len(agg.Buckets) != 2 {2495		t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))2496	}2497	if agg.Buckets[0].Key != 1328140800000 {2498		t.Errorf("expected key %v; got: %v", 1328140800000, agg.Buckets[0].Key)2499	}2500	if agg.Buckets[0].KeyAsString == nil {2501		t.Fatalf("expected key_as_string != nil; got: %v", 
agg.Buckets[0].KeyAsString)2502	}2503	if *agg.Buckets[0].KeyAsString != "2013-02-02" {2504		t.Errorf("expected key_as_string %q; got: %q", "2013-02-02", *agg.Buckets[0].KeyAsString)2505	}2506	if agg.Buckets[0].DocCount != 1 {2507		t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[0].DocCount)2508	}2509	if agg.Buckets[1].Key != 1330646400000 {2510		t.Errorf("expected key %v; got: %v", 1330646400000, agg.Buckets[1].Key)2511	}2512	if agg.Buckets[1].KeyAsString == nil {2513		t.Fatalf("expected key_as_string != nil; got: %v", agg.Buckets[1].KeyAsString)2514	}2515	if *agg.Buckets[1].KeyAsString != "2013-03-02" {2516		t.Errorf("expected key_as_string %q; got: %q", "2013-03-02", *agg.Buckets[1].KeyAsString)2517	}2518	if agg.Buckets[1].DocCount != 2 {2519		t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[1].DocCount)2520	}2521}25222523func TestAggsMetricsGeoBounds(t *testing.T) {2524	s := `{2525  "viewport": {2526    "bounds": {2527      "top_left": {2528        "lat": 80.45,2529        "lon": -160.222530      },2531      "bottom_right": {2532        "lat": 40.65,2533        "lon": 42.572534      }2535    }2536  }2537}`25382539	aggs := new(Aggregations)2540	err := json.Unmarshal([]byte(s), &aggs)2541	if err != nil {2542		t.Fatalf("expected no error decoding; got: %v", err)2543	}25442545	agg, found := aggs.GeoBounds("viewport")2546	if !found {2547		t.Fatalf("expected aggregation to be found; got: %v", found)2548	}2549	if agg == nil {2550		t.Fatalf("expected aggregation != nil; got: %v", agg)2551	}2552	if agg.Bounds.TopLeft.Latitude != float64(80.45) {2553		t.Fatalf("expected Bounds.TopLeft.Latitude != %v; got: %v", float64(80.45), agg.Bounds.TopLeft.Latitude)2554	}2555	if agg.Bounds.TopLeft.Longitude != float64(-160.22) {2556		t.Fatalf("expected Bounds.TopLeft.Longitude != %v; got: %v", float64(-160.22), agg.Bounds.TopLeft.Longitude)2557	}2558	if agg.Bounds.BottomRight.Latitude != float64(40.65) {2559		t.Fatalf("expected Bounds.BottomRight.Latitude != %v; got: %v", float64(40.65), agg.Bounds.BottomRight.Latitude)2560	}2561	if agg.Bounds.BottomRight.Longitude != float64(42.57) {2562		t.Fatalf("expected Bounds.BottomRight.Longitude != %v; got: %v", float64(42.57), agg.Bounds.BottomRight.Longitude)2563	}2564}25652566func TestAggsBucketGeoHash(t *testing.T) {2567	s := `{2568	"myLarge-GrainGeoHashGrid": {2569		"buckets": [2570			{2571				"key": "svz",2572				"doc_count": 109642573			},2574			{2575				"key": "sv8",2576				"doc_count": 31982577			}2578		]2579	}2580}`25812582	aggs := new(Aggregations)2583	err := json.Unmarshal([]byte(s), &aggs)2584	if err != nil {2585		t.Fatalf("expected no error decoding; got: %v", err)2586	}25872588	agg, found := aggs.GeoHash("myLarge-GrainGeoHashGrid")2589	if !found {2590		t.Fatalf("expected aggregation to be found; got: %v", found)2591	}2592	if agg == nil {2593		t.Fatalf("expected aggregation != nil; got: %v", agg)2594	}2595	if agg.Buckets == nil {2596		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)2597	}2598	if len(agg.Buckets) != 2 {2599		t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))2600	}2601	if agg.Buckets[0].Key != "svz" {2602		t.Errorf("expected key %q; got: %q", "svz", agg.Buckets[0].Key)2603	}2604	if agg.Buckets[0].DocCount != 10964 {2605		t.Errorf("expected doc count %d; got: %d", 10964, agg.Buckets[0].DocCount)2606	}2607	if agg.Buckets[1].Key != "sv8" {2608		t.Errorf("expected key %q; got: %q", "sv8", agg.Buckets[1].Key)2609	}2610	if agg.Buckets[1].DocCount != 3198 {2611		t.Errorf("expected doc 
count %d; got: %d", 3198, agg.Buckets[1].DocCount)2612	}2613}26142615func TestAggsBucketGeoDistance(t *testing.T) {2616	s := `{2617	"rings" : {2618		"buckets": [2619			{2620				"unit": "km",2621				"to": 100.0,2622				"doc_count": 32623			},2624			{2625				"unit": "km",2626				"from": 100.0,2627				"to": 300.0,2628				"doc_count": 12629			},2630			{2631				"unit": "km",2632				"from": 300.0,2633				"doc_count": 72634			}2635		]2636	}2637}`26382639	aggs := new(Aggregations)2640	err := json.Unmarshal([]byte(s), &aggs)2641	if err != nil {2642		t.Fatalf("expected no error decoding; got: %v", err)2643	}26442645	agg, found := aggs.GeoDistance("rings")2646	if !found {2647		t.Fatalf("expected aggregation to be found; got: %v", found)2648	}2649	if agg == nil {2650		t.Fatalf("expected aggregation != nil; got: %v", agg)2651	}2652	if agg.Buckets == nil {2653		t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)2654	}2655	if len(agg.Buckets) != 3 {2656		t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets))2657	}2658	if agg.Buckets[0].From != nil {2659		t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)2660	}2661	if agg.Buckets[0].To == nil {2662		t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)2663	}2664	if *agg.Buckets[0].To != float64(100.0) {2665		t.Errorf("expected To = %v; got: %v", float64(100.0), *agg.Buckets[0].To)2666	}2667	if agg.Buckets[0].DocCount != 3 {2668		t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[0].DocCount)2669	}26702671	if agg.Buckets[1].From == nil {2672		t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From)2673	}2674	if *agg.Buckets[1].From != float64(100.0) {2675		t.Errorf("expected From = %v; got: %v", float64(100.0), *agg.Buckets[1].From)2676	}2677	if agg.Buckets[1].To == nil {2678		t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[1].To)2679	}2680	if *agg.Buckets[1].To != float64(300.0) {2681		t.Errorf("expected From = %v; got: %v", float64(300.0), *agg.Buckets[1].To)2682	}2683	if agg.Buckets[1].DocCount != 1 {2684		t.Errorf("expected DocCount = %d; got: %d", 1, agg.Buckets[1].DocCount)2685	}26862687	if agg.Buckets[2].From == nil {2688		t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[2].From)2689	}2690	if *agg.Buckets[2].From != float64(300.0) {2691		t.Errorf("expected From = %v; got: %v", float64(300.0), *agg.Buckets[2].From)2692	}2693	if agg.Buckets[2].To != nil {2694		t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[2].To)2695	}2696	if agg.Buckets[2].DocCount != 7 {2697		t.Errorf("expected DocCount = %d; got: %d", 7, agg.Buckets[2].DocCount)2698	}2699}27002701func TestAggsSubAggregates(t *testing.T) {2702	rs := `{2703	"users" : {2704	  "doc_count_error_upper_bound" : 1,2705	  "sum_other_doc_count" : 2,2706	  "buckets" : [ {2707	    "key" : "olivere",2708	    "doc_count" : 2,2709	    "ts" : {2710	      "buckets" : [ {2711	        "key_as_string" : "2012-01-01T00:00:00.000Z",2712	        "key" : 1325376000000,2713	        "doc_count" : 22714	      } ]2715	    }2716	  }, {2717	    "key" : "sandrae",2718	    "doc_count" : 1,2719	    "ts" : {2720	      "buckets" : [ {2721	        "key_as_string" : "2011-01-01T00:00:00.000Z",2722	        "key" : 1293840000000,2723	        "doc_count" : 12724	      } ]2725	    }2726	  } ]2727	}2728}`27292730	aggs := new(Aggregations)2731	err := json.Unmarshal([]byte(rs), &aggs)2732	if err != nil {2733		t.Fatalf("expected no error decoding; got: %v", err)2734	}27352736	// Access top-level aggregation2737	users, found := 
aggs.Terms("users")2738	if !found {2739		t.Fatalf("expected users aggregation to be found; got: %v", found)2740	}2741	if users == nil {2742		t.Fatalf("expected users aggregation; got: %v", users)2743	}2744	if users.Buckets == nil {2745		t.Fatalf("expected users buckets; got: %v", users.Buckets)2746	}2747	if len(users.Buckets) != 2 {2748		t.Errorf("expected %d bucket entries; got: %d", 2, len(users.Buckets))2749	}2750	if users.Buckets[0].Key != "olivere" {2751		t.Errorf("expected key %q; got: %q", "olivere", users.Buckets[0].Key)2752	}2753	if users.Buckets[0].DocCount != 2 {2754		t.Errorf("expected doc count %d; got: %d", 2, users.Buckets[0].DocCount)2755	}2756	if users.Buckets[1].Key != "sandrae" {2757		t.Errorf("expected key %q; got: %q", "sandrae", users.Buckets[1].Key)2758	}2759	if users.Buckets[1].DocCount != 1 {2760		t.Errorf("expected doc count %d; got: %d", 1, users.Buckets[1].DocCount)2761	}27622763	// Access sub-aggregation2764	ts, found := users.Buckets[0].DateHistogram("ts")2765	if !found {2766		t.Fatalf("expected ts aggregation to be found; got: %v", found)2767	}2768	if ts == nil {2769		t.Fatalf("expected ts aggregation; got: %v", ts)2770	}2771	if ts.Buckets == nil {2772		t.Fatalf("expected ts buckets; got: %v", ts.Buckets)2773	}2774	if len(ts.Buckets) != 1 {2775		t.Errorf("expected %d bucket entries; got: %d", 1, len(ts.Buckets))2776	}2777	if ts.Buckets[0].Key != 1325376000000 {2778		t.Errorf("expected key %v; got: %v", 1325376000000, ts.Buckets[0].Key)2779	}2780	if ts.Buckets[0].KeyAsString == nil {2781		t.Fatalf("expected key_as_string != %v; got: %v", nil, ts.Buckets[0].KeyAsString)2782	}2783	if *ts.Buckets[0].KeyAsString != "2012-01-01T00:00:00.000Z" {2784		t.Errorf("expected key_as_string %q; got: %q", "2012-01-01T00:00:00.000Z", *ts.Buckets[0].KeyAsString)2785	}2786}27872788func TestAggsPipelineAvgBucket(t *testing.T) {2789	s := `{2790	"avg_monthly_sales" : {2791	  "value" : 328.333333333333332792  }2793}`27942795	aggs := new(Aggregations)2796	err := json.Unmarshal([]byte(s), &aggs)2797	if err != nil {2798		t.Fatalf("expected no error decoding; got: %v", err)2799	}28002801	agg, found := aggs.AvgBucket("avg_monthly_sales")2802	if !found {2803		t.Fatalf("expected aggregation to be found; got: %v", found)2804	}2805	if agg == nil {2806		t.Fatalf("expected aggregation != nil; got: %v", agg)2807	}2808	if agg.Value == nil {2809		t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)2810	}2811	if *agg.Value != float64(328.33333333333333) {2812		t.Fatalf("expected aggregation value = %v; got: %v", float64(328.33333333333333), *agg.Value)2813	}2814}28152816func TestAggsPipelineSumBucket(t *testing.T) {2817	s := `{2818	"sum_monthly_sales" : {2819	  "value" : 9852820  }2821}`28222823	aggs := new(Aggregations)2824	err := json.Unmarshal([]byte(s), &aggs)2825	if err != nil {2826		t.Fatalf("expected no error decoding; got: %v", err)2827	}28282829	agg, found := aggs.SumBucket("sum_monthly_sales")2830	if !found {2831		t.Fatalf("expected aggregation to be found; got: %v", found)2832	}2833	if agg == nil {2834		t.Fatalf("expected aggregation != nil; got: %v", agg)2835	}2836	if agg.Value == nil {2837		t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)2838	}2839	if *agg.Value != float64(985) {2840		t.Fatalf("expected aggregation value = %v; got: %v", float64(985), *agg.Value)2841	}2842}28432844func TestAggsPipelineMaxBucket(t *testing.T) {2845	s := `{2846	"max_monthly_sales" : {2847		"keys": ["2015/01/01 00:00:00"],2848	  "value" : 5502849  }2850}`28512852	aggs := 
new(Aggregations)2853	err := json.Unmarshal([]byte(s), &aggs)2854	if err != nil {2855		t.Fatalf("expected no error decoding; got: %v", err)2856	}28572858	agg, found := aggs.MaxBucket("max_monthly_sales")2859	if !found {2860		t.Fatalf("expected aggregation to be found; got: %v", found)2861	}2862	if agg == nil {2863		t.Fatalf("expected aggregation != nil; got: %v", agg)2864	}2865	if len(agg.Keys) != 1 {2866		t.Fatalf("expected 1 key; got: %d", len(agg.Keys))2867	}2868	if got, want := agg.Keys[0], "2015/01/01 00:00:00"; got != want {2869		t.Fatalf("expected key %q; got: %v (%T)", want, got, got)2870	}2871	if agg.Value == nil {2872		t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)2873	}2874	if *agg.Value != float64(550) {2875		t.Fatalf("expected aggregation value = %v; got: %v", float64(550), *agg.Value)2876	}2877}28782879func TestAggsPipelineMinBucket(t *testing.T) {2880	s := `{2881	"min_monthly_sales" : {2882		"keys": ["2015/02/01 00:00:00"],2883	  "value" : 602884  }2885}`28862887	aggs := new(Aggregations)2888	err := json.Unmarshal([]byte(s), &aggs)2889	if err != nil {2890		t.Fatalf("expected no error decoding; got: %v", err)2891	}28922893	agg, found := aggs.MinBucket("min_monthly_sales")2894	if !found {2895		t.Fatalf("expected aggregation to be found; got: %v", found)2896	}2897	if agg == nil {2898		t.Fatalf("expected aggregation != nil; got: %v", agg)2899	}2900	if len(agg.Keys) != 1 {2901		t.Fatalf("expected 1 key; got: %d", len(agg.Keys))2902	}2903	if got, want := agg.Keys[0], "2015/02/01 00:00:00"; got != want {2904		t.Fatalf("expected key %q; got: %v (%T)", want, got, got)2905	}2906	if agg.Value == nil {2907		t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)2908	}2909	if *agg.Value != float64(60) {2910		t.Fatalf("expected aggregation value = %v; got: %v", float64(60), *agg.Value)2911	}2912}29132914func TestAggsPipelineMovAvg(t *testing.T) {2915	s := `{2916	"the_movavg" : {2917	  "value" : 12.02918  }2919}`29202921	aggs := new(Aggregations)2922	err := json.Unmarshal([]byte(s), &aggs)2923	if err != nil {2924		t.Fatalf("expected no error decoding; got: %v", err)2925	}29262927	agg, found := aggs.MovAvg("the_movavg")2928	if !found {2929		t.Fatalf("expected aggregation to be found; got: %v", found)2930	}2931	if agg == nil {2932		t.Fatalf("expected aggregation != nil; got: %v", agg)2933	}2934	if agg.Value == nil {2935		t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)2936	}2937	if *agg.Value != float64(12.0) {2938		t.Fatalf("expected aggregation value = %v; got: %v", float64(12.0), *agg.Value)2939	}2940}29412942func TestAggsPipelineDerivative(t *testing.T) {2943	s := `{2944	"sales_deriv" : {2945	  "value" : 3152946  }2947}`29482949	aggs := new(Aggregations)2950	err := json.Unmarshal([]byte(s), &aggs)2951	if err != nil {2952		t.Fatalf("expected no error decoding; got: %v", err)2953	}29542955	agg, found := aggs.Derivative("sales_deriv")2956	if !found {2957		t.Fatalf("expected aggregation to be found; got: %v", found)2958	}2959	if agg == nil {2960		t.Fatalf("expected aggregation != nil; got: %v", agg)2961	}2962	if agg.Value == nil {2963		t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)2964	}2965	if *agg.Value != float64(315) {2966		t.Fatalf("expected aggregation value = %v; got: %v", float64(315), *agg.Value)2967	}2968}29692970func TestAggsPipelineStatsBucket(t *testing.T) {2971	s := `{2972	"stats_monthly_sales": {2973	 "count": 3,2974	 "min": 60.0,2975	 "max": 550.0,2976	 "avg": 328.3333333333333,2977	 "sum": 985.02978  
}2979}`29802981	aggs := new(Aggregations)2982	err := json.Unmarshal([]byte(s), &aggs)2983	if err != nil {2984		t.Fatalf("expected no error decoding; got: %v", err)2985	}29862987	agg, found := aggs.StatsBucket("stats_monthly_sales")2988	if !found {2989		t.Fatalf("expected aggregation to be found; got: %v", found)2990	}2991	if agg == nil {2992		t.Fatalf("expected aggregation != nil; got: %v", agg)2993	}2994	if agg.Count != 3 {2995		t.Fatalf("expected aggregation count = %v; got: %v", 3, agg.Count)2996	}2997	if agg.Min == nil {2998		t.Fatalf("expected aggregation min != nil; got: %v", agg.Min)2999	}3000	if *agg.Min != float64(60.0) {3001		t.Fatalf("expected aggregation min = %v; got: %v", float64(60.0), *agg.Min)3002	}3003	if agg.Max == nil {3004		t.Fatalf("expected aggregation max != nil; got: %v", agg.Max)3005	}3006	if *agg.Max != float64(550.0) {3007		t.Fatalf("expected aggregation max = %v; got: %v", float64(550.0), *agg.Max)3008	}3009	if agg.Avg == nil {
...
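Every test above follows the same pattern: unmarshal a raw aggregation response into an Aggregations value, look the aggregation up by name, and assert on its doc counts and values. As a compact reference, here is a minimal sketch of that pattern outside the test suite. The import path for the elastic package is an assumption (it differs between major versions); the Aggregations, Terms, Key and DocCount identifiers are the same ones the tests exercise.

package main

import (
	"encoding/json"
	"fmt"

	// Import path assumed; use the elastic major version your project targets.
	elastic "gopkg.in/olivere/elastic.v3"
)

func main() {
	// Raw terms-aggregation JSON as it would appear in a search response.
	raw := `{
		"users": {
			"buckets": [
				{ "key": "olivere", "doc_count": 2 },
				{ "key": "sandrae", "doc_count": 1 }
			]
		}
	}`

	aggs := new(elastic.Aggregations)
	if err := json.Unmarshal([]byte(raw), &aggs); err != nil {
		panic(err)
	}

	// Terms returns the decoded bucket aggregation and a found flag,
	// exactly like aggs.Terms("users") in the tests above.
	users, found := aggs.Terms("users")
	if !found {
		panic("users aggregation not found")
	}
	for _, bucket := range users.Buckets {
		fmt.Println(bucket.Key, bucket.DocCount)
	}
}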


histogram_test.go

Source:histogram_test.go Github


...111		}()112	}113}114// Intentionally adding +Inf here to test if that case is handled correctly.115// Also, getCumulativeCounts depends on it.116var testBuckets = []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)}117func TestHistogramConcurrency(t *testing.T) {118	if testing.Short() {119		t.Skip("Skipping test in short mode.")120	}121	rand.Seed(42)122	it := func(n uint32) bool {123		mutations := int(n%1e4 + 1e4)124		concLevel := int(n%5 + 1)125		total := mutations * concLevel126		var start, end sync.WaitGroup127		start.Add(1)128		end.Add(concLevel)129		sum := NewHistogram(HistogramOpts{130			Name:    "test_histogram",131			Help:    "helpless",132			Buckets: testBuckets,133		})134		allVars := make([]float64, total)135		var sampleSum float64136		for i := 0; i < concLevel; i++ {137			vals := make([]float64, mutations)138			for j := 0; j < mutations; j++ {139				v := rand.NormFloat64()140				vals[j] = v141				allVars[i*mutations+j] = v142				sampleSum += v143			}144			go func(vals []float64) {145				start.Wait()146				for _, v := range vals {147					sum.Observe(v)148				}149				end.Done()150			}(vals)151		}152		sort.Float64s(allVars)153		start.Done()154		end.Wait()155		m := &dto.Metric{}156		sum.Write(m)157		if got, want := int(*m.Histogram.SampleCount), total; got != want {158			t.Errorf("got sample count %d, want %d", got, want)159		}160		if got, want := *m.Histogram.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 {161			t.Errorf("got sample sum %f, want %f", got, want)162		}163		wantCounts := getCumulativeCounts(allVars)164		if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want {165			t.Errorf("got %d buckets in protobuf, want %d", got, want)166		}167		for i, wantBound := range testBuckets {168			if i == len(testBuckets)-1 {169				break // No +Inf bucket in protobuf.170			}171			if gotBound := *m.Histogram.Bucket[i].UpperBound; gotBound != wantBound {172				t.Errorf("got bound %f, want %f", gotBound, wantBound)173			}174			if gotCount, wantCount := *m.Histogram.Bucket[i].CumulativeCount, wantCounts[i]; gotCount != wantCount {175				t.Errorf("got count %d, want %d", gotCount, wantCount)176			}177		}178		return true179	}180	if err := quick.Check(it, nil); err != nil {181		t.Error(err)182	}183}184func TestHistogramVecConcurrency(t *testing.T) {185	if testing.Short() {186		t.Skip("Skipping test in short mode.")187	}188	rand.Seed(42)189	it := func(n uint32) bool {190		mutations := int(n%1e4 + 1e4)191		concLevel := int(n%7 + 1)192		vecLength := int(n%3 + 1)193		var start, end sync.WaitGroup194		start.Add(1)195		end.Add(concLevel)196		his := NewHistogramVec(197			HistogramOpts{198				Name:    "test_histogram",199				Help:    "helpless",200				Buckets: []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)},201			},202			[]string{"label"},203		)204		allVars := make([][]float64, vecLength)205		sampleSums := make([]float64, vecLength)206		for i := 0; i < concLevel; i++ {207			vals := make([]float64, mutations)208			picks := make([]int, mutations)209			for j := 0; j < mutations; j++ {210				v := rand.NormFloat64()211				vals[j] = v212				pick := rand.Intn(vecLength)213				picks[j] = pick214				allVars[pick] = append(allVars[pick], v)215				sampleSums[pick] += v216			}217			go func(vals []float64) {218				start.Wait()219				for i, v := range vals {220					his.WithLabelValues(string('A' + picks[i])).Observe(v)221				}222				end.Done()223			}(vals)224		}225		for _, vars := range allVars {226			sort.Float64s(vars)227		}228		start.Done()229		end.Wait()230		for i := 0; i < 
vecLength; i++ {231			m := &dto.Metric{}232			s := his.WithLabelValues(string('A' + i))233			s.(Histogram).Write(m)234			if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want {235				t.Errorf("got %d buckets in protobuf, want %d", got, want)236			}237			if got, want := int(*m.Histogram.SampleCount), len(allVars[i]); got != want {238				t.Errorf("got sample count %d, want %d", got, want)239			}240			if got, want := *m.Histogram.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 {241				t.Errorf("got sample sum %f, want %f", got, want)242			}243			wantCounts := getCumulativeCounts(allVars[i])244			for j, wantBound := range testBuckets {245				if j == len(testBuckets)-1 {246					break // No +Inf bucket in protobuf.247				}248				if gotBound := *m.Histogram.Bucket[j].UpperBound; gotBound != wantBound {249					t.Errorf("got bound %f, want %f", gotBound, wantBound)250				}251				if gotCount, wantCount := *m.Histogram.Bucket[j].CumulativeCount, wantCounts[j]; gotCount != wantCount {252					t.Errorf("got count %d, want %d", gotCount, wantCount)253				}254			}255		}256		return true257	}258	if err := quick.Check(it, nil); err != nil {259		t.Error(err)260	}261}262func getCumulativeCounts(vars []float64) []uint64 {263	counts := make([]uint64, len(testBuckets))264	for _, v := range vars {265		for i := len(testBuckets) - 1; i >= 0; i-- {266			if v > testBuckets[i] {267				break268			}269			counts[i]++270		}271	}272	return counts273}274func TestBuckets(t *testing.T) {275	got := LinearBuckets(-15, 5, 6)276	want := []float64{-15, -10, -5, 0, 5, 10}277	if !reflect.DeepEqual(got, want) {278		t.Errorf("linear buckets: got %v, want %v", got, want)279	}280	got = ExponentialBuckets(100, 1.2, 3)281	want = []float64{100, 120, 144}282	if !reflect.DeepEqual(got, want) {283		t.Errorf("exponential buckets: got %v, want %v", got, want)284	}285}286func TestHistogramAtomicObserve(t *testing.T) {287	var (288		quit = make(chan struct{})289		his  = NewHistogram(HistogramOpts{290			Buckets: []float64{0.5, 10, 20},291		})292	)293	defer func() { close(quit) }()294	observe := func() {295		for {296			select {297			case <-quit:298				return299			default:300				his.Observe(1)301			}302		}303	}304	go observe()305	go observe()306	go observe()307	for i := 0; i < 100; i++ {308		m := &dto.Metric{}309		if err := his.Write(m); err != nil {310			t.Fatal("unexpected error writing histogram:", err)311		}312		h := m.GetHistogram()313		if h.GetSampleCount() != uint64(h.GetSampleSum()) ||314			h.GetSampleCount() != h.GetBucket()[1].GetCumulativeCount() ||315			h.GetSampleCount() != h.GetBucket()[2].GetCumulativeCount() {316			t.Fatalf(317				"inconsistent counts in histogram: count=%d sum=%f buckets=[%d, %d]",318				h.GetSampleCount(), h.GetSampleSum(),319				h.GetBucket()[1].GetCumulativeCount(), h.GetBucket()[2].GetCumulativeCount(),320			)321		}322		runtime.Gosched()323	}324}...
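Outside of the property-based tests, the same Observe/Write cycle can be exercised directly. The sketch below sticks to the client_golang calls the test already uses (NewHistogram, HistogramOpts, Observe, Write and the dto.Metric protobuf); the metric name and bucket layout are made up for illustration.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	// Explicit bucket bounds, as in the test's HistogramOpts.
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "example_seconds",
		Help:    "example histogram",
		Buckets: prometheus.LinearBuckets(0.1, 0.1, 5), // 0.1 .. 0.5
	})

	for _, v := range []float64{0.05, 0.25, 0.45, 2.0} {
		h.Observe(v)
	}

	// Write the histogram into a protobuf Metric and read back the sample
	// count and cumulative bucket counts, mirroring the test's assertions.
	m := &dto.Metric{}
	if err := h.Write(m); err != nil {
		panic(err)
	}
	fmt.Println("samples:", m.GetHistogram().GetSampleCount())
	for _, b := range m.GetHistogram().GetBucket() {
		fmt.Printf("<= %g: %d\n", b.GetUpperBound(), b.GetCumulativeCount())
	}
}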


tree_test.go

Source:tree_test.go Github


...
			})
		})
	}
}
func TestTreeCount(t *testing.T) {
	cases := map[string]int{
		"":                       34,
		"/":                      34,
		"addresses":              5,
		"addresses/addresses.go": 1,
		"idset":                  3,
	}
	tree := testTree(fixData)
	for path, count := range cases {
		path, count := path, count // Capture range variables.
		t.Run(path, func(t *testing.T) {
			t.Parallel()
			var got int
			if tree.DoPath(path, node.Count(&got)); got != count {
				t.Errorf("got %d, want %d", got, count)
			}
		})
	}
}
func TestTreeDiskSize(t *testing.T) {
	cases := map[string]int64{
		"":                       93991,
		"/":                      93991,
		"/some/unknown/path":     0,
		"addresses/addresses.go": 2428,
		"idset":                  1288 + 4231,
	}
	tree := testTree(fixData)
	for path, size := range cases {
		path, size := path, size // Capture range variables.
		t.Run(path, func(t *testing.T) {
			t.Parallel()
			var got int64
			if tree.DoPath(path, node.DiskSize(&got)); got != size {
				t.Errorf("got %d, want %d", got, size)
			}
		})
	}
}
func TestTreeAdd(t *testing.T) {
	const finalCount = 61
	cases := map[string]struct{}{
		"addresses/cached_test.go": {},
		"notify.go":                {},
		"notify/notify.go":         {},
		"proxy/fuse/fuse.go":       {},
		"notify":                   {},
		"notify/":                  {},
		"/notify/":                 {},
		"/notify":                  {},
		"a/b/c/d/e/f/g/h/i/j":      {},
		"a/a/a/a/a/a/a/a/a/a/a":    {},
	}
	tree := testTree(fixData)
	// Run parallel test in group test since T object uses parallel results.
	t.Run("group", func(t *testing.T) {
		const funnySize = 0xD
		for path, _ := range cases {
			path := path // Capture range variable.
			t.Run(path, func(t *testing.T) {
				t.Parallel()
				tree.DoPath(path, node.Insert(node.NewEntry(funnySize, 0, 0)))
				tree.DoPath(path, func(_ node.Guard, n *node.Node) bool {
					if n.IsShadowed() {
						t.Fatalf("Lookup(%q) failed", path)
					}
					if size := n.Entry.File.Size; size != funnySize {
						t.Errorf("got %d, want %d", size, funnySize)
					}
					return true
				})
			})
		}
	})
	if count := tree.Count(); count != finalCount {
		t.Fatalf("got %d, want %d", count, finalCount)
	}
}
func TestTreeDel(t *testing.T) {
	const finalCount = 22
	cases := map[string]struct{}{
		"addresses/addresser.go": {},
		"addresses/":             {},
		"aliases":                {},
		"id.go":                  {},
		"/id.go":                 {},
		"nonexisting.go":         {},
		"/kite.go":               {},
	}
	tree := testTree(fixData)
	// Run parallel test in group test since T object uses parallel results.
	t.Run("group", func(t *testing.T) {
		for path, _ := range cases {
			path := path // Capture range variable.
			t.Run(path, func(t *testing.T) {
				t.Parallel()
				tree.DoPath(path, node.Delete())
			})
		}
	})
	if count := tree.Count(); count != finalCount {
		t.Fatalf("got %d, want %d", count, finalCount)
	}
}
func TestTreeForEach(t *testing.T) {
	want := []string{
		"",
		"addresses",
		"addresses/addresser.go",
		"addresses/addresses.go",
		"addresses/addresses_test.go",
		"addresses/cached.go",
		"aliases",
		"aliases/aliaser.go",
		"aliases/aliases.go",
		"aliases/aliases_test.go",
...
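The Count usage here is a visitor: tree.DoPath walks to a path and node.Count(&got) accumulates the number of nodes below it. The tree and node packages belong to the project under test, so the sketch below uses a small, self-contained stand-in tree to show the same visitor-style counting; every identifier in it is hypothetical, not the real API.

package main

import "fmt"

// node is a minimal stand-in for the tree nodes used by the tests above.
type node struct {
	name     string
	children []*node
}

// count is a simplified analogue of node.Count(&got): it visits a subtree
// and adds the number of nodes it sees to *out.
func count(n *node, out *int) {
	*out++
	for _, c := range n.children {
		count(c, out)
	}
}

func main() {
	root := &node{name: "", children: []*node{
		{name: "addresses", children: []*node{
			{name: "addresses.go"},
			{name: "addresser.go"},
		}},
		{name: "idset"},
	}}

	var got int
	count(root, &got)
	fmt.Println("nodes:", got) // 5
}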


Count

Using AI Code Generation


got g1 = new got();
g1.Count();
got g2 = new got();
g2.Count();
got g3 = new got();
g3.Count();
...
got g24 = new got();
g24.Count();
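The generated snippet above is Java-style pseudocode rather than Go. A minimal Go rendering of the same idea is sketched below; the Got type, its construction with new, and the Count signature are assumptions made for illustration, not the API of any particular got package.

package main

import "fmt"

// Got is a hypothetical stand-in for the got package's type.
type Got struct {
	calls int
}

// Count records one more call and reports the running total.
func (g *Got) Count() int {
	g.calls++
	return g.calls
}

func main() {
	g1 := new(Got)
	g2 := new(Got)
	g1.Count()
	g1.Count()
	g2.Count()
	fmt.Println(g1.calls, g2.calls) // 2 1
}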


Count

Using AI Code Generation


1import "fmt"2func main() {3  fmt.Println("Hello, World!")4}5import "fmt"6func main() {7  fmt.Println("Hello, World!")8}9import "fmt"10func main() {11  fmt.Println("Hello, World!")12}13import "fmt"14func main() {15  fmt.Println("Hello, World!")16}17import "fmt"18func main() {19  fmt.Println("Hello, World!")20}21import "fmt"22func main() {23  fmt.Println("Hello, World!")24}25import "fmt"26func main() {27  fmt.Println("Hello, World!")28}29import "fmt"30func main() {31  fmt.Println("Hello, World!")32}33import "fmt"34func main() {35  fmt.Println("Hello, World!")36}37import "fmt"38func main() {39  fmt.Println("Hello, World!")40}41import "fmt"42func main() {43  fmt.Println("Hello, World!")44}45import "fmt"46func main() {47  fmt.Println("Hello, World!")48}49import "fmt"50func main() {51  fmt.Println("Hello, World!")52}53import "fmt"54func main() {55  fmt.Println("Hello,


Count

Using AI Code Generation


1import "fmt"2func main(){3  fmt.Println("Counting")4  for i:=0; i<10; i++{5    defer fmt.Println(i)6  }7  fmt.Println("Done")8}9import "fmt"10func main(){11  fmt.Println("Counting")12  for i:=0; i<10; i++{13    defer fmt.Println(i)14  }15  fmt.Println("Done")16}17import "fmt"18func main(){19  fmt.Println("Counting")20  for i:=0; i<10; i++{21    defer fmt.Println(i)22  }23  fmt.Println("Done")24}25import "fmt"26func main(){27  fmt.Println("Counting")28  for i:=0; i<10; i++{29    defer fmt.Println(i)30  }31  fmt.Println("Done")32}33import "fmt"34func main(){35  fmt.Println("Counting")36  for i:=0; i<10; i++{37    defer fmt.Println(i)38  }39  fmt.Println("Done")40}41import "fmt"42func main(){43  fmt.Println("Counting")44  for i:=0; i<10; i++{45    defer fmt.Println(i)46  }47  fmt.Println("Done")48}49import "fmt"50func main(){51  fmt.Println("Counting")52  for i:=0; i<10; i++{53    defer fmt.Println(i)54  }55  fmt.Println("Done")56}57import "fmt"58func main(){59  fmt.Println("Counting")60  for i:=0; i<10; i++{61    defer fmt.Println(i)62  }63  fmt.Println("Done")64}


Count

Using AI Code Generation


package main

import (
	"fmt"
	// The import path of the got package is omitted in the original snippet.
)

func main() {
	fmt.Println(got.Count(1, 2))
}


Count

Using AI Code Generation


package main

import (
	"fmt"
	// The import path of the got package is omitted in the original snippet.
)

func main() {
	fmt.Println(got.Count("hello world"))
}


Count

Using AI Code Generation


1import "fmt"2func main() {3    fmt.Println("Hello, World!")4    fmt.Println("Count of numbers is ", Count(5, 6))5}6import "fmt"7func Count(x, y int) int {8}9import "fmt"10func Count(x, y int) int {11}


