diff --git a/carbonapipb/carbonapi.pb.go b/carbonapipb/carbonapi.pb.go
index 0ffd56427..524bbf7de 100644
--- a/carbonapipb/carbonapi.pb.go
+++ b/carbonapipb/carbonapi.pb.go
@@ -23,11 +23,13 @@ type AccessLogDetails struct {
     SendGlobs         bool              `json:"send_globs,omitempty"`
     From              int64             `json:"from,omitempty"`
     Until             int64             `json:"until,omitempty"`
+    MaxDataPoints     int64             `json:"max_data_points,omitempty"`
     Tz                string            `json:"tz,omitempty"`
     FromRaw           string            `json:"from_raw,omitempty"`
     UntilRaw          string            `json:"until_raw,omitempty"`
     URI               string            `json:"uri,omitempty"`
     FromCache         bool              `json:"from_cache"`
+    UsedBackendCache  bool              `json:"used_backend_cache"`
     ZipperRequests    int64             `json:"zipper_requests,omitempty"`
     TotalMetricsCount int64             `json:"total_metrics_count,omitempty"`
     RequestHeaders    map[string]string `json:"request_headers"`
diff --git a/cmd/carbonapi/config/config.go b/cmd/carbonapi/config/config.go
index c42bc6512..f8512eeae 100644
--- a/cmd/carbonapi/config/config.go
+++ b/cmd/carbonapi/config/config.go
@@ -53,7 +53,8 @@ type ConfigType struct {
     Listen              string      `mapstructure:"listen"`
     Buckets             int         `mapstructure:"buckets"`
     Concurency          int         `mapstructure:"concurency"`
-    Cache               CacheConfig `mapstructure:"cache"`
+    ResponseCacheConfig CacheConfig `mapstructure:"cache"`
+    BackendCacheConfig  CacheConfig `mapstructure:"backendCache"`
     Cpus                int         `mapstructure:"cpus"`
     TimezoneString      string      `mapstructure:"tz"`
     UnicodeRangeTables  []string    `mapstructure:"unicodeRangeTables"`
@@ -78,8 +79,8 @@ type ConfigType struct {
     Expvar              ExpvarConfig `mapstructure:"expvar"`
     NotFoundStatusCode  int          `mapstructure:"notFoundStatusCode"`

-    QueryCache cache.BytesCache `mapstructure:"-" json:"-"`
-    FindCache  cache.BytesCache `mapstructure:"-" json:"-"`
+    ResponseCache cache.BytesCache `mapstructure:"-" json:"-"`
+    BackendCache  cache.BytesCache `mapstructure:"-" json:"-"`

     DefaultTimeZone *time.Location `mapstructure:"-" json:"-"`

@@ -106,10 +107,14 @@ var Config = ConfigType{
     Buckets:      10,
     Concurency:   1000,
     MaxBatchSize: 100,
-    Cache: CacheConfig{
+    ResponseCacheConfig: CacheConfig{
         Type:              "mem",
         DefaultTimeoutSec: 60,
     },
+    BackendCacheConfig: CacheConfig{
+        Type:              "null",
+        DefaultTimeoutSec: 0,
+    },
     TimezoneString: "",
     Graphite: GraphiteConfig{
         Pattern: "{prefix}.{fqdn}",
@@ -121,8 +126,8 @@ var Config = ConfigType{
     IdleConnections: 10,
     PidFile:         "",

-    QueryCache: cache.NullCache{},
-    FindCache:  cache.NullCache{},
+    ResponseCache: cache.NullCache{},
+    BackendCache:  cache.NullCache{},

     DefaultTimeZone: time.Local,
     Logger:          []zapwriter.Config{DefaultLoggerConfig},
diff --git a/cmd/carbonapi/config/init.go b/cmd/carbonapi/config/init.go
index 6a337f0c8..18489722b 100644
--- a/cmd/carbonapi/config/init.go
+++ b/cmd/carbonapi/config/init.go
@@ -29,7 +29,8 @@ import (
 var graphTemplates map[string]png.PictureParams

 func SetUpConfig(logger *zap.Logger, BuildVersion string) {
-    Config.Cache.MemcachedServers = viper.GetStringSlice("cache.memcachedServers")
+    Config.ResponseCacheConfig.MemcachedServers = viper.GetStringSlice("cache.memcachedServers")
+    Config.BackendCacheConfig.MemcachedServers = viper.GetStringSlice("backendCache.memcachedServers")
     if n := viper.GetString("logger.logger"); n != "" {
         Config.Logger[0].Logger = n
     }
@@ -155,28 +156,8 @@ func SetUpConfig(logger *zap.Logger, BuildVersion string) {

     Config.Limiter = limiter.NewSimpleLimiter(Config.Concurency)

-    switch Config.Cache.Type {
-    case "memcache":
-        if len(Config.Cache.MemcachedServers) == 0 {
-            logger.Fatal("memcache cache requested but no memcache servers provided")
-        }
-
logger.Info("memcached configured", - zap.Strings("servers", Config.Cache.MemcachedServers), - ) - Config.QueryCache = cache.NewMemcached("capi", Config.Cache.MemcachedServers...) - case "mem": - Config.QueryCache = cache.NewExpireCache(uint64(Config.Cache.Size * 1024 * 1024)) - case "null": - // defaults - Config.QueryCache = cache.NullCache{} - Config.FindCache = cache.NullCache{} - default: - logger.Error("unknown cache type", - zap.String("cache_type", Config.Cache.Type), - zap.Strings("known_cache_types", []string{"null", "mem", "memcache"}), - ) - } + Config.ResponseCache = createCache(logger, "cache", Config.ResponseCacheConfig) + Config.BackendCache = createCache(logger, "backendCache", Config.BackendCacheConfig) if Config.TimezoneString != "" { fields := strings.Split(Config.TimezoneString, ",") @@ -266,6 +247,32 @@ func SetUpConfig(logger *zap.Logger, BuildVersion string) { } } +func createCache(logger *zap.Logger, cacheName string, cacheConfig CacheConfig) cache.BytesCache { + switch cacheConfig.Type { + case "memcache": + if len(cacheConfig.MemcachedServers) == 0 { + logger.Fatal(cacheName + ": memcache cache requested but no memcache servers provided") + } + + logger.Info(cacheName+": memcached configured", + zap.Strings("servers", cacheConfig.MemcachedServers), + ) + return cache.NewMemcached("capi-"+cacheName, cacheConfig.MemcachedServers...) + case "mem": + logger.Info(cacheName + ": in-memory cache configured") + return cache.NewExpireCache(uint64(cacheConfig.Size * 1024 * 1024)) + case "null": + // defaults + return cache.NullCache{} + default: + logger.Error(cacheName+": unknown cache type", + zap.String("cache_type", cacheConfig.Type), + zap.Strings("known_cache_types", []string{"null", "mem", "memcache"}), + ) + return nil + } +} + func SetUpViper(logger *zap.Logger, configPath *string, viperPrefix string) { if *configPath != "" { b, err := ioutil.ReadFile(*configPath) diff --git a/cmd/carbonapi/graphite_metrics.go b/cmd/carbonapi/graphite_metrics.go index b68cedec7..bbe2e31aa 100644 --- a/cmd/carbonapi/graphite_metrics.go +++ b/cmd/carbonapi/graphite_metrics.go @@ -48,16 +48,14 @@ func setupGraphiteMetrics(logger *zap.Logger) { graphite.Register(fmt.Sprintf("%s.request_cache_hits", pattern), http.ApiMetrics.RequestCacheHits) graphite.Register(fmt.Sprintf("%s.request_cache_misses", pattern), http.ApiMetrics.RequestCacheMisses) graphite.Register(fmt.Sprintf("%s.request_cache_overhead_ns", pattern), http.ApiMetrics.RenderCacheOverheadNS) + graphite.Register(fmt.Sprintf("%s.backend_cache_hits", pattern), http.ApiMetrics.BackendCacheHits) + graphite.Register(fmt.Sprintf("%s.backend_cache_misses", pattern), http.ApiMetrics.BackendCacheMisses) for i := 0; i <= config.Config.Buckets; i++ { graphite.Register(fmt.Sprintf("%s.requests_in_%dms_to_%dms", pattern, i*100, (i+1)*100), http.BucketEntry(i)) } graphite.Register(fmt.Sprintf("%s.find_requests", pattern), http.ApiMetrics.FindRequests) - graphite.Register(fmt.Sprintf("%s.find_cache_hits", pattern), http.ApiMetrics.FindCacheHits) - graphite.Register(fmt.Sprintf("%s.find_cache_misses", pattern), http.ApiMetrics.FindCacheMisses) - graphite.Register(fmt.Sprintf("%s.find_cache_overhead_ns", pattern), http.ApiMetrics.FindCacheOverheadNS) - graphite.Register(fmt.Sprintf("%s.render_requests", pattern), http.ApiMetrics.RenderRequests) if http.ApiMetrics.MemcacheTimeouts != nil { diff --git a/cmd/carbonapi/http/metrics.go b/cmd/carbonapi/http/metrics.go index cb2149bd6..80ef2a42c 100644 --- a/cmd/carbonapi/http/metrics.go +++ 
+++ b/cmd/carbonapi/http/metrics.go
@@ -16,13 +16,12 @@ var ApiMetrics = struct {
     RenderRequests        *expvar.Int
     RequestCacheHits      *expvar.Int
     RequestCacheMisses    *expvar.Int
+    BackendCacheHits      *expvar.Int
+    BackendCacheMisses    *expvar.Int
     RenderCacheOverheadNS *expvar.Int
     RequestBuckets        expvar.Func

-    FindRequests        *expvar.Int
-    FindCacheHits       *expvar.Int
-    FindCacheMisses     *expvar.Int
-    FindCacheOverheadNS *expvar.Int
+    FindRequests *expvar.Int

     MemcacheTimeouts expvar.Func

@@ -34,13 +33,11 @@ var ApiMetrics = struct {
     RenderRequests:        expvar.NewInt("render_requests"),
     RequestCacheHits:      expvar.NewInt("request_cache_hits"),
     RequestCacheMisses:    expvar.NewInt("request_cache_misses"),
+    BackendCacheHits:      expvar.NewInt("backend_cache_hits"),
+    BackendCacheMisses:    expvar.NewInt("backend_cache_misses"),
     RenderCacheOverheadNS: expvar.NewInt("render_cache_overhead_ns"),

     FindRequests: expvar.NewInt("find_requests"),
-
-    FindCacheHits:       expvar.NewInt("find_cache_hits"),
-    FindCacheMisses:     expvar.NewInt("find_cache_misses"),
-    FindCacheOverheadNS: expvar.NewInt("find_cache_overhead_ns"),
 }

 var ZipperMetrics = struct {
@@ -117,9 +114,9 @@ func RenderTimeBuckets() interface{} {
 }

 func SetupMetrics(logger *zap.Logger) {
-    switch config.Config.Cache.Type {
+    switch config.Config.ResponseCacheConfig.Type {
     case "memcache":
-        mcache := config.Config.QueryCache.(*cache.MemcachedCache)
+        mcache := config.Config.ResponseCache.(*cache.MemcachedCache)

         ApiMetrics.MemcacheTimeouts = expvar.Func(func() interface{} {
             return mcache.Timeouts()
@@ -127,7 +124,7 @@ func SetupMetrics(logger *zap.Logger) {
         expvar.Publish("memcache_timeouts", ApiMetrics.MemcacheTimeouts)

     case "mem":
-        qcache := config.Config.QueryCache.(*cache.ExpireCache)
+        qcache := config.Config.ResponseCache.(*cache.ExpireCache)

         ApiMetrics.CacheSize = expvar.Func(func() interface{} {
             return qcache.Size()
diff --git a/cmd/carbonapi/http/render_handler.go b/cmd/carbonapi/http/render_handler.go
index 26f8ee0f0..e54b186a8 100644
--- a/cmd/carbonapi/http/render_handler.go
+++ b/cmd/carbonapi/http/render_handler.go
@@ -1,6 +1,9 @@
 package http

 import (
+    "bytes"
+    "encoding/gob"
+    "errors"
     "io/ioutil"
     "net/http"
     "strconv"
@@ -41,9 +44,7 @@ func setError(w http.ResponseWriter, accessLogDetails *carbonapipb.AccessLogDeta
     accessLogDetails.HTTPCode = int32(status)
 }

-func getCacheTimeout(logger *zap.Logger, r *http.Request) int32 {
-    cacheTimeout := config.Config.Cache.DefaultTimeoutSec
-
+func getCacheTimeout(logger *zap.Logger, r *http.Request, defaultTimeout int32) int32 {
     if tstr := r.FormValue("cacheTimeout"); tstr != "" {
         t, err := strconv.Atoi(tstr)
         if err != nil {
@@ -52,11 +53,11 @@ func getCacheTimeout(logger *zap.Logger, r *http.Request) int32 {
                 zap.Error(err),
             )
         } else {
-            cacheTimeout = int32(t)
+            return int32(t)
         }
     }

-    return cacheTimeout
+    return defaultTimeout
 }

 func renderHandler(w http.ResponseWriter, r *http.Request) {
@@ -109,6 +110,7 @@ func renderHandler(w http.ResponseWriter, r *http.Request) {
     from := r.FormValue("from")
     until := r.FormValue("until")
     template := r.FormValue("template")
+    maxDataPoints, _ := strconv.ParseInt(r.FormValue("maxDataPoints"), 10, 64)
     useCache := !parser.TruthyBool(r.FormValue("noCache"))
     noNullPoints := parser.TruthyBool(r.FormValue("noNullPoints"))
     // status will be checked later after we'll setup everything else
@@ -142,11 +144,12 @@ func renderHandler(w http.ResponseWriter, r *http.Request) {
         return
     }

-    cacheTimeout := getCacheTimeout(logger, r)
+    responseCacheTimeout := getCacheTimeout(logger, r, config.Config.ResponseCacheConfig.DefaultTimeoutSec)
+    backendCacheTimeout := getCacheTimeout(logger, r, config.Config.BackendCacheConfig.DefaultTimeoutSec)

     cleanupParams(r)

-    cacheKey := r.Form.Encode()
+    responseCacheKey := r.Form.Encode()

     // normalize from and until values
     qtz := r.FormValue("tz")
@@ -159,7 +162,7 @@ func renderHandler(w http.ResponseWriter, r *http.Request) {
     accessLogDetails.UntilRaw = until
     accessLogDetails.Until = until32
     accessLogDetails.Tz = qtz
-    accessLogDetails.CacheTimeout = cacheTimeout
+    accessLogDetails.CacheTimeout = responseCacheTimeout
     accessLogDetails.Format = formatRaw
     accessLogDetails.Targets = targets

@@ -198,7 +201,7 @@ func renderHandler(w http.ResponseWriter, r *http.Request) {

     if useCache {
         tc := time.Now()
-        response, err := config.Config.QueryCache.Get(cacheKey)
+        response, err := config.Config.ResponseCache.Get(responseCacheKey)
         td := time.Since(tc).Nanoseconds()
         ApiMetrics.RenderCacheOverheadNS.Add(td)

@@ -220,46 +223,58 @@ func renderHandler(w http.ResponseWriter, r *http.Request) {
         return
     }

-    errors := make(map[string]merry.Error)
-    results := make([]*types.MetricData, 0)
-    values := make(map[parser.MetricRequest][]*types.MetricData)
-
     defer func() {
         if r := recover(); r != nil {
             logger.Error("panic during eval:",
-                zap.String("cache_key", cacheKey),
+                zap.String("cache_key", responseCacheKey),
                 zap.Any("reason", r),
                 zap.Stack("stack"),
             )
         }
     }()

-    for _, target := range targets {
-        exp, e, err := parser.ParseExpr(target)
-        if err != nil || e != "" {
-            msg := buildParseErrorString(target, e, err)
-            setError(w, accessLogDetails, msg, http.StatusBadRequest)
-            logAsError = true
-            return
-        }
+    errors := make(map[string]merry.Error)
+    backendCacheKey := backendCacheComputeKey(from, until, targets)
+    results, err := backendCacheFetchResults(logger, useCache, backendCacheKey, accessLogDetails)

-        ApiMetrics.RenderRequests.Add(1)
+    if err != nil {
+        ApiMetrics.BackendCacheMisses.Add(1)
+
+        results = make([]*types.MetricData, 0)
+        values := make(map[parser.MetricRequest][]*types.MetricData)
+
+        for _, target := range targets {
+            exp, e, err := parser.ParseExpr(target)
+            if err != nil || e != "" {
+                msg := buildParseErrorString(target, e, err)
+                setError(w, accessLogDetails, msg, http.StatusBadRequest)
+                logAsError = true
+                return
+            }

-        result, err := expr.FetchAndEvalExp(exp, from32, until32, values)
-        if err != nil {
-            errors[target] = merry.Wrap(err)
+            ApiMetrics.RenderRequests.Add(1)
+
+            result, err := expr.FetchAndEvalExp(exp, from32, until32, values)
+            if err != nil {
+                errors[target] = merry.Wrap(err)
+            }
+
+            results = append(results, result...)
+        }
+
+        for mFetch := range values {
+            expr.SortMetrics(values[mFetch], mFetch)
         }

-        results = append(results, result...)
+        if len(errors) == 0 {
+            backendCacheStoreResults(logger, backendCacheKey, results, backendCacheTimeout)
+        }
     }

     size := 0
     for _, result := range results {
         size += result.Size()
     }

-    for mFetch := range values {
-        expr.SortMetrics(values[mFetch], mFetch)
-    }

     var body []byte
@@ -293,8 +308,9 @@ func renderHandler(w http.ResponseWriter, r *http.Request) {

     switch format {
     case jsonFormat:
-        if maxDataPoints, _ := strconv.Atoi(r.FormValue("maxDataPoints")); maxDataPoints != 0 {
+        if maxDataPoints != 0 {
             types.ConsolidateJSON(maxDataPoints, results)
+            accessLogDetails.MaxDataPoints = maxDataPoints
         }

         body = types.MarshalJSON(results, timestampMultiplier, noNullPoints)
@@ -332,7 +348,7 @@ func renderHandler(w http.ResponseWriter, r *http.Request) {

     if len(results) != 0 {
         tc := time.Now()
-        config.Config.QueryCache.Set(cacheKey, body, cacheTimeout)
+        config.Config.ResponseCache.Set(responseCacheKey, body, responseCacheTimeout)
         td := time.Since(tc).Nanoseconds()
         ApiMetrics.RenderCacheOverheadNS.Add(td)
     }
@@ -340,3 +356,54 @@ func renderHandler(w http.ResponseWriter, r *http.Request) {
     gotErrors := len(errors) > 0
     accessLogDetails.HaveNonFatalErrors = gotErrors
 }
+
+func backendCacheComputeKey(from, until string, targets []string) string {
+    var backendCacheKey bytes.Buffer
+    backendCacheKey.WriteString("from:")
+    backendCacheKey.WriteString(from)
+    backendCacheKey.WriteString(" until:")
+    backendCacheKey.WriteString(until)
+    backendCacheKey.WriteString(" targets:")
+    backendCacheKey.WriteString(strings.Join(targets, ","))
+    return backendCacheKey.String()
+}
+
+func backendCacheFetchResults(logger *zap.Logger, useCache bool, backendCacheKey string, accessLogDetails *carbonapipb.AccessLogDetails) ([]*types.MetricData, error) {
+    if !useCache {
+        return nil, errors.New("useCache is false")
+    }
+
+    backendCacheResults, err := config.Config.BackendCache.Get(backendCacheKey)
+
+    if err != nil {
+        return nil, err
+    }
+
+    var results []*types.MetricData
+    cacheDecodingBuf := bytes.NewBuffer(backendCacheResults)
+    dec := gob.NewDecoder(cacheDecodingBuf)
+    err = dec.Decode(&results)
+
+    if err != nil {
+        logger.Error("Error decoding cached backend results")
+        return nil, err
+    }
+
+    accessLogDetails.UsedBackendCache = true
+    ApiMetrics.BackendCacheHits.Add(1)
+
+    return results, nil
+}
+
+func backendCacheStoreResults(logger *zap.Logger, backendCacheKey string, results []*types.MetricData, backendCacheTimeout int32) {
+    var serializedResults bytes.Buffer
+    enc := gob.NewEncoder(&serializedResults)
+    err := enc.Encode(results)
+
+    if err != nil {
+        logger.Error("Error encoding backend results for caching")
+        return
+    }
+
+    config.Config.BackendCache.Set(backendCacheKey, serializedResults.Bytes(), backendCacheTimeout)
+}
diff --git a/doc/configuration.md b/doc/configuration.md
index 618f0624e..5897b7eaf 100644
--- a/doc/configuration.md
+++ b/doc/configuration.md
@@ -202,7 +202,13 @@ unicodeRangeTables:

 ***
 ## cache
-Specify what storage to use for metric cache and path cache.
+Specify what storage to use for the response cache. This cache stores the
+final carbonapi response right before it is sent to the client. A hit in this
+cache avoids almost all computation, including rendering images. On the other
+hand, a request hits this cache only if a previous request with exactly the
+same response format and the same maxDataPoints parameter populated the cache.
+Grafana sets maxDataPoints based on the client screen width, which reduces the
+hit ratio of this cache.

 Supported cache types:
  - `mem` - will use integrated in-memory cache. Not distributed. Fast.
@@ -212,7 +218,6 @@ Supported cache types:
 Extra options:
  - `size_mb` - specify max size of cache, in MiB
  - `defaultTimeoutSec` - specify default cache duration. Identical to `DEFAULT_CACHE_DURATION` in graphite-web
-
 ### Example
 ```yaml
 cache:
    type: "memcache"
    size_mb: 0
    defaultTimeoutSec: 60
    memcachedServers:
       - "127.0.0.1:1234"
       - "127.0.0.2:1235"
 ```
-
+## backendCache
+Specify what storage to use for the backend cache. This cache stores the
+responses from the backends. It should see more cache hits than the response
+cache, since the response format and the maxDataPoints parameter are not part
+of the cache key, but cached results still need to be post-processed (e.g.
+serialized to the desired response format).
+
+It supports the same options as the response cache.
+### Example
+```yaml
+backendCache:
+   type: "memcache"
+   size_mb: 0
+   defaultTimeoutSec: 60
+   memcachedServers:
+      - "127.0.0.1:1234"
+      - "127.0.0.2:1235"
+```
 ***
 ## cpus
@@ -698,4 +720,4 @@ Default: 600 (10 minutes)
 ### Example
 ```yaml
 expireDelaySec: 10
-```
\ No newline at end of file
+```
diff --git a/expr/types/types.go b/expr/types/types.go
index 0de454403..2528fba83 100644
--- a/expr/types/types.go
+++ b/expr/types/types.go
@@ -59,7 +59,7 @@ func MarshalCSV(results []*MetricData) []byte {
 }

 // ConsolidateJSON consolidates values to maxDataPoints size
-func ConsolidateJSON(maxDataPoints int, results []*MetricData) {
+func ConsolidateJSON(maxDataPoints int64, results []*MetricData) {
     if len(results) == 0 {
         return
     }
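Not part of the patch above — just an illustrative, self-contained sketch of the gob round trip and key construction that the new backend cache relies on. The `fakeMetricData` struct and the in-memory map are hypothetical stand-ins for `types.MetricData` and the configured `cache.BytesCache`; only the key format and the encode/decode steps mirror the new `backendCacheComputeKey`, `backendCacheStoreResults`, and `backendCacheFetchResults` helpers.

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"strings"
)

// fakeMetricData is a hypothetical stand-in for expr/types.MetricData,
// used only to show that gob needs exported fields for the round trip.
type fakeMetricData struct {
	Name   string
	Values []float64
}

// computeKey mirrors backendCacheComputeKey: the key is built from the raw
// from/until strings and the ordered list of render targets.
func computeKey(from, until string, targets []string) string {
	var b bytes.Buffer
	b.WriteString("from:")
	b.WriteString(from)
	b.WriteString(" until:")
	b.WriteString(until)
	b.WriteString(" targets:")
	b.WriteString(strings.Join(targets, ","))
	return b.String()
}

func main() {
	// A plain map stands in for the configured BytesCache (mem/memcache/null).
	backendCache := map[string][]byte{}

	results := []*fakeMetricData{{Name: "foo.bar", Values: []float64{1, 2, 3}}}
	key := computeKey("-1h", "now", []string{"foo.*"})

	// Store: encode the result set with gob, as backendCacheStoreResults does.
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(results); err == nil {
		backendCache[key] = buf.Bytes()
	}

	// Fetch: decode the cached bytes back, as backendCacheFetchResults does.
	var cached []*fakeMetricData
	if raw, ok := backendCache[key]; ok {
		if err := gob.NewDecoder(bytes.NewBuffer(raw)).Decode(&cached); err != nil {
			fmt.Println("decode error:", err)
			return
		}
	}
	fmt.Printf("cache hit for %q: %d series\n", key, len(cached))
}
```

Because the cached payload is format-agnostic, the backend cache can serve any response format or maxDataPoints value after post-processing, which is what the documentation above cites as its advantage over the response cache.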