...

Source file src/golang.conradwood.net/go-easyops/cache/cachingResolver.go

Documentation: golang.conradwood.net/go-easyops/cache

     1  package cache
     2  
     3  import (
     4  	"context"
     5  	"sync"
     6  	"time"
     7  
     8  	"golang.conradwood.net/go-easyops/prometheus"
     9  )
    10  
    11  var (
    12  	asyncLookups = prometheus.NewGaugeVec(
    13  		prometheus.GaugeOpts{
    14  			Name: "goeasyops_cache_async_lookups",
    15  			Help: "V=1 UNIT=ops DESC=number of looks executed asynchronously",
    16  		},
    17  		[]string{"cachename"},
    18  	)
    19  )
    20  
// init registers the package's async-lookup metric with the prometheus
// registry; MustRegister panics on duplicate registration, which surfaces
// programming errors at startup rather than silently dropping the metric.
func init() {
	prometheus.MustRegister(asyncLookups)
}
    24  
// CachingResolver is a key/value cache that transparently retrieves missing
// (or expired) entries through a caller-supplied retrieval function.
type CachingResolver interface {
	// Retrieve returns the value for key, serving from cache when possible and
	// calling fr on a miss. See Retrieve for details on (lack of) context handling.
	Retrieve(key string, fr func(string) (interface{}, error)) (interface{}, error)
	// RetrieveContext is like Retrieve but passes ctx through to the retrieval function.
	RetrieveContext(ctx context.Context, key string, fr func(context.Context, string) (interface{}, error)) (interface{}, error)
	// SetRefreshAfter sets the age after which a cached entry is refreshed in
	// the background (requires an async retriever to be set).
	SetRefreshAfter(time.Duration)
	// SetAsyncRetriever enables background refreshes using fr; if never called,
	// no asynchronous retrieval is done.
	SetAsyncRetriever(fr func(string) (interface{}, error))
	// Evict removes the entry for key from the cache (if present).
	Evict(key string)
	// Clear removes all entries from the cache.
	Clear()
	// Keys returns the keys currently held in the cache.
	Keys() []string
}
    34  
// cachingResolver is the package-private implementation of CachingResolver,
// backed by a *Cache holding *cacheEntry2 values.
type cachingResolver struct {
	/*
		if true, this will cache errors that occurred during retrieval and
		reassert them when the lookup is repeated
	*/
	CacheErrors bool
	/*
		if true, this will cache nil results
	*/
	CacheNil bool
	/*
		refresh entries. There is a "hard eviction" once the lifetime of an
		object expires. Normally, an object is retrieved only when it is
		requested but no longer exists in the cache. However an object might
		exist in the cache but be "close to" the end of its lifetime. It makes
		sense to then serve from cache and refresh in the background (assuming
		it will be requested soon again). If this is set, an existing object
		older than this will be refreshed in the background after retrieval.
	*/
	refreshAfter time.Duration
	/*
		if non-nil this refreshes Errors at a different speed than non-errors
	*/
	refreshErrAfter time.Duration
	// the asynchronous retrieval requires a different codepath, for example, a context might need to be created etc.
	// if this is nil no async retrieval is done, otherwise this function will be used
	asyncRetriever func(string) (interface{}, error)

	// cache
	gccache      *Cache // full of cacheEntry Objects
	retrieveLock sync.Mutex // serializes all cache-miss retrievals and async refreshes
}
    65  
// cacheEntry2 is the value stored in gccache for each key. It is never
// exposed outside the package; instead "object" is the thing of interest
// to the user. A lookup failure is cached too, carried in err.
type cacheEntry2 struct {
	object  interface{} // the retrieved value (may be nil)
	err     error       // non-nil if the retrieval itself failed
	created time.Time   // when this entry was produced; drives background refresh
}
    72  
    73  func NewResolvingCache(name string, lifetime time.Duration, maxLimitEntries int) CachingResolver {
    74  	res := &cachingResolver{
    75  		CacheErrors: true,
    76  		CacheNil:    true,
    77  	}
    78  	res.gccache = New(name, lifetime, maxLimitEntries)
    79  	res.refreshAfter = lifetime - (lifetime / 3)
    80  	res.refreshErrAfter = time.Duration(45) * time.Second
    81  	return res
    82  }
    83  
    84  /*
    85  retrieves object by key from cache or via retrieval function.
    86  caches result for next time the same key is being looked up
    87  This function _does not_ take a context as parameter.
    88  the resolver might retrieve a value asynchronously whilst synchronously serving from cache
    89  if so, the context might get cancelled before the async retrieval has completed.
    90  ** Example: **
    91  
    92  	o,err :=c.Retrieve("foo", func(k string) (interface{}, error) {
    93  			return "bar",nil
    94  		})
    95  
    96  ** Example With Async Retrieval: **
    97  c.SetAsyncRetriever(get_by_key)
    98  o,err :=c.Retrieve("foo",get_by_key)
    99  
   100  	func get_by_key(key string) (interface{}, error) {
   101  	 return "bar",nil
   102  
   103  "
   104  */
   105  func (cr *cachingResolver) Retrieve(key string, fr func(string) (interface{}, error)) (interface{}, error) {
   106  	ctx := context.Background()
   107  	return cr.RetrieveContext(ctx, key, func(context.Context, string) (interface{}, error) {
   108  		return fr(key)
   109  	})
   110  }
   111  func (cr *cachingResolver) RetrieveContext(ctx context.Context, key string, fr func(context.Context, string) (interface{}, error)) (interface{}, error) {
   112  	cname := cr.gccache.name
   113  	label := prometheus.Labels{"cachename": cname}
   114  	usage.With(label).Inc()
   115  	started := time.Now()
   116  	var ce *cacheEntry2
   117  	o := cr.gccache.Get(key)
   118  	if o != nil {
   119  		ce = o.(*cacheEntry2)
   120  		if cr.asyncRetriever != nil {
   121  			// we got it in cache. do we need to refresh async?
   122  			if cr.refreshAfter != 0 && time.Since(ce.created) > cr.refreshAfter {
   123  				go cr.refresh(key)
   124  			} else if cr.refreshErrAfter != 0 && ce.err != nil && time.Since(ce.created) > cr.refreshErrAfter {
   125  				go cr.refresh(key)
   126  			}
   127  		}
   128  	}
   129  	if ce == nil {
   130  		// TODO: make this WAY more granular. Goal: do not lookup same key simultanously, but allow different keys to be retrieved simultaneously
   131  		cr.retrieveLock.Lock()
   132  		defer cr.retrieveLock.Unlock()
   133  		o := cr.gccache.Get(key)
   134  		if o != nil {
   135  			ce = o.(*cacheEntry2)
   136  		} else {
   137  			efficiency.With(prometheus.Labels{"cachename": cname, "result": "miss"}).Inc()
   138  			o, err := fr(ctx, key)
   139  			ce = &cacheEntry2{object: o, err: err, created: time.Now()}
   140  			cr.gccache.Put(key, ce)
   141  		}
   142  	} else {
   143  		efficiency.With(prometheus.Labels{"cachename": cname, "result": "hit"}).Inc()
   144  	}
   145  	if ce.err != nil {
   146  		return nil, ce.err
   147  	}
   148  	performance.With(label).Observe(time.Since(started).Seconds())
   149  	return ce.object, nil
   150  }
   151  
   152  func (cr *cachingResolver) refresh(key string) {
   153  	cr.retrieveLock.Lock()
   154  	defer cr.retrieveLock.Unlock()
   155  	fr := cr.asyncRetriever
   156  	if fr == nil {
   157  		return
   158  	}
   159  	o, err := fr(key)
   160  
   161  	// if we have an error retrieving, put a good object in cache, do not overwrite with error
   162  	if err != nil {
   163  		o := cr.gccache.Get(key)
   164  		if o != nil {
   165  			return
   166  		}
   167  	}
   168  	ce := &cacheEntry2{object: o, err: err, created: time.Now()}
   169  	cr.gccache.Put(key, ce)
   170  	cname := cr.gccache.name
   171  	label := prometheus.Labels{"cachename": cname}
   172  	asyncLookups.With(label).Inc()
   173  }
// SetRefreshAfter sets the entry age after which a cached value is refreshed
// in the background (only effective once an async retriever is configured).
func (cr *cachingResolver) SetRefreshAfter(d time.Duration) {
	cr.refreshAfter = d
}
// SetAsyncRetriever installs fr as the function used for background
// refreshes; while it is nil no asynchronous retrieval takes place.
func (cr *cachingResolver) SetAsyncRetriever(fr func(string) (interface{}, error)) {
	cr.asyncRetriever = fr
}
   180  
// Evict removes the entry for key from the underlying cache, forcing a fresh
// retrieval on the next lookup.
func (cr *cachingResolver) Evict(key string) {
	cr.gccache.Evict(key)
}
// Clear removes all entries from the underlying cache.
func (cr *cachingResolver) Clear() {
	cr.gccache.Clear()
}
// Keys returns the keys currently held in the underlying cache.
func (cr *cachingResolver) Keys() []string {
	return cr.gccache.Keys()
}
   190  

View as plain text