...

Source file src/golang.conradwood.net/go-easyops/cache/cache.go

Documentation: golang.conradwood.net/go-easyops/cache

     1  /*
     2  package cache provides a basic, safe in-memory cache. This is currently per-instance.
     3  a patch to make it per-cluster is welcome. (perhaps using redis?)
     4  */
     5  package cache
     6  
     7  // a basic local key->value cache with expiry
     8  // the actual implementation of the cache is hidden
     9  // to the user. This is on purpose, so to enable us
    10  // to replace the backend with a distributed cache
    11  // like redis if that becomes beneficial
    12  
    13  import (
    14  	"fmt"
    15  	"golang.conradwood.net/go-easyops/prometheus"
    16  	"math/rand"
    17  	"sync"
    18  	"time"
    19  )
    20  
    21  var (
    22  	randsrc_lock sync.Mutex
    23  	randsrc      = rand.New(rand.NewSource(time.Now().UnixNano()))
    24  	cacheLock    sync.Mutex
    25  	performance  = prometheus.NewSummaryVec(
    26  		prometheus.SummaryOpts{
    27  			Name:       "goeasyops_cache_performance",
    28  			Help:       "V=1 UNIT=s DESC=Performance of cache lookups",
    29  			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
    30  			MaxAge:     time.Hour,
    31  		},
    32  		[]string{"cachename"},
    33  	)
    34  	size = prometheus.NewGaugeVec(
    35  		prometheus.GaugeOpts{
    36  			Name: "goeasyops_cache_size",
    37  			Help: "V=1 UNIT=ops DESC=size of cache",
    38  		},
    39  		[]string{"cachename", "et"},
    40  	)
    41  	efficiency = prometheus.NewGaugeVec(
    42  		prometheus.GaugeOpts{
    43  			Name: "goeasyops_cache_efficiency",
    44  			Help: "V=1 UNIT=ops DESC=hit and miss counters of cache",
    45  		},
    46  		[]string{"cachename", "result"},
    47  	)
    48  	usage = prometheus.NewGaugeVec(
    49  		prometheus.GaugeOpts{
    50  			Name: "goeasyops_cache_lookups",
    51  			Help: "V=1 UNIT=ops DESC=size of cache",
    52  		},
    53  		[]string{"cachename"},
    54  	)
    55  	caches []*Cache
    56  )
    57  
// Cache is a basic, per-instance in-memory key->value cache with expiry.
type Cache struct {
	// name identifies this cache in log output and as the prometheus
	// "cachename" label; must be metric-name compatible (see New).
	name string
	// mcache is the flat slot list; slots are never removed, only marked
	// free and reused by putRaw.
	mcache []*cacheEntry
	// mlock guards mcache and the entries it points to.
	mlock sync.Mutex
	// MaxLifetime is the maximum age of an entry before it is treated as
	// stale, regardless of any explicit expiry.
	MaxLifetime time.Duration
}
    64  
// cacheEntry is a single slot in Cache.mcache. Slots are never deleted;
// eviction/expiry sets free=true and the slot is reclaimed by a later put.
type cacheEntry struct {
	free     bool        // slot holds no live value; may be reused by putRaw
	created  time.Time   // when the value was (last) stored
	accessed time.Time   // when the value was last stored or read
	expiry   *time.Time  // optional explicit expiry; nil means lifetime-only
	key      string      // lookup key
	value    interface{} // cached value
}
    73  
// init registers this package's prometheus collectors; MustRegister panics
// on duplicate registration (e.g. metric name collision).
func init() {
	prometheus.MustRegister(efficiency, size, performance, usage)
}
    77  
    78  // clear the entire cache with this name and return cache objects (which cleared)
    79  func Clear(cacheName string) ([]*Cache, error) {
    80  	fmt.Printf("[go-easyops] Clearing cache \"%s\"\n", cacheName)
    81  	cacheLock.Lock()
    82  	defer cacheLock.Unlock()
    83  	var res []*Cache
    84  	for _, c := range caches {
    85  		if cacheName != "" && c.name != cacheName {
    86  			continue
    87  		}
    88  		c.mlock.Lock()
    89  		c.mcache = make([]*cacheEntry, 0)
    90  		c.mlock.Unlock()
    91  		res = append(res, c)
    92  	}
    93  	return res, nil
    94  }
    95  
    96  // create a new cache. "name" must be a prometheus metric compatible name and unique throughout
    97  // good practice: prefix it with servicepackagename. for example:
    98  //
    99  //	servicename: "lbproxy.LBProxyService"
   100  //
   101  // -> cachename: "lbproxy_tokencache"
   102  func New(name string, lifetime time.Duration, maxSizeInMB int) *Cache {
   103  	res := &Cache{name: name, MaxLifetime: lifetime}
   104  	res.setCacheGauge(0)
   105  	go res.setCacheGaugeLoop()
   106  	cacheLock.Lock()
   107  	caches = append(caches, res)
   108  	cacheLock.Unlock()
   109  	return res
   110  }
   111  
   112  // evict (aka remove) a specific key from this cache
   113  func (c *Cache) Evict(key string) {
   114  	c.mlock.Lock()
   115  	for _, x := range c.mcache {
   116  		if x.key == key {
   117  			x.free = true
   118  		}
   119  	}
   120  	c.mlock.Unlock()
   121  }
   122  
// clear this cache (that is: remove all entries in it)
// NOTE: mlock is deliberately released before setCacheGauge runs; the gauge
// update reads c.mcache itself and must not execute under the lock here.
func (c *Cache) Clear() {
	c.mlock.Lock()
	c.mcache = make([]*cacheEntry, 0)
	c.mlock.Unlock()
	c.setCacheGauge(0)
}
   130  
// put something into this cache, with a specific expiry time.
// The entry is also subject to c.MaxLifetime; a nil expiry behaves like Put.
func (c *Cache) PutWithExpiry(key string, value interface{}, expiry *time.Time) {
	c.putRaw(key, value, expiry)
}
   135  
// put something into this cache (no explicit expiry; the entry goes stale
// after c.MaxLifetime).
func (c *Cache) Put(key string, value interface{}) {
	c.putRaw(key, value, nil)
}
   140  
   141  func (c *Cache) putRaw(key string, value interface{}, expiry *time.Time) {
   142  	c.mlock.Lock()
   143  	defer c.mlock.Unlock()
   144  	now := time.Now()
   145  	cutOff := time.Now().Add(0 - c.MaxLifetime)
   146  	for _, x := range c.mcache {
   147  		if x.key == key {
   148  			x.created = time.Now()
   149  			x.value = value
   150  			x.accessed = x.created
   151  			x.expiry = expiry
   152  			x.free = false
   153  			return
   154  		}
   155  		if (!x.free) && x.created.Before(cutOff) {
   156  			x.free = true
   157  			continue
   158  		}
   159  		if (x.expiry != nil) && (x.expiry.After(now)) {
   160  			x.free = true
   161  			continue
   162  		}
   163  
   164  	}
   165  	for _, x := range c.mcache {
   166  		if x.free {
   167  			x.key = key
   168  			x.created = time.Now()
   169  			x.value = value
   170  			x.expiry = expiry
   171  			x.free = false
   172  			return
   173  		}
   174  	}
   175  	mc := &cacheEntry{free: false, created: time.Now(), expiry: expiry, key: key, value: value}
   176  	mc.accessed = mc.created
   177  	c.mcache = append(c.mcache, mc)
   178  }
   179  
   180  // get something from the cache (specified by key)
   181  func (c *Cache) Get(key string) interface{} {
   182  	label := prometheus.Labels{"cachename": c.name}
   183  	usage.With(label).Inc()
   184  	now := time.Now()
   185  	c.mlock.Lock()
   186  	defer c.mlock.Unlock()
   187  	cutOff := now.Add(0 - c.MaxLifetime)
   188  	for _, x := range c.mcache {
   189  		if (!x.free) && (x.key == key) {
   190  			if x.created.Before(cutOff) {
   191  				x.free = true
   192  				continue
   193  			}
   194  			if (x.expiry != nil) && (x.expiry.After(now)) {
   195  				x.free = true
   196  				continue
   197  			}
   198  			x.accessed = time.Now()
   199  			performance.With(label).Observe(time.Since(now).Seconds())
   200  			efficiency.With(prometheus.Labels{"cachename": c.name, "result": "hit"}).Inc()
   201  			return x.value
   202  		}
   203  	}
   204  	performance.With(label).Observe(time.Since(now).Seconds())
   205  	efficiency.With(prometheus.Labels{"cachename": c.name, "result": "miss"}).Inc()
   206  	return nil
   207  }
   208  
   209  // get all the keys from the cache
   210  func (c *Cache) Keys() []string {
   211  	var res []string
   212  	c.mlock.Lock()
   213  	defer c.mlock.Unlock()
   214  	for _, x := range c.mcache {
   215  		res = append(res, x.key)
   216  	}
   217  	return res
   218  }
   219  
   220  func (c *Cache) setCacheGaugeLoop() {
   221  	for {
   222  		c.mlock.Lock()
   223  		i := 0
   224  		for _, x := range c.mcache {
   225  			if x.free {
   226  				continue
   227  			}
   228  			i++
   229  		}
   230  		c.mlock.Unlock()
   231  		c.setCacheGauge(i)
   232  		randsrc_lock.Lock()
   233  		t := randsrc.Int63n(int64(1 * 60))
   234  		randsrc_lock.Unlock()
   235  		time.Sleep(time.Duration(t) * time.Second)
   236  	}
   237  }
   238  func (c *Cache) setCacheGauge(used int) {
   239  	size.With(prometheus.Labels{"cachename": c.name, "et": "allocated"}).Set(float64(len(c.mcache)))
   240  	size.With(prometheus.Labels{"cachename": c.name, "et": "used"}).Set(float64(used))
   241  
   242  }
   243  
// return the name of this cache (as passed to New; also used as the
// prometheus "cachename" label).
func (c *Cache) Name() string {
	return c.name
}
   248  

View as plain text