private static void initCache() {
    logger.info("(re-)creating tile cache");
    if (doCacheLock) OrbitTiledImage2.cacheLock.writeLock().lock();
    try {
        long mem = Runtime.getRuntime().maxMemory();
        tileCache = CacheBuilder.newBuilder()
                //.recordStats()
                .expireAfterWrite(7, TimeUnit.MINUTES)
                .maximumWeight(mem / 2)
                .weigher(new Weigher<PointAndName, Raster>() {
                    @Override
                    public int weigh(PointAndName key, Raster raster) {
                        // estimate: 3 bands at 4 bytes per sample
                        return raster.getWidth() * raster.getHeight() * 3 * 4;
                    }
                })
                .build();
    } finally {
        if (doCacheLock) OrbitTiledImage2.cacheLock.writeLock().unlock();
    }
}
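// A minimal sketch (hypothetical tile size; assumes the 3-band, 4-bytes-per-sample
// estimate used by the weigher above) of what one cached entry costs against the
// maxMemory/2 budget. Requires java.awt.image.BufferedImage and java.awt.image.Raster.
Raster tile = new BufferedImage(512, 512, BufferedImage.TYPE_INT_RGB).getData();
int tileWeight = tile.getWidth() * tile.getHeight() * 3 * 4; // 512 * 512 * 3 * 4 = 3,145,728 bytes (~3 MB)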
/**
 * Create a new cache with the given memory size.
 *
 * @param maxMemory the maximum memory to use (1 or larger)
 * @param averageMemory the average memory (1 or larger)
 * @param segmentCount the number of cache segments (must be a power of 2)
 * @param stackMoveDistance how many other items are to be moved to the top
 *            of the stack before the current item is moved
 */
@SuppressWarnings("unchecked")
CacheLIRS(Weigher<K, V> weigher, long maxMemory, int averageMemory,
        int segmentCount, int stackMoveDistance, final CacheLoader<K, V> loader) {
    this.weigher = weigher;
    setMaxMemory(maxMemory);
    setAverageMemory(averageMemory);
    if (Integer.bitCount(segmentCount) != 1) {
        throw new IllegalArgumentException(
                "The segment count must be a power of 2, is " + segmentCount);
    }
    this.segmentCount = segmentCount;
    this.segmentMask = segmentCount - 1;
    this.stackMoveDistance = stackMoveDistance;
    segments = new Segment[segmentCount];
    invalidateAll(); // allocates the segments
    this.segmentShift = Integer.numberOfTrailingZeros(segments[0].entries.length);
    this.loader = loader;
}
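// The power-of-2 requirement lets the cache map a key's hash to a segment with a
// bit mask instead of a modulo. A minimal sketch of that idea (illustrative only,
// not the actual CacheLIRS internals; getSegment is a hypothetical helper):
private Segment<K, V> getSegment(int hash) {
    // segmentMask == segmentCount - 1, so (hash & segmentMask) equals
    // (hash % segmentCount) whenever segmentCount is a power of 2.
    return segments[hash & segmentMask];
}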
public ConfigFileController() {
    localCache = CacheBuilder.newBuilder()
        .expireAfterWrite(EXPIRE_AFTER_WRITE, TimeUnit.MINUTES)
        .weigher(new Weigher<String, String>() {
            @Override
            public int weigh(String key, String value) {
                return value == null ? 0 : value.length();
            }
        })
        .maximumWeight(MAX_CACHE_SIZE)
        .removalListener(new RemovalListener<String, String>() {
            @Override
            public void onRemoval(RemovalNotification<String, String> notification) {
                String cacheKey = notification.getKey();
                logger.debug("removing cache key: {}", cacheKey);
                if (!cacheKey2WatchedKeys.containsKey(cacheKey)) {
                    return;
                }
                // create a new list to avoid ConcurrentModificationException
                List<String> watchedKeys = new ArrayList<>(cacheKey2WatchedKeys.get(cacheKey));
                for (String watchedKey : watchedKeys) {
                    watchedKeys2CacheKey.remove(watchedKey, cacheKey);
                }
                cacheKey2WatchedKeys.removeAll(cacheKey);
                logger.debug("removed cache key: {}", cacheKey);
            }
        })
        .build();

    propertiesResponseHeaders = new HttpHeaders();
    propertiesResponseHeaders.add("Content-Type", "text/plain;charset=UTF-8");
    jsonResponseHeaders = new HttpHeaders();
    jsonResponseHeaders.add("Content-Type", "application/json;charset=UTF-8");
    NOT_FOUND_RESPONSE = new ResponseEntity<>(HttpStatus.NOT_FOUND);
}
public HttpStorageCaching() {
    threadpool = Executors.newFixedThreadPool(100);
    asyncHttp = Async.newInstance().use(threadpool);
    cacheWeighter = new Weigher<String, byte[]>() {
        @Override
        public int weigh(String key, byte[] value) {
            return value.length;
        }
    };
    cache = CacheBuilder.newBuilder()
        .weigher(cacheWeighter)
        .maximumWeight(CACHE_WEIGHT)
        .build();
}
MapCache(final String name, final CacheConfiguration config) {
    this.name = name;
    CacheBuilder<Object, Object> cacheBuilder = CacheBuilder.newBuilder();
    if (config.isStatisticsEnabled()) {
        cacheBuilder.recordStats();
    }
    if (config.isSoftValuesEnabled()) {
        cacheBuilder.softValues();
    }
    if (config.getMaximumSize() >= 0) {
        if (config.isArraySizeEnabled()) {
            cacheBuilder.maximumWeight(config.getMaximumSize());
            cacheBuilder.weigher(new Weigher<K, V>() {
                @Override
                public int weigh(final K key, final V value) {
                    if (value instanceof byte[]) {
                        return ((byte[]) value).length;
                    }
                    throw new IllegalStateException("Using array size is only supported for byte arrays"); //$NON-NLS-1$
                }
            });
        } else {
            cacheBuilder.maximumSize(config.getMaximumSize());
        }
    }
    backend = cacheBuilder.build();
}
public ExpirationKCVSCache(final KeyColumnValueStore store, String metricsName, final long cacheTimeMS,
                           final long invalidationGracePeriodMS, final long maximumByteSize) {
    super(store, metricsName);
    Preconditions.checkArgument(cacheTimeMS > 0, "Cache expiration must be positive: %s", cacheTimeMS);
    Preconditions.checkArgument(System.currentTimeMillis() + 1000L * 3600 * 24 * 365 * 100 + cacheTimeMS > 0,
            "Cache expiration time too large, overflow may occur: %s", cacheTimeMS);
    this.cacheTimeMS = cacheTimeMS;
    int concurrencyLevel = Runtime.getRuntime().availableProcessors();
    Preconditions.checkArgument(invalidationGracePeriodMS >= 0,
            "Invalid expiration grace period: %s", invalidationGracePeriodMS);
    this.invalidationGracePeriodMS = invalidationGracePeriodMS;
    CacheBuilder<KeySliceQuery, EntryList> cachebuilder = CacheBuilder.newBuilder()
            .maximumWeight(maximumByteSize)
            .concurrencyLevel(concurrencyLevel)
            .initialCapacity(1000)
            .expireAfterWrite(cacheTimeMS, TimeUnit.MILLISECONDS)
            .weigher(new Weigher<KeySliceQuery, EntryList>() {
                @Override
                public int weigh(KeySliceQuery keySliceQuery, EntryList entries) {
                    return GUAVA_CACHE_ENTRY_SIZE + KEY_QUERY_SIZE + entries.getByteSize();
                }
            });
    cache = cachebuilder.build();
    expiredKeys = new ConcurrentHashMap<StaticBuffer, Long>(50, 0.75f, concurrencyLevel);
    penaltyCountdown = new CountDownLatch(PENALTY_THRESHOLD);
    cleanupThread = new CleanupThread();
    cleanupThread.start();
}
@SuppressWarnings("unchecked") <K, V> CacheBuilder<K, V> create(CacheBinding<K, V> def, boolean unwrapValueHolder) { CacheBuilder<K, V> builder = newCacheBuilder(); builder.recordStats(); builder.maximumWeight(cfg.getLong("cache", def.name(), "memoryLimit", def.maximumWeight())); builder = builder.removalListener(forwardingRemovalListenerFactory.create(def.name())); Weigher<K, V> weigher = def.weigher(); if (weigher != null && unwrapValueHolder) { final Weigher<K, V> impl = weigher; weigher = (Weigher<K, V>) new Weigher<K, ValueHolder<V>>() { @Override public int weigh(K key, ValueHolder<V> value) { return impl.weigh(key, value.value); } }; } else if (weigher == null) { weigher = unitWeight(); } builder.weigher(weigher); Long age = def.expireAfterWrite(TimeUnit.SECONDS); if (has(def.name(), "maxAge")) { builder.expireAfterWrite( ConfigUtil.getTimeUnit( cfg, "cache", def.name(), "maxAge", age != null ? age : 0, TimeUnit.SECONDS), TimeUnit.SECONDS); } else if (age != null) { builder.expireAfterWrite(age, TimeUnit.SECONDS); } return builder; }
private static <K, V> Weigher<K, V> unitWeight() {
    return new Weigher<K, V>() {
        @Override
        public int weigh(K key, V value) {
            return 1;
        }
    };
}
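// With the unit weigher every entry weighs 1, so maximumWeight(n) degenerates to
// a plain entry-count bound. A minimal sketch (hypothetical cache, assuming
// unitWeight() is in scope) of the equivalence: maximumWeight(1024) here behaves
// exactly like maximumSize(1024) would.
Weigher<String, String> one = unitWeight();
Cache<String, String> byCount = CacheBuilder.newBuilder()
    .maximumWeight(1024)
    .weigher(one)
    .build();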
<K, V> Provider<Weigher<K, V>> bindWeigher(
        CacheProvider<K, V> m, Class<? extends Weigher<K, V>> impl) {
    Type weigherType = Types.newParameterizedType(
            Weigher.class, m.keyType().getType(), m.valueType().getType());

    @SuppressWarnings("unchecked")
    Key<Weigher<K, V>> key = (Key<Weigher<K, V>>) Key.get(weigherType, Names.named(m.name));

    bind(key).to(impl).in(Scopes.SINGLETON);
    return getProvider(key);
}
private QueryContext createQueryContext(Session session) {
    QueryContext newQueryContext = new QueryContext(context.getConf(), session);

    // Set default space uri and its root uri
    newQueryContext.setDefaultSpaceUri(TablespaceManager.getDefault().getUri());
    newQueryContext.setDefaultSpaceRootUri(TablespaceManager.getDefault().getRootUri());

    if (TajoConstants.IS_TEST_MODE) {
        newQueryContext.putAll(CommonTestingUtil.getSessionVarsForTest());
    }

    // Set queryCache in session
    int queryCacheSize = context.getConf().getIntVar(TajoConf.ConfVars.QUERY_SESSION_QUERY_CACHE_SIZE);
    if (queryCacheSize > 0 && session.getQueryCache() == null) {
        Weigher<String, Expr> weighByLength = new Weigher<String, Expr>() {
            @Override
            public int weigh(String key, Expr expr) {
                return key.length();
            }
        };
        LoadingCache<String, Expr> cache = CacheBuilder.newBuilder()
            .maximumWeight(queryCacheSize * 1024)
            .weigher(weighByLength)
            .expireAfterAccess(1, TimeUnit.HOURS)
            .build(new CacheLoader<String, Expr>() {
                @Override
                public Expr load(String sql) throws SQLSyntaxError {
                    return analyzer.parse(sql);
                }
            });
        session.setQueryCache(cache);
    }
    return newQueryContext;
}
@SuppressWarnings("unchecked") public CacheStats(Cache<?, ?> cache, String name, Weigher<?, ?> weigher, long maxWeight) { this.cache = (Cache<Object, Object>) cache; this.name = name; this.weigher = (Weigher<Object, Object>) weigher; this.maxWeight = maxWeight; }
public DataStoreBlobStore(DataStore delegate, boolean encodeLengthInId, int cacheSizeInMB) {
    this.delegate = delegate;
    this.encodeLengthInId = encodeLengthInId;

    this.cache = CacheLIRS.newBuilder()
        .maximumWeight(cacheSizeInMB * FileUtils.ONE_MB)
        .weigher(new Weigher<String, byte[]>() {
            @Override
            public int weigh(String key, byte[] value) {
                return value.length;
            }
        })
        .build();
}
public CachingBlobStore(long cacheSize) {
    this.blobCacheSize = cacheSize;
    cache = CacheLIRS.newBuilder()
        .maximumWeight(cacheSize)
        .averageWeight(getBlockSize() / 2)
        .weigher(new Weigher<String, byte[]>() {
            @Override
            public int weigh(String key, byte[] value) {
                return value.length;
            }
        })
        .build();
}
/**
 * Method to initialize the queue mapping cache.
 * <p>
 * The queue mapping cache is what holds the queue mappings (queue name to queue id) in memory. The cache is
 * initialized with a loader so that a queue name which is not present in the cache is loaded from the database
 * upon a query for that queue name.
 */
private void initializeQueueMappingCache() {
    // The size of the queue mappings cache in megabytes
    final int QUEUE_CACHE_SIZE = 2;

    // Expected concurrency for the cache (4 is the Guava default)
    final int QUEUE_CACHE_CONCURRENCY_LEVEL = 4;

    queueMappings = CacheBuilder.newBuilder()
            .concurrencyLevel(QUEUE_CACHE_CONCURRENCY_LEVEL)
            .maximumWeight(QUEUE_CACHE_SIZE * 1024 * 1024)
            .weigher(new Weigher<String, Integer>() {
                @Override
                public int weigh(String s, Integer integer) {
                    return s.length();
                }
            })
            .build(new CacheLoader<String, Integer>() {
                @Override
                public Integer load(String queueName) throws AndesException {
                    try {
                        Integer queueID = getQueueID(queueName);
                        if (log.isDebugEnabled()) {
                            log.debug("Loaded queue: " + queueName + " to the cache from database");
                        }
                        return queueID;
                    } catch (SQLException e) {
                        throw new AndesException("Error retrieving queue id for queue: " + queueName, e);
                    }
                }
            });
}
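// A minimal usage sketch (hypothetical queue name): because the cache is built with
// a CacheLoader, a miss on get() falls through to the database transparently. Guava
// wraps the loader's checked exception in a java.util.concurrent.ExecutionException.
try {
    Integer queueId = queueMappings.get("ordersQueue"); // hits the DB only on a miss
} catch (ExecutionException e) {
    throw new AndesException("Error resolving queue id", e);
}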
/**
 * Create a cache with easy setup.
 *
 * @param config setup
 * @param weigher Guava weigher
 * @return the configured cache builder
 */
public static CacheBuilder<Object, Object> newBuilder(final HesperidesCacheParameter config,
        final Weigher<? extends Object, ? extends Object> weigher) {
    final CacheBuilder<Object, Object> cache = CacheBuilder.newBuilder();

    if (config != null) {
        final int maxSize = config.getMaxSize();
        final int weight = config.getWeight();
        final String expire = config.getItemExpireAfter();

        if (maxSize != HesperidesCacheParameter.NOT_SET) {
            cache.maximumSize(maxSize);
        }

        if (weight != HesperidesCacheParameter.NOT_SET) {
            if (weigher == null) {
                throw new IllegalArgumentException("Parameter 'weight' is not supported for this cache.");
            }
            cache.maximumWeight(weight);
        }

        if (expire != null) {
            final Pattern p = Pattern.compile("^([0-9]+)(m|s|h|d)");
            final Matcher m = p.matcher(expire);

            if (m.find()) {
                final int time = Integer.valueOf(m.group(1));
                TimeUnit unit = TimeUnit.SECONDS;

                switch (m.group(2)) {
                    case "m":
                        unit = TimeUnit.MINUTES;
                        break;
                    case "h":
                        unit = TimeUnit.HOURS;
                        break;
                    case "d":
                        unit = TimeUnit.DAYS;
                        break;
                    default:
                        // "s": keep TimeUnit.SECONDS
                }
                cache.expireAfterWrite(time, unit);
                cache.expireAfterAccess(time, unit);
            } else {
                throw new IllegalArgumentException(
                        "Parameter 'itemExpireAfter' is not valid. Valid usage is [0-9]+(m|s|h|d), "
                        + "where 'm' is minutes, 's' is seconds, 'h' is hours and 'd' is days.");
            }
        }
    }
    return cache;
}
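// A usage sketch (hypothetical config values): an itemExpireAfter of "30m" matches
// the ^([0-9]+)(m|s|h|d) pattern and sets both expireAfterWrite and expireAfterAccess
// to 30 minutes; an "s" suffix falls through the switch's default branch and keeps
// TimeUnit.SECONDS. Here config is assumed to have maxSize and weight unset.
Cache<Object, Object> cache = newBuilder(config, null).build(); // config.getItemExpireAfter() == "30m"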
/** Algorithm to weigh an object with a method other than the unit weight of 1. */
CacheBinding<K, V> weigher(Class<? extends Weigher<K, V>> clazz);
@Nullable
Weigher<K, V> weigher();
@Override
public CacheBinding<K, V> weigher(Class<? extends Weigher<K, V>> impl) {
    Preconditions.checkState(!frozen, "binding frozen, cannot be modified");
    weigher = module.bindWeigher(this, impl);
    return this;
}
@Override
@Nullable
public Weigher<K, V> weigher() {
    return weigher != null ? weigher.get() : null;
}
@SuppressWarnings("serial") private void createLoadedJobCache(Configuration conf) { // Set property for old "loaded jobs" cache loadedJobCacheSize = conf.getInt( JHAdminConfig.MR_HISTORY_LOADED_JOB_CACHE_SIZE, JHAdminConfig.DEFAULT_MR_HISTORY_LOADED_JOB_CACHE_SIZE); // Check property for new "loaded tasks" cache perform sanity checking useLoadedTasksCache = false; try { String taskSizeString = conf .get(JHAdminConfig.MR_HISTORY_LOADED_TASKS_CACHE_SIZE); if (taskSizeString != null) { loadedTasksCacheSize = Math.max(Integer.parseInt(taskSizeString), 1); useLoadedTasksCache = true; } } catch (NumberFormatException nfe) { LOG.error("The property " + JHAdminConfig.MR_HISTORY_LOADED_TASKS_CACHE_SIZE + " is not an integer value. Please set it to a positive" + " integer value."); } CacheLoader<JobId, Job> loader; loader = new CacheLoader<JobId, Job>() { @Override public Job load(JobId key) throws Exception { return loadJob(key); } }; if (!useLoadedTasksCache) { loadedJobCache = CacheBuilder.newBuilder() .maximumSize(loadedJobCacheSize) .initialCapacity(loadedJobCacheSize) .concurrencyLevel(1) .build(loader); } else { Weigher<JobId, Job> weightByTasks; weightByTasks = new Weigher<JobId, Job>() { /** * Method for calculating Job weight by total task count. If * the total task count is greater than the size of the tasks * cache, then cap it at the cache size. This allows the cache * to always hold one large job. * @param key JobId object * @param value Job object * @return Weight of the job as calculated by total task count */ @Override public int weigh(JobId key, Job value) { int taskCount = Math.min(loadedTasksCacheSize, value.getTotalMaps() + value.getTotalReduces()); return taskCount; } }; // Keep concurrencyLevel at 1. Otherwise, two problems: // 1) The largest job that can be initially loaded is // cache size / 4. // 2) Unit tests are not deterministic. loadedJobCache = CacheBuilder.newBuilder() .maximumWeight(loadedTasksCacheSize) .weigher(weightByTasks) .concurrencyLevel(1) .build(loader); } }
public <K, V> CacheLIRS<K, V> build(CacheLoader<K, V> cacheLoader) {
    @SuppressWarnings("unchecked")
    Weigher<K, V> w = (Weigher<K, V>) weigher;
    return new CacheLIRS<K, V>(w, maxWeight, averageWeight, 16, 16, cacheLoader);
}
public Weigher<CacheValue, CacheValue> getWeigher() {
    return weigher;
}
public Builder withWeigher(Weigher<CacheValue, CacheValue> weigher) {
    this.weigher = weigher;
    return this;
}
/**
 * Creates a new cache.
 * This method is private because external access should be made through
 * {@link #getSharedCache(long, CanvasDataLoader)}.
 *
 * @param kilobyteCapacity capacity of the cache.
 * @param canvasDataLoader loader implementation for the cache.
 * @param recordStats indicates whether the cache should record statistics.
 *
 * @throws IllegalStateException
 *   if any errors occur.
 */
private CanvasDataCache(final long kilobyteCapacity,
                        final CanvasDataLoader canvasDataLoader,
                        final boolean recordStats)
        throws IllegalArgumentException, IllegalStateException {

    if (kilobyteCapacity < 1) {
        this.kilobyteCapacity = 1;
    } else {
        this.kilobyteCapacity = kilobyteCapacity;
    }

    this.weigher = new Weigher<CanvasId, CachedCanvasData>() {
        @Override
        public int weigh(@Nullable final CanvasId key,
                         @Nonnull final CachedCanvasData value) {
            long kiloBytes = value.getKilobytes();
            // hopefully we'll never have a > 2000 gigabyte file,
            // but if so it simply won't be fairly weighted
            if (kiloBytes > Integer.MAX_VALUE) {
                LOG.warn("weightOf: truncating weight for " + kiloBytes + " Kb item " + value);
                kiloBytes = Integer.MAX_VALUE;
            } else if (kiloBytes == 0) {
                // zero weights are not supported, so we need to set empty file weight to 1
                kiloBytes = 1;
            }
            return (int) kiloBytes;
        }
    };

    // separate thread pool for removing data that expires from the cache
    final ExecutorService removalService = Executors.newFixedThreadPool(4);

    final RemovalListener<CanvasId, CachedCanvasData> removalListener = removal -> {
        final CachedCanvasData cachedCanvasData = removal.getValue();
        if (cachedCanvasData != null) {
            cachedCanvasData.remove();
        }
    };

    this.asyncRemovalListener = RemovalListeners.asynchronous(removalListener, removalService);
    this.canvasDataLoader = canvasDataLoader;

    this.buildCache(recordStats);

    LOG.info("<init>: exit");
}
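// buildCache(recordStats) is not part of this excerpt; a plausible sketch (assumed,
// based on the fields wired up above, not the project's actual method) of how the
// weigher, capacity and asynchronous removal listener would come together:
Cache<CanvasId, CachedCanvasData> canvasDataCache = CacheBuilder.newBuilder()
    .maximumWeight(kilobyteCapacity)       // the weigher returns kilobytes, so the cap is in KB too
    .weigher(weigher)
    .removalListener(asyncRemovalListener)
    .build();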
public void setMaxCacheWeight(final long weight) {
    // 24 presumably accounts for fixed per-entry overhead on top of the value size
    final Weigher<DmKey, DmValue> weigher = (key, value) -> 24 + value.getSize();
    cache = CacheBuilder.newBuilder()
        .maximumWeight(weight)
        .weigher(weigher)
        .build();
}
public GuavaBasedMessageCacheImpl() {
    DEFAULT_CONTENT_CHUNK_SIZE = AndesConfigurationManager
            .readValue(AndesConfiguration.PERFORMANCE_TUNING_MAX_CONTENT_CHUNK_SIZE);

    long cacheSizeInBytes = 1024L * 1024L
            * ((int) AndesConfigurationManager.readValue(AndesConfiguration.PERSISTENCE_CACHE_SIZE));
    int cacheConcurrency = AndesConfigurationManager
            .readValue(AndesConfiguration.PERSISTENCE_CACHE_CONCURRENCY_LEVEL);
    int cacheExpirySeconds = AndesConfigurationManager
            .readValue(AndesConfiguration.PERSISTENCE_CACHE_EXPIRY_SECONDS);
    String valueRefType = AndesConfigurationManager
            .readValue(AndesConfiguration.PERSISTENCE_CACHE_VALUE_REFERENCE_TYPE);
    printStats = AndesConfigurationManager.readValue(AndesConfiguration.PERSISTENCE_CACHE_PRINT_STATS);

    CacheBuilder<Long, AndesMessage> builder = CacheBuilder.newBuilder()
            .concurrencyLevel(cacheConcurrency)
            .expireAfterAccess(cacheExpirySeconds, TimeUnit.SECONDS)
            .maximumWeight(cacheSizeInBytes)
            .weigher(new Weigher<Long, AndesMessage>() {
                @Override
                public int weigh(Long l, AndesMessage m) {
                    return m.getMetadata().getMessageContentLength();
                }
            });

    if (printStats) {
        builder = builder.recordStats();
    }

    if (CACHE_VALUE_REF_TYPE_WEAK.equalsIgnoreCase(valueRefType)) {
        builder = builder.weakValues();
    }

    this.cache = builder.build();

    maintenanceExecutor = Executors.newSingleThreadScheduledExecutor();
    maintenanceExecutor.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            cache.cleanUp();
            if (printStats) {
                log.info("cache stats:" + cache.stats().toString());
            }
        }
    }, 2, 2, TimeUnit.MINUTES);
}
/**
 * Initializes the cache and cache loader for the response of this data source.
 *
 * @param pinotResponseCacheLoader the cache loader that directly gets query results from the data source
 *          if the results are not in its cache.
 *
 * @throws Exception if the Pinot brokers cannot be reached.
 */
private static LoadingCache<PinotQuery, ThirdEyeResultSetGroup> buildResponseCache(
        PinotResponseCacheLoader pinotResponseCacheLoader) throws Exception {
    Preconditions.checkNotNull(pinotResponseCacheLoader, "A loader that sends query to Pinot is required.");

    // Initializes a listener that prints expired entries in debugging mode.
    RemovalListener<PinotQuery, ThirdEyeResultSetGroup> listener;
    if (LOG.isDebugEnabled()) {
        listener = new RemovalListener<PinotQuery, ThirdEyeResultSetGroup>() {
            @Override
            public void onRemoval(RemovalNotification<PinotQuery, ThirdEyeResultSetGroup> notification) {
                LOG.debug("Expired {}", notification.getKey().getPql());
            }
        };
    } else {
        listener = new RemovalListener<PinotQuery, ThirdEyeResultSetGroup>() {
            @Override
            public void onRemoval(RemovalNotification<PinotQuery, ThirdEyeResultSetGroup> notification) {
            }
        };
    }

    // ResultSetGroup cache. The size of this cache is limited by the total number of buckets in all
    // ResultSetGroups. We estimate that 1 bucket (including overhead) consumes 1KB and this cache is
    // allowed to use up to 50% of the max heap space.
    long maxBucketNumber = getApproximateMaxBucketNumber(DEFAULT_HEAP_PERCENTAGE_FOR_RESULTSETGROUP_CACHE);
    LOG.debug("Max bucket number for {}'s cache is set to {}", DATA_SOURCE_NAME, maxBucketNumber);

    return CacheBuilder.newBuilder()
        .removalListener(listener)
        .expireAfterWrite(ThirdEyeCacheRegistry.CACHE_EXPIRATION_HOURS, TimeUnit.HOURS)
        .maximumWeight(maxBucketNumber)
        .weigher(new Weigher<PinotQuery, ThirdEyeResultSetGroup>() {
            @Override
            public int weigh(PinotQuery pinotQuery, ThirdEyeResultSetGroup resultSetGroup) {
                int resultSetCount = resultSetGroup.size();
                int weight = 0;
                for (int idx = 0; idx < resultSetCount; ++idx) {
                    ThirdEyeResultSet resultSet = resultSetGroup.get(idx);
                    weight += (resultSet.getColumnCount() + resultSet.getGroupKeyLength())
                            * resultSet.getRowCount();
                }
                return weight;
            }
        })
        .build(pinotResponseCacheLoader);
}
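// A worked example of the weigher (hypothetical result shape): a single result set
// with 3 value columns, 2 group-key columns and 1,000 rows weighs
// (3 + 2) * 1,000 = 5,000 buckets against the maxBucketNumber budget.
int weight = (3 + 2) * 1000; // 5,000 buckets, i.e. ~5 MB under the 1KB-per-bucket estimate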