public AccessTokenJob() {
    logger.info("init");
    accessTokenCache = CacheBuilder.newBuilder()
            // Concurrency level: the number of threads that may write to the cache at the same time.
            .concurrencyLevel(200)
            // Entries expire 90 minutes after they are written.
            .expireAfterWrite(90, TimeUnit.MINUTES)
            .initialCapacity(10)
            .maximumSize(100)
            // Record statistics such as the cache hit rate.
            .recordStats()
            // Removal listener: notified whenever an entry is evicted or removed.
            .removalListener(new RemovalListener<AppIdSecret, String>() {
                @Override
                public void onRemoval(RemovalNotification<AppIdSecret, String> notification) {
                    logger.info(notification.getKey() + " was removed, cause by " + notification.getCause());
                }
            })
            // The CacheLoader passed to build() loads the value automatically on a cache miss.
            .build(new CacheLoader<AppIdSecret, String>() {
                @Override
                public String load(AppIdSecret appIdSecret) throws Exception {
                    Token token = CommonUtil.getAccessToken(appIdSecret.getAppId(), appIdSecret.getAppSecret());
                    return token.getToken();
                }
            });
}
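A minimal lookup sketch against the cache above; the AppIdSecret constructor and its arguments are assumed here for illustration:

// Hypothetical call site: get() invokes the CacheLoader on a miss and caches the returned token.
AppIdSecret appIdSecret = new AppIdSecret("exampleAppId", "exampleAppSecret"); // assumed constructor
try {
    String accessToken = accessTokenCache.get(appIdSecret);
    logger.info("access token: " + accessToken);
} catch (ExecutionException e) {
    logger.error("failed to load access token", e);
}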
/**
 * Create a new KMSAudit.
 *
 * @param windowMs Duplicate events within the aggregation window are quashed
 *                 to reduce log traffic. A single message for aggregated
 *                 events is printed at the end of the window, along with a
 *                 count of the number of aggregated events.
 */
KMSAudit(long windowMs) {
    cache = CacheBuilder.newBuilder()
        .expireAfterWrite(windowMs, TimeUnit.MILLISECONDS)
        .removalListener(
            new RemovalListener<String, AuditEvent>() {
              @Override
              public void onRemoval(
                  RemovalNotification<String, AuditEvent> entry) {
                AuditEvent event = entry.getValue();
                if (event.getAccessCount().get() > 0) {
                  KMSAudit.this.logEvent(event);
                  event.getAccessCount().set(0);
                  KMSAudit.this.cache.put(entry.getKey(), event);
                }
              }
            }).build();
    executor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
        .setDaemon(true).setNameFormat(KMS_LOGGER_NAME + "_thread").build());
    executor.scheduleAtFixedRate(new Runnable() {
      @Override
      public void run() {
        cache.cleanUp();
      }
    }, windowMs / 10, windowMs / 10, TimeUnit.MILLISECONDS);
}
/**
 * Create a ChronoRange for the given ChronoSeries and sequence of ChronoGenes.
 *
 * @param chronoSeries ChronoSeries to create ChronoRange for
 * @param genes        ChronoGene sequence containing ChronoPattern(s) to use for creating ChronoRange
 * @return ChronoRange for given ChronoSeries and ChronoGene sequence
 */
@NotNull
public static ChronoRange getChronoRange(@NotNull ChronoSeries chronoSeries, @NotNull ISeq<ChronoGene> genes) {
    ChronoRange range = new ChronoRange(requireNonNull(chronoSeries), requireNonNull(genes));
    Cache<ISeq<ChronoPattern>, ChronoRange> cacheChronoRange = cacheMap.get(chronoSeries);
    if (cacheChronoRange == null) {
        cacheChronoRange = CacheBuilder.newBuilder().build();
        cacheMap.put(chronoSeries, cacheChronoRange);
    }

    ChronoRange cacheRange = cacheChronoRange.getIfPresent(range.chronoPatternSeq);
    if (cacheRange != null) {
        return cacheRange;
    } else {
        if (range.validRange) {
            range.calculateTimestampRanges();
        }
        cacheChronoRange.put(range.chronoPatternSeq, range);
        return range;
    }
}
public KeyProviderCache(long expiryMs) {
    cache = CacheBuilder.newBuilder()
        .expireAfterAccess(expiryMs, TimeUnit.MILLISECONDS)
        .removalListener(new RemovalListener<URI, KeyProvider>() {
          @Override
          public void onRemoval(
              RemovalNotification<URI, KeyProvider> notification) {
            try {
              notification.getValue().close();
            } catch (Throwable e) {
              LOG.error(
                  "Error closing KeyProvider with uri ["
                      + notification.getKey() + "]", e);
            }
          }
        })
        .build();
}
DFSClientCache(NfsConfiguration config, int clientCache) {
    this.config = config;
    this.clientCache = CacheBuilder.newBuilder()
        .maximumSize(clientCache)
        .removalListener(clientRemovalListener())
        .build(clientLoader());

    this.inputstreamCache = CacheBuilder.newBuilder()
        .maximumSize(DEFAULT_DFS_INPUTSTREAM_CACHE_SIZE)
        .expireAfterAccess(DEFAULT_DFS_INPUTSTREAM_CACHE_TTL, TimeUnit.SECONDS)
        .removalListener(inputStreamRemovalListener())
        .build(inputStreamLoader());

    ShutdownHookManager.get().addShutdownHook(new CacheFinalizer(),
        SHUTDOWN_HOOK_PRIORITY);
}
@Bean
public CasEventRepository casEventRepository() {
    final LoadingCache<String, CasEvent> storage = CacheBuilder.newBuilder()
            .initialCapacity(INITIAL_CACHE_SIZE)
            .maximumSize(MAX_CACHE_SIZE)
            .recordStats()
            .expireAfterWrite(EXPIRATION_TIME, TimeUnit.HOURS)
            .build(new CacheLoader<String, CasEvent>() {
                @Override
                public CasEvent load(final String s) throws Exception {
                    LOGGER.error("Load operation of the cache is not supported.");
                    return null;
                }
            });
    LOGGER.debug("Created an in-memory event repository to store CAS events for [{}] hours", EXPIRATION_TIME);
    return new InMemoryCasEventRepository(storage);
}
@PostConstruct
public void initialize() {
    AppConfiguration.Cache config = AppConfiguration.CONFIG.getCache();

    cache = CacheBuilder.newBuilder()
            .maximumSize(config.getMaxSize())
            .expireAfterWrite(config.getLifeTime(), TimeUnit.MINUTES)
            .recordStats() // This is costly! But we need it because of getCacheStatus().
            .build();

    // https://github.com/google/guava/wiki/CachesExplained#when-does-cleanup-happen
    //
    // If we do not clean up expired objects ourselves, the insertion of objects seems to get slower when
    // the size approaches the limit. This is because small clean-ups happen which block the operation.
    scheduler.scheduleAtFixedRate(
            this::cleanUpCache,
            config.getCleanUpInterval(),
            config.getCleanUpInterval(),
            TimeUnit.MINUTES
    );
}
public static void init() {
    ExampleMod.logger.info("ATTEMPTING TO COMMIT GREAT EVIL:");
    try {
        doImmenseEvil();
    } catch (Throwable e) {
        e.printStackTrace();
    }
    MinecraftForge.EVENT_BUS.register(new Listener());
    grassCache = CacheBuilder.newBuilder()
            .maximumSize(2048)
            .build(
                    new CacheLoader<GrassCacheKey, Biome>() {
                        @Override
                        public Biome load(GrassCacheKey key) {
                            return DimensionManager.getWorld(key.dim).getBiome(new BlockPos(key.x, 63, key.z));
                        }
                    }
            );
}
public CachedScriptClassInstancePovider(Engine engine, Function<String, S> createScriptFunction, String format,
        BiFunction<S, Class<T>, T> createInstanceFunction) {
    this.createScriptFunction = createScriptFunction;
    this.format = format;
    this.createInstanceFunction = createInstanceFunction;

    long cacheExpireTime = engine.getDefaultParameters().getScriptClassInstancePoviderCacheExpireTime();
    if (cacheExpireTime >= 0) {
        // Turn on the cache.
        CacheBuilder<Object, Object> builder = CacheBuilder.newBuilder();
        if (cacheExpireTime > 0) {
            builder.expireAfterAccess(cacheExpireTime, TimeUnit.MILLISECONDS);
        }

        cache = builder.build(new CacheLoader<String, S>() {
            @Override
            public S load(String className) throws Exception {
                return createScript(className);
            }
        });
    }
}
public BakedWrapper(final Node<?> node, final IModelState state, final boolean smooth, final boolean gui3d,
        final VertexFormat format, final ImmutableSet<String> meshes,
        final ImmutableMap<String, TextureAtlasSprite> textures) {
    this(node, state, smooth, gui3d, format, meshes, textures, CacheBuilder.newBuilder()
            .maximumSize(128)
            .expireAfterAccess(2, TimeUnit.MINUTES)
            .<Integer, B3DState>build(new CacheLoader<Integer, B3DState>() {
                public B3DState load(Integer frame) throws Exception {
                    IModelState parent = state;
                    Animation newAnimation = node.getAnimation();
                    if (parent instanceof B3DState) {
                        B3DState ps = (B3DState) parent;
                        parent = ps.getParent();
                    }
                    return new B3DState(newAnimation, frame, frame, 0, parent);
                }
            }));
}
public CachingReEncryptionKeyProvider(AbstractReEncryptionKeyProvider prov, long keyTimeoutMillis,
        long eekTimeoutMillis) {
    super(prov.getConf());
    this.provider = prov;
    reEncryptionKeyCache =
        CacheBuilder.newBuilder().expireAfterAccess(keyTimeoutMillis, TimeUnit.MILLISECONDS)
            .build(new CacheLoader<ReEncryptionKeyCacheKey, ReEncryptionKeyInstance>() {
              @Override
              public ReEncryptionKeyInstance load(ReEncryptionKeyCacheKey key) throws Exception {
                ReEncryptionKeyInstance kv = provider.createReEncryptionKey(
                    key.getSrcKeyName(), key.getDstKeyName());
                if (kv == null) {
                  throw new KeyNotFoundException();
                }
                return kv;
              }
            });
    transformedEEKCache =
        CacheBuilder.newBuilder().expireAfterAccess(eekTimeoutMillis, TimeUnit.MILLISECONDS)
            .build();
}
/**
 * Create a new RENAudit.
 *
 * @param windowMs Duplicate events within the aggregation window are quashed
 *                 to reduce log traffic. A single message for aggregated
 *                 events is printed at the end of the window, along with a
 *                 count of the number of aggregated events.
 */
RENAudit(long windowMs) {
    cache = CacheBuilder.newBuilder()
        .expireAfterWrite(windowMs, TimeUnit.MILLISECONDS)
        .removalListener(
            new RemovalListener<String, AuditEvent>() {
              @Override
              public void onRemoval(
                  RemovalNotification<String, AuditEvent> entry) {
                AuditEvent event = entry.getValue();
                if (event.getAccessCount().get() > 0) {
                  RENAudit.this.logEvent(event);
                  event.getAccessCount().set(0);
                  RENAudit.this.cache.put(entry.getKey(), event);
                }
              }
            }).build();
    executor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
        .setDaemon(true).setNameFormat(REN_LOGGER_NAME + "_thread").build());
    executor.scheduleAtFixedRate(new Runnable() {
      @Override
      public void run() {
        cache.cleanUp();
      }
    }, windowMs / 10, windowMs / 10, TimeUnit.MILLISECONDS);
}
public RegionReplicaSinkWriter(RegionReplicaOutputSink sink, ClusterConnection connection,
        ExecutorService pool, int operationTimeout) {
    this.sink = sink;
    this.connection = connection;
    this.operationTimeout = operationTimeout;
    this.rpcRetryingCallerFactory =
        RpcRetryingCallerFactory.instantiate(connection.getConfiguration());
    this.rpcControllerFactory = RpcControllerFactory.instantiate(connection.getConfiguration());
    this.pool = pool;

    int nonExistentTableCacheExpiryMs = connection.getConfiguration()
        .getInt("hbase.region.replica.replication.cache.disabledAndDroppedTables.expiryMs", 5000);
    // A cache of nonexistent tables, with a default expiry of 5 seconds. This means that if a
    // table is re-created with the same name, we might fail to replicate for that amount of
    // time. But this cache prevents us from overloading meta with requests for every edit from a deleted file.
    disabledAndDroppedTables = CacheBuilder.newBuilder()
        .expireAfterWrite(nonExistentTableCacheExpiryMs, TimeUnit.MILLISECONDS)
        .initialCapacity(10)
        .maximumSize(1000)
        .build();
}
public StaticDatabaseMappingService(
    MetaStoreMappingFactory metaStoreMappingFactory,
    List<AbstractMetaStore> initialMetastores) {
    this.metaStoreMappingFactory = metaStoreMappingFactory;

    primaryDatabasesCache = CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.MINUTES).maximumSize(1).build(
        new CacheLoader<String, List<String>>() {
          @Override
          public List<String> load(String key) throws Exception {
            if (primaryDatabaseMapping != null) {
              return primaryDatabaseMapping.getClient().get_all_databases();
            } else {
              return Lists.newArrayList();
            }
          }
        });
    init(initialMetastores);
}
public AtomixLeaderElector(CopycatClient client, Properties properties) {
    super(client, properties);
    cache = CacheBuilder.newBuilder()
            .maximumSize(1000)
            .build(CacheLoader.from(topic -> this.client.submit(new GetLeadership(topic))));

    cacheUpdater = change -> {
        Leadership leadership = change.newValue();
        cache.put(leadership.topic(), CompletableFuture.completedFuture(leadership));
    };
    statusListener = status -> {
        if (status == Status.SUSPENDED || status == Status.INACTIVE) {
            cache.invalidateAll();
        }
    };
    addStatusChangeListener(statusListener);
}
/**
 * Creates a new processing unit.
 *
 * @param name     name.
 * @param engine   the engine.
 * @param inQueue  input queue.
 * @param outQueue output queue.
 */
public BaseProcessingUnit(String name, Engine engine, EventQueue inQueue, EventQueue outQueue) {
    super(name, engine);

    this.inQueue = inQueue;
    this.outQueue = outQueue;

    long cacheExpireTime = engine.getDefaultParameters().getProcessingUnitEventProcessorCacheExpireTime();
    if (cacheExpireTime >= 0) {
        // Turn on the cache.
        CacheBuilder<Object, Object> builder = CacheBuilder.newBuilder();
        if (cacheExpireTime > 0) {
            builder.expireAfterAccess(cacheExpireTime, TimeUnit.MILLISECONDS);
        }

        eventNameProcessorsCache = builder.build(new CacheLoader<String, Set<AtomicReference<T>>>() {
            @Override
            public Set<AtomicReference<T>> load(String eventName) throws Exception {
                return resolveEventProcessors(eventName);
            }
        });
    }
}
@Override
public Map<String, Map<String, String>> getMetadata(File f) {
    LoadingCache<String, Map<String, String>> metadata = CacheBuilder.newBuilder().build(
        CacheLoader.from(new Function<String, Map<String, String>>() {
            @Override
            public Map<String, String> apply(String input) {
                return Maps.newHashMap();
            }
        }));
    for (MetadataHandler handler : pluginTracker.getBeanList()) {
        handler.getMetadata(metadata, f);
    }
    return metadata.asMap();
}
/**
 * Constructor to configure cache size.
 *
 * @param backingMap a distributed, strongly consistent map for backing
 * @param cacheSize  the maximum size of the cache
 */
public CachingAsyncConsistentMap(AsyncConsistentMap<K, V> backingMap, int cacheSize) {
    super(backingMap);
    cache = CacheBuilder.newBuilder()
            .maximumSize(cacheSize)
            .build(CacheLoader.from(CachingAsyncConsistentMap.super::get));
    cacheUpdater = event -> {
        Versioned<V> newValue = event.newValue();
        if (newValue == null) {
            cache.invalidate(event.key());
        } else {
            cache.put(event.key(), CompletableFuture.completedFuture(newValue));
        }
    };
    statusListener = status -> {
        log.debug("{} status changed to {}", this.name(), status);
        // If the status of the underlying map is SUSPENDED or INACTIVE
        // we can no longer guarantee that the cache will be in sync.
        if (status == SUSPENDED || status == INACTIVE) {
            cache.invalidateAll();
        }
    };
    super.addListener(cacheUpdater);
    super.addStatusChangeListener(statusListener);
}
/**
 * Create a ReliableTaildirEventReader to watch the given directory. map<serverid.appid.logid, logpath>
 */
private ReliableTaildirEventReader(Map<String, CollectTask> tasks, Table<String, String, String> headerTable,
        boolean skipToEnd, boolean addByteOffset) throws IOException {
    Map<String, LogPatternInfo> filePaths = getFilePaths(tasks);

    // Sanity checks
    Preconditions.checkNotNull(filePaths);

    // get operating system info
    if (log.isDebugEnable()) {
        log.debug(this, "Initializing {" + ReliableTaildirEventReader.class.getSimpleName() + "} with directory={"
                + filePaths + "}");
    }

    // tailFile
    this.tailFileTable = CacheBuilder.newBuilder().expireAfterWrite(2, TimeUnit.DAYS)
            .<String, LogPatternInfo> build();
    this.headerTable = headerTable;
    this.addByteOffset = addByteOffset;
    this.os = JVMToolHelper.isWindows() ? OS_WINDOWS : null;

    updatelog(filePaths);
    updateTailFiles(skipToEnd);

    log.info(this, "tailFileTable: " + tailFileTable.toString());
    log.info(this, "headerTable: " + headerTable.toString());
}
@Provides @Named("authenticatableCache") @Singleton public LoadingCache<AuthenticationCredentials, AuthenticatedEntity> getLoadingCache( @Named("authenticatableRedisDao") RedisDao<AuthenticationCredentials, AuthenticatedEntity> authenticatableRedisDao, AuthenticationFacade authenticationFacade, CacheManager cacheManager, @Named("authenticationCacheGroup") String authenticationCacheGroupName ) { LoadingCacheRedisImpl<AuthenticationCredentials, AuthenticatedEntity> l2Cache = new LoadingCacheRedisImpl<>(); l2Cache.setRedisDao(authenticatableRedisDao); l2Cache.setCacheLoader((key) -> authenticationFacade.determineDao(key).authenticate(key)); l2Cache.setName("AUTHENTICATION_REDIS_CACHE"); LoadingCacheGuavaImpl<AuthenticationCredentials, AuthenticatedEntity> l1Cache = new LoadingCacheGuavaImpl<>(); l1Cache.setGuavaCache(CacheBuilder.newBuilder().expireAfterWrite(1, TimeUnit.DAYS).build()); l1Cache.setCacheLoader((key) -> l2Cache.get(key)); l2Cache.setName("AUTHENTICATION_GUAVA_CACHE"); cacheManager.registerCacheGroup(authenticationCacheGroupName, l1Cache, l2Cache); return l1Cache; }
private NonCloseableHiveClientWithCaching(final HiveConf hiveConf,
    final Map<String, String> hiveConfigOverride) throws MetaException {
    super(hiveConf, hiveConfigOverride);
    databases = CacheBuilder //
        .newBuilder() //
        .expireAfterAccess(1, TimeUnit.MINUTES) //
        .build(new DatabaseLoader());
    tableNameLoader = CacheBuilder //
        .newBuilder() //
        .expireAfterAccess(1, TimeUnit.MINUTES) //
        .build(new TableNameLoader());
    tableLoaders = CacheBuilder //
        .newBuilder() //
        .expireAfterAccess(4, TimeUnit.HOURS) //
        .maximumSize(20) //
        .build(new TableLoaderLoader());
}
public SkinManager(TextureManager textureManagerInstance, File skinCacheDirectory, MinecraftSessionService sessionService) {
    this.textureManager = textureManagerInstance;
    this.skinCacheDir = skinCacheDirectory;
    this.sessionService = sessionService;
    this.skinCacheLoader = CacheBuilder.newBuilder().expireAfterAccess(15L, TimeUnit.SECONDS)
            .<GameProfile, Map<Type, MinecraftProfileTexture>>build(new CacheLoader<GameProfile, Map<Type, MinecraftProfileTexture>>() {
                public Map<Type, MinecraftProfileTexture> load(GameProfile p_load_1_) throws Exception {
                    try {
                        return Minecraft.getMinecraft().getSessionService().getTextures(p_load_1_, false);
                    } catch (Throwable var3) {
                        return Maps.<Type, MinecraftProfileTexture>newHashMap();
                    }
                }
            });
}
@Provides @Named("passwordSaltCache") @Singleton public Cache<String, String> getAdminSaltLoadingCache( CacheManager cacheManager, PasswordCredentialsFacade passwordCredentialsFacade, @Named("passwordSaltRedisDao") RedisDao<String, String> passwordSaltRedisDao, @Named("passwordSaltCacheGroup") String passwordSaltCacheGroup) { LoadingCacheRedisImpl<String, String> l2Cache = new LoadingCacheRedisImpl<>(); l2Cache.setRedisDao(passwordSaltRedisDao); l2Cache.setCacheLoader((email) -> passwordCredentialsFacade.getSaltForEmail(email).toMaybe()); LoadingCacheGuavaImpl<String, String> l1Cache = new LoadingCacheGuavaImpl<>(); l1Cache.setGuavaCache(CacheBuilder.newBuilder().expireAfterWrite(1, TimeUnit.DAYS).build()); l1Cache.setCacheLoader((key) -> l2Cache.get(key)); cacheManager.registerCacheGroup(passwordSaltCacheGroup, l1Cache, l2Cache); return l1Cache; }
private static <T> LoadingCache<Class<T>, AtomicInteger> createCache(Class<T> klass) {
    return CacheBuilder.newBuilder().build(
        new CacheLoader<Class<T>, AtomicInteger>() {
          @Override
          public AtomicInteger load(Class<T> key) throws Exception {
            return new AtomicInteger();
          }
        });
}
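A brief usage sketch for the factory above; the MyEvent class and the call site are hypothetical:

// Hypothetical call site: one lazily created counter per class key.
LoadingCache<Class<MyEvent>, AtomicInteger> counters = createCache(MyEvent.class);
// getUnchecked() avoids the checked ExecutionException; the loader here cannot fail.
int count = counters.getUnchecked(MyEvent.class).incrementAndGet();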
@Autowired
public AuthenticationService(SymphonyClientFactory symphonyClientFactory, TokenGenerator tokenGenerator) {
    this.symphonyClientFactory = symphonyClientFactory;
    this.tokenGenerator = tokenGenerator;

    // Tokens are short lived. Max is just protection from DDoS attacks
    tokenCache = CacheBuilder.newBuilder()
            .maximumSize(1000)
            .expireAfterWrite(5, TimeUnit.MINUTES)
            .build();
}
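Because this cache is built without a CacheLoader, a caller would supply the value per lookup; a minimal sketch, assuming a hypothetical generateToken(String) method on TokenGenerator:

// Hypothetical lookup: the Callable runs only on a cache miss (compute-if-absent).
String userId = "john.doe";
try {
    String token = tokenCache.get(userId, () -> tokenGenerator.generateToken(userId));
} catch (ExecutionException e) {
    throw new IllegalStateException("Token generation failed", e);
}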
@Override
public void init(DeviceId deviceId, PipelinerContext context) {
    this.serviceDirectory = context.directory();
    this.deviceId = deviceId;

    pendingGroups = CacheBuilder.newBuilder()
            .expireAfterWrite(20, TimeUnit.SECONDS)
            .removalListener((RemovalNotification<GroupKey, NextObjective> notification) -> {
                if (notification.getCause() == RemovalCause.EXPIRED) {
                    fail(notification.getValue(), ObjectiveError.GROUPINSTALLATIONFAILED);
                }
            }).build();

    groupChecker.scheduleAtFixedRate(new GroupChecker(), 0, 500, TimeUnit.MILLISECONDS);

    coreService = serviceDirectory.get(CoreService.class);
    flowRuleService = serviceDirectory.get(FlowRuleService.class);
    groupService = serviceDirectory.get(GroupService.class);
    meterService = serviceDirectory.get(MeterService.class);
    deviceService = serviceDirectory.get(DeviceService.class);
    flowObjectiveStore = context.store();

    groupService.addListener(new InnerGroupListener());

    appId = coreService.registerApplication(APPID);

    initializePipeline();
}
private DefaultPluginRegistry(PluginRegistry parent, final PluginInspector pluginInspector, ClassLoaderScope classLoaderScope) {
    this.parent = parent;
    this.pluginInspector = pluginInspector;
    this.classLoaderScope = classLoaderScope;
    this.classMappings = CacheBuilder.newBuilder().build(new PotentialPluginCacheLoader(pluginInspector));
    this.idMappings = CacheBuilder.newBuilder().build(new CacheLoader<PluginIdLookupCacheKey, Optional<PluginImplementation<?>>>() {
        @Override
        public Optional<PluginImplementation<?>> load(@SuppressWarnings("NullableProblems") PluginIdLookupCacheKey key) throws Exception {
            PluginId pluginId = key.getId();
            ClassLoader classLoader = key.getClassLoader();

            PluginDescriptorLocator locator = new ClassloaderBackedPluginDescriptorLocator(classLoader);

            PluginDescriptor pluginDescriptor = locator.findPluginDescriptor(pluginId.toString());
            if (pluginDescriptor == null) {
                return Optional.absent();
            }

            String implClassName = pluginDescriptor.getImplementationClassName();
            if (!GUtil.isTrue(implClassName)) {
                throw new InvalidPluginException(String.format("No implementation class specified for plugin '%s' in %s.", pluginId, pluginDescriptor));
            }

            final Class<?> implClass;
            try {
                implClass = classLoader.loadClass(implClassName);
            } catch (ClassNotFoundException e) {
                throw new InvalidPluginException(String.format(
                        "Could not find implementation class '%s' for plugin '%s' specified in %s.", implClassName, pluginId,
                        pluginDescriptor), e);
            }

            PotentialPlugin<?> potentialPlugin = pluginInspector.inspect(implClass);
            PluginImplementation<Object> withId = new RegistryAwarePluginImplementation(classLoader, pluginId, potentialPlugin);
            return Cast.uncheckedCast(Optional.of(withId));
        }
    });
}
RedisSessionDao() {
    this.lastModifiedTimes = CacheBuilder.newBuilder().expireAfterAccess(60L, TimeUnit.MINUTES).maximumSize(8192L).build();
    this.INTERNAL_MILLIS = TimeUnit.MINUTES.toMillis(2L);
    Properties properties = ReadResourceUtils.getPropertyFile("redis.properties");
    this.redisConfig = new RedisConfig(properties);
}
@Override
protected void internalInit(final WebContext context) {
    CommonHelper.assertNotNull("delegate", this.delegate);
    CommonHelper.assertTrue(cacheSize > 0, "cacheSize must be > 0");
    CommonHelper.assertTrue(timeout > 0, "timeout must be > 0");
    CommonHelper.assertNotNull("timeUnit", this.timeUnit);

    if (delegate instanceof InitializableWebObject) {
        ((InitializableWebObject) delegate).init(context);
    }

    this.cache = CacheBuilder.newBuilder().maximumSize(cacheSize)
            .expireAfterWrite(timeout, timeUnit).build();
}
public CGuild(Guild guild, Charrizard charrizard) {
    this.guild = guild;
    this.charrizard = charrizard;
    this.settings = charrizard.getSettings();
    this.redisConnection = charrizard.getRedisConnection();
    this.executor = new ThreadPoolExecutor(2, 16, 60, TimeUnit.SECONDS, new SynchronousQueue<>());
    this.userCache = CacheBuilder.newBuilder().expireAfterAccess(5, TimeUnit.MINUTES).build();
    this.textChannelCache = CacheBuilder.newBuilder().expireAfterAccess(5, TimeUnit.MINUTES).build();
    this.voiceChannelCache = CacheBuilder.newBuilder().expireAfterAccess(5, TimeUnit.MINUTES).build();
    this.audio = new CAudio(this);
}
/**
 * Create a row cache of a matrix in {row, row-specific columns}.
 *
 * @param cacheSpec cache specification
 * @return a matrix row cache in {row, row-specific columns}
 */
public LoadingCache<Integer, List<Integer>> rowColumnsCache(String cacheSpec) {
    LoadingCache<Integer, List<Integer>> cache = CacheBuilder.from(cacheSpec).build(
            new CacheLoader<Integer, List<Integer>>() {
                @Override
                public List<Integer> load(Integer rowId) throws Exception {
                    return getColumns(rowId);
                }
            });
    return cache;
}
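CacheBuilder.from(cacheSpec) parses a Guava CacheBuilderSpec string, so the cache can be tuned declaratively; a brief sketch in which the spec string and the matrix variable are illustrative:

// Hypothetical call site: the spec uses Guava's CacheBuilderSpec syntax.
LoadingCache<Integer, List<Integer>> rowCache =
        matrix.rowColumnsCache("maximumSize=10000,expireAfterAccess=10m");
List<Integer> columnsOfRow42 = rowCache.getUnchecked(42);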
/**
 * Instantiates a new caching attributes principal factory.
 *
 * @param maxCacheSize   the max cache size
 * @param timeUnit       the time unit
 * @param expiryDuration the expiry duration
 */
public CachingPrincipalAttributesRepository(final long maxCacheSize,
                                            final TimeUnit timeUnit,
                                            final long expiryDuration) {
    super(expiryDuration, timeUnit);
    this.maxCacheSize = maxCacheSize;
    this.cache = CacheBuilder.newBuilder().maximumSize(maxCacheSize)
            .expireAfterWrite(expiryDuration, timeUnit).build(this.cacheLoader);
}
private TaxonomyDataSource getDataSource(final String uuid) {
    synchronized (cacheLock) {
        Institution inst = CurrentInstitution.get();
        Cache<String, TaxonomyDataSource> instEntry = dataSourceCache.getIfPresent(inst);
        if (instEntry == null) {
            instEntry = CacheBuilder.newBuilder().softValues().expireAfterAccess(1, TimeUnit.HOURS).build();
            dataSourceCache.put(inst, instEntry);
        }

        TaxonomyDataSource tds = instEntry.getIfPresent(uuid);
        if (tds == null) {
            final Taxonomy taxonomy = getDao().getByUuid(uuid);
            if (taxonomy == null) {
                throw new NotFoundException("Could not find taxonomy with UUID " + uuid);
            }
            tds = getDataSourceNoCache(taxonomy);
            instEntry.put(uuid, tds);
        }
        return tds;
    }
}
public SkinManager(TextureManager textureManagerInstance, File skinCacheDirectory, MinecraftSessionService sessionService) {
    this.textureManager = textureManagerInstance;
    this.skinCacheDir = skinCacheDirectory;
    this.sessionService = sessionService;
    this.skinCacheLoader = CacheBuilder.newBuilder().expireAfterAccess(15L, TimeUnit.SECONDS)
            .<GameProfile, Map<Type, MinecraftProfileTexture>>build(new CacheLoader<GameProfile, Map<Type, MinecraftProfileTexture>>() {
                public Map<Type, MinecraftProfileTexture> load(GameProfile p_load_1_) throws Exception {
                    return Minecraft.getMinecraft().getSessionService().getTextures(p_load_1_, false);
                }
            });
}
@Override
public LoadingCache<String, HiveReadEntry> load(String key) throws Exception {
    return CacheBuilder
        .newBuilder()
        .expireAfterAccess(1, TimeUnit.MINUTES)
        .build(new TableLoader(key));
}
@Bean
public LoadingCache<OidcRegisteredService, Optional<RsaJsonWebKey>> oidcServiceJsonWebKeystoreCache() {
    final OidcProperties oidc = casProperties.getAuthn().getOidc();
    final LoadingCache<OidcRegisteredService, Optional<RsaJsonWebKey>> cache =
            CacheBuilder.newBuilder().maximumSize(1)
                    .expireAfterWrite(oidc.getJwksCacheInMinutes(), TimeUnit.MINUTES)
                    .build(oidcServiceJsonWebKeystoreCacheLoader());
    return cache;
}
@Bean
public LoadingCache<String, Optional<RsaJsonWebKey>> oidcDefaultJsonWebKeystoreCache() {
    final OidcProperties oidc = casProperties.getAuthn().getOidc();
    final LoadingCache<String, Optional<RsaJsonWebKey>> cache =
            CacheBuilder.newBuilder().maximumSize(1)
                    .expireAfterWrite(oidc.getJwksCacheInMinutes(), TimeUnit.MINUTES)
                    .build(oidcDefaultJsonWebKeystoreCacheLoader());
    return cache;
}
public DefaultDelegatingAuditTrailManager(final AuditTrailManager manager) {
    this.manager = manager;
    this.storage = CacheBuilder.newBuilder()
            .initialCapacity(INITIAL_CACHE_SIZE)
            .maximumSize(MAX_CACHE_SIZE)
            .recordStats()
            .expireAfterWrite(this.expirationDuration, this.expirationTimeUnit)
            .build(new CacheLoader<String, AuditActionContext>() {
                @Override
                public AuditActionContext load(final String s) throws Exception {
                    LOGGER.error("Load operation of the audit cache is not supported.");
                    return null;
                }
            });
}
@Test
public void test_001() throws ExecutionException {
    LoadingCache<String, Object> failedCache = CacheBuilder.newBuilder()
            .softValues().maximumSize(10000)
            .build(new CacheLoader<String, Object>() {
                @Override
                public Object load(String s) throws Exception {
                    return new AtomicInteger(0);
                }
            });
    failedCache.put("00", ((AtomicInteger) failedCache.get("00")).incrementAndGet());
    System.out.println(failedCache.get("00"));
}