diff --git a/src/main/java/com/cedarsoftware/util/LRUCache.java b/src/main/java/com/cedarsoftware/util/LRUCache.java
index 002c0a404..120f72586 100644
--- a/src/main/java/com/cedarsoftware/util/LRUCache.java
+++ b/src/main/java/com/cedarsoftware/util/LRUCache.java
@@ -9,6 +9,51 @@
import com.cedarsoftware.util.cache.LockingLRUCacheStrategy;
import com.cedarsoftware.util.cache.ThreadedLRUCacheStrategy;
+/**
+ * This class provides a thread-safe Least Recently Used (LRU) cache API that will evict the least recently used items,
+ * once a threshold is met. It implements the Map interface for convenience.
+ *
+ * This class provides two implementation strategies: a locking approach and a threaded approach.
+ *
+ * The Locking strategy allows for O(1) access for get(), put(), and remove(). For put(), remove(), and many other
+ * methods, a write-lock is obtained. For get(), it attempts to lock but does not lock unless it can obtain it right away.
+ * This 'try-lock' approach ensures that the get() API is never blocking, but it also means that the LRU order is not
+ * perfectly maintained under heavy load.
+ *
+ * The Threaded strategy allows for O(1) access for get(), put(), and remove() without blocking. It uses a ConcurrentHashMap
+ * internally. To ensure that the capacity is honored, whenever put() is called, a thread (from a thread pool) is tasked
+ * with cleaning up items above the capacity threshold. This means that the cache may temporarily exceed its capacity, but
+ * it will soon be trimmed back to the capacity limit by the scheduled thread.
+ *
+ * LRUCache supports null for both key or value.
+ *
+ * @see LockingLRUCacheStrategy
+ * @see ThreadedLRUCacheStrategy
+ * @see LRUCache.StrategyType
+ *
+ * @author John DeRegnaucourt (jdereg@gmail.com)
+ *
- * LRUCache supports null for key or value.
+ * The Locking strategy allows for O(1) access for get(), put(), and remove(). For put(), remove(), and many other
+ * methods, a write-lock is obtained. For get(), it attempts to lock but does not lock unless it can obtain it right away.
+ * This 'try-lock' approach ensures that the get() API is never blocking, but it also means that the LRU order is not
+ * perfectly maintained under heavy load.
+ *
+ * LRUCache supports null for both key or value.
+ * Special Thanks: This implementation was inspired by insights and suggestions from Ben Manes.
*
* @author John DeRegnaucourt (jdereg@gmail.com)
*
- * LRUCache is thread-safe via usage of ConcurrentHashMap for internal storage. The .get(), .remove(), and .put() APIs
- * operate in O(1) without blocking. When .put() is called, a background cleanup task is scheduled to ensure
- * {@code cache.size <= capacity}. This maintains cache size to capacity, even during bursty loads. It is not immediate;
- * the LRUCache can exceed the capacity during a rapid load; however, it will quickly reduce to max capacity.
+ * The Threaded strategy allows for O(1) access for get(), put(), and remove() without blocking. It uses a ConcurrentHashMap
+ * internally. To ensure that the capacity is honored, whenever put() is called, a thread (from a thread pool) is tasked
+ * with cleaning up items above the capacity threshold. This means that the cache may temporarily exceed its capacity, but
+ * it will soon be trimmed back to the capacity limit by the scheduled thread.
- * LRUCache supports null for key or value.
+ * LRUCache supports null for both key or value.
* @author John DeRegnaucourt (jdereg@gmail.com)
*
+ * Copyright (c) Cedar Software LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
public class LRUCache<K, V> implements Map<K, V> {
@@ -89,6 +96,8 @@ public V get(Object key) {
if (node == null) {
return null;
}
+
+ // Ben Manes suggestion - use exclusive 'try-lock'
if (lock.tryLock()) {
try {
moveToHead(node);
diff --git a/src/main/java/com/cedarsoftware/util/cache/ThreadedLRUCacheStrategy.java b/src/main/java/com/cedarsoftware/util/cache/ThreadedLRUCacheStrategy.java
index ba87fd459..944944e58 100644
--- a/src/main/java/com/cedarsoftware/util/cache/ThreadedLRUCacheStrategy.java
+++ b/src/main/java/com/cedarsoftware/util/cache/ThreadedLRUCacheStrategy.java
@@ -16,16 +16,18 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
+import com.cedarsoftware.util.LRUCache;
+
/**
* This class provides a thread-safe Least Recently Used (LRU) cache API that will evict the least recently used items,
- * once a threshold is met. It implements the Map interface for convenience.
+ * once a threshold is met. It implements the Map interface for convenience.
+ * The Threaded strategy allows for O(1) access for get(), put(), and remove() without blocking. It uses a ConcurrentHashMap
+ * internally. To ensure that the capacity is honored, whenever put() is called, a thread (from a thread pool) is tasked
+ * with cleaning up items above the capacity threshold. This means that the cache may temporarily exceed its capacity, but
+ * it will soon be trimmed back to the capacity limit by the scheduled thread.
 * LRUCache supports null for both key or value.
*
@@ -73,7 +75,7 @@ void updateTimestamp() {
* Create a LRUCache with the maximum capacity of 'capacity.' Note, the LRUCache could temporarily exceed the
* capacity; however, it will quickly reduce to that amount. This time is configurable via the cleanupDelay
* parameter and custom scheduler and executor services.
- *
+ *
* @param capacity int maximum size for the LRU cache.
* @param cleanupDelayMillis int milliseconds before scheduling a cleanup (reduction to capacity if the cache currently
* exceeds it).