mirror of
https://github.com/corda/corda.git
#592: Address more comments
@@ -148,13 +148,19 @@ fun <T : Any> T.serialize(kryo: KryoPool = p2PKryo(), internalOnly: Boolean = fa
 }
 
-private val serializeBufferPool = LazyPool { ByteArray(64 * 1024) }
-private val serializeOutputStreamPool = LazyPool(ByteArrayOutputStream::reset) { ByteArrayOutputStream(64 * 1024) }
+private val serializeBufferPool = LazyPool(
+        newInstance = { ByteArray(64 * 1024) }
+)
+private val serializeOutputStreamPool = LazyPool(
+        clear = ByteArrayOutputStream::reset,
+        shouldReturnToPool = { it.size() < 256 * 1024 }, // Discard if it grew too large
+        newInstance = { ByteArrayOutputStream(64 * 1024) }
+)
 
 fun <T : Any> T.serialize(kryo: Kryo, internalOnly: Boolean = false): SerializedBytes<T> {
     return serializeOutputStreamPool.run { stream ->
         serializeBufferPool.run { buffer ->
             Output(buffer).use {
-                it.setOutputStream(stream)
+                it.outputStream = stream
                 it.writeBytes(KryoHeaderV0_1.bytes)
                 kryo.writeClassAndObject(it, this)
             }
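Note on the shouldReturnToPool predicate configured above: ByteArrayOutputStream.size() reports the number of bytes written so far, so a stream that has grown past 256 KiB is not returned to the pool and a fresh 64 KiB stream is created on the next borrow. A minimal, self-contained sketch of that behaviour (illustration only, not part of the diff):

    import java.io.ByteArrayOutputStream

    fun main() {
        // Same predicate as serializeOutputStreamPool above.
        val shouldReturnToPool: (ByteArrayOutputStream) -> Boolean = { it.size() < 256 * 1024 }

        val small = ByteArrayOutputStream(64 * 1024)
        small.write(ByteArray(1024))        // 1 KiB written so far
        println(shouldReturnToPool(small))  // true: instance would be returned to the pool

        val large = ByteArrayOutputStream(64 * 1024)
        large.write(ByteArray(512 * 1024))  // grew to 512 KiB
        println(shouldReturnToPool(large))  // false: instance would be discarded on release
    }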
@@ -1,23 +1,28 @@
 package net.corda.core.utilities
 
+import java.util.concurrent.ConcurrentLinkedQueue
-import java.util.concurrent.LinkedBlockingQueue
 import java.util.concurrent.atomic.AtomicBoolean
+import java.util.concurrent.Semaphore
 
 /**
  * A lazy pool of resources [A].
  *
  * @param clear If specified this function will be run on each borrowed instance before handing it over.
+ * @param shouldReturnToPool If specified this function will be run on each release to determine whether the instance
+ * should be returned to the pool for reuse. This may be useful for pooled resources that dynamically grow during
+ * usage, and we may not want to retain them forever.
  * @param bound If specified the pool will be bounded. Once all instances are borrowed subsequent borrows will block until an
  * instance is released.
- * @param create The function to call to lazily create a pooled resource.
+ * @param newInstance The function to call to lazily newInstance a pooled resource.
  */
 class LazyPool<A>(
         private val clear: ((A) -> Unit)? = null,
+        private val shouldReturnToPool: ((A) -> Boolean)? = null,
         private val bound: Int? = null,
-        private val create: () -> A
+        private val newInstance: () -> A
 ) {
-    private val poolQueue = LinkedBlockingQueue<A>()
-    private var poolSize = 0
+    private val poolQueue = ConcurrentLinkedQueue<A>()
+    private val poolSemaphore = Semaphore(bound ?: Int.MAX_VALUE)
 
     private enum class State {
         STARTED,
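A minimal usage sketch of the LazyPool constructor documented above (borrow and release appear in the next two hunks). The StringBuilder resource and the thresholds here are illustrative assumptions, not taken from the diff:

    val builderPool = LazyPool(
            clear = { it.setLength(0) },                   // reset each instance before it is handed out
            shouldReturnToPool = { it.capacity() < 4096 }, // drop builders that grew too large
            bound = 4,                                     // at most 4 instances may be outstanding at once
            newInstance = { StringBuilder(256) }
    )

    fun render(x: Int): String {
        val sb = builderPool.borrow()
        try {
            return sb.append("value=").append(x).toString()
        } finally {
            builderPool.release(sb)
        }
    }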
@@ -32,23 +37,10 @@ class LazyPool<A>(
 
     fun borrow(): A {
         lifeCycle.requireState(State.STARTED)
+        poolSemaphore.acquire()
         val pooled = poolQueue.poll()
         if (pooled == null) {
-            if (bound != null) {
-                val waitForRelease = synchronized(this) {
-                    if (poolSize < bound) {
-                        poolSize++
-                        false
-                    } else {
-                        true
-                    }
-                }
-                if (waitForRelease) {
-                    // Wait until one is released
-                    return clearIfNeeded(poolQueue.take())
-                }
-            }
-            return create()
+            return newInstance()
         } else {
             return clearIfNeeded(pooled)
         }
@@ -56,7 +48,10 @@ class LazyPool<A>(
 
     fun release(instance: A) {
         lifeCycle.requireState(State.STARTED)
-        poolQueue.add(instance)
+        if (shouldReturnToPool == null || shouldReturnToPool.invoke(instance)) {
+            poolQueue.add(instance)
+        }
+        poolSemaphore.release()
     }
 
     /**
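With this change the bound is enforced by a Semaphore that borrow() acquires and release() releases, instead of the previous poolSize counter. A hedged sketch of what that means for callers (assuming the LazyPool shown above): with bound = 1, a second borrow blocks until the first instance is released.

    import kotlin.concurrent.thread

    fun main() {
        val pool = LazyPool(bound = 1, newInstance = { ByteArray(16) })

        val first = pool.borrow()          // takes the only permit
        val worker = thread {
            val second = pool.borrow()     // blocks here until release(first) below
            println("second borrow got ${second.size} bytes")
            pool.release(second)
        }

        Thread.sleep(100)                  // give the worker time to reach the blocking borrow
        pool.release(first)                // frees the permit; the worker proceeds
        worker.join()
    }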
@@ -6,10 +6,17 @@ import java.util.concurrent.LinkedBlockingQueue
 /**
  * A [LazyStickyPool] is a lazy pool of resources where a [borrow] may "stick" the borrowed instance to an object.
  * Any subsequent borrows using the same object will return the same pooled instance.
  *
  * @param size The size of the pool.
+ * @param shouldReturnToPool If specified this function will be run on each release to determine whether the instance
+ * should be returned to the pool for reuse. This may be useful for pooled resources that dynamically grow during
+ * usage, and we may not want to retain them forever.
  * @param newInstance The function to call to create a pooled resource.
  */
 // TODO This could be implemented more efficiently. Currently the "non-sticky" use case is not optimised, it just chooses a random instance to wait on.
 class LazyStickyPool<A : Any>(
         size: Int,
+        private val shouldReturnToPool: ((A) -> Boolean)? = null,
         private val newInstance: () -> A
 ) {
     private class InstanceBox<A> {
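A usage sketch of the sticky behaviour described in the KDoc above, going through the run() helper shown later in this file; the Session class and the StringBuilder resource are illustrative assumptions:

    class Session(val id: Int)

    val stickyPool = LazyStickyPool(size = 4, newInstance = { StringBuilder() })

    fun handle(session: Session, message: String): String {
        // Every call that sticks to the same session object is served by the same StringBuilder.
        return stickyPool.run(session) { sb ->
            sb.append(message).append('\n')
            sb.toString()
        }
    }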
@@ -45,7 +52,12 @@ class LazyStickyPool<A : Any>(
 
     fun release(stickTo: Any, instance: A) {
         val box = boxes[toIndex(stickTo)]
-        box.instance!!.add(instance)
+        if (shouldReturnToPool == null || shouldReturnToPool.invoke(instance)) {
+            box.instance!!.add(instance)
+        } else {
+            // We need to create a new instance instead of setting the queue to null to unblock potentially waiting threads.
+            box.instance!!.add(newInstance())
+        }
     }
 
     inline fun <R> run(stickToOrNull: Any? = null, withInstance: (A) -> R): R {
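The design choice in release() above: when shouldReturnToPool rejects an instance, a replacement from newInstance() is queued in its place rather than leaving the box empty, so a thread blocked waiting on that sticky slot still receives an instance. An illustrative configuration only (the resource and threshold are assumptions):

    val boundedStickyPool = LazyStickyPool(
            size = 2,
            shouldReturnToPool = { it.capacity() <= 1024 }, // builders that grew past 1 KiB are not reused
            newInstance = { StringBuilder(64) }              // also used to replace any rejected instance
    )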
core/src/main/kotlin/net/corda/core/utilities/Rate.kt (new file, 29 lines)
@@ -0,0 +1,29 @@
+package net.corda.core.utilities
+
+import java.time.Duration
+import java.time.temporal.ChronoUnit
+import java.util.concurrent.TimeUnit
+
+/**
+ * [Rate] holds a quantity denoting the frequency of some event e.g. 100 times per second or 2 times per day.
+ */
+data class Rate(
+        val numberOfEvents: Long,
+        val perTimeUnit: TimeUnit
+) {
+    /**
+     * Returns the interval between two subsequent events.
+     */
+    fun toInterval(): Duration {
+        return Duration.of(TimeUnit.NANOSECONDS.convert(1, perTimeUnit) / numberOfEvents, ChronoUnit.NANOS)
+    }
+
+    /**
+     * Converts the number of events to the given unit.
+     */
+    operator fun times(inUnit: TimeUnit): Long {
+        return inUnit.convert(numberOfEvents, perTimeUnit)
+    }
+}
+
+operator fun Long.div(timeUnit: TimeUnit) = Rate(this, timeUnit)
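A short usage sketch of Rate (assuming the div extension above is in scope): 100 events per second corresponds to one event every 10 milliseconds.

    import java.time.Duration
    import java.util.concurrent.TimeUnit
    import net.corda.core.utilities.div

    fun main() {
        val rate = 100L / TimeUnit.SECONDS                  // Rate(numberOfEvents = 100, perTimeUnit = SECONDS)
        println(rate.toInterval())                          // PT0.01S, i.e. 10 ms between events
        println(rate.toInterval() == Duration.ofMillis(10)) // true
    }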