module ThreadSafe

Constants

Array

Because MRI never runs Ruby code in parallel (the global VM lock serialises execution), the existing non-thread-safe core structures should usually work fine; see the usage sketch after this list.

ConcurrentCacheBackend

Hash

NULL

Various classes within the library allow nil values to be stored, so a special NULL token is required to indicate "nil-ness"; see the sketch after this list.

VERSION
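
A minimal usage sketch of the Array alias mentioned above (assuming the thread_safe gem is available; the result comment describes MRI, where the alias is simply ::Array):

require 'thread_safe'

ary = ThreadSafe::Array.new             # plain ::Array on MRI
threads = 10.times.map { Thread.new { 100.times { ary << 1 } } }
threads.each(&:join)
ary.size  # => 1000 on MRI, where the GVL keeps Array#push effectively atomic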
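
And a standalone sketch of why the NULL sentinel is needed (illustration only, not the gem's internals): with nil as a legal stored value, a bare nil cannot distinguish "key absent" from "key holds nil", whereas a private token can.

NULL_TOKEN = Object.new  # stand-in for ThreadSafe::NULL

def fetch_value(backing, key)
  raw = backing[key]
  return :absent if raw.nil?          # key was never stored at all
  raw.equal?(NULL_TOKEN) ? nil : raw  # a stored "nil" comes back as nil
end

backing = { a: NULL_TOKEN, b: 42 }
fetch_value(backing, :a)  # => nil (nil was deliberately stored under :a)
fetch_value(backing, :b)  # => 42
fetch_value(backing, :c)  # => :absent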

Public Class Methods

allocate()
Calls superclass method
# File lib/thread_safe.rb, line 49
def self.allocate
  obj = super                  # build the instance as usual
  obj.send(:_mon_initialize)   # guarantee the monitor exists even when #initialize is bypassed
  obj
end
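
The override matters because allocate builds an instance without ever running #initialize; hooking it guarantees every object carries a monitor. A hypothetical illustration (Thing stands in for any class that gains these methods):

thing = Thing.allocate                   # #initialize never runs here
thing.instance_variable_get(:@_monitor)  # => #<Monitor: ...>, thanks to the hook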

Public Instance Methods

_mon_initialize()
# File lib/thread_safe.rb, line 45
def _mon_initialize
  @_monitor = Monitor.new unless @_monitor # avoid double initialisation
end
decrement_size(by = 1)
# File lib/thread_safe/atomic_reference_cache_backend.rb, line 904
def decrement_size(by = 1)
  @counter.add(-by)
end
increment_size()
# File lib/thread_safe/atomic_reference_cache_backend.rb, line 900
def increment_size
  @counter.increment
end
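
Both methods delegate to the backend's @counter, so sizing never touches the bin locks. A minimal mutex-based stand-in with the same add/increment interface (illustrative only; the gem's counter is a more scalable concurrent adder):

class SimpleAdder  # illustrative stand-in for @counter
  def initialize
    @mutex = Mutex.new
    @value = 0
  end

  def add(amount)
    @mutex.synchronize { @value += amount }
  end

  def increment
    add(1)
  end

  def sum
    @mutex.synchronize { @value }
  end
end
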
lock_and_clean_up_reverse_forwarders(old_table, old_table_size, new_table, i, forwarder)
# File lib/thread_safe/atomic_reference_cache_backend.rb, line 847
def lock_and_clean_up_reverse_forwarders(old_table, old_table_size, new_table, i, forwarder)
  # transiently use a locked forwarding node
  locked_forwarder = Node.new(moved_locked_hash = MOVED | LOCKED, new_table, NULL)
  if old_table.cas(i, nil, locked_forwarder)
    new_table.volatile_set(i, nil) # kill the potential reverse forwarders
    new_table.volatile_set(i + old_table_size, nil) # kill the potential reverse forwarders
    old_table.volatile_set(i, forwarder)
    locked_forwarder.unlock_via_hash(moved_locked_hash, MOVED)
    true
  end
end
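
The old_table.cas call above is a compare-and-set on slot i: the locked forwarder is installed only if the slot still holds nil, so two resizing threads can never both claim the same bin. A single-slot sketch of CAS semantics (hypothetical class; the real backend CASes array slots with atomic primitives rather than a mutex):

class Slot  # hypothetical single-value cell
  def initialize(value = nil)
    @mutex = Mutex.new
    @value = value
  end

  # Atomically swap in new_value only if the current value is still expected.
  def cas(expected, new_value)
    @mutex.synchronize do
      return false unless @value.equal?(expected)
      @value = new_value
      true
    end
  end
end

slot = Slot.new
slot.cas(nil, :locked_forwarder)  # => true; this thread claimed the bin
slot.cas(nil, :other)             # => false; the bin was already claimed
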
rebuild(table)

Moves and/or copies the nodes in each bin to the new table. See the explanation earlier in the file.

# File lib/thread_safe/atomic_reference_cache_backend.rb, line 801
def rebuild(table)
  old_table_size = table.size
  new_table      = table.next_in_size_table
  # puts "#{old_table_size} -> #{new_table.size}"
  forwarder      = Node.new(MOVED, new_table, NULL)
  rev_forwarder  = nil
  locked_indexes = nil # holds bins to revisit; nil until needed
  locked_arr_idx = 0
  bin            = old_table_size - 1
  i              = bin
  while true
    if !(node = table.volatile_get(i))
      # no lock needed (or available) if bin >= 0, because we're not popping values from locked_indexes until we've run through the whole table
      redo unless (bin >= 0 ? table.cas(i, nil, forwarder) : lock_and_clean_up_reverse_forwarders(table, old_table_size, new_table, i, forwarder))
    elsif Node.locked_hash?(node_hash = node.hash)
      locked_indexes ||= Array.new
      if bin < 0 && locked_arr_idx > 0
        locked_arr_idx -= 1
        i, locked_indexes[locked_arr_idx] = locked_indexes[locked_arr_idx], i # swap with another bin
        redo
      end
      if bin < 0 || locked_indexes.size >= TRANSFER_BUFFER_SIZE
        node.try_await_lock(table, i) # no other options -- block
        redo
      end
      rev_forwarder ||= Node.new(MOVED, table, NULL)
      redo unless table.volatile_get(i) == node && node.locked? # recheck before adding to list
      locked_indexes << i
      new_table.volatile_set(i, rev_forwarder)
      new_table.volatile_set(i + old_table_size, rev_forwarder)
    else
      redo unless split_old_bin(table, new_table, i, node, node_hash, forwarder)
    end

    if bin > 0
      i = (bin -= 1)
    elsif locked_indexes && !locked_indexes.empty?
      bin = -1
      i = locked_indexes.pop
      locked_arr_idx = locked_indexes.size - 1
    else
      return new_table
    end
  end
end
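
The control flow above is easier to see in isolation: scan bins from the top of the table down, defer any bin currently locked by another thread, and then drain the deferred bins (the bin < 0 phase) until every slot holds a forwarder. A simplified, hypothetical sketch of that schedule, with all locking details removed:

# process.call(i) returns false when bin i is busy and must be revisited.
def drain_bins(table_size, process)
  deferred = []
  (table_size - 1).downto(0) do |i|   # first pass: high index to low
    deferred << i unless process.call(i)
  end
  until deferred.empty?               # revisit pass, akin to bin == -1 above
    i = deferred.pop
    deferred.unshift(i) unless process.call(i)  # rotate stragglers to retry later
  end
end
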
split_bin(new_table, i, node, node_hash)
# File lib/thread_safe/atomic_reference_cache_backend.rb, line 867
def split_bin(new_table, i, node, node_hash)
  bit          = new_table.size >> 1 # bit to split on
  run_bit      = node_hash & bit
  last_run     = nil
  low          = nil
  high         = nil
  current_node = node
  # this optimises for the fewest volatile writes and newly created objects
  while current_node = current_node.next
    unless (b = current_node.hash & bit) == run_bit
      run_bit  = b
      last_run = current_node
    end
  end
  if run_bit == 0
    low = last_run
  else
    high = last_run
  end
  current_node = node
  until current_node == last_run
    pure_hash = current_node.pure_hash
    if (pure_hash & bit) == 0
      low = Node.new(pure_hash, current_node.key, current_node.value, low)
    else
      high = Node.new(pure_hash, current_node.key, current_node.value, high)
    end
    current_node = current_node.next
  end
  new_table.volatile_set(i, low)
  new_table.volatile_set(i + bit, high)
end
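
A worked illustration of the split arithmetic with hypothetical hash values: when an 8-slot table grows to 16, bit is 8, and every entry in old bin i lands in new bin i (low half) or i + 8 (high half) according to that single hash bit:

bit = 16 >> 1                       # new_table.size >> 1  => 8
i   = 3                             # an old bin index

hashes = [0b0011, 0b1011, 0b10011]  # 3, 11, 19 -- all hash to old bin 3 (h % 8 == 3)
hashes.map { |h| (h & bit).zero? ? i : i + bit }
# => [3, 11, 3] -- low entries stay in bin 3, the high entry moves to bin 3 + 8
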
split_old_bin(table, new_table, i, node, node_hash, forwarder)

Splits a normal bin, whose list is headed by node, into low and high parts and installs them in the given table.

# File lib/thread_safe/atomic_reference_cache_backend.rb, line 860
def split_old_bin(table, new_table, i, node, node_hash, forwarder)
  table.try_lock_via_hash(i, node, node_hash) do
    split_bin(new_table, i, node, node_hash)
    table.volatile_set(i, forwarder)
  end
end
try_in_resize_lock(current_table, size_ctrl) { || ... }
# File lib/thread_safe/atomic_reference_cache_backend.rb, line 788
def try_in_resize_lock(current_table, size_ctrl)
  if cas_size_control(size_ctrl, NOW_RESIZING)
    begin
      if current_table == table # recheck under lock
        size_ctrl = yield # get new size_control
      end
    ensure
      self.size_control = size_ctrl
    end
  end
end
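
In plain Ruby the closest analogue to this pattern is Mutex#try_lock: proceed only if the claim succeeds, and use ensure so the claim is always released. A minimal sketch of that analogy (the gem instead CASes the numeric size-control word, which lets the same field also carry the next resize threshold):

resize_lock = Mutex.new

def try_in_resize(lock)
  return false unless lock.try_lock  # another thread owns the resize; bail out
  begin
    yield                            # perform the resize work under the claim
  ensure
    lock.unlock                      # always release, mirroring the ensure above
  end
  true
end

try_in_resize(resize_lock) { "resize work goes here" }  # => true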