# File lib/em/buftok.rb, line 59
  def extract(data)
    # Extract token-delimited entities from the input string with the split command.
    # There's a bit of craftiness here with the -1 parameter.  Normally split drops
    # trailing empty strings, so it would behave no differently whether or not the
    # token lies at the very end of the input buffer (a literal edge case).
    # Specifying -1 forces split to return "" in this case, meaning that the last
    # entry in the list always represents a new segment of data where the token has
    # not yet been encountered.
    entities = data.split @delimiter, -1
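    # For example:
    #   "foo\nbar\n".split("\n", -1)  #=> ["foo", "bar", ""]
    #   "foo\nbar".split("\n", -1)    #=> ["foo", "bar"]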

    # Check to see if the incoming data would push the buffer past capacity,
    # if we're imposing a limit
    if @size_limit
      raise 'input buffer full' if @input_size + entities.first.size > @size_limit
      @input_size += entities.first.size
    end
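    # For example, with @size_limit = 4 and @input_size = 2, a delimiterless
    # chunk such as "abc" would trip the raise above (2 + 3 > 4).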

    # Move the first entry in the resulting array into the input buffer.  It represents
    # the last segment of a token-delimited entity unless it's the only entry in the list.
    @input << entities.shift
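    # For example, with @input = ["fo"] and data = "o\nbar", the split yields
    # ["o", "bar"], and the shift above leaves @input = ["fo", "o"].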

    # If the resulting array from the split is empty, the token was not encountered
    # (not even at the end of the buffer).  Since we've encountered no token-delimited
    # entities this go-around, return an empty array.
    return [] if entities.empty?
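    # (For instance, a delimiterless chunk such as data = "par" is buffered in
    # its entirety by the shift above and returns [] here.)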

    # At this point, we've hit a token, or potentially multiple tokens.  Now we can bring
    # together all the data we've buffered from earlier calls without hitting a token,
    # and add it to our list of discovered entities.
    entities.unshift @input.join
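    # Continuing the example above: @input.join is "foo", so entities goes
    # from ["bar"] to ["foo", "bar"].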

    # Now that we've hit a token, joined the input buffer and added it to the entities
    # list, we can go ahead and clear the input buffer.  All of the segments that were
    # stored before the join can now be garbage collected.
    @input.clear

    # The last entity in the list is not token delimited, however, thanks to the -1
    # passed to split.  It represents the beginning of a new segment of as-yet-untokenized
    # data, so we push it back into the now-empty input buffer.
    @input << entities.pop
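    # Continuing the example above: "bar" is popped off entities into the
    # empty input buffer, leaving entities == ["foo"].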

    # Set the new input buffer size, provided we're keeping track
    @input_size = @input.first.size if @size_limit

    # Now we're left with the list of extracted token-delimited entities we wanted
    # in the first place.  Hooray!
    entities
  end
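
A minimal usage sketch for the method above (illustrative only, assuming the
surrounding BufferedTokenizer class is constructed in the usual way, e.g.
BufferedTokenizer.new("\n") with no size limit):

  tokenizer = BufferedTokenizer.new("\n")
  tokenizer.extract("foo\nbar")    #=> ["foo"]            ("bar" stays buffered)
  tokenizer.extract("baz\nqux\n")  #=> ["barbaz", "qux"]  ("" seeds the next buffer)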