Limit number of DAV requests when fetching many objects
This commit is contained in:
parent
454e21c47e
commit
a674b5e9ad
3 changed files with 111 additions and 9 deletions
|
@ -26,13 +26,16 @@
|
|||
|
||||
class kolab_storage_dataset implements Iterator, ArrayAccess, Countable
|
||||
{
|
||||
const CHUNK_SIZE = 25;
|
||||
|
||||
private $cache; // kolab_storage_cache instance to use for fetching data
|
||||
private $memlimit = 0;
|
||||
private $buffer = false;
|
||||
private $index = array();
|
||||
private $data = array();
|
||||
private $index = [];
|
||||
private $data = [];
|
||||
private $iteratorkey = 0;
|
||||
private $error = null;
|
||||
private $chunk = [];
|
||||
|
||||
/**
|
||||
* Default constructor
|
||||
|
@ -79,7 +82,12 @@ class kolab_storage_dataset implements Iterator, ArrayAccess, Countable
|
|||
|
||||
public function offsetSet($offset, $value)
|
||||
{
|
||||
$uid = !empty($value['_msguid']) ? $value['_msguid'] : $value['uid'];
|
||||
if (is_string($value)) {
|
||||
$uid = $value;
|
||||
}
|
||||
else {
|
||||
$uid = !empty($value['_msguid']) ? $value['_msguid'] : $value['uid'];
|
||||
}
|
||||
|
||||
if (is_null($offset)) {
|
||||
$offset = count($this->index);
|
||||
|
@ -110,6 +118,32 @@ class kolab_storage_dataset implements Iterator, ArrayAccess, Countable
|
|||
|
||||
public function offsetGet($offset)
|
||||
{
|
||||
if (isset($this->chunk[$offset])) {
|
||||
return $this->chunk[$offset] ?: null;
|
||||
}
|
||||
|
||||
// The item is a string (object's UID), use multiget method to pre-fetch
|
||||
// multiple objects from the server in one request
|
||||
if (isset($this->data[$offset]) && is_string($this->data[$offset]) && method_exists($this->cache, 'multiget')) {
|
||||
$idx = $offset;
|
||||
$uids = [];
|
||||
|
||||
while (isset($this->index[$idx]) && count($uids) < self::CHUNK_SIZE) {
|
||||
$uids[$idx] = $this->index[$idx];
|
||||
$idx++;
|
||||
}
|
||||
|
||||
if (!empty($uids)) {
|
||||
$this->chunk = $this->cache->multiget($uids);
|
||||
}
|
||||
|
||||
if (isset($this->chunk[$offset])) {
|
||||
return $this->chunk[$offset] ?: null;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
if (isset($this->data[$offset])) {
|
||||
return $this->data[$offset];
|
||||
}
|
||||
|
|
|
@ -252,6 +252,8 @@ class kolab_storage_dav_cache extends kolab_storage_cache
|
|||
* @param string Object UID
|
||||
* @param string Object type to read
|
||||
* @param string Unused (kept for compat. with the parent class)
|
||||
*
|
||||
* @return null|array An array of objects, NULL if not found
|
||||
*/
|
||||
public function get($uid, $type = null, $unused = null)
|
||||
{
|
||||
|
@ -279,6 +281,18 @@ class kolab_storage_dav_cache extends kolab_storage_cache
|
|||
return $object ?: null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Read multiple entries from the server directly
|
||||
*
|
||||
* @param array Object UIDs
|
||||
*
|
||||
* @return false|array An array of objects, False on error
|
||||
*/
|
||||
public function multiget($uids)
{
    // Delegate to the folder backend, which retrieves the whole set of
    // objects in a single DAV multiget request instead of one GET per UID.
    $objects = $this->folder->read_objects($uids);

    return $objects;
}
|
||||
|
||||
/**
|
||||
* Insert/Update a cache entry
|
||||
*
|
||||
|
@ -427,15 +441,19 @@ class kolab_storage_dav_cache extends kolab_storage_cache
|
|||
if ($fast) {
|
||||
$sql_arr['fast-mode'] = true;
|
||||
}
|
||||
|
||||
if ($uids) {
|
||||
$result[] = $sql_arr['uid'];
|
||||
}
|
||||
else if ($fetchall && ($object = $this->_unserialize($sql_arr))) {
|
||||
$result[] = $object;
|
||||
}
|
||||
else if (!$fetchall) {
|
||||
$result[] = $sql_arr;
|
||||
}
|
||||
else if (($object = $this->_unserialize($sql_arr, true))) {
|
||||
$result[] = $object;
|
||||
}
|
||||
else {
|
||||
$result[] = $sql_arr['uid'];
|
||||
}
|
||||
}
|
||||
|
||||
return $result;
|
||||
|
@ -589,7 +607,7 @@ class kolab_storage_dav_cache extends kolab_storage_cache
|
|||
/**
|
||||
* Helper method to turn stored cache data into a valid storage object
|
||||
*/
|
||||
protected function _unserialize($sql_arr)
|
||||
protected function _unserialize($sql_arr, $noread = false)
|
||||
{
|
||||
if ($sql_arr['fast-mode'] && !empty($sql_arr['data']) && ($object = json_decode($sql_arr['data'], true))) {
|
||||
foreach ($this->data_props as $prop) {
|
||||
|
@ -613,9 +631,11 @@ class kolab_storage_dav_cache extends kolab_storage_cache
|
|||
$object['uid'] = $sql_arr['uid'];
|
||||
$object['etag'] = $sql_arr['etag'];
|
||||
}
|
||||
// Fetch a complete object from the server
|
||||
else if ($noread) {
|
||||
return null;
|
||||
}
|
||||
else {
|
||||
// TODO: Fetching objects one-by-one from DAV server is slow
|
||||
// Fetch a complete object from the server
|
||||
$object = $this->folder->read_object($sql_arr['uid'], '*');
|
||||
}
|
||||
|
||||
|
|
|
@ -459,6 +459,54 @@ class kolab_storage_dav_folder extends kolab_storage_folder
|
|||
return $this->from_dav($objects[0]);
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch multiple objects from the DAV server and convert to internal format
|
||||
*
|
||||
* @param array The object UIDs to fetch
|
||||
*
|
||||
* @return mixed Hash array representing the Kolab objects
|
||||
*/
|
||||
/**
 * Fetch multiple objects from the DAV server and convert to internal format
 *
 * @param array The object UIDs to fetch
 *
 * @return false|array Hash array representing the Kolab objects (keyed like
 *                     the input array, False entries for UIDs not found),
 *                     False on error
 */
public function read_objects($uids)
{
    if (!$this->valid) {
        return false;
    }

    if (empty($uids)) {
        return [];
    }

    // Map every requested UID to its DAV location on the server
    $hrefs = [];
    foreach ($uids as $uid) {
        $hrefs[] = $this->object_location($uid);
    }

    // A single multiget request fetches all objects at once
    $objects = $this->dav->getData($this->href, $this->get_dav_type(), $hrefs);

    if (!is_array($objects)) {
        rcube::raise_error([
                'code' => 900,
                // Note: previously interpolated an undefined $href variable
                'message' => "Failed to fetch objects from {$this->href}"
            ], true);
        return false;
    }

    $objects = array_map([$this, 'from_dav'], $objects);

    // Replace each input UID with its fetched object, preserving the order
    // and keys of $uids; UIDs not returned by the server become false.
    // Each server object is consumed at most once (unset below), so
    // duplicate input UIDs only match distinct result objects.
    foreach ($uids as $idx => $uid) {
        foreach ($objects as $oidx => $object) {
            if ($object && $object['uid'] == $uid) {
                $uids[$idx] = $object;
                unset($objects[$oidx]);
                continue 2;
            }
        }

        $uids[$idx] = false;
    }

    return $uids;
}
|
||||
|
||||
/**
|
||||
* Convert DAV object into PHP array
|
||||
*
|
||||
|
|
Loading…
Add table
Reference in a new issue