1 /*
2 * Copyright (C) 2005 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "Vector"
18
19 #include <utils/VectorImpl.h>
20
21 #include <stdio.h>
22 #include <stdlib.h>
23 #include <string.h>
24
25 #include <log/log.h>
26
27 #include "SharedBuffer.h"
28
29 /*****************************************************************************/
30
31
32 namespace android {
33
34 // ----------------------------------------------------------------------------
35
36 const size_t kMinVectorCapacity = 4;
37
// Return the larger of two sizes (file-local helper).
static inline size_t max(size_t a, size_t b) {
    return (a < b) ? b : a;
}
41
42 // ----------------------------------------------------------------------------
43
// Construct an empty vector. |itemSize| is the byte size of one element;
// |flags| carries the HAS_TRIVIAL_* bits that let us skip virtual
// construct/destroy/copy calls for POD-like element types.
VectorImpl::VectorImpl(size_t itemSize, uint32_t flags)
    : mStorage(nullptr), mCount(0), mFlags(flags), mItemSize(itemSize)
{
}
48
// Copy constructor: shares rhs's SharedBuffer (copy-on-write) rather than
// deep-copying; the buffer is only duplicated later in editArrayImpl().
VectorImpl::VectorImpl(const VectorImpl& rhs)
    : mStorage(rhs.mStorage), mCount(rhs.mCount),
      mFlags(rhs.mFlags), mItemSize(rhs.mItemSize)
{
    if (mStorage) {
        // Take a reference on the shared buffer we now point at.
        SharedBuffer::bufferFromData(mStorage)->acquire();
    }
}
57
// Destructor. Subclasses must call finish_vector() in their own destructor:
// releasing storage here would require the (already destroyed) subclass
// vtable to run element destructors, so we can only warn about the leak.
VectorImpl::~VectorImpl()
{
    ALOGW_IF(mCount,
        "[%p] subclasses of VectorImpl must call finish_vector()"
        " in their destructor. Leaking %d bytes.",
        this, (int)(mCount*mItemSize));
    // We can't call _do_destroy() here because the vtable is already gone.
}
66
// Copy assignment: drops our storage and shares rhs's buffer (copy-on-write).
// Aborts if the two vectors were instantiated with different item sizes.
VectorImpl& VectorImpl::operator = (const VectorImpl& rhs)
{
    LOG_ALWAYS_FATAL_IF(mItemSize != rhs.mItemSize,
        "Vector<> have different types (this=%p, rhs=%p)", this, &rhs);
    if (this != &rhs) {
        // Release our old buffer first, then take a reference on rhs's.
        release_storage();
        if (rhs.mCount) {
            mStorage = rhs.mStorage;
            mCount = rhs.mCount;
            SharedBuffer::bufferFromData(mStorage)->acquire();
        } else {
            // rhs is empty: end up with no storage at all.
            mStorage = nullptr;
            mCount = 0;
        }
    }
    return *this;
}
84
// Return a mutable pointer to the element array, performing the copy-on-write
// step if the underlying SharedBuffer is shared with another vector.
// Returns nullptr only when there is no storage at all (empty vector).
void* VectorImpl::editArrayImpl()
{
    if (mStorage) {
        const SharedBuffer* sb = SharedBuffer::bufferFromData(mStorage);
        // attemptEdit() succeeds iff we are the sole owner of the buffer.
        SharedBuffer* editable = sb->attemptEdit();
        if (editable == nullptr) {
            // If we're here, we're not the only owner of the buffer.
            // We must make a copy of it.
            editable = SharedBuffer::alloc(sb->size());
            // Fail instead of returning a pointer to storage that's not
            // editable. Otherwise we'd be editing the contents of a buffer
            // for which we're not the only owner, which is undefined behaviour.
            LOG_ALWAYS_FATAL_IF(editable == nullptr);
            _do_copy(editable->data(), mStorage, mCount);
            // Drop our reference on the shared buffer and switch to the copy.
            release_storage();
            mStorage = editable->data();
        }
    }
    return mStorage;
}
105
capacity() const106 size_t VectorImpl::capacity() const
107 {
108 if (mStorage) {
109 return SharedBuffer::bufferFromData(mStorage)->size() / mItemSize;
110 }
111 return 0;
112 }
113
insertVectorAt(const VectorImpl & vector,size_t index)114 ssize_t VectorImpl::insertVectorAt(const VectorImpl& vector, size_t index)
115 {
116 return insertArrayAt(vector.arrayImpl(), index, vector.size());
117 }
118
appendVector(const VectorImpl & vector)119 ssize_t VectorImpl::appendVector(const VectorImpl& vector)
120 {
121 return insertVectorAt(vector, size());
122 }
123
insertArrayAt(const void * array,size_t index,size_t length)124 ssize_t VectorImpl::insertArrayAt(const void* array, size_t index, size_t length)
125 {
126 if (index > size())
127 return BAD_INDEX;
128 void* where = _grow(index, length);
129 if (where) {
130 _do_copy(where, array, length);
131 }
132 return where ? index : (ssize_t)NO_MEMORY;
133 }
134
appendArray(const void * array,size_t length)135 ssize_t VectorImpl::appendArray(const void* array, size_t length)
136 {
137 return insertArrayAt(array, size(), length);
138 }
139
// Insert |numItems| default-constructed items at |index|
// (nullptr prototype means "construct in place" — see the 3-arg overload).
ssize_t VectorImpl::insertAt(size_t index, size_t numItems)
{
    return insertAt(nullptr, index, numItems);
}
144
insertAt(const void * item,size_t index,size_t numItems)145 ssize_t VectorImpl::insertAt(const void* item, size_t index, size_t numItems)
146 {
147 if (index > size())
148 return BAD_INDEX;
149 void* where = _grow(index, numItems);
150 if (where) {
151 if (item) {
152 _do_splat(where, item, numItems);
153 } else {
154 _do_construct(where, numItems);
155 }
156 }
157 return where ? index : (ssize_t)NO_MEMORY;
158 }
159
// Adapter so a state-less compar_t can be driven through the stateful
// compar_r_t sort path: |func| smuggles the user's comparator.
static int sortProxy(const void* lhs, const void* rhs, void* func)
{
    return (*(VectorImpl::compar_t)func)(lhs, rhs);
}
164
// Sort with a plain comparator by delegating to the stateful overload,
// passing the comparator itself as the opaque state.
status_t VectorImpl::sort(VectorImpl::compar_t cmp)
{
    return sort(sortProxy, (void*)cmp);
}
169
// Stable in-place insertion sort using the caller's comparator.
// The array is only made writable (copy-on-write) the first time an
// out-of-order pair is found, so an already-sorted vector is never copied.
status_t VectorImpl::sort(VectorImpl::compar_r_t cmp, void* state)
{
    // the sort must be stable. we're using insertion sort which
    // is well suited for small and already sorted arrays
    // for big arrays, it could be better to use mergesort
    const ssize_t count = size();
    if (count > 1) {
        void* array = const_cast<void*>(arrayImpl());
        void* temp = nullptr;   // one-item scratch slot, lazily allocated
        ssize_t i = 1;
        while (i < count) {
            void* item = reinterpret_cast<char*>(array) + mItemSize*(i);
            void* curr = reinterpret_cast<char*>(array) + mItemSize*(i-1);
            if (cmp(curr, item, state) > 0) {

                if (!temp) {
                    // we're going to have to modify the array...
                    array = editArrayImpl();
                    if (!array) return NO_MEMORY;
                    temp = malloc(mItemSize);
                    if (!temp) return NO_MEMORY;
                    // recompute item/curr: editArrayImpl() may have moved
                    // the storage (copy-on-write).
                    item = reinterpret_cast<char*>(array) + mItemSize*(i);
                    curr = reinterpret_cast<char*>(array) + mItemSize*(i-1);
                } else {
                    // destroy the leftover from the previous iteration
                    // before reusing the scratch slot.
                    _do_destroy(temp, 1);
                }

                // save the out-of-place item into the scratch slot.
                _do_copy(temp, item, 1);

                // shift larger elements one slot to the right until the
                // insertion point for |temp| is found.
                ssize_t j = i-1;
                void* next = reinterpret_cast<char*>(array) + mItemSize*(i);
                do {
                    _do_destroy(next, 1);
                    _do_copy(next, curr, 1);
                    next = curr;
                    --j;
                    curr = nullptr;
                    if (j >= 0) {
                        curr = reinterpret_cast<char*>(array) + mItemSize*(j);
                    }
                } while (j>=0 && (cmp(curr, temp, state) > 0));

                // drop the saved item into its final position.
                _do_destroy(next, 1);
                _do_copy(next, temp, 1);
            }
            i++;
        }

        if (temp) {
            _do_destroy(temp, 1);
            free(temp);
        }
    }
    return OK;
}
225
pop()226 void VectorImpl::pop()
227 {
228 if (size())
229 removeItemsAt(size()-1, 1);
230 }
231
// Append one default-constructed item at the end.
void VectorImpl::push()
{
    push(nullptr);
}
236
// Append one item at the end; |item| may be nullptr to default-construct.
void VectorImpl::push(const void* item)
{
    insertAt(item, size());
}
241
// Append one default-constructed item; returns its index or an error.
ssize_t VectorImpl::add()
{
    return add(nullptr);
}
246
add(const void * item)247 ssize_t VectorImpl::add(const void* item)
248 {
249 return insertAt(item, size());
250 }
251
// Replace the item at |index| with a default-constructed one.
ssize_t VectorImpl::replaceAt(size_t index)
{
    return replaceAt(nullptr, index);
}
256
// Replace the item at |index|: destroy it, then either copy-construct from
// |prototype| or default-construct when |prototype| is nullptr.
// Returns the index on success, BAD_INDEX or NO_MEMORY on failure.
ssize_t VectorImpl::replaceAt(const void* prototype, size_t index)
{
    ALOG_ASSERT(index<size(),
        "[%p] replace: index=%d, size=%d", this, (int)index, (int)size());

    if (index >= size()) {
        return BAD_INDEX;
    }

    // editItemLocation() triggers copy-on-write if the buffer is shared.
    void* item = editItemLocation(index);
    if (item != prototype) {
        if (item == nullptr)
            return NO_MEMORY;
        _do_destroy(item, 1);
        if (prototype == nullptr) {
            _do_construct(item, 1);
        } else {
            _do_copy(item, prototype, 1);
        }
    }
    return ssize_t(index);
}
279
// Remove |count| items starting at |index|. Aborts on index+count overflow,
// returns BAD_VALUE if the range extends past the end, else the index.
ssize_t VectorImpl::removeItemsAt(size_t index, size_t count)
{
    size_t end;
    LOG_ALWAYS_FATAL_IF(__builtin_add_overflow(index, count, &end), "overflow: index=%zu count=%zu",
                        index, count);
    if (end > size()) return BAD_VALUE;
    _shrink(index, count);
    return index;
}
289
// Release storage and reset to the empty state. Must be called from
// subclass destructors while their vtable is still valid (see ~VectorImpl).
void VectorImpl::finish_vector()
{
    release_storage();
    mStorage = nullptr;
    mCount = 0;
}
296
// Remove all items (may also shrink the underlying allocation — see _shrink).
void VectorImpl::clear()
{
    _shrink(0, mCount);
}
301
editItemLocation(size_t index)302 void* VectorImpl::editItemLocation(size_t index)
303 {
304 ALOG_ASSERT(index<capacity(),
305 "[%p] editItemLocation: index=%d, capacity=%d, count=%d",
306 this, (int)index, (int)capacity(), (int)mCount);
307
308 if (index < capacity()) {
309 void* buffer = editArrayImpl();
310 if (buffer) {
311 return reinterpret_cast<char*>(buffer) + index*mItemSize;
312 }
313 }
314 return nullptr;
315 }
316
itemLocation(size_t index) const317 const void* VectorImpl::itemLocation(size_t index) const
318 {
319 ALOG_ASSERT(index<capacity(),
320 "[%p] itemLocation: index=%d, capacity=%d, count=%d",
321 this, (int)index, (int)capacity(), (int)mCount);
322
323 if (index < capacity()) {
324 const void* buffer = arrayImpl();
325 if (buffer) {
326 return reinterpret_cast<const char*>(buffer) + index*mItemSize;
327 }
328 }
329 return nullptr;
330 }
331
// Grow the allocation to hold |new_capacity| items. Shrink requests
// (new_capacity <= size()) are ignored and the current capacity returned.
// Returns the new capacity, or NO_MEMORY on allocation failure.
ssize_t VectorImpl::setCapacity(size_t new_capacity)
{
    // The capacity must always be greater than or equal to the size
    // of this vector.
    if (new_capacity <= size()) {
        return capacity();
    }

    size_t new_allocation_size = 0;
    LOG_ALWAYS_FATAL_IF(__builtin_mul_overflow(new_capacity, mItemSize, &new_allocation_size));
    SharedBuffer* sb = SharedBuffer::alloc(new_allocation_size);
    if (sb) {
        void* array = sb->data();
        // Copy existing items into the new buffer, then swap buffers.
        _do_copy(array, mStorage, size());
        release_storage();
        mStorage = const_cast<void*>(array);
    } else {
        return NO_MEMORY;
    }
    return new_capacity;
}
353
resize(size_t size)354 ssize_t VectorImpl::resize(size_t size) {
355 ssize_t result = OK;
356 if (size > mCount) {
357 result = insertAt(mCount, size - mCount);
358 } else if (size < mCount) {
359 result = removeItemsAt(size, mCount - size);
360 }
361 return result < 0 ? result : size;
362 }
363
// Drop our reference on the SharedBuffer. If we were the last owner
// (release returns 1), destroy the elements ourselves and free the buffer;
// eKeepStorage keeps the memory alive so _do_destroy can still run over it.
void VectorImpl::release_storage()
{
    if (mStorage) {
        const SharedBuffer* sb = SharedBuffer::bufferFromData(mStorage);
        if (sb->release(SharedBuffer::eKeepStorage) == 1) {
            _do_destroy(mStorage, mCount);
            SharedBuffer::dealloc(sb);
        }
    }
}
374
// Open a gap of |amount| uninitialized item slots at |where|, reallocating
// (with ~1.5x growth) when the capacity is insufficient. Returns a pointer
// to the gap, or nullptr on allocation failure. mCount is updated; the
// caller is responsible for constructing the new slots.
void* VectorImpl::_grow(size_t where, size_t amount)
{
//    ALOGV("_grow(this=%p, where=%d, amount=%d) count=%d, capacity=%d",
//        this, (int)where, (int)amount, (int)mCount, (int)capacity());

    ALOG_ASSERT(where <= mCount,
            "[%p] _grow: where=%d, amount=%d, count=%d",
            this, (int)where, (int)amount, (int)mCount); // caller already checked

    size_t new_size;
    LOG_ALWAYS_FATAL_IF(__builtin_add_overflow(mCount, amount, &new_size), "new_size overflow");

    if (capacity() < new_size) {
        // NOTE: This implementation used to resize vectors as per ((3*x + 1) / 2)
        // (sigh..). Also note, the " + 1" was necessary to handle the special case
        // where x == 1, where the resized_capacity will be equal to the old
        // capacity without the +1. The old calculation wouldn't work properly
        // if x was zero.
        //
        // This approximates the old calculation, using (x + (x/2) + 1) instead.
        size_t new_capacity = 0;
        LOG_ALWAYS_FATAL_IF(__builtin_add_overflow(new_size, (new_size / 2), &new_capacity),
                            "new_capacity overflow");
        LOG_ALWAYS_FATAL_IF(
                __builtin_add_overflow(new_capacity, static_cast<size_t>(1u), &new_capacity),
                "new_capacity overflow");
        new_capacity = max(kMinVectorCapacity, new_capacity);

        size_t new_alloc_size = 0;
        LOG_ALWAYS_FATAL_IF(__builtin_mul_overflow(new_capacity, mItemSize, &new_alloc_size),
                            "new_alloc_size overflow");

//        ALOGV("grow vector %p, new_capacity=%d", this, (int)new_capacity);
        // Fast path: appending trivially-copyable items lets us resize the
        // existing buffer in place (editResize handles the shared case).
        if ((mStorage) &&
            (mCount==where) &&
            (mFlags & HAS_TRIVIAL_COPY) &&
            (mFlags & HAS_TRIVIAL_DTOR))
        {
            const SharedBuffer* cur_sb = SharedBuffer::bufferFromData(mStorage);
            SharedBuffer* sb = cur_sb->editResize(new_alloc_size);
            if (sb) {
                mStorage = sb->data();
            } else {
                return nullptr;
            }
        } else {
            // Slow path: allocate a new buffer and copy the items around the
            // gap ([0, where) then [where, mCount) shifted by |amount|).
            SharedBuffer* sb = SharedBuffer::alloc(new_alloc_size);
            if (sb) {
                void* array = sb->data();
                if (where != 0) {
                    _do_copy(array, mStorage, where);
                }
                if (where != mCount) {
                    const void* from = reinterpret_cast<const uint8_t *>(mStorage) + where*mItemSize;
                    void* dest = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
                    _do_copy(dest, from, mCount-where);
                }
                release_storage();
                mStorage = const_cast<void*>(array);
            } else {
                return nullptr;
            }
        }
    } else {
        // Capacity is sufficient: shift the tail forward in place to open
        // the gap (copy-on-write via editArrayImpl first).
        void* array = editArrayImpl();
        if (where != mCount) {
            const void* from = reinterpret_cast<const uint8_t *>(array) + where*mItemSize;
            void* to = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
            _do_move_forward(to, from, mCount - where);
        }
    }
    mCount = new_size;
    void* free_space = const_cast<void*>(itemLocation(where));
    return free_space;
}
450
// Remove |amount| items starting at |where|, destroying them and closing
// the gap. Shrinks the allocation when usage drops below half of capacity
// (but never below kMinVectorCapacity). Allocation failure during shrink is
// silently ignored — the items then remain and mCount is not updated.
void VectorImpl::_shrink(size_t where, size_t amount)
{
    if (!mStorage)
        return;

//    ALOGV("_shrink(this=%p, where=%d, amount=%d) count=%d, capacity=%d",
//        this, (int)where, (int)amount, (int)mCount, (int)capacity());

    ALOG_ASSERT(where + amount <= mCount,
            "[%p] _shrink: where=%d, amount=%d, count=%d",
            this, (int)where, (int)amount, (int)mCount); // caller already checked

    size_t new_size;
    LOG_ALWAYS_FATAL_IF(__builtin_sub_overflow(mCount, amount, &new_size));

    const size_t prev_capacity = capacity();
    if (new_size < (prev_capacity / 2) && prev_capacity > kMinVectorCapacity) {
        // NOTE: (new_size * 2) is safe because capacity didn't overflow and
        // new_size < (capacity / 2)).
        const size_t new_capacity = max(kMinVectorCapacity, new_size * 2);

        // NOTE: (new_capacity * mItemSize), (where * mItemSize) and
        // ((where + amount) * mItemSize) beyond this point are safe because
        // we are always reducing the capacity of the underlying SharedBuffer.
        // In other words, (old_capacity * mItemSize) did not overflow, and
        // where < (where + amount) < new_capacity < old_capacity.
        // Fast path: removing trivially-destructible items from the tail
        // lets us resize the existing buffer in place.
        if ((where == new_size) &&
            (mFlags & HAS_TRIVIAL_COPY) &&
            (mFlags & HAS_TRIVIAL_DTOR))
        {
            const SharedBuffer* cur_sb = SharedBuffer::bufferFromData(mStorage);
            SharedBuffer* sb = cur_sb->editResize(new_capacity * mItemSize);
            if (sb) {
                mStorage = sb->data();
            } else {
                return;
            }
        } else {
            // Slow path: allocate a smaller buffer and copy the surviving
            // items ([0, where) and [where+amount, mCount)) into it.
            SharedBuffer* sb = SharedBuffer::alloc(new_capacity * mItemSize);
            if (sb) {
                void* array = sb->data();
                if (where != 0) {
                    _do_copy(array, mStorage, where);
                }
                if (where != new_size) {
                    const void* from = reinterpret_cast<const uint8_t *>(mStorage) + (where+amount)*mItemSize;
                    void* dest = reinterpret_cast<uint8_t *>(array) + where*mItemSize;
                    _do_copy(dest, from, new_size - where);
                }
                release_storage();
                mStorage = const_cast<void*>(array);
            } else{
                return;
            }
        }
    } else {
        // Keep the current buffer: destroy the removed range, then slide the
        // tail back over it (copy-on-write via editArrayImpl first).
        void* array = editArrayImpl();
        void* to = reinterpret_cast<uint8_t *>(array) + where*mItemSize;
        _do_destroy(to, amount);
        if (where != new_size) {
            const void* from = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
            _do_move_backward(to, from, new_size - where);
        }
    }
    mCount = new_size;
}
517
// Byte size of one element, as given at construction.
size_t VectorImpl::itemSize() const {
    return mItemSize;
}
521
// Default-construct |num| items in |storage|; skipped entirely for
// trivially-constructible element types.
void VectorImpl::_do_construct(void* storage, size_t num) const
{
    if (!(mFlags & HAS_TRIVIAL_CTOR)) {
        do_construct(storage, num);
    }
}
528
// Destroy |num| items in |storage|; skipped entirely for
// trivially-destructible element types.
void VectorImpl::_do_destroy(void* storage, size_t num) const
{
    if (!(mFlags & HAS_TRIVIAL_DTOR)) {
        do_destroy(storage, num);
    }
}
535
// Copy-construct |num| items from |from| into uninitialized |dest|;
// plain memcpy for trivially-copyable element types.
void VectorImpl::_do_copy(void* dest, const void* from, size_t num) const
{
    if (!(mFlags & HAS_TRIVIAL_COPY)) {
        do_copy(dest, from, num);
    } else {
        memcpy(dest, from, num*itemSize());
    }
}
544
// Initialize |num| items in |dest| from the single prototype |item|
// (delegated to the subclass, which knows the element type).
void VectorImpl::_do_splat(void* dest, const void* item, size_t num) const {
    do_splat(dest, item, num);
}
548
// Move |num| items to a higher address within the same buffer
// (overlapping ranges; subclass handles the element semantics).
void VectorImpl::_do_move_forward(void* dest, const void* from, size_t num) const {
    do_move_forward(dest, from, num);
}
552
// Move |num| items to a lower address within the same buffer
// (overlapping ranges; subclass handles the element semantics).
void VectorImpl::_do_move_backward(void* dest, const void* from, size_t num) const {
    do_move_backward(dest, from, num);
}
556
557 /*****************************************************************************/
558
// Construct an empty sorted vector; same storage machinery as VectorImpl.
SortedVectorImpl::SortedVectorImpl(size_t itemSize, uint32_t flags)
    : VectorImpl(itemSize, flags)
{
}
563
// Copy-construct from any VectorImpl (shares its storage; the caller is
// responsible for rhs actually being sorted).
SortedVectorImpl::SortedVectorImpl(const VectorImpl& rhs)
    : VectorImpl(rhs)
{
}
568
// Nothing to do: storage is released by the subclass via finish_vector().
SortedVectorImpl::~SortedVectorImpl()
{
}
572
// Assignment delegates to the base class (shared-storage copy).
SortedVectorImpl& SortedVectorImpl::operator = (const SortedVectorImpl& rhs)
{
    return static_cast<SortedVectorImpl&>( VectorImpl::operator = (static_cast<const VectorImpl&>(rhs)) );
}
577
// Binary-search for |item|; returns its index or NAME_NOT_FOUND.
ssize_t SortedVectorImpl::indexOf(const void* item) const
{
    return _indexOrderOf(item);
}
582
// Return the position where |item| is (or would be inserted to keep the
// vector sorted), regardless of whether it is present.
size_t SortedVectorImpl::orderOf(const void* item) const
{
    size_t o;
    _indexOrderOf(item, &o);
    return o;
}
589
// Binary search for |item| using the subclass's do_compare().
// Returns the item's index, or NAME_NOT_FOUND if absent. If |order| is
// non-null it receives the item's index when found, or the insertion
// position that keeps the vector sorted when not found.
ssize_t SortedVectorImpl::_indexOrderOf(const void* item, size_t* order) const
{
    if (order) *order = 0;
    if (isEmpty()) {
        return NAME_NOT_FOUND;
    }
    // binary search
    ssize_t err = NAME_NOT_FOUND;
    ssize_t l = 0;
    ssize_t h = size()-1;
    ssize_t mid;
    const void* a = arrayImpl();
    const size_t s = itemSize();
    while (l <= h) {
        mid = l + (h - l)/2;  // avoids (l+h) overflow
        const void* const curr = reinterpret_cast<const char *>(a) + (mid*s);
        const int c = do_compare(curr, item);
        if (c == 0) {
            // Found: l doubles as the |order| result below.
            err = l = mid;
            break;
        } else if (c < 0) {
            l = mid + 1;
        } else {
            h = mid - 1;
        }
    }
    // On miss, l has converged to the insertion point.
    if (order) *order = l;
    return err;
}
619
add(const void * item)620 ssize_t SortedVectorImpl::add(const void* item)
621 {
622 size_t order;
623 ssize_t index = _indexOrderOf(item, &order);
624 if (index < 0) {
625 index = VectorImpl::insertAt(item, order, 1);
626 } else {
627 index = VectorImpl::replaceAt(item, index);
628 }
629 return index;
630 }
631
merge(const VectorImpl & vector)632 ssize_t SortedVectorImpl::merge(const VectorImpl& vector)
633 {
634 // naive merge...
635 if (!vector.isEmpty()) {
636 const void* buffer = vector.arrayImpl();
637 const size_t is = itemSize();
638 size_t s = vector.size();
639 for (size_t i=0 ; i<s ; i++) {
640 ssize_t err = add( reinterpret_cast<const char*>(buffer) + i*is );
641 if (err<0) {
642 return err;
643 }
644 }
645 }
646 return OK;
647 }
648
merge(const SortedVectorImpl & vector)649 ssize_t SortedVectorImpl::merge(const SortedVectorImpl& vector)
650 {
651 // we've merging a sorted vector... nice!
652 ssize_t err = OK;
653 if (!vector.isEmpty()) {
654 // first take care of the case where the vectors are sorted together
655 if (do_compare(vector.itemLocation(vector.size()-1), arrayImpl()) <= 0) {
656 err = VectorImpl::insertVectorAt(static_cast<const VectorImpl&>(vector), 0);
657 } else if (do_compare(vector.arrayImpl(), itemLocation(size()-1)) >= 0) {
658 err = VectorImpl::appendVector(static_cast<const VectorImpl&>(vector));
659 } else {
660 // this could be made a little better
661 err = merge(static_cast<const VectorImpl&>(vector));
662 }
663 }
664 return err;
665 }
666
remove(const void * item)667 ssize_t SortedVectorImpl::remove(const void* item)
668 {
669 ssize_t i = indexOf(item);
670 if (i>=0) {
671 VectorImpl::removeItemsAt(i, 1);
672 }
673 return i;
674 }
675
676 /*****************************************************************************/
677
678 }; // namespace android
679