    do {
        transcribeToSideTable = false;
        oldisa = LoadExclusive(&isa.bits);
        newisa = oldisa;
        if (slowpath(!newisa.nonpointer)) {
            // raw-pointer isa: hand the retain off to the side table
            ClearExclusive(&isa.bits);
            if (rawISA()->isMetaClass()) return (id)this;
            if (!tryRetain && sideTableLocked) sidetable_unlock();
            if (tryRetain) return sidetable_tryRetain() ? (id)this : nil;
            else return sidetable_retain();
        }
        // don't check newisa.fast_rr; we already called any RR overrides
        if (slowpath(tryRetain && newisa.deallocating)) {
            ClearExclusive(&isa.bits);
            if (!tryRetain && sideTableLocked) sidetable_unlock();
            return nil;
        }
        uintptr_t carry;
        newisa.bits = addc(newisa.bits, RC_ONE, 0, &carry);  // extra_rc++

        if (slowpath(carry)) {
            // newisa.extra_rc++ overflowed
            if (!handleOverflow) {
                ClearExclusive(&isa.bits);
                return rootRetain_overflow(tryRetain);
            }
            // Leave half of the retain counts inline and
            // prepare to copy the other half to the side table.
            if (!tryRetain && !sideTableLocked) sidetable_lock();
            sideTableLocked = true;
            transcribeToSideTable = true;
            newisa.extra_rc = RC_HALF;
            newisa.has_sidetable_rc = true;
        }
    } while (slowpath(!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)));

    if (slowpath(transcribeToSideTable)) {
        // Copy the other half of the retain counts to the side table.
        sidetable_addExtraRC_nolock(RC_HALF);
    }

    if (slowpath(!tryRetain && sideTableLocked)) sidetable_unlock();
    return (id)this;
}
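The loop above simply increments extra_rc in the isa bits; only when addc reports a carry does half of the count (RC_HALF) stay inline while the other half is transcribed to the side table, so the total number of retains is preserved across the split. Below is a minimal standalone sketch of that split under an assumed 8-bit extra_rc field; the names and field width are illustrative, not the runtime's actual definitions.

#include <cstdint>
#include <cstdio>

// Toy model of the overflow split (illustrative names, not objc4 definitions).
// Assume an 8-bit extra_rc field, so RC_HALF is half of its range.
constexpr uint64_t kExtraRCBits = 8;
constexpr uint64_t kRCHalf      = 1ull << (kExtraRCBits - 1);    // stands in for RC_HALF
constexpr uint64_t kExtraRCMax  = (1ull << kExtraRCBits) - 1;

struct ToyObject {
    uint64_t extra_rc = 0;       // inline count of retains beyond the implicit 1
    uint64_t side_table_rc = 0;  // counts transcribed to the side table
    bool has_sidetable_rc = false;
};

// Mirrors the shape of rootRetain's overflow branch: keep RC_HALF inline,
// copy the other RC_HALF out of line.
void toy_retain(ToyObject &obj) {
    if (obj.extra_rc == kExtraRCMax) {   // where addc() would report a carry
        obj.extra_rc = kRCHalf;          // newisa.extra_rc = RC_HALF
        obj.has_sidetable_rc = true;     // newisa.has_sidetable_rc = true
        obj.side_table_rc += kRCHalf;    // sidetable_addExtraRC_nolock(RC_HALF)
    } else {
        obj.extra_rc += 1;               // the common fast path: extra_rc++
    }
}

int main() {
    ToyObject obj;
    for (int i = 0; i < 300; i++) toy_retain(obj);
    // The inline and side-table halves always add up to the retains performed.
    printf("inline=%llu side=%llu total=%llu\n",
           (unsigned long long)obj.extra_rc,
           (unsigned long long)obj.side_table_rc,
           (unsigned long long)(obj.extra_rc + obj.side_table_rc));
    return 0;
}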
struct SideTable {
    spinlock_t slock;
    RefcountMap refcnts;      // the reference count table
    weak_table_t weak_table;  // the weak reference table
    // methods ...
};

// RefcountMap disguises its pointers because we
// don't want the table to act as a root for `leaks`.
typedef objc::DenseMap<DisguisedPtr<objc_object>,size_t,RefcountMapValuePurgeable> RefcountMap;
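An object's counts live in one of several global SideTable instances; SideTables()[this] (used in sidetable_release further down) hashes the object's address to pick a table, so unrelated objects rarely contend on the same slock. The real runtime keeps the tables in a StripedMap<SideTable>; the following is only a sketch of that idea, with an illustrative stripe count, hash, and lock type.

#include <cstdint>
#include <cstddef>
#include <mutex>
#include <unordered_map>

// Sketch of the "many side tables, chosen by address" idea behind
// SideTables()[this]. Names, the stripe count, and the hash are illustrative;
// the real runtime uses a StripedMap<SideTable> guarded by spinlocks.
constexpr std::size_t kStripeCount = 8;

struct ToySideTable {
    std::mutex lock;                                         // stands in for slock
    std::unordered_map<const void *, std::size_t> refcnts;   // stands in for RefcountMap
};

static ToySideTable g_tables[kStripeCount];

ToySideTable &tableForObject(const void *obj) {
    // Reduce the address modulo the stripe count so that objects spread
    // across tables and lock contention stays low.
    std::uintptr_t addr = reinterpret_cast<std::uintptr_t>(obj);
    return g_tables[(addr >> 4) % kStripeCount];             // skip low alignment bits
}

// All refcount traffic for one object goes through its stripe's lock.
void toy_sidetable_retain(const void *obj) {
    ToySideTable &table = tableForObject(obj);
    std::lock_guard<std::mutex> guard(table.lock);
    table.refcnts[obj] += 1;
}

int main() {
    int a, b;
    toy_sidetable_retain(&a);
    toy_sidetable_retain(&b);
    toy_sidetable_retain(&a);
    return 0;
}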
class DenseMapBase {
    ...
    template <typename LookupKeyT>
    BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
                                  BucketT *TheBucket) {
        // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
        // the buckets are empty (meaning that many are filled with tombstones),
        // grow the table.
        //
        // The later case is tricky. For example, if we had one empty bucket with
        // tons of tombstones, failing lookups (e.g. for insertion) would have to
        // probe almost the entire table until it found the empty bucket. If the
        // table completely filled with tombstones, no lookup would ever succeed,
        // causing infinite loops in lookup.
        unsigned NewNumEntries = getNumEntries() + 1;
        unsigned NumBuckets = getNumBuckets();
        if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
            this->grow(NumBuckets * 2);  // grow: double the bucket count
            LookupBucketFor(Lookup, TheBucket);
            NumBuckets = getNumBuckets();
        } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <= NumBuckets/8)) {
            this->grow(NumBuckets);
            LookupBucketFor(Lookup, TheBucket);
        }
        ASSERT(TheBucket);

        // Only update the state after we've grown our bucket space appropriately
        // so that when growing buckets we have self-consistent entry count.
        // If we are writing over a tombstone or zero value, remember this.
        if (KeyInfoT::isEqual(TheBucket->getFirst(), getEmptyKey())) {
            // Replacing an empty bucket.
            incrementNumEntries();
        } else if (KeyInfoT::isEqual(TheBucket->getFirst(), getTombstoneKey())) {
            // Replacing a tombstone.
            incrementNumEntries();
            decrementNumTombstones();
        } else {
            // we should be purging a zero. No accounting changes.
            ASSERT(ValueInfoT::isPurgeable(TheBucket->getSecond()));
            TheBucket->getSecond().~ValueT();
        }
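The two LLVM_UNLIKELY branches above are the whole resizing policy: double the buckets once the load would reach 3/4, or rehash at the same size when tombstones have eaten into the free buckets so that fewer than 1/8 remain truly empty. A small sketch (a hypothetical helper, not LLVM API) that just evaluates those thresholds makes the arithmetic concrete.

#include <cstdio>

// Reproduces only the two trigger conditions from InsertIntoBucketImpl,
// with plain unsigned arithmetic. Hypothetical helper, not LLVM API.
enum class GrowAction { None, DoubleBuckets, RehashSameSize };

GrowAction growDecision(unsigned numEntries, unsigned numTombstones,
                        unsigned numBuckets) {
    unsigned newNumEntries = numEntries + 1;            // the entry being inserted
    if (newNumEntries * 4 >= numBuckets * 3)             // load would exceed 3/4
        return GrowAction::DoubleBuckets;                // grow(NumBuckets * 2)
    if (numBuckets - (newNumEntries + numTombstones) <= numBuckets / 8)
        return GrowAction::RehashSameSize;               // grow(NumBuckets): clears tombstones
    return GrowAction::None;
}

int main() {
    // 64 buckets, 47 live entries: 48*4 = 192 >= 64*3 = 192, so the table doubles.
    printf("%d\n", (int)growDecision(47, 0, 64));   // 1 == DoubleBuckets
    // 64 buckets, 20 live entries but 40 tombstones: only 3 empty buckets remain
    // (64 - (21 + 40) = 3 <= 64/8 = 8), so rehash at the same size.
    printf("%d\n", (int)growDecision(20, 40, 64));  // 2 == RehashSameSize
    return 0;
}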
retry:
    do {
        oldisa = LoadExclusive(&isa.bits);
        newisa = oldisa;
        if (slowpath(!newisa.nonpointer)) {
            ClearExclusive(&isa.bits);
            if (rawISA()->isMetaClass()) return false;
            if (sideTableLocked) sidetable_unlock();
            return sidetable_release(performDealloc);
        }
        // don't check newisa.fast_rr; we already called any RR overrides
        uintptr_t carry;
        newisa.bits = subc(newisa.bits, RC_ONE, 0, &carry);  // extra_rc--
        if (slowpath(carry)) {
            // don't ClearExclusive()
            goto underflow;
        }
    } while (slowpath(!StoreReleaseExclusive(&isa.bits, oldisa.bits, newisa.bits)));

    if (slowpath(sideTableLocked)) sidetable_unlock();
    return false;
underflow:
    // newisa.extra_rc-- underflowed: borrow from side table or deallocate

    // abandon newisa to undo the decrement
    newisa = oldisa;  // on underflow, leave the isa bits as they were

    if (slowpath(newisa.has_sidetable_rc)) {
        if (!handleUnderflow) {
            ClearExclusive(&isa.bits);
            return rootRelease_underflow(performDealloc);
        }

        // Transfer retain count from side table to inline storage.

        if (!sideTableLocked) {
            ClearExclusive(&isa.bits);
            sidetable_lock();
            sideTableLocked = true;
            // Need to start over to avoid a race against
            // the nonpointer -> raw pointer transition.
            goto retry;
        }

        // Try to remove some retain counts from the side table.
        size_t borrowed = sidetable_subExtraRC_nolock(RC_HALF);

        // To avoid races, has_sidetable_rc must remain set
        // even if the side table count is now zero.

        if (borrowed > 0) {
            // Side table retain count decreased.
            // Try to add them to the inline count.
            newisa.extra_rc = borrowed - 1;  // redo the original decrement too
            bool stored = StoreReleaseExclusive(&isa.bits, oldisa.bits, newisa.bits);
            if (!stored) {
                // Inline update failed.
                // Try it again right now. This prevents livelock on LL/SC
                // architectures where the side table access itself may have
                // dropped the reservation.
                isa_t oldisa2 = LoadExclusive(&isa.bits);
                isa_t newisa2 = oldisa2;
                if (newisa2.nonpointer) {
                    uintptr_t overflow;
                    newisa2.bits = addc(newisa2.bits, RC_ONE * (borrowed-1), 0, &overflow);
                    if (!overflow) {
                        stored = StoreReleaseExclusive(&isa.bits, oldisa2.bits, newisa2.bits);
                    }
                }
            }

            if (!stored) {
                // Inline update failed.
                // Put the retains back in the side table.
                sidetable_addExtraRC_nolock(borrowed);
                goto retry;
            }

            // Decrement successful after borrowing from side table.
            // This decrement cannot be the deallocating decrement - the side
            // table lock and has_sidetable_rc bit ensure that if everyone
            // else tried to -release while we worked, the last one would block.
            sidetable_unlock();
            return false;
        }
        else {
            // Side table is empty after all. Fall-through to the dealloc path.
        }
    }
    // Really deallocate.

    if (slowpath(newisa.deallocating)) {
        ClearExclusive(&isa.bits);
        if (sideTableLocked) sidetable_unlock();
        return overrelease_error();
        // does not actually return
    }
    newisa.deallocating = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;

    if (slowpath(sideTableLocked)) sidetable_unlock();

    __c11_atomic_thread_fence(__ATOMIC_ACQUIRE);

    if (performDealloc) {
        ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(dealloc));
    }
    return true;
}
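The underflow path mirrors the overflow path in rootRetain: when the inline count runs out, up to RC_HALF counts are borrowed back from the side table, and extra_rc is set to borrowed - 1 so the release that triggered the underflow is still accounted for. Below is a toy model of that bookkeeping, with illustrative names and an assumed RC_HALF value (not objc4 API), and without the locking and LL/SC retry machinery shown above.

#include <cstdint>
#include <cstdio>

// Toy model of the underflow path: when the inline count is exhausted, borrow
// up to RC_HALF counts back from the side table and redo the decrement against
// the borrowed amount. Names and the RC_HALF value are illustrative only.
constexpr uint64_t kRCHalf = 1ull << 7;   // assumed RC_HALF for an 8-bit extra_rc

struct ToyObject {
    uint64_t extra_rc = 0;        // retains beyond the implicit 1
    uint64_t side_table_rc = 0;
    bool has_sidetable_rc = false;
    bool deallocating = false;
};

// Returns true when this release is the one that deallocates the object.
bool toy_release(ToyObject &obj) {
    if (obj.extra_rc > 0) {
        obj.extra_rc -= 1;                    // fast path: extra_rc--
        return false;
    }
    if (obj.has_sidetable_rc && obj.side_table_rc > 0) {
        uint64_t borrowed =
            obj.side_table_rc < kRCHalf ? obj.side_table_rc : kRCHalf;
        obj.side_table_rc -= borrowed;        // sidetable_subExtraRC_nolock(RC_HALF)
        obj.extra_rc = borrowed - 1;          // redo the original decrement too
        return false;
    }
    obj.deallocating = true;                  // really deallocate
    return true;
}

int main() {
    ToyObject obj;
    obj.side_table_rc = 200;
    obj.has_sidetable_rc = true;
    // 200 side-table counts plus the implicit 1 allow 201 releases before dealloc.
    int releases = 0;
    while (!toy_release(obj)) releases++;
    printf("deallocated after %d non-final releases\n", releases);  // prints 200
    return 0;
}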
// rdar://20206767
// return uintptr_t instead of bool so that the various raw-isa
// -release paths all return zero in eax
uintptr_t
objc_object::sidetable_release(bool performDealloc)
{
#if SUPPORT_NONPOINTER_ISA
    ASSERT(!isa.nonpointer);
#endif
    SideTable& table = SideTables()[this];

    bool do_dealloc = false;

    table.lock();
    auto it = table.refcnts.try_emplace(this, SIDE_TABLE_DEALLOCATING);
    auto &refcnt = it.first->second;
    if (it.second) {
        do_dealloc = true;
    } else if (refcnt < SIDE_TABLE_DEALLOCATING) {
        // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
        do_dealloc = true;
        refcnt |= SIDE_TABLE_DEALLOCATING;
    } else if (! (refcnt & SIDE_TABLE_RC_PINNED)) {
        refcnt -= SIDE_TABLE_RC_ONE;
    }
    table.unlock();
    if (do_dealloc && performDealloc) {
        ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(dealloc));
    }
    return do_dealloc;
}
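The comparisons in sidetable_release only make sense given how the side-table value packs flags and count into one word: per the SIDE_TABLE_* constants in objc4's NSObject.mm, the weak-reference flag is bit 0, SIDE_TABLE_DEALLOCATING is bit 1, the count itself starts at bit 2 (SIDE_TABLE_RC_ONE), and the top bit (SIDE_TABLE_RC_PINNED) marks a count that overflowed and is frozen. A short sketch of that layout follows; the helper names are mine, not runtime API.

#include <cstdint>
#include <cstdio>

// Sketch of the word that sidetable_release manipulates. Bit 0 = weakly
// referenced, bit 1 = deallocating, bits 2.. = the stored count, top bit =
// pinned (count overflowed and is frozen). Helper names are illustrative.
using word_t = std::uintptr_t;
constexpr word_t kWeaklyReferenced = word_t{1} << 0;  // SIDE_TABLE_WEAKLY_REFERENCED
constexpr word_t kDeallocating     = word_t{1} << 1;  // SIDE_TABLE_DEALLOCATING
constexpr word_t kRCOne            = word_t{1} << 2;  // SIDE_TABLE_RC_ONE
constexpr word_t kRCPinned = word_t{1} << (sizeof(word_t) * 8 - 1);  // SIDE_TABLE_RC_PINNED

// Extract the logical retain count stored above the two flag bits.
constexpr word_t sideTableCount(word_t word) { return (word & ~kRCPinned) >> 2; }

int main() {
    // A word holding 3 extra retains on an object that is also weakly referenced.
    word_t word = 3 * kRCOne | kWeaklyReferenced;
    printf("count=%lu weak=%d deallocating=%d\n",
           (unsigned long)sideTableCount(word),
           (int)((word & kWeaklyReferenced) != 0),
           (int)((word & kDeallocating) != 0));

    // `refcnt < SIDE_TABLE_DEALLOCATING` in sidetable_release therefore means
    // "count is zero and not already deallocating": only the weak bit (bit 0)
    // can be set in a value smaller than 1 << 1.
    word_t zeroCount = kWeaklyReferenced;
    printf("zero and not deallocating: %d\n", (int)(zeroCount < kDeallocating));  // prints 1
    return 0;
}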