/*
** 2008 August 05
**
** The author disclaims copyright to this source code.  In place of
** a legal notice, here is a blessing:
**
**    May you do good and not evil.
**    May you find forgiveness for yourself and forgive others.
**    May you share freely, never taking more than you give.
**
*************************************************************************
** This file implements the page cache.
*/
#include "sqliteInt.h"

/*
** A complete page cache is an instance of this structure.  Every
** entry in the cache holds a single page of the database file.  The
** btree layer only operates on the cached copy of the database pages.
**
** A page cache entry is "clean" if it exactly matches what is currently
** on disk.  A page is "dirty" if it has been modified and needs to be
** persisted to disk.
**
** pDirty, pDirtyTail, pSynced:
**   All dirty pages are linked into the doubly linked list using
**   PgHdr.pDirtyNext and pDirtyPrev.  The list is maintained in LRU order
**   such that p was added to the list more recently than p->pDirtyNext.
**   PCache.pDirty points to the first (newest) element in the list and
**   pDirtyTail to the last (oldest).
**
**   The PCache.pSynced variable is used to optimize searching for a dirty
**   page to eject from the cache mid-transaction.  It is better to eject
**   a page that does not require a journal sync than one that does.
**   Therefore, pSynced is maintained so that it *almost* always points
**   to either the oldest page in the pDirty/pDirtyTail list that has a
**   clear PGHDR_NEED_SYNC flag or to a page that is older than this one
**   (so that the right page to eject can be found by following pDirtyPrev
**   pointers).
*/
struct PCache {
  PgHdr *pDirty, *pDirtyTail;   /* List of dirty pages in LRU order */
  PgHdr *pSynced;               /* Last synced page in dirty page list */
  int nRefSum;                  /* Sum of ref counts over all pages */
  int szCache;                  /* Configured cache size */
  int szSpill;                  /* Size before spilling occurs */
  int szPage;                   /* Size of every page in this cache */
  int szExtra;                  /* Size of extra space for each page */
  u8 bPurgeable;                /* True if pages are on backing store */
  u8 eCreate;                   /* eCreate value for xFetch() */
  int (*xStress)(void*,PgHdr*); /* Call to try to make a page clean */
  void *pStress;                /* Argument to xStress */
  sqlite3_pcache *pCache;       /* Pluggable cache module */
};
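
/*
** For example, if three (hypothetical) dirty pages P1, P2, and P3 are
** added to the dirty list in that order, the links look like this:
**
**     pDirty -> P3 <-> P2 <-> P1 <- pDirtyTail
**             (newest)          (oldest)
**
** Following pDirtyNext moves toward older entries; following pDirtyPrev
** moves toward newer ones.
*/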

/********************************** Test and Debug Logic **********************/
/*
** Debug tracing macros.  Enable by changing the "0" to "1" and
** recompiling.
**
** When sqlite3PcacheTrace is 1, single line trace messages are issued.
** When sqlite3PcacheTrace is 2, a dump of the pcache showing all cache entries
** is displayed for many operations, resulting in a lot of output.
*/
#if defined(SQLITE_DEBUG) && 0
  int sqlite3PcacheTrace = 2;       /* 0: off  1: simple  2: cache dumps */
  int sqlite3PcacheMxDump = 9999;   /* Max cache entries for pcacheDump() */
# define pcacheTrace(X) if(sqlite3PcacheTrace){sqlite3DebugPrintf X;}
  void pcacheDump(PCache *pCache){
    int N;
    int i, j;
    sqlite3_pcache_page *pLower;
    PgHdr *pPg;
    unsigned char *a;

    if( sqlite3PcacheTrace<2 ) return;
    if( pCache->pCache==0 ) return;
    N = sqlite3PcachePagecount(pCache);
    if( N>sqlite3PcacheMxDump ) N = sqlite3PcacheMxDump;
    for(i=1; i<=N; i++){
      pLower = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, i, 0);
      if( pLower==0 ) continue;
      pPg = (PgHdr*)pLower->pExtra;
      printf("%3d: nRef %2d flgs %02x data ", i, pPg->nRef, pPg->flags);
      a = (unsigned char *)pLower->pBuf;
      for(j=0; j<12; j++) printf("%02x", a[j]);
      printf("\n");
      if( pPg->pPage==0 ){
        sqlite3GlobalConfig.pcache2.xUnpin(pCache->pCache, pLower, 0);
      }
    }
  }
#else
# define pcacheTrace(X)
# define pcacheDump(X)
#endif

/*
** Check invariants on a PgHdr entry.  Return true if everything is OK.
** Return false if any invariant is violated.
**
** This routine is for use inside of assert() statements only.  For
** example:
**
**     assert( sqlite3PcachePageSanity(pPg) );
*/
#ifdef SQLITE_DEBUG
int sqlite3PcachePageSanity(PgHdr *pPg){
  PCache *pCache;
  assert( pPg!=0 );
  assert( pPg->pgno>0 || pPg->pPager==0 );   /* Page number is 1 or more */
  pCache = pPg->pCache;
  assert( pCache!=0 );       /* Every page has an associated PCache */
  if( pPg->flags & PGHDR_CLEAN ){
    assert( (pPg->flags & PGHDR_DIRTY)==0 );/* Cannot be both CLEAN and DIRTY */
    assert( pCache->pDirty!=pPg );          /* CLEAN pages not on dirty list */
    assert( pCache->pDirtyTail!=pPg );
  }
  /* WRITEABLE pages must also be DIRTY */
  if( pPg->flags & PGHDR_WRITEABLE ){
    assert( pPg->flags & PGHDR_DIRTY );     /* WRITEABLE implies DIRTY */
  }
  /* NEED_SYNC can be set independently of WRITEABLE.  This can happen,
  ** for example, when using the sqlite3PagerDontWrite() optimization:
  **    (1)  Page X is journalled, and gets WRITEABLE and NEED_SYNC.
  **    (2)  Page X moved to freelist, WRITEABLE is cleared
  **    (3)  Page X reused, WRITEABLE is set again
  ** If NEED_SYNC had been cleared in step 2, then it would not be reset
  ** in step 3, and the page might be written into the database without first
  ** syncing the rollback journal, which might cause corruption on a power
  ** loss.
  **
  ** Another example is when the database page size is smaller than the
  ** disk sector size.  When any page of a sector is journalled, all pages
  ** in that sector are marked NEED_SYNC even if they are still CLEAN, just
  ** in case they are later modified, since all pages in the same sector
  ** must be journalled and synced before any of those pages can be safely
  ** written.
  */
  return 1;
}
#endif /* SQLITE_DEBUG */


/********************************** Linked List Management ********************/

/* Allowed values for second argument to pcacheManageDirtyList() */
#define PCACHE_DIRTYLIST_REMOVE   1    /* Remove pPage from dirty list */
#define PCACHE_DIRTYLIST_ADD      2    /* Add pPage to the dirty list */
#define PCACHE_DIRTYLIST_FRONT    3    /* Move pPage to the front of the list */
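
/* Note that PCACHE_DIRTYLIST_FRONT is simply (REMOVE|ADD): the 0x01 bit
** unlinks the page and the 0x02 bit relinks it at the head of the list,
** which together move the page to the front.  Within this file, MakeDirty()
** uses ADD, MakeClean() and Drop() use REMOVE, and Release() and Move()
** use FRONT. */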

/*
** Manage pPage's participation on the dirty list.  Bits of the addRemove
** argument determine what operation to do.  The 0x01 bit means first
** remove pPage from the dirty list.  The 0x02 means add pPage back to
** the dirty list.  Doing both moves pPage to the front of the dirty list.
*/
static void pcacheManageDirtyList(PgHdr *pPage, u8 addRemove){
  PCache *p = pPage->pCache;

  pcacheTrace(("%p.DIRTYLIST.%s %d\n", p,
                addRemove==1 ? "REMOVE" : addRemove==2 ? "ADD" : "FRONT",
                pPage->pgno));
  if( addRemove & PCACHE_DIRTYLIST_REMOVE ){
    assert( pPage->pDirtyNext || pPage==p->pDirtyTail );
    assert( pPage->pDirtyPrev || pPage==p->pDirty );

    /* Update the PCache.pSynced variable if necessary. */
    if( p->pSynced==pPage ){
      p->pSynced = pPage->pDirtyPrev;
    }

    if( pPage->pDirtyNext ){
      pPage->pDirtyNext->pDirtyPrev = pPage->pDirtyPrev;
    }else{
      assert( pPage==p->pDirtyTail );
      p->pDirtyTail = pPage->pDirtyPrev;
    }
    if( pPage->pDirtyPrev ){
      pPage->pDirtyPrev->pDirtyNext = pPage->pDirtyNext;
    }else{
      /* If there are now no dirty pages in the cache, set eCreate to 2.
      ** This is an optimization that allows sqlite3PcacheFetch() to skip
      ** searching for a dirty page to eject from the cache when it might
      ** otherwise have to.  */
      assert( pPage==p->pDirty );
      p->pDirty = pPage->pDirtyNext;
      assert( p->bPurgeable || p->eCreate==2 );
      if( p->pDirty==0 ){         /*OPTIMIZATION-IF-TRUE*/
        assert( p->bPurgeable==0 || p->eCreate==1 );
        p->eCreate = 2;
      }
    }
  }
  if( addRemove & PCACHE_DIRTYLIST_ADD ){
    pPage->pDirtyPrev = 0;
    pPage->pDirtyNext = p->pDirty;
    if( pPage->pDirtyNext ){
      assert( pPage->pDirtyNext->pDirtyPrev==0 );
      pPage->pDirtyNext->pDirtyPrev = pPage;
    }else{
      p->pDirtyTail = pPage;
      if( p->bPurgeable ){
        assert( p->eCreate==2 );
        p->eCreate = 1;
      }
    }
    p->pDirty = pPage;

    /* If pSynced is NULL and this page has a clear NEED_SYNC flag, set
    ** pSynced to point to it.  Checking the NEED_SYNC flag is an
    ** optimization, as if pSynced points to a page with the NEED_SYNC
    ** flag set sqlite3PcacheFetchStress() searches through all newer
    ** entries of the dirty-list for a page with NEED_SYNC clear anyway.  */
    if( !p->pSynced
     && 0==(pPage->flags&PGHDR_NEED_SYNC)   /*OPTIMIZATION-IF-FALSE*/
    ){
      p->pSynced = pPage;
    }
  }
  pcacheDump(p);
}

/*
** Wrapper around the pluggable cache's xUnpin method.  If the cache is
** being used for an in-memory database, this function is a no-op.
*/
static void pcacheUnpin(PgHdr *p){
  if( p->pCache->bPurgeable ){
    pcacheTrace(("%p.UNPIN %d\n", p->pCache, p->pgno));
    sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 0);
    pcacheDump(p->pCache);
  }
}

/*
** Compute the number of pages of cache requested.  p->szCache is the
** cache size requested by the "PRAGMA cache_size" statement.
*/
static int numberOfCachePages(PCache *p){
  if( p->szCache>=0 ){
    /* IMPLEMENTATION-OF: R-42059-47211 If the argument N is positive then the
    ** suggested cache size is set to N. */
    return p->szCache;
  }else{
    /* IMPLEMENTATION-OF: R-59858-46238 If the argument N is negative, then the
    ** number of cache pages is adjusted to be a number of pages that would
    ** use approximately abs(N*1024) bytes of memory based on the current
    ** page size. */
    return (int)((-1024*(i64)p->szCache)/(p->szPage+p->szExtra));
  }
}
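
/* Worked example (illustrative figures only): with "PRAGMA cache_size=-2000",
** szCache is -2000, so the target is about 2000*1024 = 2048000 bytes of
** memory.  If szPage+szExtra happened to total 1200 bytes per page, the
** function above would return 2048000/1200 = 1706 pages. */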

/*************************************************** General Interfaces ******
**
** Initialize and shutdown the page cache subsystem.  Neither of these
** functions is threadsafe.
*/
int sqlite3PcacheInitialize(void){
  if( sqlite3GlobalConfig.pcache2.xInit==0 ){
    /* IMPLEMENTATION-OF: R-26801-64137 If the xInit() method is NULL, then the
    ** built-in default page cache is used instead of the application defined
    ** page cache. */
    sqlite3PCacheSetDefault();
    assert( sqlite3GlobalConfig.pcache2.xInit!=0 );
  }
  return sqlite3GlobalConfig.pcache2.xInit(sqlite3GlobalConfig.pcache2.pArg);
}
void sqlite3PcacheShutdown(void){
  if( sqlite3GlobalConfig.pcache2.xShutdown ){
    /* IMPLEMENTATION-OF: R-26000-56589 The xShutdown() method may be NULL. */
    sqlite3GlobalConfig.pcache2.xShutdown(sqlite3GlobalConfig.pcache2.pArg);
  }
}

/*
** Return the size in bytes of a PCache object.
*/
int sqlite3PcacheSize(void){ return sizeof(PCache); }

/*
** Create a new PCache object.  Storage space to hold the object
** has already been allocated and is passed in as the p pointer.
** The caller discovers how much space needs to be allocated by
** calling sqlite3PcacheSize().
**
** szExtra is some extra space allocated for each page.  The first
** 8 bytes of the extra space will be zeroed as the page is allocated,
** but remaining content will be uninitialized.  Though it is opaque
** to this module, the extra space really ends up being the MemPage
** structure in the pager.
*/
int sqlite3PcacheOpen(
  int szPage,                  /* Size of every page */
  int szExtra,                 /* Extra space associated with each page */
  int bPurgeable,              /* True if pages are on backing store */
  int (*xStress)(void*,PgHdr*),/* Call to try to make pages clean */
  void *pStress,               /* Argument to xStress */
  PCache *p                    /* Preallocated space for the PCache */
){
  memset(p, 0, sizeof(PCache));
  p->szPage = 1;
  p->szExtra = szExtra;
  assert( szExtra>=8 );  /* First 8 bytes will be zeroed */
  p->bPurgeable = bPurgeable;
  p->eCreate = 2;
  p->xStress = xStress;
  p->pStress = pStress;
  p->szCache = 100;
  p->szSpill = 1;
  pcacheTrace(("%p.OPEN szPage %d bPurgeable %d\n",p,szPage,bPurgeable));
  return sqlite3PcacheSetPageSize(p, szPage);
}

/*
** Change the page size for the PCache object.  The caller must ensure that
** there are no outstanding page references when this function is called.
*/
int sqlite3PcacheSetPageSize(PCache *pCache, int szPage){
  assert( pCache->nRefSum==0 && pCache->pDirty==0 );
  if( pCache->szPage ){
    sqlite3_pcache *pNew;
    pNew = sqlite3GlobalConfig.pcache2.xCreate(
                szPage, pCache->szExtra + ROUND8(sizeof(PgHdr)),
                pCache->bPurgeable
    );
    if( pNew==0 ) return SQLITE_NOMEM_BKPT;
    sqlite3GlobalConfig.pcache2.xCachesize(pNew, numberOfCachePages(pCache));
    if( pCache->pCache ){
      sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache);
    }
    pCache->pCache = pNew;
    pCache->szPage = szPage;
    pcacheTrace(("%p.PAGESIZE %d\n",pCache,szPage));
  }
  return SQLITE_OK;
}
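
/* Illustrative sketch (not the pager's actual code): a caller allocates
** sqlite3PcacheSize() bytes and then initializes the object, e.g.
**
**     PCache *pCache = (PCache*)sqlite3MallocZero( sqlite3PcacheSize() );
**     if( pCache ){
**       rc = sqlite3PcacheOpen(szPage, szExtra, 1, xStress, pStress, pCache);
**     }
**
** where szPage, szExtra, xStress and pStress stand in for whatever values
** the caller actually uses.  sqlite3PcacheOpen() finishes by calling
** sqlite3PcacheSetPageSize(), which creates the underlying pluggable cache.
*/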

/*
** Try to obtain a page from the cache.
**
** This routine returns a pointer to an sqlite3_pcache_page object if
** such an object is already in cache, or if a new one is created.
** This routine returns a NULL pointer if the object was not in cache
** and could not be created.
**
** The createFlag should be 0 to check for existing pages and should
** be 3 (not 1, but 3) to try to create a new page.
**
** If the createFlag is 0, then NULL is always returned if the page
** is not already in the cache.  If createFlag is 1, then a new page
** is created only if that can be done without spilling dirty pages
** and without exceeding the cache size limit.
**
** The caller needs to invoke sqlite3PcacheFetchFinish() to properly
** initialize the sqlite3_pcache_page object and convert it into a
** PgHdr object.  The sqlite3PcacheFetch() and sqlite3PcacheFetchFinish()
** routines are split this way for performance reasons.  When separated
** they can both (usually) operate without having to push values to
** the stack on entry and pop them back off on exit, which saves a
** lot of pushing and popping.
*/
sqlite3_pcache_page *sqlite3PcacheFetch(
  PCache *pCache,       /* Obtain the page from this cache */
  Pgno pgno,            /* Page number to obtain */
  int createFlag        /* If true, create page if it does not exist already */
){
  int eCreate;
  sqlite3_pcache_page *pRes;

  assert( pCache!=0 );
  assert( pCache->pCache!=0 );
  assert( createFlag==3 || createFlag==0 );
  assert( pCache->eCreate==((pCache->bPurgeable && pCache->pDirty) ? 1 : 2) );

  /* eCreate defines what to do if the page does not exist.
  **    0     Do not allocate a new page.  (createFlag==0)
  **    1     Allocate a new page if doing so is inexpensive.
  **          (createFlag==1 AND bPurgeable AND pDirty)
  **    2     Allocate a new page even if doing so is difficult.
  **          (createFlag==1 AND !(bPurgeable AND pDirty))
  */
  eCreate = createFlag & pCache->eCreate;
  assert( eCreate==0 || eCreate==1 || eCreate==2 );
  assert( createFlag==0 || pCache->eCreate==eCreate );
  assert( createFlag==0 || eCreate==1+(!pCache->bPurgeable||!pCache->pDirty) );
  pRes = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate);
  pcacheTrace(("%p.FETCH %d%s (result: %p)\n",pCache,pgno,
               createFlag?" create":"",pRes));
  return pRes;
}
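
/* Why createFlag must be 3 rather than 1:  eCreate above is computed as
** (createFlag & pCache->eCreate), and pCache->eCreate is always 1 or 2.
** With createFlag==3 the AND preserves that value (3&1==1, 3&2==2), while
** createFlag==0 always yields 0.  A createFlag of 1 would incorrectly
** turn an eCreate of 2 into 0. */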

/*
** If the sqlite3PcacheFetch() routine is unable to allocate a new
** page because no clean pages are available for reuse and the cache
** size limit has been reached, then this routine can be invoked to
** try harder to allocate a page.  This routine might invoke the stress
** callback to spill dirty pages to the journal.  It will then try to
** allocate the new page and will only fail to allocate a new page on
** an OOM error.
**
** This routine should be invoked only after sqlite3PcacheFetch() fails.
*/
int sqlite3PcacheFetchStress(
  PCache *pCache,                 /* Obtain the page from this cache */
  Pgno pgno,                      /* Page number to obtain */
  sqlite3_pcache_page **ppPage    /* Write result here */
){
  PgHdr *pPg;
  if( pCache->eCreate==2 ) return 0;

  if( sqlite3PcachePagecount(pCache)>pCache->szSpill ){
    /* Find a dirty page to write-out and recycle.  First try to find a
    ** page that does not require a journal-sync (one with PGHDR_NEED_SYNC
    ** cleared), but if that is not possible settle for any other
    ** unreferenced dirty page.
    **
    ** If the LRU page in the dirty list that has a clear PGHDR_NEED_SYNC
    ** flag is currently referenced, then the following may leave pSynced
    ** set incorrectly (pointing to other than the LRU page with NEED_SYNC
    ** cleared).  This is Ok, as pSynced is just an optimization.  */
    for(pPg=pCache->pSynced;
        pPg && (pPg->nRef || (pPg->flags&PGHDR_NEED_SYNC));
        pPg=pPg->pDirtyPrev
    );
    pCache->pSynced = pPg;
    if( !pPg ){
      for(pPg=pCache->pDirtyTail; pPg && pPg->nRef; pPg=pPg->pDirtyPrev);
    }
    if( pPg ){
      int rc;
#ifdef SQLITE_LOG_CACHE_SPILL
      sqlite3_log(SQLITE_FULL,
                  "spill page %d making room for %d - cache used: %d/%d",
                  pPg->pgno, pgno,
                  sqlite3GlobalConfig.pcache2.xPagecount(pCache->pCache),
                  numberOfCachePages(pCache));
#endif
      pcacheTrace(("%p.SPILL %d\n",pCache,pPg->pgno));
      rc = pCache->xStress(pCache->pStress, pPg);
      pcacheDump(pCache);
      if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){
        return rc;
      }
    }
  }
  *ppPage = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, 2);
  return *ppPage==0 ? SQLITE_NOMEM_BKPT : SQLITE_OK;
}

/*
** This is a helper routine for sqlite3PcacheFetchFinish()
**
** In the uncommon case where the page being fetched has not been
** initialized, this routine is invoked to do the initialization.
** This routine is broken out into a separate function since it
** requires extra stack manipulation that can be avoided in the common
** case.
*/
static SQLITE_NOINLINE PgHdr *pcacheFetchFinishWithInit(
  PCache *pCache,             /* Obtain the page from this cache */
  Pgno pgno,                  /* Page number obtained */
  sqlite3_pcache_page *pPage  /* Page obtained by prior PcacheFetch() call */
){
  PgHdr *pPgHdr;
  assert( pPage!=0 );
  pPgHdr = (PgHdr*)pPage->pExtra;
  assert( pPgHdr->pPage==0 );
  memset(&pPgHdr->pDirty, 0, sizeof(PgHdr) - offsetof(PgHdr,pDirty));
  pPgHdr->pPage = pPage;
  pPgHdr->pData = pPage->pBuf;
  pPgHdr->pExtra = (void *)&pPgHdr[1];
  memset(pPgHdr->pExtra, 0, 8);
  pPgHdr->pCache = pCache;
  pPgHdr->pgno = pgno;
  pPgHdr->flags = PGHDR_CLEAN;
  return sqlite3PcacheFetchFinish(pCache,pgno,pPage);
}

/*
** This routine converts the sqlite3_pcache_page object returned by
** sqlite3PcacheFetch() into an initialized PgHdr object.  This routine
** must be called after sqlite3PcacheFetch() in order to get a usable
** result.
*/
PgHdr *sqlite3PcacheFetchFinish(
  PCache *pCache,             /* Obtain the page from this cache */
  Pgno pgno,                  /* Page number obtained */
  sqlite3_pcache_page *pPage  /* Page obtained by prior PcacheFetch() call */
){
  PgHdr *pPgHdr;

  assert( pPage!=0 );
  pPgHdr = (PgHdr *)pPage->pExtra;

  if( !pPgHdr->pPage ){
    return pcacheFetchFinishWithInit(pCache, pgno, pPage);
  }
  pCache->nRefSum++;
  pPgHdr->nRef++;
  assert( sqlite3PcachePageSanity(pPgHdr) );
  return pPgHdr;
}
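
/* Illustrative sketch only (error handling trimmed; not the pager's exact
** logic): the three fetch routines above are normally used together,
** roughly like this:
**
**     sqlite3_pcache_page *pBase;
**     PgHdr *pPg;
**     pBase = sqlite3PcacheFetch(pCache, pgno, 3);
**     if( pBase==0 ){
**       rc = sqlite3PcacheFetchStress(pCache, pgno, &pBase);
**       if( pBase==0 ) return rc;
**     }
**     pPg = sqlite3PcacheFetchFinish(pCache, pgno, pBase);
*/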

/*
** Decrement the reference count on a page.  If the page is clean and the
** reference count drops to 0, then it is made eligible for recycling.
*/
void SQLITE_NOINLINE sqlite3PcacheRelease(PgHdr *p){
  assert( p->nRef>0 );
  p->pCache->nRefSum--;
  if( (--p->nRef)==0 ){
    if( p->flags&PGHDR_CLEAN ){
      pcacheUnpin(p);
    }else{
      pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT);
    }
  }
}

/*
** Increase the reference count of a supplied page by 1.
*/
void sqlite3PcacheRef(PgHdr *p){
  assert( p->nRef>0 );
  assert( sqlite3PcachePageSanity(p) );
  p->nRef++;
  p->pCache->nRefSum++;
}

/*
** Drop a page from the cache.  There must be exactly one reference to the
** page.  This function deletes that reference, so after it returns the
** page pointed to by p is invalid.
*/
void sqlite3PcacheDrop(PgHdr *p){
  assert( p->nRef==1 );
  assert( sqlite3PcachePageSanity(p) );
  if( p->flags&PGHDR_DIRTY ){
    pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE);
  }
  p->pCache->nRefSum--;
  sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 1);
}

/*
** Make sure the page is marked as dirty.  If it isn't dirty already,
** make it so.
*/
void sqlite3PcacheMakeDirty(PgHdr *p){
  assert( p->nRef>0 );
  assert( sqlite3PcachePageSanity(p) );
  if( p->flags & (PGHDR_CLEAN|PGHDR_DONT_WRITE) ){    /*OPTIMIZATION-IF-FALSE*/
    p->flags &= ~PGHDR_DONT_WRITE;
    if( p->flags & PGHDR_CLEAN ){
      p->flags ^= (PGHDR_DIRTY|PGHDR_CLEAN);
      pcacheTrace(("%p.DIRTY %d\n",p->pCache,p->pgno));
      assert( (p->flags & (PGHDR_DIRTY|PGHDR_CLEAN))==PGHDR_DIRTY );
      pcacheManageDirtyList(p, PCACHE_DIRTYLIST_ADD);
    }
    assert( sqlite3PcachePageSanity(p) );
  }
}

/*
** Make sure the page is marked as clean.  If it isn't clean already,
** make it so.
*/
void sqlite3PcacheMakeClean(PgHdr *p){
  assert( sqlite3PcachePageSanity(p) );
  assert( (p->flags & PGHDR_DIRTY)!=0 );
  assert( (p->flags & PGHDR_CLEAN)==0 );
  pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE);
  p->flags &= ~(PGHDR_DIRTY|PGHDR_NEED_SYNC|PGHDR_WRITEABLE);
  p->flags |= PGHDR_CLEAN;
  pcacheTrace(("%p.CLEAN %d\n",p->pCache,p->pgno));
  assert( sqlite3PcachePageSanity(p) );
  if( p->nRef==0 ){
    pcacheUnpin(p);
  }
}
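
/* Summary of the CLEAN/DIRTY transitions implemented above: a freshly
** initialized page starts out with only PGHDR_CLEAN set (see
** pcacheFetchFinishWithInit()).  sqlite3PcacheMakeDirty() swaps CLEAN for
** DIRTY and links the page into the dirty list; sqlite3PcacheMakeClean()
** reverses that, also clearing NEED_SYNC and WRITEABLE, and unpins the
** page if it is no longer referenced. */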

/*
** Make every page in the cache clean.
*/
void sqlite3PcacheCleanAll(PCache *pCache){
  PgHdr *p;
  pcacheTrace(("%p.CLEAN-ALL\n",pCache));
  while( (p = pCache->pDirty)!=0 ){
    sqlite3PcacheMakeClean(p);
  }
}

/*
** Clear the PGHDR_NEED_SYNC and PGHDR_WRITEABLE flag from all dirty pages.
*/
void sqlite3PcacheClearWritable(PCache *pCache){
  PgHdr *p;
  pcacheTrace(("%p.CLEAR-WRITEABLE\n",pCache));
  for(p=pCache->pDirty; p; p=p->pDirtyNext){
    p->flags &= ~(PGHDR_NEED_SYNC|PGHDR_WRITEABLE);
  }
  pCache->pSynced = pCache->pDirtyTail;
}

/*
** Clear the PGHDR_NEED_SYNC flag from all dirty pages.
*/
void sqlite3PcacheClearSyncFlags(PCache *pCache){
  PgHdr *p;
  for(p=pCache->pDirty; p; p=p->pDirtyNext){
    p->flags &= ~PGHDR_NEED_SYNC;
  }
  pCache->pSynced = pCache->pDirtyTail;
}

/*
** Change the page number of page p to newPgno.
*/
void sqlite3PcacheMove(PgHdr *p, Pgno newPgno){
  PCache *pCache = p->pCache;
  assert( p->nRef>0 );
  assert( newPgno>0 );
  assert( sqlite3PcachePageSanity(p) );
  pcacheTrace(("%p.MOVE %d -> %d\n",pCache,p->pgno,newPgno));
  sqlite3GlobalConfig.pcache2.xRekey(pCache->pCache, p->pPage, p->pgno,newPgno);
  p->pgno = newPgno;
  if( (p->flags&PGHDR_DIRTY) && (p->flags&PGHDR_NEED_SYNC) ){
    pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT);
  }
}

/*
** Drop every cache entry whose page number is greater than "pgno".  The
** caller must ensure that there are no outstanding references to any pages
** other than page 1 with a page number greater than pgno.
**
** If there is a reference to page 1 and the pgno parameter passed to this
** function is 0, then the data area associated with page 1 is zeroed, but
** the page object is not dropped.
*/
void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){
  if( pCache->pCache ){
    PgHdr *p;
    PgHdr *pNext;
    pcacheTrace(("%p.TRUNCATE %d\n",pCache,pgno));
    for(p=pCache->pDirty; p; p=pNext){
      pNext = p->pDirtyNext;
      /* This routine never gets called with a positive pgno except right
      ** after sqlite3PcacheCleanAll().  So if there are dirty pages,
      ** it must be that pgno==0.
      */
      assert( p->pgno>0 );
      if( p->pgno>pgno ){
        assert( p->flags&PGHDR_DIRTY );
        sqlite3PcacheMakeClean(p);
      }
    }
    if( pgno==0 && pCache->nRefSum ){
      sqlite3_pcache_page *pPage1;
      pPage1 = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache,1,0);
      if( ALWAYS(pPage1) ){  /* Page 1 is always available in cache, because
                             ** pCache->nRefSum>0 */
        memset(pPage1->pBuf, 0, pCache->szPage);
        pgno = 1;
      }
    }
    sqlite3GlobalConfig.pcache2.xTruncate(pCache->pCache, pgno+1);
  }
}

/*
** Close a cache.
*/
void sqlite3PcacheClose(PCache *pCache){
  assert( pCache->pCache!=0 );
  pcacheTrace(("%p.CLOSE\n",pCache));
  sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache);
}

/*
** Discard the contents of the cache.
*/
void sqlite3PcacheClear(PCache *pCache){
  sqlite3PcacheTruncate(pCache, 0);
}

/*
** Merge two lists of pages connected by pDirty and in pgno order.
** Do not bother fixing the pDirtyPrev pointers.
*/
static PgHdr *pcacheMergeDirtyList(PgHdr *pA, PgHdr *pB){
  PgHdr result, *pTail;
  pTail = &result;
  assert( pA!=0 && pB!=0 );
  for(;;){
    if( pA->pgno<pB->pgno ){
      pTail->pDirty = pA;
      pTail = pA;
      pA = pA->pDirty;
      if( pA==0 ){
        pTail->pDirty = pB;
        break;
      }
    }else{
      pTail->pDirty = pB;
      pTail = pB;
      pB = pB->pDirty;
      if( pB==0 ){
        pTail->pDirty = pA;
        break;
      }
    }
  }
  return result.pDirty;
}

/*
** Sort the list of pages in ascending order by pgno.  Pages are
** connected by pDirty pointers.  The pDirtyPrev pointers are
** corrupted by this sort.
**
** Since there cannot be more than 2^31 distinct pages in a database,
** there cannot be more than 31 buckets required by the merge sorter.
** One extra bucket is added to catch overflow in case something
** ever changes to make the previous sentence incorrect.
*/
#define N_SORT_BUCKET  32
static PgHdr *pcacheSortDirtyList(PgHdr *pIn){
  PgHdr *a[N_SORT_BUCKET], *p;
  int i;
  memset(a, 0, sizeof(a));
  while( pIn ){
    p = pIn;
    pIn = p->pDirty;
    p->pDirty = 0;
    for(i=0; ALWAYS(i<N_SORT_BUCKET-1); i++){
      if( a[i]==0 ){
        a[i] = p;
        break;
      }else{
        p = pcacheMergeDirtyList(a[i], p);
        a[i] = 0;
      }
    }
    if( NEVER(i==N_SORT_BUCKET-1) ){
      /* To get here, there need to be 2^(N_SORT_BUCKET) elements in
      ** the input list.  But that is impossible.
      */
      a[i] = pcacheMergeDirtyList(a[i], p);
    }
  }
  p = a[0];
  for(i=1; i<N_SORT_BUCKET; i++){
    if( a[i]==0 ) continue;
    p = p ? pcacheMergeDirtyList(p, a[i]) : a[i];
  }
  return p;
}
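
/* How the sort above works (illustrative walk-through): a[i] is either NULL
** or holds a sorted list of exactly 2^i pages.  Each incoming page is merged
** upward until it finds an empty bucket, much like binary addition with
** carries.  For example, after feeding in 4 pages, a[0] and a[1] are NULL
** and a[2] holds all 4 pages in sorted order.  The final loop merges the
** surviving buckets into a single sorted list. */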

/*
** Return a list of all dirty pages in the cache, sorted by page number.
*/
PgHdr *sqlite3PcacheDirtyList(PCache *pCache){
  PgHdr *p;
  for(p=pCache->pDirty; p; p=p->pDirtyNext){
    p->pDirty = p->pDirtyNext;
  }
  return pcacheSortDirtyList(pCache->pDirty);
}

/*
** Return the total number of references to all pages held by the cache.
**
** This is not the total number of pages referenced, but the sum of the
** reference count for all pages.
*/
int sqlite3PcacheRefCount(PCache *pCache){
  return pCache->nRefSum;
}

/*
** Return the number of references to the page supplied as an argument.
*/
int sqlite3PcachePageRefcount(PgHdr *p){
  return p->nRef;
}

/*
** Return the total number of pages in the cache.
*/
int sqlite3PcachePagecount(PCache *pCache){
  assert( pCache->pCache!=0 );
  return sqlite3GlobalConfig.pcache2.xPagecount(pCache->pCache);
}

#ifdef SQLITE_TEST
/*
** Get the suggested cache-size value.
*/
int sqlite3PcacheGetCachesize(PCache *pCache){
  return numberOfCachePages(pCache);
}
#endif

/*
** Set the suggested cache-size value.
*/
void sqlite3PcacheSetCachesize(PCache *pCache, int mxPage){
  assert( pCache->pCache!=0 );
  pCache->szCache = mxPage;
  sqlite3GlobalConfig.pcache2.xCachesize(pCache->pCache,
                                         numberOfCachePages(pCache));
}

/*
** Set the suggested cache-spill value.  Make no changes if the
** argument is zero.  Return the effective cache-spill size, which will
** be the larger of szSpill and szCache.
*/
int sqlite3PcacheSetSpillsize(PCache *p, int mxPage){
  int res;
  assert( p->pCache!=0 );
  if( mxPage ){
    if( mxPage<0 ){
      mxPage = (int)((-1024*(i64)mxPage)/(p->szPage+p->szExtra));
    }
    p->szSpill = mxPage;
  }
  res = numberOfCachePages(p);
  if( res<p->szSpill ) res = p->szSpill;
  return res;
}

/*
** Free up as much memory as possible from the page cache.
*/
void sqlite3PcacheShrink(PCache *pCache){
  assert( pCache->pCache!=0 );
  sqlite3GlobalConfig.pcache2.xShrink(pCache->pCache);
}

/*
** Return the size of the header added by this middleware layer
** in the page-cache hierarchy.
*/
int sqlite3HeaderSizePcache(void){ return ROUND8(sizeof(PgHdr)); }

/*
** Return the number of dirty pages currently in the cache, as a percentage
** of the configured cache size.
*/
int sqlite3PCachePercentDirty(PCache *pCache){
  PgHdr *pDirty;
  int nDirty = 0;
  int nCache = numberOfCachePages(pCache);
  for(pDirty=pCache->pDirty; pDirty; pDirty=pDirty->pDirtyNext) nDirty++;
  return nCache ? (int)(((i64)nDirty * 100) / nCache) : 0;
}
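
/* For example, with 25 dirty pages and a configured cache size of 100 pages
** the function above returns 25.  Note that the result is not capped at 100:
** if more pages are dirty than the configured cache size, the returned
** percentage exceeds 100. */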

#ifdef SQLITE_DIRECT_OVERFLOW_READ
/*
** Return true if there are one or more dirty pages in the cache.  Else false.
*/
int sqlite3PCacheIsDirty(PCache *pCache){
  return (pCache->pDirty!=0);
}
#endif

#if defined(SQLITE_CHECK_PAGES) || defined(SQLITE_DEBUG)
/*
** For all dirty pages currently in the cache, invoke the specified
** callback.  This is only used if the SQLITE_CHECK_PAGES or SQLITE_DEBUG
** macros are defined.
*/
void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *)){
  PgHdr *pDirty;
  for(pDirty=pCache->pDirty; pDirty; pDirty=pDirty->pDirtyNext){
    xIter(pDirty);
  }
}
#endif