##// END OF EJS Templates
rust-cpython: allow mutation unless leaked reference is borrowed
Author: Yuya Nishihara
Changeset r43606:ed50f2c3 on branch "default"
parent / child / Browse files (page navigation)
Show More
@@ -1,1835 +1,1828 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from .pycompat import delattr
19 19
20 20 from hgdemandimport import tracing
21 21
22 22 from . import (
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 txnutil,
31 31 util,
32 32 )
33 33
34 34 from .interfaces import (
35 35 dirstate as intdirstate,
36 36 util as interfaceutil,
37 37 )
38 38
# Accelerated implementations: importmod/importrust pick the C or Rust
# extension (or pure-Python fallback) according to the active policy.
parsers = policy.importmod(r'parsers')
rustmod = policy.importrust(r'dirstate')

propertycache = util.propertycache
filecache = scmutil.filecache
# Mask used to clamp size/mtime values into the range the on-disk
# dirstate format can represent.
_rangemask = 0x7FFFFFFF

# Entry tuple type for a single dirstate record.
dirstatetuple = parsers.dirstatetuple
47 47
48 48
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # Resolve fname through the dirstate's opener (rooted at .hg/).
        return obj._opener.join(fname)
54 54
55 55
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # Resolve fname relative to the repository root.
        return obj._join(fname)
61 61
62 62
63 63 def _getfsnow(vfs):
64 64 '''Get "now" timestamp on filesystem'''
65 65 tmpfd, tmpname = vfs.mkstemp()
66 66 try:
67 67 return os.fstat(tmpfd)[stat.ST_MTIME]
68 68 finally:
69 69 os.close(tmpfd)
70 70 vfs.unlink(tmpname)
71 71
72 72
73 73 @interfaceutil.implementer(intdirstate.idirstate)
74 74 class dirstate(object):
    def __init__(self, opener, ui, root, validate, sparsematchfn):
        '''Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        '''
        self._opener = opener
        self._validate = validate  # node validator used by parents()/p1()/p2()
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        self._dirty = False  # in-memory state differs from what is on disk
        self._lastnormaltime = 0  # newest mtime seen by normal()
        self._ui = ui
        self._filecache = {}
        self._parentwriters = 0  # nesting depth of parentchange() contexts
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}  # category -> parent-change callback
        self._origpl = None  # parents before the first setparents() call
        self._updatedfiles = set()  # files touched since the last write
        self._mapcls = dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
104 104
    @contextlib.contextmanager
    def parentchange(self):
        '''Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        '''
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
121 121
122 122 def pendingparentchange(self):
123 123 '''Returns true if the dirstate is in the middle of a set of changes
124 124 that modify the dirstate parent.
125 125 '''
126 126 return self._parentwriters > 0
127 127
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # Assigning the attribute replaces this propertycache on the
        # instance, so later accesses hit the plain attribute directly.
        self._map = self._mapcls(self._ui, self._opener, self._root)
        return self._map
133 133
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
146 146
    @repocache(b'branch')
    def _branch(self):
        # A missing .hg/branch file means the default branch; any other
        # read error is re-raised.
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return b"default"
155 155
    @property
    def _pl(self):
        # Raw (unvalidated) working-directory parents straight from the map.
        return self._map.parents()
159 159
    def hasdir(self, d):
        # Delegate to the map's tracked-directory index.
        return self._map.hastrackeddir(d)
162 162
    @rootcache(b'.hgignore')
    def _ignore(self):
        # Combine every ignore file (.hgignore plus ui.ignore* entries)
        # into a single matcher via include: patterns.
        files = self._ignorefiles()
        if not files:
            return matchmod.never()

        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
171 171
    @propertycache
    def _slash(self):
        # Display paths with '/' when ui.slash is set and the native
        # separator is something else.
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
175 175
    @propertycache
    def _checklink(self):
        # Does the filesystem under the repo root support symlinks?
        return util.checklink(self._root)
179 179
    @propertycache
    def _checkexec(self):
        # Does the filesystem under the repo root honor the exec bit?
        return util.checkexec(self._root)
183 183
    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (probed against .hg).
        return not util.fscasesensitive(self._join(b'.hg'))
187 187
188 188 def _join(self, f):
189 189 # much faster than os.path.join()
190 190 # it's safe because f is always a relative path
191 191 return self._rootdir + f
192 192
    def flagfunc(self, buildfallback):
        '''Return a function mapping a filename to its b'l' (symlink) or
        b'x' (executable) flag, consulting the filesystem where it is
        reliable and the ``buildfallback()`` result otherwise.'''
        if self._checklink and self._checkexec:
            # Both flags are reliable here: a single lstat answers both.
            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # Only symlinks are reliable; ask the fallback about 'x'.
            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # Only the exec bit is reliable; ask the fallback about 'l'.
            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # Neither flag is reliable: defer entirely to the fallback.
            return fallback
232 232
233 233 @propertycache
234 234 def _cwd(self):
235 235 # internal config: ui.forcecwd
236 236 forcecwd = self._ui.config(b'ui', b'forcecwd')
237 237 if forcecwd:
238 238 return forcecwd
239 239 return encoding.getcwd()
240 240
    def getcwd(self):
        '''Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        '''
        cwd = self._cwd
        if cwd == self._root:
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            # inside the repo: return the path relative to the root
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd
260 260
261 261 def pathto(self, f, cwd=None):
262 262 if cwd is None:
263 263 cwd = self.getcwd()
264 264 path = util.pathto(self._root, cwd, f)
265 265 if self._slash:
266 266 return util.pconvert(path)
267 267 return path
268 268
269 269 def __getitem__(self, key):
270 270 '''Return the current state of key (a filename) in the dirstate.
271 271
272 272 States are:
273 273 n normal
274 274 m needs merging
275 275 r marked for removal
276 276 a marked for addition
277 277 ? not tracked
278 278 '''
279 279 return self._map.get(key, (b"?",))[0]
280 280
    def __contains__(self, key):
        # Membership mirrors the underlying map.
        return key in self._map
283 283
    def __iter__(self):
        # Iterate over tracked filenames in sorted order.
        return iter(sorted(self._map))
286 286
    def items(self):
        """Iterate over (filename, entry) pairs from the map."""
        return pycompat.iteritems(self._map)

    # Python 2 spelling kept as an alias.
    iteritems = items
291 291
292 292 def parents(self):
293 293 return [self._validate(p) for p in self._pl]
294 294
    def p1(self):
        # First working-directory parent, validated.
        return self._validate(self._pl[0])
297 297
    def p2(self):
        # Second working-directory parent, validated.
        return self._validate(self._pl[1])
300 300
    def branch(self):
        # Current branch name converted to the local encoding.
        return encoding.tolocal(self._branch)
303 303
    def setparents(self, p1, p2=nullid):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, 'm' merged entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if oldp2 != nullid and p2 == nullid:
            # Leaving a merge: scan entries that may carry merge state.
            candidatefiles = self._map.nonnormalset.union(
                self._map.otherparentset
            )
            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard 'm' markers when moving away from a merge state
                if s[0] == b'm':
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s[0] == b'n' and s[2] == -2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.add(f)
        return copies
347 347
    def setbranch(self, branch):
        """Persist ``branch`` (given in local encoding) to .hg/branch."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            f.discard()
            raise
363 363
    def invalidate(self):
        '''Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it.'''

        # Drop cached properties so they are recomputed on next access.
        for a in (r"_map", r"_branch", r"_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None
379 379
    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._map.copymap[dest] = source
            self._updatedfiles.add(source)
            self._updatedfiles.add(dest)
        elif self._map.copymap.pop(dest, None):
            # Only record an update when there actually was a copy entry.
            self._updatedfiles.add(dest)
391 391
    def copied(self, file):
        # Copy source of ``file``, or None when not recorded as a copy.
        return self._map.copymap.get(file, None)
394 394
    def copies(self):
        # Full destination -> source copy mapping.
        return self._map.copymap
397 397
    def _addpath(self, f, state, mode, size, mtime):
        """Shared helper that records ``f`` in the map with ``state``.

        When adding a new or previously-removed file, validates the name
        and rejects file/directory clashes before touching the map.
        """
        oldstate = self[f]
        if state == b'a' or oldstate == b'r':
            scmutil.checkfilename(f)
            if self._map.hastrackeddir(f):
                raise error.Abort(
                    _(b'directory %r already in dirstate') % pycompat.bytestr(f)
                )
            # shadows
            for d in util.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and entry[0] != b'r':
                    raise error.Abort(
                        _(b'file %r in dirstate clashes with %r')
                        % (pycompat.bytestr(d), pycompat.bytestr(f))
                    )
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(f, oldstate, state, mode, size, mtime)
419 419
    def normal(self, f, parentfiledata=None):
        '''Mark a file normal and clean.

        parentfiledata: (mode, size, mtime) of the clean file

        parentfiledata should be computed from memory (for mode,
        size), as or close as possible from the point where we
        determined the file was clean, to limit the risk of the
        file having been changed by an external process between the
        moment where the file was determined to be clean and now.'''
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            # No caller-supplied data: stat the file ourselves.
            s = os.lstat(self._join(f))
            mode = s.st_mode
            size = s.st_size
            mtime = s[stat.ST_MTIME]
        self._addpath(f, b'n', mode, size & _rangemask, mtime & _rangemask)
        self._map.copymap.pop(f, None)
        if f in self._map.nonnormalset:
            self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
446 446
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self._pl[1] != nullid:
            # if there is a merge going on and the file was either
            # in state 'm' (-1) or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                if entry[0] == b'r' and entry[2] in (-1, -2):
                    source = self._map.copymap.get(f)
                    if entry[2] == -1:
                        self.merge(f)
                    elif entry[2] == -2:
                        self.otherparent(f)
                    if source:
                        self.copy(source, f)
                    return
                if entry[0] == b'm' or entry[0] == b'n' and entry[2] == -2:
                    return
        # size -1 / mtime -1 force a content comparison on next status.
        self._addpath(f, b'n', 0, -1, -1)
        self._map.copymap.pop(f, None)
468 468
    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if self._pl[1] == nullid:
            raise error.Abort(
                _(b"setting %r to other parent only allowed in merges") % f
            )
        if f in self and self[f] == b'n':
            # merge-like
            self._addpath(f, b'm', 0, -2, -1)
        else:
            # add-like
            self._addpath(f, b'n', 0, -2, -1)
        self._map.copymap.pop(f, None)
482 482
    def add(self, f):
        '''Mark a file added.'''
        # Added files carry no meaningful mode/size/mtime yet.
        self._addpath(f, b'a', 0, -1, -1)
        self._map.copymap.pop(f, None)
487 487
    def remove(self, f):
        '''Mark a file removed.'''
        self._dirty = True
        oldstate = self[f]
        size = 0
        if self._pl[1] != nullid:
            entry = self._map.get(f)
            if entry is not None:
                # backup the previous state (encoded in the size field)
                if entry[0] == b'm':  # merge
                    size = -1
                elif entry[0] == b'n' and entry[2] == -2:  # other parent
                    size = -2
                    self._map.otherparentset.add(f)
        self._updatedfiles.add(f)
        self._map.removefile(f, oldstate, size)
        if size == 0:
            self._map.copymap.pop(f, None)
506 506
507 507 def merge(self, f):
508 508 '''Mark a file merged.'''
509 509 if self._pl[1] == nullid:
510 510 return self.normallookup(f)
511 511 return self.otherparent(f)
512 512
    def drop(self, f):
        '''Drop a file from the dirstate'''
        oldstate = self[f]
        # dropfile returns a truthy value only if something was removed.
        if self._map.dropfile(f, oldstate):
            self._dirty = True
            self._updatedfiles.add(f)
            self._map.copymap.pop(f, None)
520 520
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Find the preserved-case form of ``path`` and cache it.

        ``normed`` is the case-normalized form; when the path exists the
        folded result is recorded in ``storemap`` keyed by ``normed``.
        ``exists`` may be passed by the caller to skip the lexists() probe.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            storemap[normed] = folded

        return folded
546 546
    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        """Case-normalize ``path`` against files only (filefoldmap)."""
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                # Name came from walking the disk: trust its case.
                folded = path
            else:
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.filefoldmap
                )
        return folded
558 558
    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        """Case-normalize ``path``, checking files first, then directories."""
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                # Name came from walking the disk: trust its case.
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded
574 574
    def normalize(self, path, isknown=False, ignoremissing=False):
        '''
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing path are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        '''

        # On case-sensitive filesystems normalization is a no-op.
        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        return path
596 596
    def clear(self):
        # Empty the map and reset bookkeeping; the cleared state still
        # needs to be written out, hence _dirty = True.
        self._map.clear()
        self._lastnormaltime = 0
        self._updatedfiles.clear()
        self._dirty = True
602 602
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Reset the dirstate to ``parent``: files in ``allfiles`` become
        normallookup, others in ``changedfiles`` are dropped."""
        if changedfiles is None:
            # Rebuild entire dirstate
            changedfiles = allfiles
        # clear() resets _lastnormaltime; preserve it across the rebuild.
        lastnormaltime = self._lastnormaltime
        self.clear()
        self._lastnormaltime = lastnormaltime

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, nullid)
        for f in changedfiles:
            if f in allfiles:
                self.normallookup(f)
            else:
                self.drop(f)

        self._dirty = True
621 621
    def identity(self):
        '''Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        '''
        return self._map.identity
629 629
    def write(self, tr):
        """Write pending changes out — immediately when ``tr`` is None,
        otherwise delayed through the transaction's file generator."""
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )
            return

        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(st)
661 661
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
        dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback
672 672
    def _writedirstate(self, st):
        """Serialize the map to the already-open file ``st``, firing
        parent-change callbacks and applying the debug.dirstate.delaywrite
        mtime-ambiguity workaround.

        NOTE(review): the scraped diff hunk here was resolved to the
        post-commit side (iterate the map directly; the temporary
        ``items``/``del items`` workaround was removed upstream).
        """
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e[0] == b'n' and e[3] == now:
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(st, now)
        self._lastnormaltime = 0
        self._dirty = False
713 706
714 707 def _dirignore(self, f):
715 708 if self._ignore(f):
716 709 return True
717 710 for p in util.finddirs(f):
718 711 if self._ignore(p):
719 712 return True
720 713 return False
721 714
    def _ignorefiles(self):
        """Return the list of ignore-file paths: .hgignore in the repo root
        plus any ui.ignore / ui.ignore.* configured files."""
        files = []
        if os.path.exists(self._join(b'.hgignore')):
            files.append(self._join(b'.hgignore'))
        for name, path in self._ui.configitems(b"ui"):
            if name == b'ignore' or name.startswith(b'ignore.'):
                # we need to use os.path.join here rather than self._join
                # because path is arbitrary and user-specified
                files.append(os.path.join(self._rootdir, util.expandpath(path)))
        return files
732 725
    def _ignorefileandline(self, f):
        """Return (ignorefile, lineno, line) for the first pattern matching
        ``f``, following subinclude files breadth-first; (None, -1, b"")
        when nothing matches."""
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # Queue referenced ignore files, avoiding revisits.
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
754 747
    def _walkexplicit(self, match, subrepos):
        '''Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found.'''

        def badtype(mode):
            # Human-readable description for a non-file, non-dir mode.
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # Local aliases to avoid repeated attribute lookups in the loop.
        matchedir = match.explicitdir
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # Drop explicit files that live inside a listed subrepo.
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    if matchedir:
                        matchedir(nf)
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        if matchedir:
                            matchedir(nf)
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
894 887
895 888 def walk(self, match, subrepos, unknown, ignored, full=True):
896 889 '''
897 890 Walk recursively through the directory tree, finding all files
898 891 matched by match.
899 892
900 893 If full is False, maybe skip some known-clean files.
901 894
902 895 Return a dict mapping filename to stat-like object (either
903 896 mercurial.osutil.stat instance or return value of os.stat()).
904 897
905 898 '''
906 899 # full is a flag that extensions that hook into walk can use -- this
907 900 # implementation doesn't use it at all. This satisfies the contract
908 901 # because we only guarantee a "maybe".
909 902
910 903 if ignored:
911 904 ignore = util.never
912 905 dirignore = util.never
913 906 elif unknown:
914 907 ignore = self._ignore
915 908 dirignore = self._dirignore
916 909 else:
917 910 # if not unknown and not ignored, drop dir recursion and step 2
918 911 ignore = util.always
919 912 dirignore = util.always
920 913
921 914 matchfn = match.matchfn
922 915 matchalways = match.always()
923 916 matchtdir = match.traversedir
924 917 dmap = self._map
925 918 listdir = util.listdir
926 919 lstat = os.lstat
927 920 dirkind = stat.S_IFDIR
928 921 regkind = stat.S_IFREG
929 922 lnkkind = stat.S_IFLNK
930 923 join = self._join
931 924
932 925 exact = skipstep3 = False
933 926 if match.isexact(): # match.exact
934 927 exact = True
935 928 dirignore = util.always # skip step 2
936 929 elif match.prefix(): # match.match, no patterns
937 930 skipstep3 = True
938 931
939 932 if not exact and self._checkcase:
940 933 normalize = self._normalize
941 934 normalizefile = self._normalizefile
942 935 skipstep3 = False
943 936 else:
944 937 normalize = self._normalize
945 938 normalizefile = None
946 939
947 940 # step 1: find all explicit files
948 941 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
949 942
950 943 skipstep3 = skipstep3 and not (work or dirsnotfound)
951 944 work = [d for d in work if not dirignore(d[0])]
952 945
953 946 # step 2: visit subdirectories
954 947 def traverse(work, alreadynormed):
955 948 wadd = work.append
956 949 while work:
957 950 tracing.counter('dirstate.walk work', len(work))
958 951 nd = work.pop()
959 952 visitentries = match.visitchildrenset(nd)
960 953 if not visitentries:
961 954 continue
962 955 if visitentries == b'this' or visitentries == b'all':
963 956 visitentries = None
964 957 skip = None
965 958 if nd != b'':
966 959 skip = b'.hg'
967 960 try:
968 961 with tracing.log('dirstate.walk.traverse listdir %s', nd):
969 962 entries = listdir(join(nd), stat=True, skip=skip)
970 963 except OSError as inst:
971 964 if inst.errno in (errno.EACCES, errno.ENOENT):
972 965 match.bad(
973 966 self.pathto(nd), encoding.strtolocal(inst.strerror)
974 967 )
975 968 continue
976 969 raise
977 970 for f, kind, st in entries:
978 971 # Some matchers may return files in the visitentries set,
979 972 # instead of 'this', if the matcher explicitly mentions them
980 973 # and is not an exactmatcher. This is acceptable; we do not
981 974 # make any hard assumptions about file-or-directory below
982 975 # based on the presence of `f` in visitentries. If
983 976 # visitchildrenset returned a set, we can always skip the
984 977 # entries *not* in the set it provided regardless of whether
985 978 # they're actually a file or a directory.
986 979 if visitentries and f not in visitentries:
987 980 continue
988 981 if normalizefile:
989 982 # even though f might be a directory, we're only
990 983 # interested in comparing it to files currently in the
991 984 # dmap -- therefore normalizefile is enough
992 985 nf = normalizefile(
993 986 nd and (nd + b"/" + f) or f, True, True
994 987 )
995 988 else:
996 989 nf = nd and (nd + b"/" + f) or f
997 990 if nf not in results:
998 991 if kind == dirkind:
999 992 if not ignore(nf):
1000 993 if matchtdir:
1001 994 matchtdir(nf)
1002 995 wadd(nf)
1003 996 if nf in dmap and (matchalways or matchfn(nf)):
1004 997 results[nf] = None
1005 998 elif kind == regkind or kind == lnkkind:
1006 999 if nf in dmap:
1007 1000 if matchalways or matchfn(nf):
1008 1001 results[nf] = st
1009 1002 elif (matchalways or matchfn(nf)) and not ignore(
1010 1003 nf
1011 1004 ):
1012 1005 # unknown file -- normalize if necessary
1013 1006 if not alreadynormed:
1014 1007 nf = normalize(nf, False, True)
1015 1008 results[nf] = st
1016 1009 elif nf in dmap and (matchalways or matchfn(nf)):
1017 1010 results[nf] = None
1018 1011
1019 1012 for nd, d in work:
1020 1013 # alreadynormed means that processwork doesn't have to do any
1021 1014 # expensive directory normalization
1022 1015 alreadynormed = not normalize or nd == d
1023 1016 traverse([d], alreadynormed)
1024 1017
1025 1018 for s in subrepos:
1026 1019 del results[s]
1027 1020 del results[b'.hg']
1028 1021
1029 1022 # step 3: visit remaining files from dmap
1030 1023 if not skipstep3 and not exact:
1031 1024 # If a dmap file is not in results yet, it was either
1032 1025 # a) not matching matchfn b) ignored, c) missing, or d) under a
1033 1026 # symlink directory.
1034 1027 if not results and matchalways:
1035 1028 visit = [f for f in dmap]
1036 1029 else:
1037 1030 visit = [f for f in dmap if f not in results and matchfn(f)]
1038 1031 visit.sort()
1039 1032
1040 1033 if unknown:
1041 1034 # unknown == True means we walked all dirs under the roots
1042 1035 # that wasn't ignored, and everything that matched was stat'ed
1043 1036 # and is already in results.
1044 1037 # The rest must thus be ignored or under a symlink.
1045 1038 audit_path = pathutil.pathauditor(self._root, cached=True)
1046 1039
1047 1040 for nf in iter(visit):
1048 1041 # If a stat for the same file was already added with a
1049 1042 # different case, don't add one for this, since that would
1050 1043 # make it appear as if the file exists under both names
1051 1044 # on disk.
1052 1045 if (
1053 1046 normalizefile
1054 1047 and normalizefile(nf, True, True) in results
1055 1048 ):
1056 1049 results[nf] = None
1057 1050 # Report ignored items in the dmap as long as they are not
1058 1051 # under a symlink directory.
1059 1052 elif audit_path.check(nf):
1060 1053 try:
1061 1054 results[nf] = lstat(join(nf))
1062 1055 # file was just ignored, no links, and exists
1063 1056 except OSError:
1064 1057 # file doesn't exist
1065 1058 results[nf] = None
1066 1059 else:
1067 1060 # It's either missing or under a symlink directory
1068 1061 # which we in this case report as missing
1069 1062 results[nf] = None
1070 1063 else:
1071 1064 # We may not have walked the full directory tree above,
1072 1065 # so stat and check everything we missed.
1073 1066 iv = iter(visit)
1074 1067 for st in util.statfiles([join(i) for i in visit]):
1075 1068 results[next(iv)] = st
1076 1069 return results
1077 1070
1078 1071 def status(self, match, subrepos, ignored, clean, unknown):
1079 1072 '''Determine the status of the working copy relative to the
1080 1073 dirstate and return a pair of (unsure, status), where status is of type
1081 1074 scmutil.status and:
1082 1075
1083 1076 unsure:
1084 1077 files that might have been modified since the dirstate was
1085 1078 written, but need to be read to be sure (size is the same
1086 1079 but mtime differs)
1087 1080 status.modified:
1088 1081 files that have definitely been modified since the dirstate
1089 1082 was written (different size or mode)
1090 1083 status.clean:
1091 1084 files that have definitely not been modified since the
1092 1085 dirstate was written
1093 1086 '''
1094 1087 listignored, listclean, listunknown = ignored, clean, unknown
1095 1088 lookup, modified, added, unknown, ignored = [], [], [], [], []
1096 1089 removed, deleted, clean = [], [], []
1097 1090
1098 1091 dmap = self._map
1099 1092 dmap.preload()
1100 1093
1101 1094 use_rust = True
1102 1095 if rustmod is None:
1103 1096 use_rust = False
1104 1097 elif subrepos:
1105 1098 use_rust = False
1106 1099 if bool(listunknown):
1107 1100 # Pathauditor does not exist yet in Rust, unknown files
1108 1101 # can't be trusted.
1109 1102 use_rust = False
1110 1103 elif self._ignorefiles() and listignored:
1111 1104 # Rust has no ignore mechanism yet, so don't use Rust for
1112 1105 # commands that need ignore.
1113 1106 use_rust = False
1114 1107 elif not match.always():
1115 1108 # Matchers have yet to be implemented
1116 1109 use_rust = False
1117 1110
1118 1111 if use_rust:
1119 1112 (
1120 1113 lookup,
1121 1114 modified,
1122 1115 added,
1123 1116 removed,
1124 1117 deleted,
1125 1118 unknown,
1126 1119 clean,
1127 1120 ) = rustmod.status(
1128 1121 dmap._rustmap,
1129 1122 self._rootdir,
1130 1123 match.files(),
1131 1124 bool(listclean),
1132 1125 self._lastnormaltime,
1133 1126 self._checkexec,
1134 1127 )
1135 1128
1136 1129 status = scmutil.status(
1137 1130 modified=modified,
1138 1131 added=added,
1139 1132 removed=removed,
1140 1133 deleted=deleted,
1141 1134 unknown=unknown,
1142 1135 ignored=ignored,
1143 1136 clean=clean,
1144 1137 )
1145 1138 return (lookup, status)
1146 1139
1147 1140 dcontains = dmap.__contains__
1148 1141 dget = dmap.__getitem__
1149 1142 ladd = lookup.append # aka "unsure"
1150 1143 madd = modified.append
1151 1144 aadd = added.append
1152 1145 uadd = unknown.append
1153 1146 iadd = ignored.append
1154 1147 radd = removed.append
1155 1148 dadd = deleted.append
1156 1149 cadd = clean.append
1157 1150 mexact = match.exact
1158 1151 dirignore = self._dirignore
1159 1152 checkexec = self._checkexec
1160 1153 copymap = self._map.copymap
1161 1154 lastnormaltime = self._lastnormaltime
1162 1155
1163 1156 # We need to do full walks when either
1164 1157 # - we're listing all clean files, or
1165 1158 # - match.traversedir does something, because match.traversedir should
1166 1159 # be called for every dir in the working dir
1167 1160 full = listclean or match.traversedir is not None
1168 1161 for fn, st in pycompat.iteritems(
1169 1162 self.walk(match, subrepos, listunknown, listignored, full=full)
1170 1163 ):
1171 1164 if not dcontains(fn):
1172 1165 if (listignored or mexact(fn)) and dirignore(fn):
1173 1166 if listignored:
1174 1167 iadd(fn)
1175 1168 else:
1176 1169 uadd(fn)
1177 1170 continue
1178 1171
1179 1172 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1180 1173 # written like that for performance reasons. dmap[fn] is not a
1181 1174 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1182 1175 # opcode has fast paths when the value to be unpacked is a tuple or
1183 1176 # a list, but falls back to creating a full-fledged iterator in
1184 1177 # general. That is much slower than simply accessing and storing the
1185 1178 # tuple members one by one.
1186 1179 t = dget(fn)
1187 1180 state = t[0]
1188 1181 mode = t[1]
1189 1182 size = t[2]
1190 1183 time = t[3]
1191 1184
1192 1185 if not st and state in b"nma":
1193 1186 dadd(fn)
1194 1187 elif state == b'n':
1195 1188 if (
1196 1189 size >= 0
1197 1190 and (
1198 1191 (size != st.st_size and size != st.st_size & _rangemask)
1199 1192 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1200 1193 )
1201 1194 or size == -2 # other parent
1202 1195 or fn in copymap
1203 1196 ):
1204 1197 madd(fn)
1205 1198 elif (
1206 1199 time != st[stat.ST_MTIME]
1207 1200 and time != st[stat.ST_MTIME] & _rangemask
1208 1201 ):
1209 1202 ladd(fn)
1210 1203 elif st[stat.ST_MTIME] == lastnormaltime:
1211 1204 # fn may have just been marked as normal and it may have
1212 1205 # changed in the same second without changing its size.
1213 1206 # This can happen if we quickly do multiple commits.
1214 1207 # Force lookup, so we don't miss such a racy file change.
1215 1208 ladd(fn)
1216 1209 elif listclean:
1217 1210 cadd(fn)
1218 1211 elif state == b'm':
1219 1212 madd(fn)
1220 1213 elif state == b'a':
1221 1214 aadd(fn)
1222 1215 elif state == b'r':
1223 1216 radd(fn)
1224 1217
1225 1218 return (
1226 1219 lookup,
1227 1220 scmutil.status(
1228 1221 modified, added, removed, deleted, unknown, ignored, clean
1229 1222 ),
1230 1223 )
1231 1224
1232 1225 def matches(self, match):
1233 1226 '''
1234 1227 return files in the dirstate (in whatever state) filtered by match
1235 1228 '''
1236 1229 dmap = self._map
1237 1230 if match.always():
1238 1231 return dmap.keys()
1239 1232 files = match.files()
1240 1233 if match.isexact():
1241 1234 # fast path -- filter the other way around, since typically files is
1242 1235 # much smaller than dmap
1243 1236 return [f for f in files if f in dmap]
1244 1237 if match.prefix() and all(fn in dmap for fn in files):
1245 1238 # fast path -- all the values are known to be files, so just return
1246 1239 # that
1247 1240 return list(files)
1248 1241 return [f for f in dmap if match(f)]
1249 1242
1250 1243 def _actualfilename(self, tr):
1251 1244 if tr:
1252 1245 return self._pendingfilename
1253 1246 else:
1254 1247 return self._filename
1255 1248
1256 1249 def savebackup(self, tr, backupname):
1257 1250 '''Save current dirstate into backup file'''
1258 1251 filename = self._actualfilename(tr)
1259 1252 assert backupname != filename
1260 1253
1261 1254 # use '_writedirstate' instead of 'write' to write changes certainly,
1262 1255 # because the latter omits writing out if transaction is running.
1263 1256 # output file will be used to create backup of dirstate at this point.
1264 1257 if self._dirty or not self._opener.exists(filename):
1265 1258 self._writedirstate(
1266 1259 self._opener(filename, b"w", atomictemp=True, checkambig=True)
1267 1260 )
1268 1261
1269 1262 if tr:
1270 1263 # ensure that subsequent tr.writepending returns True for
1271 1264 # changes written out above, even if dirstate is never
1272 1265 # changed after this
1273 1266 tr.addfilegenerator(
1274 1267 b'dirstate',
1275 1268 (self._filename,),
1276 1269 self._writedirstate,
1277 1270 location=b'plain',
1278 1271 )
1279 1272
1280 1273 # ensure that pending file written above is unlinked at
1281 1274 # failure, even if tr.writepending isn't invoked until the
1282 1275 # end of this transaction
1283 1276 tr.registertmp(filename, location=b'plain')
1284 1277
1285 1278 self._opener.tryunlink(backupname)
1286 1279 # hardlink backup is okay because _writedirstate is always called
1287 1280 # with an "atomictemp=True" file.
1288 1281 util.copyfile(
1289 1282 self._opener.join(filename),
1290 1283 self._opener.join(backupname),
1291 1284 hardlink=True,
1292 1285 )
1293 1286
1294 1287 def restorebackup(self, tr, backupname):
1295 1288 '''Restore dirstate by backup file'''
1296 1289 # this "invalidate()" prevents "wlock.release()" from writing
1297 1290 # changes of dirstate out after restoring from backup file
1298 1291 self.invalidate()
1299 1292 filename = self._actualfilename(tr)
1300 1293 o = self._opener
1301 1294 if util.samefile(o.join(backupname), o.join(filename)):
1302 1295 o.unlink(backupname)
1303 1296 else:
1304 1297 o.rename(backupname, filename, checkambig=True)
1305 1298
1306 1299 def clearbackup(self, tr, backupname):
1307 1300 '''Clear backup file'''
1308 1301 self._opener.unlink(backupname)
1309 1302
1310 1303
1311 1304 class dirstatemap(object):
1312 1305 """Map encapsulating the dirstate's contents.
1313 1306
1314 1307 The dirstate contains the following state:
1315 1308
1316 1309 - `identity` is the identity of the dirstate file, which can be used to
1317 1310 detect when changes have occurred to the dirstate file.
1318 1311
1319 1312 - `parents` is a pair containing the parents of the working copy. The
1320 1313 parents are updated by calling `setparents`.
1321 1314
1322 1315 - the state map maps filenames to tuples of (state, mode, size, mtime),
1323 1316 where state is a single character representing 'normal', 'added',
1324 1317 'removed', or 'merged'. It is read by treating the dirstate as a
1325 1318 dict. File state is updated by calling the `addfile`, `removefile` and
1326 1319 `dropfile` methods.
1327 1320
1328 1321 - `copymap` maps destination filenames to their source filename.
1329 1322
1330 1323 The dirstate also provides the following views onto the state:
1331 1324
1332 1325 - `nonnormalset` is a set of the filenames that have state other
1333 1326 than 'normal', or are normal but have an mtime of -1 ('normallookup').
1334 1327
1335 1328 - `otherparentset` is a set of the filenames that are marked as coming
1336 1329 from the second parent when the dirstate is currently being merged.
1337 1330
1338 1331 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
1339 1332 form that they appear as in the dirstate.
1340 1333
1341 1334 - `dirfoldmap` is a dict mapping normalized directory names to the
1342 1335 denormalized form that they appear as in the dirstate.
1343 1336 """
1344 1337
1345 1338 def __init__(self, ui, opener, root):
1346 1339 self._ui = ui
1347 1340 self._opener = opener
1348 1341 self._root = root
1349 1342 self._filename = b'dirstate'
1350 1343
1351 1344 self._parents = None
1352 1345 self._dirtyparents = False
1353 1346
1354 1347 # for consistent view between _pl() and _read() invocations
1355 1348 self._pendingmode = None
1356 1349
1357 1350 @propertycache
1358 1351 def _map(self):
1359 1352 self._map = {}
1360 1353 self.read()
1361 1354 return self._map
1362 1355
1363 1356 @propertycache
1364 1357 def copymap(self):
1365 1358 self.copymap = {}
1366 1359 self._map
1367 1360 return self.copymap
1368 1361
1369 1362 def clear(self):
1370 1363 self._map.clear()
1371 1364 self.copymap.clear()
1372 1365 self.setparents(nullid, nullid)
1373 1366 util.clearcachedproperty(self, b"_dirs")
1374 1367 util.clearcachedproperty(self, b"_alldirs")
1375 1368 util.clearcachedproperty(self, b"filefoldmap")
1376 1369 util.clearcachedproperty(self, b"dirfoldmap")
1377 1370 util.clearcachedproperty(self, b"nonnormalset")
1378 1371 util.clearcachedproperty(self, b"otherparentset")
1379 1372
1380 1373 def items(self):
1381 1374 return pycompat.iteritems(self._map)
1382 1375
1383 1376 # forward for python2,3 compat
1384 1377 iteritems = items
1385 1378
1386 1379 def __len__(self):
1387 1380 return len(self._map)
1388 1381
1389 1382 def __iter__(self):
1390 1383 return iter(self._map)
1391 1384
1392 1385 def get(self, key, default=None):
1393 1386 return self._map.get(key, default)
1394 1387
1395 1388 def __contains__(self, key):
1396 1389 return key in self._map
1397 1390
1398 1391 def __getitem__(self, key):
1399 1392 return self._map[key]
1400 1393
1401 1394 def keys(self):
1402 1395 return self._map.keys()
1403 1396
1404 1397 def preload(self):
1405 1398 """Loads the underlying data, if it's not already loaded"""
1406 1399 self._map
1407 1400
1408 1401 def addfile(self, f, oldstate, state, mode, size, mtime):
1409 1402 """Add a tracked file to the dirstate."""
1410 1403 if oldstate in b"?r" and r"_dirs" in self.__dict__:
1411 1404 self._dirs.addpath(f)
1412 1405 if oldstate == b"?" and r"_alldirs" in self.__dict__:
1413 1406 self._alldirs.addpath(f)
1414 1407 self._map[f] = dirstatetuple(state, mode, size, mtime)
1415 1408 if state != b'n' or mtime == -1:
1416 1409 self.nonnormalset.add(f)
1417 1410 if size == -2:
1418 1411 self.otherparentset.add(f)
1419 1412
1420 1413 def removefile(self, f, oldstate, size):
1421 1414 """
1422 1415 Mark a file as removed in the dirstate.
1423 1416
1424 1417 The `size` parameter is used to store sentinel values that indicate
1425 1418 the file's previous state. In the future, we should refactor this
1426 1419 to be more explicit about what that state is.
1427 1420 """
1428 1421 if oldstate not in b"?r" and r"_dirs" in self.__dict__:
1429 1422 self._dirs.delpath(f)
1430 1423 if oldstate == b"?" and r"_alldirs" in self.__dict__:
1431 1424 self._alldirs.addpath(f)
1432 1425 if r"filefoldmap" in self.__dict__:
1433 1426 normed = util.normcase(f)
1434 1427 self.filefoldmap.pop(normed, None)
1435 1428 self._map[f] = dirstatetuple(b'r', 0, size, 0)
1436 1429 self.nonnormalset.add(f)
1437 1430
1438 1431 def dropfile(self, f, oldstate):
1439 1432 """
1440 1433 Remove a file from the dirstate. Returns True if the file was
1441 1434 previously recorded.
1442 1435 """
1443 1436 exists = self._map.pop(f, None) is not None
1444 1437 if exists:
1445 1438 if oldstate != b"r" and r"_dirs" in self.__dict__:
1446 1439 self._dirs.delpath(f)
1447 1440 if r"_alldirs" in self.__dict__:
1448 1441 self._alldirs.delpath(f)
1449 1442 if r"filefoldmap" in self.__dict__:
1450 1443 normed = util.normcase(f)
1451 1444 self.filefoldmap.pop(normed, None)
1452 1445 self.nonnormalset.discard(f)
1453 1446 return exists
1454 1447
1455 1448 def clearambiguoustimes(self, files, now):
1456 1449 for f in files:
1457 1450 e = self.get(f)
1458 1451 if e is not None and e[0] == b'n' and e[3] == now:
1459 1452 self._map[f] = dirstatetuple(e[0], e[1], e[2], -1)
1460 1453 self.nonnormalset.add(f)
1461 1454
1462 1455 def nonnormalentries(self):
1463 1456 '''Compute the nonnormal dirstate entries from the dmap'''
1464 1457 try:
1465 1458 return parsers.nonnormalotherparententries(self._map)
1466 1459 except AttributeError:
1467 1460 nonnorm = set()
1468 1461 otherparent = set()
1469 1462 for fname, e in pycompat.iteritems(self._map):
1470 1463 if e[0] != b'n' or e[3] == -1:
1471 1464 nonnorm.add(fname)
1472 1465 if e[0] == b'n' and e[2] == -2:
1473 1466 otherparent.add(fname)
1474 1467 return nonnorm, otherparent
1475 1468
1476 1469 @propertycache
1477 1470 def filefoldmap(self):
1478 1471 """Returns a dictionary mapping normalized case paths to their
1479 1472 non-normalized versions.
1480 1473 """
1481 1474 try:
1482 1475 makefilefoldmap = parsers.make_file_foldmap
1483 1476 except AttributeError:
1484 1477 pass
1485 1478 else:
1486 1479 return makefilefoldmap(
1487 1480 self._map, util.normcasespec, util.normcasefallback
1488 1481 )
1489 1482
1490 1483 f = {}
1491 1484 normcase = util.normcase
1492 1485 for name, s in pycompat.iteritems(self._map):
1493 1486 if s[0] != b'r':
1494 1487 f[normcase(name)] = name
1495 1488 f[b'.'] = b'.' # prevents useless util.fspath() invocation
1496 1489 return f
1497 1490
1498 1491 def hastrackeddir(self, d):
1499 1492 """
1500 1493 Returns True if the dirstate contains a tracked (not removed) file
1501 1494 in this directory.
1502 1495 """
1503 1496 return d in self._dirs
1504 1497
1505 1498 def hasdir(self, d):
1506 1499 """
1507 1500 Returns True if the dirstate contains a file (tracked or removed)
1508 1501 in this directory.
1509 1502 """
1510 1503 return d in self._alldirs
1511 1504
1512 1505 @propertycache
1513 1506 def _dirs(self):
1514 1507 return util.dirs(self._map, b'r')
1515 1508
1516 1509 @propertycache
1517 1510 def _alldirs(self):
1518 1511 return util.dirs(self._map)
1519 1512
1520 1513 def _opendirstatefile(self):
1521 1514 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
1522 1515 if self._pendingmode is not None and self._pendingmode != mode:
1523 1516 fp.close()
1524 1517 raise error.Abort(
1525 1518 _(b'working directory state may be changed parallelly')
1526 1519 )
1527 1520 self._pendingmode = mode
1528 1521 return fp
1529 1522
1530 1523 def parents(self):
1531 1524 if not self._parents:
1532 1525 try:
1533 1526 fp = self._opendirstatefile()
1534 1527 st = fp.read(40)
1535 1528 fp.close()
1536 1529 except IOError as err:
1537 1530 if err.errno != errno.ENOENT:
1538 1531 raise
1539 1532 # File doesn't exist, so the current state is empty
1540 1533 st = b''
1541 1534
1542 1535 l = len(st)
1543 1536 if l == 40:
1544 1537 self._parents = (st[:20], st[20:40])
1545 1538 elif l == 0:
1546 1539 self._parents = (nullid, nullid)
1547 1540 else:
1548 1541 raise error.Abort(
1549 1542 _(b'working directory state appears damaged!')
1550 1543 )
1551 1544
1552 1545 return self._parents
1553 1546
1554 1547 def setparents(self, p1, p2):
1555 1548 self._parents = (p1, p2)
1556 1549 self._dirtyparents = True
1557 1550
1558 1551 def read(self):
1559 1552 # ignore HG_PENDING because identity is used only for writing
1560 1553 self.identity = util.filestat.frompath(
1561 1554 self._opener.join(self._filename)
1562 1555 )
1563 1556
1564 1557 try:
1565 1558 fp = self._opendirstatefile()
1566 1559 try:
1567 1560 st = fp.read()
1568 1561 finally:
1569 1562 fp.close()
1570 1563 except IOError as err:
1571 1564 if err.errno != errno.ENOENT:
1572 1565 raise
1573 1566 return
1574 1567 if not st:
1575 1568 return
1576 1569
1577 1570 if util.safehasattr(parsers, b'dict_new_presized'):
1578 1571 # Make an estimate of the number of files in the dirstate based on
1579 1572 # its size. From a linear regression on a set of real-world repos,
1580 1573 # all over 10,000 files, the size of a dirstate entry is 85
1581 1574 # bytes. The cost of resizing is significantly higher than the cost
1582 1575 # of filling in a larger presized dict, so subtract 20% from the
1583 1576 # size.
1584 1577 #
1585 1578 # This heuristic is imperfect in many ways, so in a future dirstate
1586 1579 # format update it makes sense to just record the number of entries
1587 1580 # on write.
1588 1581 self._map = parsers.dict_new_presized(len(st) // 71)
1589 1582
1590 1583 # Python's garbage collector triggers a GC each time a certain number
1591 1584 # of container objects (the number being defined by
1592 1585 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
1593 1586 # for each file in the dirstate. The C version then immediately marks
1594 1587 # them as not to be tracked by the collector. However, this has no
1595 1588 # effect on when GCs are triggered, only on what objects the GC looks
1596 1589 # into. This means that O(number of files) GCs are unavoidable.
1597 1590 # Depending on when in the process's lifetime the dirstate is parsed,
1598 1591 # this can get very expensive. As a workaround, disable GC while
1599 1592 # parsing the dirstate.
1600 1593 #
1601 1594 # (we cannot decorate the function directly since it is in a C module)
1602 1595 parse_dirstate = util.nogc(parsers.parse_dirstate)
1603 1596 p = parse_dirstate(self._map, self.copymap, st)
1604 1597 if not self._dirtyparents:
1605 1598 self.setparents(*p)
1606 1599
1607 1600 # Avoid excess attribute lookups by fast pathing certain checks
1608 1601 self.__contains__ = self._map.__contains__
1609 1602 self.__getitem__ = self._map.__getitem__
1610 1603 self.get = self._map.get
1611 1604
1612 1605 def write(self, st, now):
1613 1606 st.write(
1614 1607 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
1615 1608 )
1616 1609 st.close()
1617 1610 self._dirtyparents = False
1618 1611 self.nonnormalset, self.otherparentset = self.nonnormalentries()
1619 1612
1620 1613 @propertycache
1621 1614 def nonnormalset(self):
1622 1615 nonnorm, otherparents = self.nonnormalentries()
1623 1616 self.otherparentset = otherparents
1624 1617 return nonnorm
1625 1618
1626 1619 @propertycache
1627 1620 def otherparentset(self):
1628 1621 nonnorm, otherparents = self.nonnormalentries()
1629 1622 self.nonnormalset = nonnorm
1630 1623 return otherparents
1631 1624
1632 1625 @propertycache
1633 1626 def identity(self):
1634 1627 self._map
1635 1628 return self.identity
1636 1629
1637 1630 @propertycache
1638 1631 def dirfoldmap(self):
1639 1632 f = {}
1640 1633 normcase = util.normcase
1641 1634 for name in self._dirs:
1642 1635 f[normcase(name)] = name
1643 1636 return f
1644 1637
1645 1638
1646 1639 if rustmod is not None:
1647 1640
1648 1641 class dirstatemap(object):
1649 1642 def __init__(self, ui, opener, root):
1650 1643 self._ui = ui
1651 1644 self._opener = opener
1652 1645 self._root = root
1653 1646 self._filename = b'dirstate'
1654 1647 self._parents = None
1655 1648 self._dirtyparents = False
1656 1649
1657 1650 # for consistent view between _pl() and _read() invocations
1658 1651 self._pendingmode = None
1659 1652
1660 1653 def addfile(self, *args, **kwargs):
1661 1654 return self._rustmap.addfile(*args, **kwargs)
1662 1655
1663 1656 def removefile(self, *args, **kwargs):
1664 1657 return self._rustmap.removefile(*args, **kwargs)
1665 1658
1666 1659 def dropfile(self, *args, **kwargs):
1667 1660 return self._rustmap.dropfile(*args, **kwargs)
1668 1661
1669 1662 def clearambiguoustimes(self, *args, **kwargs):
1670 1663 return self._rustmap.clearambiguoustimes(*args, **kwargs)
1671 1664
1672 1665 def nonnormalentries(self):
1673 1666 return self._rustmap.nonnormalentries()
1674 1667
1675 1668 def get(self, *args, **kwargs):
1676 1669 return self._rustmap.get(*args, **kwargs)
1677 1670
1678 1671 @propertycache
1679 1672 def _rustmap(self):
1680 1673 self._rustmap = rustmod.DirstateMap(self._root)
1681 1674 self.read()
1682 1675 return self._rustmap
1683 1676
1684 1677 @property
1685 1678 def copymap(self):
1686 1679 return self._rustmap.copymap()
1687 1680
1688 1681 def preload(self):
1689 1682 self._rustmap
1690 1683
1691 1684 def clear(self):
1692 1685 self._rustmap.clear()
1693 1686 self.setparents(nullid, nullid)
1694 1687 util.clearcachedproperty(self, b"_dirs")
1695 1688 util.clearcachedproperty(self, b"_alldirs")
1696 1689 util.clearcachedproperty(self, b"dirfoldmap")
1697 1690
1698 1691 def items(self):
1699 1692 return self._rustmap.items()
1700 1693
1701 1694 def keys(self):
1702 1695 return iter(self._rustmap)
1703 1696
1704 1697 def __contains__(self, key):
1705 1698 return key in self._rustmap
1706 1699
1707 1700 def __getitem__(self, item):
1708 1701 return self._rustmap[item]
1709 1702
1710 1703 def __len__(self):
1711 1704 return len(self._rustmap)
1712 1705
1713 1706 def __iter__(self):
1714 1707 return iter(self._rustmap)
1715 1708
1716 1709 # forward for python2,3 compat
1717 1710 iteritems = items
1718 1711
1719 1712 def _opendirstatefile(self):
1720 1713 fp, mode = txnutil.trypending(
1721 1714 self._root, self._opener, self._filename
1722 1715 )
1723 1716 if self._pendingmode is not None and self._pendingmode != mode:
1724 1717 fp.close()
1725 1718 raise error.Abort(
1726 1719 _(b'working directory state may be changed parallelly')
1727 1720 )
1728 1721 self._pendingmode = mode
1729 1722 return fp
1730 1723
1731 1724 def setparents(self, p1, p2):
1732 1725 self._rustmap.setparents(p1, p2)
1733 1726 self._parents = (p1, p2)
1734 1727 self._dirtyparents = True
1735 1728
1736 1729 def parents(self):
1737 1730 if not self._parents:
1738 1731 try:
1739 1732 fp = self._opendirstatefile()
1740 1733 st = fp.read(40)
1741 1734 fp.close()
1742 1735 except IOError as err:
1743 1736 if err.errno != errno.ENOENT:
1744 1737 raise
1745 1738 # File doesn't exist, so the current state is empty
1746 1739 st = b''
1747 1740
1748 1741 try:
1749 1742 self._parents = self._rustmap.parents(st)
1750 1743 except ValueError:
1751 1744 raise error.Abort(
1752 1745 _(b'working directory state appears damaged!')
1753 1746 )
1754 1747
1755 1748 return self._parents
1756 1749
1757 1750 def read(self):
1758 1751 # ignore HG_PENDING because identity is used only for writing
1759 1752 self.identity = util.filestat.frompath(
1760 1753 self._opener.join(self._filename)
1761 1754 )
1762 1755
1763 1756 try:
1764 1757 fp = self._opendirstatefile()
1765 1758 try:
1766 1759 st = fp.read()
1767 1760 finally:
1768 1761 fp.close()
1769 1762 except IOError as err:
1770 1763 if err.errno != errno.ENOENT:
1771 1764 raise
1772 1765 return
1773 1766 if not st:
1774 1767 return
1775 1768
1776 1769 parse_dirstate = util.nogc(self._rustmap.read)
1777 1770 parents = parse_dirstate(st)
1778 1771 if parents and not self._dirtyparents:
1779 1772 self.setparents(*parents)
1780 1773
1781 1774 self.__contains__ = self._rustmap.__contains__
1782 1775 self.__getitem__ = self._rustmap.__getitem__
1783 1776 self.get = self._rustmap.get
1784 1777
1785 1778 def write(self, st, now):
1786 1779 parents = self.parents()
1787 1780 st.write(self._rustmap.write(parents[0], parents[1], now))
1788 1781 st.close()
1789 1782 self._dirtyparents = False
1790 1783
1791 1784 @propertycache
1792 1785 def filefoldmap(self):
1793 1786 """Returns a dictionary mapping normalized case paths to their
1794 1787 non-normalized versions.
1795 1788 """
1796 1789 return self._rustmap.filefoldmapasdict()
1797 1790
1798 1791 def hastrackeddir(self, d):
1799 1792 self._dirs # Trigger Python's propertycache
1800 1793 return self._rustmap.hastrackeddir(d)
1801 1794
1802 1795 def hasdir(self, d):
1803 1796 self._dirs # Trigger Python's propertycache
1804 1797 return self._rustmap.hasdir(d)
1805 1798
1806 1799 @propertycache
1807 1800 def _dirs(self):
1808 1801 return self._rustmap.getdirs()
1809 1802
1810 1803 @propertycache
1811 1804 def _alldirs(self):
1812 1805 return self._rustmap.getalldirs()
1813 1806
1814 1807 @propertycache
1815 1808 def identity(self):
1816 1809 self._rustmap
1817 1810 return self.identity
1818 1811
1819 1812 @property
1820 1813 def nonnormalset(self):
1821 1814 nonnorm, otherparents = self._rustmap.nonnormalentries()
1822 1815 return nonnorm
1823 1816
1824 1817 @property
1825 1818 def otherparentset(self):
1826 1819 nonnorm, otherparents = self._rustmap.nonnormalentries()
1827 1820 return otherparents
1828 1821
1829 1822 @propertycache
1830 1823 def dirfoldmap(self):
1831 1824 f = {}
1832 1825 normcase = util.normcase
1833 1826 for name in self._dirs:
1834 1827 f[normcase(name)] = name
1835 1828 return f
@@ -1,691 +1,716 b''
1 1 // ref_sharing.rs
2 2 //
3 3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
4 4 //
5 5 // Permission is hereby granted, free of charge, to any person obtaining a copy
6 6 // of this software and associated documentation files (the "Software"), to
7 7 // deal in the Software without restriction, including without limitation the
8 8 // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
9 9 // sell copies of the Software, and to permit persons to whom the Software is
10 10 // furnished to do so, subject to the following conditions:
11 11 //
12 12 // The above copyright notice and this permission notice shall be included in
13 13 // all copies or substantial portions of the Software.
14 14 //
15 15 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 16 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 17 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 18 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 19 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 20 // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 21 // IN THE SOFTWARE.
22 22
23 23 //! Macros for use in the `hg-cpython` bridge library.
24 24
25 25 use crate::exceptions::AlreadyBorrowed;
26 26 use cpython::{exc, PyClone, PyErr, PyObject, PyResult, Python};
27 27 use std::cell::{Cell, Ref, RefCell, RefMut};
28 28 use std::ops::{Deref, DerefMut};
29 29 use std::sync::atomic::{AtomicUsize, Ordering};
30 30
31 31 /// Manages the shared state between Python and Rust
32 32 ///
33 33 /// `PySharedState` is owned by `PySharedRefCell`, and is shared across its
34 34 /// derived references. The consistency of these references are guaranteed
35 35 /// as follows:
36 36 ///
37 37 /// - The immutability of `py_class!` object fields. Any mutation of
38 38 /// `PySharedRefCell` is allowed only through its `borrow_mut()`.
39 39 /// - The `py: Python<'_>` token, which makes sure that any data access is
40 40 /// synchronized by the GIL.
41 /// - The `borrow_count`, which is the number of references borrowed from
42 /// `PyLeaked`. Just like `RefCell`, mutation is prohibited while `PyLeaked`
43 /// is borrowed.
41 44 /// - The `generation` counter, which increments on `borrow_mut()`. `PyLeaked`
42 45 /// reference is valid only if the `current_generation()` equals to the
43 46 /// `generation` at the time of `leak_immutable()`.
44 47 #[derive(Debug, Default)]
45 48 struct PySharedState {
46 leak_count: Cell<usize>,
47 49 mutably_borrowed: Cell<bool>,
48 50 // The counter variable could be Cell<usize> since any operation on
49 51 // PySharedState is synchronized by the GIL, but being "atomic" makes
50 52 // PySharedState inherently Sync. The ordering requirement doesn't
51 53 // matter thanks to the GIL.
54 borrow_count: AtomicUsize,
52 55 generation: AtomicUsize,
53 56 }
54 57
55 58 // &PySharedState can be Send because any access to inner cells is
56 59 // synchronized by the GIL.
57 60 unsafe impl Sync for PySharedState {}
58 61
59 62 impl PySharedState {
60 63 fn borrow_mut<'a, T>(
61 64 &'a self,
62 65 py: Python<'a>,
63 66 pyrefmut: RefMut<'a, T>,
64 67 ) -> PyResult<PyRefMut<'a, T>> {
65 68 if self.mutably_borrowed.get() {
66 69 return Err(AlreadyBorrowed::new(
67 70 py,
68 71 "Cannot borrow mutably while there exists another \
69 72 mutable reference in a Python object",
70 73 ));
71 74 }
72 match self.leak_count.get() {
75 match self.current_borrow_count(py) {
73 76 0 => {
74 77 self.mutably_borrowed.replace(true);
75 78 // Note that this wraps around to the same value if mutably
76 79 // borrowed more than usize::MAX times, which wouldn't happen
77 80 // in practice.
78 81 self.generation.fetch_add(1, Ordering::Relaxed);
79 82 Ok(PyRefMut::new(py, pyrefmut, self))
80 83 }
81 // TODO
82 // For now, this works differently than Python references
83 // in the case of iterators.
84 // Python does not complain when the data an iterator
85 // points to is modified if the iterator is never used
86 // afterwards.
87 // Here, we are stricter than this by refusing to give a
88 // mutable reference if it is already borrowed.
89 // While the additional safety might be argued for, it
90 // breaks valid programming patterns in Python and we need
91 // to fix this issue down the line.
92 84 _ => Err(AlreadyBorrowed::new(
93 85 py,
94 "Cannot borrow mutably while there are \
95 immutable references in Python objects",
86 "Cannot borrow mutably while immutably borrowed",
96 87 )),
97 88 }
98 89 }
99 90
100 91 /// Return a reference to the wrapped data and its state with an
101 92 /// artificial static lifetime.
102 93 /// We need to be protected by the GIL for thread-safety.
103 94 ///
104 95 /// # Safety
105 96 ///
106 97 /// This is highly unsafe since the lifetime of the given data can be
107 98 /// extended. Do not call this function directly.
108 99 unsafe fn leak_immutable<T>(
109 100 &self,
110 101 py: Python,
111 102 data: &PySharedRefCell<T>,
112 103 ) -> PyResult<(&'static T, &'static PySharedState)> {
113 104 if self.mutably_borrowed.get() {
114 105 return Err(AlreadyBorrowed::new(
115 106 py,
116 107 "Cannot borrow immutably while there is a \
117 108 mutable reference in Python objects",
118 109 ));
119 110 }
120 111 // TODO: it's weird that self is data.py_shared_state. Maybe we
121 112 // can move stuff to PySharedRefCell?
122 113 let ptr = data.as_ptr();
123 114 let state_ptr: *const PySharedState = &data.py_shared_state;
124 self.leak_count.replace(self.leak_count.get() + 1);
125 115 Ok((&*ptr, &*state_ptr))
126 116 }
127 117
118 fn current_borrow_count(&self, _py: Python) -> usize {
119 self.borrow_count.load(Ordering::Relaxed)
120 }
121
122 fn increase_borrow_count(&self, _py: Python) {
123 // Note that this wraps around if there are more than usize::MAX
124 // borrowed references, which shouldn't happen due to memory limit.
125 self.borrow_count.fetch_add(1, Ordering::Relaxed);
126 }
127
128 fn decrease_borrow_count(&self, _py: Python) {
129 let prev_count = self.borrow_count.fetch_sub(1, Ordering::Relaxed);
130 assert!(prev_count > 0);
131 }
132
128 133 /// # Safety
129 134 ///
130 135 /// It's up to you to make sure the reference is about to be deleted
131 136 /// when updating the leak count.
132 fn decrease_leak_count(&self, _py: Python, mutable: bool) {
137 fn decrease_leak_count(&self, py: Python, mutable: bool) {
133 138 if mutable {
134 assert_eq!(self.leak_count.get(), 0);
139 assert_eq!(self.current_borrow_count(py), 0);
135 140 assert!(self.mutably_borrowed.get());
136 141 self.mutably_borrowed.replace(false);
137 142 } else {
138 let count = self.leak_count.get();
139 assert!(count > 0);
140 self.leak_count.replace(count - 1);
143 unimplemented!();
141 144 }
142 145 }
143 146
144 147 fn current_generation(&self, _py: Python) -> usize {
145 148 self.generation.load(Ordering::Relaxed)
146 149 }
147 150 }
148 151
152 /// Helper to keep the borrow count updated while the shared object is
153 /// immutably borrowed without using the `RefCell` interface.
154 struct BorrowPyShared<'a> {
155 py: Python<'a>,
156 py_shared_state: &'a PySharedState,
157 }
158
159 impl<'a> BorrowPyShared<'a> {
160 fn new(
161 py: Python<'a>,
162 py_shared_state: &'a PySharedState,
163 ) -> BorrowPyShared<'a> {
164 py_shared_state.increase_borrow_count(py);
165 BorrowPyShared {
166 py,
167 py_shared_state,
168 }
169 }
170 }
171
172 impl Drop for BorrowPyShared<'_> {
173 fn drop(&mut self) {
174 self.py_shared_state.decrease_borrow_count(self.py);
175 }
176 }
177
149 178 /// `RefCell` wrapper to be safely used in conjunction with `PySharedState`.
150 179 ///
151 180 /// This object can be stored in a `py_class!` object as a data field. Any
152 181 /// operation is allowed through the `PySharedRef` interface.
153 182 #[derive(Debug)]
154 183 pub struct PySharedRefCell<T> {
155 184 inner: RefCell<T>,
156 185 py_shared_state: PySharedState,
157 186 }
158 187
159 188 impl<T> PySharedRefCell<T> {
160 189 pub fn new(value: T) -> PySharedRefCell<T> {
161 190 Self {
162 191 inner: RefCell::new(value),
163 192 py_shared_state: PySharedState::default(),
164 193 }
165 194 }
166 195
167 196 fn borrow<'a>(&'a self, _py: Python<'a>) -> Ref<'a, T> {
168 197 // py_shared_state isn't involved since
169 198 // - inner.borrow() would fail if self is mutably borrowed,
170 199 // - and inner.borrow_mut() would fail while self is borrowed.
171 200 self.inner.borrow()
172 201 }
173 202
174 203 fn as_ptr(&self) -> *mut T {
175 204 self.inner.as_ptr()
176 205 }
177 206
178 207 // TODO: maybe this should be named as try_borrow_mut(), and use
179 208 // inner.try_borrow_mut(). The current implementation panics if
180 209 // self.inner has been borrowed, but returns error if py_shared_state
181 210 // refuses to borrow.
182 211 fn borrow_mut<'a>(&'a self, py: Python<'a>) -> PyResult<PyRefMut<'a, T>> {
183 212 self.py_shared_state.borrow_mut(py, self.inner.borrow_mut())
184 213 }
185 214 }
186 215
187 216 /// Sharable data member of type `T` borrowed from the `PyObject`.
188 217 pub struct PySharedRef<'a, T> {
189 218 py: Python<'a>,
190 219 owner: &'a PyObject,
191 220 data: &'a PySharedRefCell<T>,
192 221 }
193 222
194 223 impl<'a, T> PySharedRef<'a, T> {
195 224 /// # Safety
196 225 ///
197 226 /// The `data` must be owned by the `owner`. Otherwise, the leak count
198 227 /// would get wrong.
199 228 pub unsafe fn new(
200 229 py: Python<'a>,
201 230 owner: &'a PyObject,
202 231 data: &'a PySharedRefCell<T>,
203 232 ) -> Self {
204 233 Self { py, owner, data }
205 234 }
206 235
207 236 pub fn borrow(&self) -> Ref<'a, T> {
208 237 self.data.borrow(self.py)
209 238 }
210 239
211 240 pub fn borrow_mut(&self) -> PyResult<PyRefMut<'a, T>> {
212 241 self.data.borrow_mut(self.py)
213 242 }
214 243
215 244 /// Returns a leaked reference.
216 245 pub fn leak_immutable(&self) -> PyResult<PyLeaked<&'static T>> {
217 246 let state = &self.data.py_shared_state;
218 247 unsafe {
219 248 let (static_ref, static_state_ref) =
220 249 state.leak_immutable(self.py, self.data)?;
221 250 Ok(PyLeaked::new(
222 251 self.py,
223 252 self.owner,
224 253 static_ref,
225 254 static_state_ref,
226 255 ))
227 256 }
228 257 }
229 258 }
230 259
231 260 /// Holds a mutable reference to data shared between Python and Rust.
232 261 pub struct PyRefMut<'a, T> {
233 262 py: Python<'a>,
234 263 inner: RefMut<'a, T>,
235 264 py_shared_state: &'a PySharedState,
236 265 }
237 266
238 267 impl<'a, T> PyRefMut<'a, T> {
239 268 // Must be constructed by PySharedState after checking its leak_count.
240 269 // Otherwise, drop() would incorrectly update the state.
241 270 fn new(
242 271 py: Python<'a>,
243 272 inner: RefMut<'a, T>,
244 273 py_shared_state: &'a PySharedState,
245 274 ) -> Self {
246 275 Self {
247 276 py,
248 277 inner,
249 278 py_shared_state,
250 279 }
251 280 }
252 281 }
253 282
254 283 impl<'a, T> std::ops::Deref for PyRefMut<'a, T> {
255 284 type Target = RefMut<'a, T>;
256 285
257 286 fn deref(&self) -> &Self::Target {
258 287 &self.inner
259 288 }
260 289 }
261 290 impl<'a, T> std::ops::DerefMut for PyRefMut<'a, T> {
262 291 fn deref_mut(&mut self) -> &mut Self::Target {
263 292 &mut self.inner
264 293 }
265 294 }
266 295
267 296 impl<'a, T> Drop for PyRefMut<'a, T> {
268 297 fn drop(&mut self) {
269 298 self.py_shared_state.decrease_leak_count(self.py, true);
270 299 }
271 300 }
272 301
273 302 /// Allows a `py_class!` generated struct to share references to one of its
274 303 /// data members with Python.
275 304 ///
276 /// # Warning
277 ///
278 /// TODO allow Python container types: for now, integration with the garbage
279 /// collector does not extend to Rust structs holding references to Python
280 /// objects. Should the need surface, `__traverse__` and `__clear__` will
281 /// need to be written as per the `rust-cpython` docs on GC integration.
282 ///
283 305 /// # Parameters
284 306 ///
285 307 /// * `$name` is the same identifier used in for `py_class!` macro call.
286 308 /// * `$inner_struct` is the identifier of the underlying Rust struct
287 309 /// * `$data_member` is the identifier of the data member of `$inner_struct`
288 310 /// that will be shared.
289 311 /// * `$shared_accessor` is the function name to be generated, which allows
290 312 /// safe access to the data member.
291 313 ///
292 314 /// # Safety
293 315 ///
294 316 /// `$data_member` must persist while the `$name` object is alive. In other
295 317 /// words, it must be an accessor to a data field of the Python object.
296 318 ///
297 319 /// # Example
298 320 ///
299 321 /// ```
300 322 /// struct MyStruct {
301 323 /// inner: Vec<u32>;
302 324 /// }
303 325 ///
304 326 /// py_class!(pub class MyType |py| {
305 327 /// data inner: PySharedRefCell<MyStruct>;
306 328 /// });
307 329 ///
308 330 /// py_shared_ref!(MyType, MyStruct, inner, inner_shared);
309 331 /// ```
310 332 macro_rules! py_shared_ref {
311 333 (
312 334 $name: ident,
313 335 $inner_struct: ident,
314 336 $data_member: ident,
315 337 $shared_accessor: ident
316 338 ) => {
317 339 impl $name {
318 340 /// Returns a safe reference to the shared `$data_member`.
319 341 ///
320 342 /// This function guarantees that `PySharedRef` is created with
321 343 /// the valid `self` and `self.$data_member(py)` pair.
322 344 fn $shared_accessor<'a>(
323 345 &'a self,
324 346 py: Python<'a>,
325 347 ) -> $crate::ref_sharing::PySharedRef<'a, $inner_struct> {
326 348 use cpython::PythonObject;
327 349 use $crate::ref_sharing::PySharedRef;
328 350 let owner = self.as_object();
329 351 let data = self.$data_member(py);
330 352 unsafe { PySharedRef::new(py, owner, data) }
331 353 }
332 354 }
333 355 };
334 356 }
335 357
336 358 /// Manage immutable references to `PyObject` leaked into Python iterators.
337 359 ///
338 360 /// This reference will be invalidated once the original value is mutably
339 361 /// borrowed.
340 362 pub struct PyLeaked<T> {
341 363 inner: PyObject,
342 364 data: Option<T>,
343 365 py_shared_state: &'static PySharedState,
344 366 /// Generation counter of data `T` captured when PyLeaked is created.
345 367 generation: usize,
346 368 }
347 369
348 370 // DO NOT implement Deref for PyLeaked<T>! Dereferencing PyLeaked
349 371 // without taking Python GIL wouldn't be safe. Also, the underling reference
350 372 // is invalid if generation != py_shared_state.generation.
351 373
352 374 impl<T> PyLeaked<T> {
353 375 /// # Safety
354 376 ///
355 377 /// The `py_shared_state` must be owned by the `inner` Python object.
356 378 fn new(
357 379 py: Python,
358 380 inner: &PyObject,
359 381 data: T,
360 382 py_shared_state: &'static PySharedState,
361 383 ) -> Self {
362 384 Self {
363 385 inner: inner.clone_ref(py),
364 386 data: Some(data),
365 387 py_shared_state,
366 388 generation: py_shared_state.current_generation(py),
367 389 }
368 390 }
369 391
370 392 /// Immutably borrows the wrapped value.
371 393 ///
372 394 /// Borrowing fails if the underlying reference has been invalidated.
373 395 pub fn try_borrow<'a>(
374 396 &'a self,
375 397 py: Python<'a>,
376 398 ) -> PyResult<PyLeakedRef<'a, T>> {
377 399 self.validate_generation(py)?;
378 400 Ok(PyLeakedRef {
379 _py: py,
401 _borrow: BorrowPyShared::new(py, self.py_shared_state),
380 402 data: self.data.as_ref().unwrap(),
381 403 })
382 404 }
383 405
384 406 /// Mutably borrows the wrapped value.
385 407 ///
386 408 /// Borrowing fails if the underlying reference has been invalidated.
387 409 ///
388 410 /// Typically `T` is an iterator. If `T` is an immutable reference,
389 411 /// `get_mut()` is useless since the inner value can't be mutated.
390 412 pub fn try_borrow_mut<'a>(
391 413 &'a mut self,
392 414 py: Python<'a>,
393 415 ) -> PyResult<PyLeakedRefMut<'a, T>> {
394 416 self.validate_generation(py)?;
395 417 Ok(PyLeakedRefMut {
396 _py: py,
418 _borrow: BorrowPyShared::new(py, self.py_shared_state),
397 419 data: self.data.as_mut().unwrap(),
398 420 })
399 421 }
400 422
401 423 /// Converts the inner value by the given function.
402 424 ///
403 425 /// Typically `T` is a static reference to a container, and `U` is an
404 426 /// iterator of that container.
405 427 ///
406 428 /// # Panics
407 429 ///
408 430 /// Panics if the underlying reference has been invalidated.
409 431 ///
410 432 /// This is typically called immediately after the `PyLeaked` is obtained.
411 433 /// In which case, the reference must be valid and no panic would occur.
412 434 ///
413 435 /// # Safety
414 436 ///
415 437 /// The lifetime of the object passed in to the function `f` is cheated.
416 438 /// It's typically a static reference, but is valid only while the
417 439 /// corresponding `PyLeaked` is alive. Do not copy it out of the
418 440 /// function call.
419 441 pub unsafe fn map<U>(
420 442 mut self,
421 443 py: Python,
422 444 f: impl FnOnce(T) -> U,
423 445 ) -> PyLeaked<U> {
424 446 // Needs to test the generation value to make sure self.data reference
425 447 // is still intact.
426 448 self.validate_generation(py)
427 449 .expect("map() over invalidated leaked reference");
428 450
429 451 // f() could make the self.data outlive. That's why map() is unsafe.
430 452 // In order to make this function safe, maybe we'll need a way to
431 453 // temporarily restrict the lifetime of self.data and translate the
432 454 // returned object back to Something<'static>.
433 455 let new_data = f(self.data.take().unwrap());
434 456 PyLeaked {
435 457 inner: self.inner.clone_ref(py),
436 458 data: Some(new_data),
437 459 py_shared_state: self.py_shared_state,
438 460 generation: self.generation,
439 461 }
440 462 }
441 463
442 464 fn validate_generation(&self, py: Python) -> PyResult<()> {
443 465 if self.py_shared_state.current_generation(py) == self.generation {
444 466 Ok(())
445 467 } else {
446 468 Err(PyErr::new::<exc::RuntimeError, _>(
447 469 py,
448 470 "Cannot access to leaked reference after mutation",
449 471 ))
450 472 }
451 473 }
452 474 }
453 475
454 impl<T> Drop for PyLeaked<T> {
455 fn drop(&mut self) {
456 // py_shared_state should be alive since we do have
457 // a Python reference to the owner object. Taking GIL makes
458 // sure that the state is only accessed by this thread.
459 let gil = Python::acquire_gil();
460 let py = gil.python();
461 if self.data.is_none() {
462 return; // moved to another PyLeaked
463 }
464 self.py_shared_state.decrease_leak_count(py, false);
465 }
466 }
467
468 476 /// Immutably borrowed reference to a leaked value.
469 477 pub struct PyLeakedRef<'a, T> {
470 _py: Python<'a>,
478 _borrow: BorrowPyShared<'a>,
471 479 data: &'a T,
472 480 }
473 481
474 482 impl<T> Deref for PyLeakedRef<'_, T> {
475 483 type Target = T;
476 484
477 485 fn deref(&self) -> &T {
478 486 self.data
479 487 }
480 488 }
481 489
482 490 /// Mutably borrowed reference to a leaked value.
483 491 pub struct PyLeakedRefMut<'a, T> {
484 _py: Python<'a>,
492 _borrow: BorrowPyShared<'a>,
485 493 data: &'a mut T,
486 494 }
487 495
488 496 impl<T> Deref for PyLeakedRefMut<'_, T> {
489 497 type Target = T;
490 498
491 499 fn deref(&self) -> &T {
492 500 self.data
493 501 }
494 502 }
495 503
496 504 impl<T> DerefMut for PyLeakedRefMut<'_, T> {
497 505 fn deref_mut(&mut self) -> &mut T {
498 506 self.data
499 507 }
500 508 }
501 509
502 510 /// Defines a `py_class!` that acts as a Python iterator over a Rust iterator.
503 511 ///
504 512 /// TODO: this is a bit awkward to use, and a better (more complicated)
505 513 /// procedural macro would simplify the interface a lot.
506 514 ///
507 515 /// # Parameters
508 516 ///
509 517 /// * `$name` is the identifier to give to the resulting Rust struct.
510 518 /// * `$leaked` corresponds to `$leaked` in the matching `py_shared_ref!` call.
511 519 /// * `$iterator_type` is the type of the Rust iterator.
512 520 /// * `$success_func` is a function for processing the Rust `(key, value)`
513 521 /// tuple on iteration success, turning it into something Python understands.
514 522 /// * `$success_func` is the return type of `$success_func`
515 523 ///
516 524 /// # Example
517 525 ///
518 526 /// ```
519 527 /// struct MyStruct {
520 528 /// inner: HashMap<Vec<u8>, Vec<u8>>;
521 529 /// }
522 530 ///
523 531 /// py_class!(pub class MyType |py| {
524 532 /// data inner: PySharedRefCell<MyStruct>;
525 533 ///
526 534 /// def __iter__(&self) -> PyResult<MyTypeItemsIterator> {
527 535 /// let leaked_ref = self.inner_shared(py).leak_immutable()?;
528 536 /// MyTypeItemsIterator::from_inner(
529 537 /// py,
530 538 /// unsafe { leaked_ref.map(py, |o| o.iter()) },
531 539 /// )
532 540 /// }
533 541 /// });
534 542 ///
535 543 /// impl MyType {
536 544 /// fn translate_key_value(
537 545 /// py: Python,
538 546 /// res: (&Vec<u8>, &Vec<u8>),
539 547 /// ) -> PyResult<Option<(PyBytes, PyBytes)>> {
540 548 /// let (f, entry) = res;
541 549 /// Ok(Some((
542 550 /// PyBytes::new(py, f),
543 551 /// PyBytes::new(py, entry),
544 552 /// )))
545 553 /// }
546 554 /// }
547 555 ///
548 556 /// py_shared_ref!(MyType, MyStruct, inner, MyTypeLeakedRef);
549 557 ///
550 558 /// py_shared_iterator!(
551 559 /// MyTypeItemsIterator,
552 560 /// PyLeaked<HashMap<'static, Vec<u8>, Vec<u8>>>,
553 561 /// MyType::translate_key_value,
554 562 /// Option<(PyBytes, PyBytes)>
555 563 /// );
556 564 /// ```
557 565 macro_rules! py_shared_iterator {
558 566 (
559 567 $name: ident,
560 568 $leaked: ty,
561 569 $success_func: expr,
562 570 $success_type: ty
563 571 ) => {
564 572 py_class!(pub class $name |py| {
565 573 data inner: RefCell<Option<$leaked>>;
566 574
567 575 def __next__(&self) -> PyResult<$success_type> {
568 576 let mut inner_opt = self.inner(py).borrow_mut();
569 577 if let Some(leaked) = inner_opt.as_mut() {
570 578 let mut iter = leaked.try_borrow_mut(py)?;
571 579 match iter.next() {
572 580 None => {
581 drop(iter);
573 582 // replace Some(inner) by None, drop $leaked
574 583 inner_opt.take();
575 584 Ok(None)
576 585 }
577 586 Some(res) => {
578 587 $success_func(py, res)
579 588 }
580 589 }
581 590 } else {
582 591 Ok(None)
583 592 }
584 593 }
585 594
586 595 def __iter__(&self) -> PyResult<Self> {
587 596 Ok(self.clone_ref(py))
588 597 }
589 598 });
590 599
591 600 impl $name {
592 601 pub fn from_inner(
593 602 py: Python,
594 603 leaked: $leaked,
595 604 ) -> PyResult<Self> {
596 605 Self::create_instance(
597 606 py,
598 607 RefCell::new(Some(leaked)),
599 608 )
600 609 }
601 610 }
602 611 };
603 612 }
604 613
605 614 #[cfg(test)]
606 615 #[cfg(any(feature = "python27-bin", feature = "python3-bin"))]
607 616 mod test {
608 617 use super::*;
609 618 use cpython::{GILGuard, Python};
610 619
611 620 py_class!(class Owner |py| {
612 621 data string: PySharedRefCell<String>;
613 622 });
614 623 py_shared_ref!(Owner, String, string, string_shared);
615 624
616 625 fn prepare_env() -> (GILGuard, Owner) {
617 626 let gil = Python::acquire_gil();
618 627 let py = gil.python();
619 628 let owner =
620 629 Owner::create_instance(py, PySharedRefCell::new("new".to_owned()))
621 630 .unwrap();
622 631 (gil, owner)
623 632 }
624 633
625 634 #[test]
626 635 fn test_leaked_borrow() {
627 636 let (gil, owner) = prepare_env();
628 637 let py = gil.python();
629 638 let leaked = owner.string_shared(py).leak_immutable().unwrap();
630 639 let leaked_ref = leaked.try_borrow(py).unwrap();
631 640 assert_eq!(*leaked_ref, "new");
632 641 }
633 642
634 643 #[test]
635 644 fn test_leaked_borrow_mut() {
636 645 let (gil, owner) = prepare_env();
637 646 let py = gil.python();
638 647 let leaked = owner.string_shared(py).leak_immutable().unwrap();
639 648 let mut leaked_iter = unsafe { leaked.map(py, |s| s.chars()) };
640 649 let mut leaked_ref = leaked_iter.try_borrow_mut(py).unwrap();
641 650 assert_eq!(leaked_ref.next(), Some('n'));
642 651 assert_eq!(leaked_ref.next(), Some('e'));
643 652 assert_eq!(leaked_ref.next(), Some('w'));
644 653 assert_eq!(leaked_ref.next(), None);
645 654 }
646 655
647 656 #[test]
648 657 fn test_leaked_borrow_after_mut() {
649 658 let (gil, owner) = prepare_env();
650 659 let py = gil.python();
651 660 let leaked = owner.string_shared(py).leak_immutable().unwrap();
652 owner.string(py).py_shared_state.leak_count.replace(0); // XXX cheat
653 661 owner.string_shared(py).borrow_mut().unwrap().clear();
654 owner.string(py).py_shared_state.leak_count.replace(1); // XXX cheat
655 662 assert!(leaked.try_borrow(py).is_err());
656 663 }
657 664
658 665 #[test]
659 666 fn test_leaked_borrow_mut_after_mut() {
660 667 let (gil, owner) = prepare_env();
661 668 let py = gil.python();
662 669 let leaked = owner.string_shared(py).leak_immutable().unwrap();
663 670 let mut leaked_iter = unsafe { leaked.map(py, |s| s.chars()) };
664 owner.string(py).py_shared_state.leak_count.replace(0); // XXX cheat
665 671 owner.string_shared(py).borrow_mut().unwrap().clear();
666 owner.string(py).py_shared_state.leak_count.replace(1); // XXX cheat
667 672 assert!(leaked_iter.try_borrow_mut(py).is_err());
668 673 }
669 674
670 675 #[test]
671 676 #[should_panic(expected = "map() over invalidated leaked reference")]
672 677 fn test_leaked_map_after_mut() {
673 678 let (gil, owner) = prepare_env();
674 679 let py = gil.python();
675 680 let leaked = owner.string_shared(py).leak_immutable().unwrap();
676 owner.string(py).py_shared_state.leak_count.replace(0); // XXX cheat
677 681 owner.string_shared(py).borrow_mut().unwrap().clear();
678 owner.string(py).py_shared_state.leak_count.replace(1); // XXX cheat
679 682 let _leaked_iter = unsafe { leaked.map(py, |s| s.chars()) };
680 683 }
681 684
682 685 #[test]
683 fn test_borrow_mut_while_leaked() {
686 fn test_borrow_mut_while_leaked_ref() {
684 687 let (gil, owner) = prepare_env();
685 688 let py = gil.python();
686 689 assert!(owner.string_shared(py).borrow_mut().is_ok());
687 let _leaked = owner.string_shared(py).leak_immutable().unwrap();
688 // TODO: will be allowed
690 let leaked = owner.string_shared(py).leak_immutable().unwrap();
691 {
692 let _leaked_ref = leaked.try_borrow(py).unwrap();
693 assert!(owner.string_shared(py).borrow_mut().is_err());
694 {
695 let _leaked_ref2 = leaked.try_borrow(py).unwrap();
696 assert!(owner.string_shared(py).borrow_mut().is_err());
697 }
689 698 assert!(owner.string_shared(py).borrow_mut().is_err());
690 699 }
700 assert!(owner.string_shared(py).borrow_mut().is_ok());
691 701 }
702
703 #[test]
704 fn test_borrow_mut_while_leaked_ref_mut() {
705 let (gil, owner) = prepare_env();
706 let py = gil.python();
707 assert!(owner.string_shared(py).borrow_mut().is_ok());
708 let leaked = owner.string_shared(py).leak_immutable().unwrap();
709 let mut leaked_iter = unsafe { leaked.map(py, |s| s.chars()) };
710 {
711 let _leaked_ref = leaked_iter.try_borrow_mut(py).unwrap();
712 assert!(owner.string_shared(py).borrow_mut().is_err());
713 }
714 assert!(owner.string_shared(py).borrow_mut().is_ok());
715 }
716 }
General Comments 0
You need to be logged in to leave comments. Login now