vfs: add a 'reljoin' function for joining relative paths...
Pierre-Yves David
r23581:aed981c7 default
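The first hunk below adds `reljoin` to `abstractvfs` in mercurial/scmutil.py. Unlike `vfs.join`, which anchors its argument at the vfs base, `reljoin` joins path fragments without injecting the base, so the result stays relative and can later be resolved against whichever vfs is appropriate; routing the join through the vfs object also leaves room for subclasses to apply special path-encoding rules. A minimal sketch of the contrast (the `fakevfs` class is hypothetical, not part of Mercurial):

    import os

    class fakevfs(object):
        def __init__(self, base):
            self.base = base

        def join(self, path):
            # absolute: the vfs base is injected
            return os.path.join(self.base, path) if path else self.base

        def reljoin(self, *paths):
            # relative: no base injected; a subclass may override this
            # to handle strange path encodings
            return os.path.join(*paths)

    v = fakevfs('/repo/.hg/store')
    assert v.reljoin('data', 'foo.i') == os.path.join('data', 'foo.i')
    assert v.join(v.reljoin('data', 'foo.i')) == os.path.join(v.base, 'data', 'foo.i')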
@@ -1,1095 +1,1102 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 11 import pathutil
12 12 import match as matchmod
13 13 import os, errno, re, glob, tempfile
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
23 23 class status(tuple):
24 24 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
25 25 and 'ignored' properties are only relevant to the working copy.
26 26 '''
27 27
28 28 __slots__ = ()
29 29
30 30 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
31 31 clean):
32 32 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
33 33 ignored, clean))
34 34
35 35 @property
36 36 def modified(self):
37 37 '''files that have been modified'''
38 38 return self[0]
39 39
40 40 @property
41 41 def added(self):
42 42 '''files that have been added'''
43 43 return self[1]
44 44
45 45 @property
46 46 def removed(self):
47 47 '''files that have been removed'''
48 48 return self[2]
49 49
50 50 @property
51 51 def deleted(self):
52 52 '''files that are in the dirstate, but have been deleted from the
53 53 working copy (aka "missing")
54 54 '''
55 55 return self[3]
56 56
57 57 @property
58 58 def unknown(self):
59 59 '''files not in the dirstate that are not ignored'''
60 60 return self[4]
61 61
62 62 @property
63 63 def ignored(self):
64 64 '''files not in the dirstate that are ignored (by _dirignore())'''
65 65 return self[5]
66 66
67 67 @property
68 68 def clean(self):
69 69 '''files that have not been modified'''
70 70 return self[6]
71 71
72 72 def __repr__(self, *args, **kwargs):
73 73 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
74 74 'unknown=%r, ignored=%r, clean=%r>') % self)
75 75
76 76 def itersubrepos(ctx1, ctx2):
77 77 """find subrepos in ctx1 or ctx2"""
78 78 # Create a (subpath, ctx) mapping where we prefer subpaths from
79 79 # ctx1. The subpaths from ctx2 are important when the .hgsub file
80 80 # has been modified (in ctx2) but not yet committed (in ctx1).
81 81 subpaths = dict.fromkeys(ctx2.substate, ctx2)
82 82 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
83 83 for subpath, ctx in sorted(subpaths.iteritems()):
84 84 yield subpath, ctx.sub(subpath)
85 85
86 86 def nochangesfound(ui, repo, excluded=None):
87 87 '''Report no changes for push/pull, excluded is None or a list of
88 88 nodes excluded from the push/pull.
89 89 '''
90 90 secretlist = []
91 91 if excluded:
92 92 for n in excluded:
93 93 if n not in repo:
94 94 # discovery should not have included the filtered revision,
95 95 # we have to explicitly exclude it until discovery is cleaned up.
96 96 continue
97 97 ctx = repo[n]
98 98 if ctx.phase() >= phases.secret and not ctx.extinct():
99 99 secretlist.append(n)
100 100
101 101 if secretlist:
102 102 ui.status(_("no changes found (ignored %d secret changesets)\n")
103 103 % len(secretlist))
104 104 else:
105 105 ui.status(_("no changes found\n"))
106 106
107 107 def checknewlabel(repo, lbl, kind):
108 108 # Do not use the "kind" parameter in ui output.
109 109 # It makes strings difficult to translate.
110 110 if lbl in ['tip', '.', 'null']:
111 111 raise util.Abort(_("the name '%s' is reserved") % lbl)
112 112 for c in (':', '\0', '\n', '\r'):
113 113 if c in lbl:
114 114 raise util.Abort(_("%r cannot be used in a name") % c)
115 115 try:
116 116 int(lbl)
117 117 raise util.Abort(_("cannot use an integer as a name"))
118 118 except ValueError:
119 119 pass
120 120
121 121 def checkfilename(f):
122 122 '''Check that the filename f is an acceptable filename for a tracked file'''
123 123 if '\r' in f or '\n' in f:
124 124 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
125 125
126 126 def checkportable(ui, f):
127 127 '''Check if filename f is portable and warn or abort depending on config'''
128 128 checkfilename(f)
129 129 abort, warn = checkportabilityalert(ui)
130 130 if abort or warn:
131 131 msg = util.checkwinfilename(f)
132 132 if msg:
133 133 msg = "%s: %r" % (msg, f)
134 134 if abort:
135 135 raise util.Abort(msg)
136 136 ui.warn(_("warning: %s\n") % msg)
137 137
138 138 def checkportabilityalert(ui):
139 139 '''check if the user's config requests nothing, a warning, or abort for
140 140 non-portable filenames'''
141 141 val = ui.config('ui', 'portablefilenames', 'warn')
142 142 lval = val.lower()
143 143 bval = util.parsebool(val)
144 144 abort = os.name == 'nt' or lval == 'abort'
145 145 warn = bval or lval == 'warn'
146 146 if bval is None and not (warn or abort or lval == 'ignore'):
147 147 raise error.ConfigError(
148 148 _("ui.portablefilenames value is invalid ('%s')") % val)
149 149 return abort, warn
150 150
151 151 class casecollisionauditor(object):
152 152 def __init__(self, ui, abort, dirstate):
153 153 self._ui = ui
154 154 self._abort = abort
155 155 allfiles = '\0'.join(dirstate._map)
156 156 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
157 157 self._dirstate = dirstate
158 158 # The purpose of _newfiles is so that we don't complain about
159 159 # case collisions if someone were to call this object with the
160 160 # same filename twice.
161 161 self._newfiles = set()
162 162
163 163 def __call__(self, f):
164 164 if f in self._newfiles:
165 165 return
166 166 fl = encoding.lower(f)
167 167 if fl in self._loweredfiles and f not in self._dirstate:
168 168 msg = _('possible case-folding collision for %s') % f
169 169 if self._abort:
170 170 raise util.Abort(msg)
171 171 self._ui.warn(_("warning: %s\n") % msg)
172 172 self._loweredfiles.add(fl)
173 173 self._newfiles.add(f)
174 174
175 175 class abstractvfs(object):
176 176 """Abstract base class; cannot be instantiated"""
177 177
178 178 def __init__(self, *args, **kwargs):
179 179 '''Prevent instantiation; don't call this from subclasses.'''
180 180 raise NotImplementedError('attempted instantiating ' + str(type(self)))
181 181
182 182 def tryread(self, path):
183 183 '''gracefully return an empty string for missing files'''
184 184 try:
185 185 return self.read(path)
186 186 except IOError, inst:
187 187 if inst.errno != errno.ENOENT:
188 188 raise
189 189 return ""
190 190
191 191 def tryreadlines(self, path, mode='rb'):
192 192 '''gracefully return an empty array for missing files'''
193 193 try:
194 194 return self.readlines(path, mode=mode)
195 195 except IOError, inst:
196 196 if inst.errno != errno.ENOENT:
197 197 raise
198 198 return []
199 199
200 200 def open(self, path, mode="r", text=False, atomictemp=False,
201 201 notindexed=False):
202 202 '''Open ``path`` file, which is relative to vfs root.
203 203
204 204 Newly created directories are marked as "not to be indexed by
205 205 the content indexing service", if ``notindexed`` is specified
206 206 for "write" mode access.
207 207 '''
208 208 self.open = self.__call__
209 209 return self.__call__(path, mode, text, atomictemp, notindexed)
210 210
211 211 def read(self, path):
212 212 fp = self(path, 'rb')
213 213 try:
214 214 return fp.read()
215 215 finally:
216 216 fp.close()
217 217
218 218 def readlines(self, path, mode='rb'):
219 219 fp = self(path, mode=mode)
220 220 try:
221 221 return fp.readlines()
222 222 finally:
223 223 fp.close()
224 224
225 225 def write(self, path, data):
226 226 fp = self(path, 'wb')
227 227 try:
228 228 return fp.write(data)
229 229 finally:
230 230 fp.close()
231 231
232 232 def writelines(self, path, data, mode='wb', notindexed=False):
233 233 fp = self(path, mode=mode, notindexed=notindexed)
234 234 try:
235 235 return fp.writelines(data)
236 236 finally:
237 237 fp.close()
238 238
239 239 def append(self, path, data):
240 240 fp = self(path, 'ab')
241 241 try:
242 242 return fp.write(data)
243 243 finally:
244 244 fp.close()
245 245
246 246 def chmod(self, path, mode):
247 247 return os.chmod(self.join(path), mode)
248 248
249 249 def exists(self, path=None):
250 250 return os.path.exists(self.join(path))
251 251
252 252 def fstat(self, fp):
253 253 return util.fstat(fp)
254 254
255 255 def isdir(self, path=None):
256 256 return os.path.isdir(self.join(path))
257 257
258 258 def isfile(self, path=None):
259 259 return os.path.isfile(self.join(path))
260 260
261 261 def islink(self, path=None):
262 262 return os.path.islink(self.join(path))
263 263
264 def reljoin(self, *paths):
265 """join various elements of a path together (as os.path.join would do)
266
267 The vfs base is not injected so that the path stays relative. This
268 exists to allow handling of strange encodings if needed."""
269 return os.path.join(*paths)
270
264 271 def lexists(self, path=None):
265 272 return os.path.lexists(self.join(path))
266 273
267 274 def lstat(self, path=None):
268 275 return os.lstat(self.join(path))
269 276
270 277 def listdir(self, path=None):
271 278 return os.listdir(self.join(path))
272 279
273 280 def makedir(self, path=None, notindexed=True):
274 281 return util.makedir(self.join(path), notindexed)
275 282
276 283 def makedirs(self, path=None, mode=None):
277 284 return util.makedirs(self.join(path), mode)
278 285
279 286 def makelock(self, info, path):
280 287 return util.makelock(info, self.join(path))
281 288
282 289 def mkdir(self, path=None):
283 290 return os.mkdir(self.join(path))
284 291
285 292 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
286 293 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
287 294 dir=self.join(dir), text=text)
288 295 dname, fname = util.split(name)
289 296 if dir:
290 297 return fd, os.path.join(dir, fname)
291 298 else:
292 299 return fd, fname
293 300
294 301 def readdir(self, path=None, stat=None, skip=None):
295 302 return osutil.listdir(self.join(path), stat, skip)
296 303
297 304 def readlock(self, path):
298 305 return util.readlock(self.join(path))
299 306
300 307 def rename(self, src, dst):
301 308 return util.rename(self.join(src), self.join(dst))
302 309
303 310 def readlink(self, path):
304 311 return os.readlink(self.join(path))
305 312
306 313 def setflags(self, path, l, x):
307 314 return util.setflags(self.join(path), l, x)
308 315
309 316 def stat(self, path=None):
310 317 return os.stat(self.join(path))
311 318
312 319 def unlink(self, path=None):
313 320 return util.unlink(self.join(path))
314 321
315 322 def unlinkpath(self, path=None, ignoremissing=False):
316 323 return util.unlinkpath(self.join(path), ignoremissing)
317 324
318 325 def utime(self, path=None, t=None):
319 326 return os.utime(self.join(path), t)
320 327
321 328 class vfs(abstractvfs):
322 329 '''Operate files relative to a base directory
323 330
324 331 This class is used to hide the details of COW semantics and
325 332 remote file access from higher level code.
326 333 '''
327 334 def __init__(self, base, audit=True, expandpath=False, realpath=False):
328 335 if expandpath:
329 336 base = util.expandpath(base)
330 337 if realpath:
331 338 base = os.path.realpath(base)
332 339 self.base = base
333 340 self._setmustaudit(audit)
334 341 self.createmode = None
335 342 self._trustnlink = None
336 343
337 344 def _getmustaudit(self):
338 345 return self._audit
339 346
340 347 def _setmustaudit(self, onoff):
341 348 self._audit = onoff
342 349 if onoff:
343 350 self.audit = pathutil.pathauditor(self.base)
344 351 else:
345 352 self.audit = util.always
346 353
347 354 mustaudit = property(_getmustaudit, _setmustaudit)
348 355
349 356 @util.propertycache
350 357 def _cansymlink(self):
351 358 return util.checklink(self.base)
352 359
353 360 @util.propertycache
354 361 def _chmod(self):
355 362 return util.checkexec(self.base)
356 363
357 364 def _fixfilemode(self, name):
358 365 if self.createmode is None or not self._chmod:
359 366 return
360 367 os.chmod(name, self.createmode & 0666)
361 368
362 369 def __call__(self, path, mode="r", text=False, atomictemp=False,
363 370 notindexed=False):
364 371 '''Open ``path`` file, which is relative to vfs root.
365 372
366 373 Newly created directories are marked as "not to be indexed by
367 374 the content indexing service", if ``notindexed`` is specified
368 375 for "write" mode access.
369 376 '''
370 377 if self._audit:
371 378 r = util.checkosfilename(path)
372 379 if r:
373 380 raise util.Abort("%s: %r" % (r, path))
374 381 self.audit(path)
375 382 f = self.join(path)
376 383
377 384 if not text and "b" not in mode:
378 385 mode += "b" # for that other OS
379 386
380 387 nlink = -1
381 388 if mode not in ('r', 'rb'):
382 389 dirname, basename = util.split(f)
383 390 # If basename is empty, then the path is malformed because it points
384 391 # to a directory. Let the posixfile() call below raise IOError.
385 392 if basename:
386 393 if atomictemp:
387 394 util.ensuredirs(dirname, self.createmode, notindexed)
388 395 return util.atomictempfile(f, mode, self.createmode)
389 396 try:
390 397 if 'w' in mode:
391 398 util.unlink(f)
392 399 nlink = 0
393 400 else:
394 401 # nlinks() may behave differently for files on Windows
395 402 # shares if the file is open.
396 403 fd = util.posixfile(f)
397 404 nlink = util.nlinks(f)
398 405 if nlink < 1:
399 406 nlink = 2 # force mktempcopy (issue1922)
400 407 fd.close()
401 408 except (OSError, IOError), e:
402 409 if e.errno != errno.ENOENT:
403 410 raise
404 411 nlink = 0
405 412 util.ensuredirs(dirname, self.createmode, notindexed)
406 413 if nlink > 0:
407 414 if self._trustnlink is None:
408 415 self._trustnlink = nlink > 1 or util.checknlink(f)
409 416 if nlink > 1 or not self._trustnlink:
410 417 util.rename(util.mktempcopy(f), f)
411 418 fp = util.posixfile(f, mode)
412 419 if nlink == 0:
413 420 self._fixfilemode(f)
414 421 return fp
415 422
416 423 def symlink(self, src, dst):
417 424 self.audit(dst)
418 425 linkname = self.join(dst)
419 426 try:
420 427 os.unlink(linkname)
421 428 except OSError:
422 429 pass
423 430
424 431 util.ensuredirs(os.path.dirname(linkname), self.createmode)
425 432
426 433 if self._cansymlink:
427 434 try:
428 435 os.symlink(src, linkname)
429 436 except OSError, err:
430 437 raise OSError(err.errno, _('could not symlink to %r: %s') %
431 438 (src, err.strerror), linkname)
432 439 else:
433 440 self.write(dst, src)
434 441
435 442 def join(self, path):
436 443 if path:
437 444 return os.path.join(self.base, path)
438 445 else:
439 446 return self.base
440 447
441 448 opener = vfs
442 449
443 450 class auditvfs(object):
444 451 def __init__(self, vfs):
445 452 self.vfs = vfs
446 453
447 454 def _getmustaudit(self):
448 455 return self.vfs.mustaudit
449 456
450 457 def _setmustaudit(self, onoff):
451 458 self.vfs.mustaudit = onoff
452 459
453 460 mustaudit = property(_getmustaudit, _setmustaudit)
454 461
455 462 class filtervfs(abstractvfs, auditvfs):
456 463 '''Wrapper vfs for filtering filenames with a function.'''
457 464
458 465 def __init__(self, vfs, filter):
459 466 auditvfs.__init__(self, vfs)
460 467 self._filter = filter
461 468
462 469 def __call__(self, path, *args, **kwargs):
463 470 return self.vfs(self._filter(path), *args, **kwargs)
464 471
465 472 def join(self, path):
466 473 if path:
467 474 return self.vfs.join(self._filter(path))
468 475 else:
469 476 return self.vfs.join(path)
470 477
471 478 filteropener = filtervfs
472 479
473 480 class readonlyvfs(abstractvfs, auditvfs):
474 481 '''Wrapper vfs preventing any writing.'''
475 482
476 483 def __init__(self, vfs):
477 484 auditvfs.__init__(self, vfs)
478 485
479 486 def __call__(self, path, mode='r', *args, **kw):
480 487 if mode not in ('r', 'rb'):
481 488 raise util.Abort('this vfs is read only')
482 489 return self.vfs(path, mode, *args, **kw)
483 490
484 491
485 492 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
486 493 '''yield every hg repository under path, always recursively.
487 494 The recurse flag will only control recursion into repo working dirs'''
488 495 def errhandler(err):
489 496 if err.filename == path:
490 497 raise err
491 498 samestat = getattr(os.path, 'samestat', None)
492 499 if followsym and samestat is not None:
493 500 def adddir(dirlst, dirname):
494 501 match = False
495 502 dirstat = os.stat(dirname)
496 503 for lstdirstat in dirlst:
497 504 if samestat(dirstat, lstdirstat):
498 505 match = True
499 506 break
500 507 if not match:
501 508 dirlst.append(dirstat)
502 509 return not match
503 510 else:
504 511 followsym = False
505 512
506 513 if (seen_dirs is None) and followsym:
507 514 seen_dirs = []
508 515 adddir(seen_dirs, path)
509 516 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
510 517 dirs.sort()
511 518 if '.hg' in dirs:
512 519 yield root # found a repository
513 520 qroot = os.path.join(root, '.hg', 'patches')
514 521 if os.path.isdir(os.path.join(qroot, '.hg')):
515 522 yield qroot # we have a patch queue repo here
516 523 if recurse:
517 524 # avoid recursing inside the .hg directory
518 525 dirs.remove('.hg')
519 526 else:
520 527 dirs[:] = [] # don't descend further
521 528 elif followsym:
522 529 newdirs = []
523 530 for d in dirs:
524 531 fname = os.path.join(root, d)
525 532 if adddir(seen_dirs, fname):
526 533 if os.path.islink(fname):
527 534 for hgname in walkrepos(fname, True, seen_dirs):
528 535 yield hgname
529 536 else:
530 537 newdirs.append(d)
531 538 dirs[:] = newdirs
532 539
533 540 def osrcpath():
534 541 '''return default os-specific hgrc search path'''
535 542 path = []
536 543 defaultpath = os.path.join(util.datapath, 'default.d')
537 544 if os.path.isdir(defaultpath):
538 545 for f, kind in osutil.listdir(defaultpath):
539 546 if f.endswith('.rc'):
540 547 path.append(os.path.join(defaultpath, f))
541 548 path.extend(systemrcpath())
542 549 path.extend(userrcpath())
543 550 path = [os.path.normpath(f) for f in path]
544 551 return path
545 552
546 553 _rcpath = None
547 554
548 555 def rcpath():
549 556 '''return hgrc search path. if env var HGRCPATH is set, use it.
550 557 for each item in path, if directory, use files ending in .rc,
551 558 else use item.
552 559 make HGRCPATH empty to only look in .hg/hgrc of current repo.
553 560 if no HGRCPATH, use default os-specific path.'''
554 561 global _rcpath
555 562 if _rcpath is None:
556 563 if 'HGRCPATH' in os.environ:
557 564 _rcpath = []
558 565 for p in os.environ['HGRCPATH'].split(os.pathsep):
559 566 if not p:
560 567 continue
561 568 p = util.expandpath(p)
562 569 if os.path.isdir(p):
563 570 for f, kind in osutil.listdir(p):
564 571 if f.endswith('.rc'):
565 572 _rcpath.append(os.path.join(p, f))
566 573 else:
567 574 _rcpath.append(p)
568 575 else:
569 576 _rcpath = osrcpath()
570 577 return _rcpath
571 578
572 579 def revsingle(repo, revspec, default='.'):
573 580 if not revspec and revspec != 0:
574 581 return repo[default]
575 582
576 583 l = revrange(repo, [revspec])
577 584 if not l:
578 585 raise util.Abort(_('empty revision set'))
579 586 return repo[l.last()]
580 587
581 588 def revpair(repo, revs):
582 589 if not revs:
583 590 return repo.dirstate.p1(), None
584 591
585 592 l = revrange(repo, revs)
586 593
587 594 if not l:
588 595 first = second = None
589 596 elif l.isascending():
590 597 first = l.min()
591 598 second = l.max()
592 599 elif l.isdescending():
593 600 first = l.max()
594 601 second = l.min()
595 602 else:
596 603 first = l.first()
597 604 second = l.last()
598 605
599 606 if first is None:
600 607 raise util.Abort(_('empty revision range'))
601 608
602 609 if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
603 610 return repo.lookup(first), None
604 611
605 612 return repo.lookup(first), repo.lookup(second)
606 613
607 614 _revrangesep = ':'
608 615
609 616 def revrange(repo, revs):
610 617 """Yield revision as strings from a list of revision specifications."""
611 618
612 619 def revfix(repo, val, defval):
613 620 if not val and val != 0 and defval is not None:
614 621 return defval
615 622 return repo[val].rev()
616 623
617 624 seen, l = set(), revset.baseset([])
618 625 for spec in revs:
619 626 if l and not seen:
620 627 seen = set(l)
621 628 # attempt to parse old-style ranges first to deal with
622 629 # things like old-tag which contain query metacharacters
623 630 try:
624 631 if isinstance(spec, int):
625 632 seen.add(spec)
626 633 l = l + revset.baseset([spec])
627 634 continue
628 635
629 636 if _revrangesep in spec:
630 637 start, end = spec.split(_revrangesep, 1)
631 638 start = revfix(repo, start, 0)
632 639 end = revfix(repo, end, len(repo) - 1)
633 640 if end == nullrev and start < 0:
634 641 start = nullrev
635 642 rangeiter = repo.changelog.revs(start, end)
636 643 if not seen and not l:
637 644 # by far the most common case: revs = ["-1:0"]
638 645 l = revset.baseset(rangeiter)
639 646 # defer syncing seen until next iteration
640 647 continue
641 648 newrevs = set(rangeiter)
642 649 if seen:
643 650 newrevs.difference_update(seen)
644 651 seen.update(newrevs)
645 652 else:
646 653 seen = newrevs
647 654 l = l + revset.baseset(sorted(newrevs, reverse=start > end))
648 655 continue
649 656 elif spec and spec in repo: # single unquoted rev
650 657 rev = revfix(repo, spec, None)
651 658 if rev in seen:
652 659 continue
653 660 seen.add(rev)
654 661 l = l + revset.baseset([rev])
655 662 continue
656 663 except error.RepoLookupError:
657 664 pass
658 665
659 666 # fall through to new-style queries if old-style fails
660 667 m = revset.match(repo.ui, spec, repo)
661 668 if seen or l:
662 669 dl = [r for r in m(repo, revset.spanset(repo)) if r not in seen]
663 670 l = l + revset.baseset(dl)
664 671 seen.update(dl)
665 672 else:
666 673 l = m(repo, revset.spanset(repo))
667 674
668 675 return l
669 676
670 677 def expandpats(pats):
671 678 '''Expand bare globs when running on windows.
672 679 On posix we assume it has already been done by sh.'''
673 680 if not util.expandglobs:
674 681 return list(pats)
675 682 ret = []
676 683 for kindpat in pats:
677 684 kind, pat = matchmod._patsplit(kindpat, None)
678 685 if kind is None:
679 686 try:
680 687 globbed = glob.glob(pat)
681 688 except re.error:
682 689 globbed = [pat]
683 690 if globbed:
684 691 ret.extend(globbed)
685 692 continue
686 693 ret.append(kindpat)
687 694 return ret
688 695
689 696 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
690 697 '''Return a matcher and the patterns that were used.
691 698 The matcher will warn about bad matches.'''
692 699 if pats == ("",):
693 700 pats = []
694 701 if not globbed and default == 'relpath':
695 702 pats = expandpats(pats or [])
696 703
697 704 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
698 705 default)
699 706 def badfn(f, msg):
700 707 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
701 708 m.bad = badfn
702 709 return m, pats
703 710
704 711 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
705 712 '''Return a matcher that will warn about bad matches.'''
706 713 return matchandpats(ctx, pats, opts, globbed, default)[0]
707 714
708 715 def matchall(repo):
709 716 '''Return a matcher that will efficiently match everything.'''
710 717 return matchmod.always(repo.root, repo.getcwd())
711 718
712 719 def matchfiles(repo, files):
713 720 '''Return a matcher that will efficiently match exactly these files.'''
714 721 return matchmod.exact(repo.root, repo.getcwd(), files)
715 722
716 723 def addremove(repo, matcher, prefix, opts={}, dry_run=None, similarity=None):
717 724 m = matcher
718 725 if dry_run is None:
719 726 dry_run = opts.get('dry_run')
720 727 if similarity is None:
721 728 similarity = float(opts.get('similarity') or 0)
722 729
723 730 ret = 0
724 731 join = lambda f: os.path.join(prefix, f)
725 732
726 733 def matchessubrepo(matcher, subpath):
727 734 if matcher.exact(subpath):
728 735 return True
729 736 for f in matcher.files():
730 737 if f.startswith(subpath):
731 738 return True
732 739 return False
733 740
734 741 wctx = repo[None]
735 742 for subpath in sorted(wctx.substate):
736 743 if opts.get('subrepos') or matchessubrepo(m, subpath):
737 744 sub = wctx.sub(subpath)
738 745 try:
739 746 submatch = matchmod.narrowmatcher(subpath, m)
740 747 if sub.addremove(submatch, prefix, opts, dry_run, similarity):
741 748 ret = 1
742 749 except error.LookupError:
743 750 repo.ui.status(_("skipping missing subrepository: %s\n")
744 751 % join(subpath))
745 752
746 753 rejected = []
747 754 origbad = m.bad
748 755 def badfn(f, msg):
749 756 if f in m.files():
750 757 origbad(f, msg)
751 758 rejected.append(f)
752 759
753 760 m.bad = badfn
754 761 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
755 762 m.bad = origbad
756 763
757 764 unknownset = set(unknown + forgotten)
758 765 toprint = unknownset.copy()
759 766 toprint.update(deleted)
760 767 for abs in sorted(toprint):
761 768 if repo.ui.verbose or not m.exact(abs):
762 769 if abs in unknownset:
763 770 status = _('adding %s\n') % m.uipath(join(abs))
764 771 else:
765 772 status = _('removing %s\n') % m.uipath(join(abs))
766 773 repo.ui.status(status)
767 774
768 775 renames = _findrenames(repo, m, added + unknown, removed + deleted,
769 776 similarity)
770 777
771 778 if not dry_run:
772 779 _markchanges(repo, unknown + forgotten, deleted, renames)
773 780
774 781 for f in rejected:
775 782 if f in m.files():
776 783 return 1
777 784 return ret
778 785
779 786 def marktouched(repo, files, similarity=0.0):
780 787 '''Assert that files have somehow been operated upon. The files are
781 788 relative to the repo root.'''
782 789 m = matchfiles(repo, files)
783 790 rejected = []
784 791 m.bad = lambda x, y: rejected.append(x)
785 792
786 793 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
787 794
788 795 if repo.ui.verbose:
789 796 unknownset = set(unknown + forgotten)
790 797 toprint = unknownset.copy()
791 798 toprint.update(deleted)
792 799 for abs in sorted(toprint):
793 800 if abs in unknownset:
794 801 status = _('adding %s\n') % abs
795 802 else:
796 803 status = _('removing %s\n') % abs
797 804 repo.ui.status(status)
798 805
799 806 renames = _findrenames(repo, m, added + unknown, removed + deleted,
800 807 similarity)
801 808
802 809 _markchanges(repo, unknown + forgotten, deleted, renames)
803 810
804 811 for f in rejected:
805 812 if f in m.files():
806 813 return 1
807 814 return 0
808 815
809 816 def _interestingfiles(repo, matcher):
810 817 '''Walk dirstate with matcher, looking for files that addremove would care
811 818 about.
812 819
813 820 This is different from dirstate.status because it doesn't care about
814 821 whether files are modified or clean.'''
815 822 added, unknown, deleted, removed, forgotten = [], [], [], [], []
816 823 audit_path = pathutil.pathauditor(repo.root)
817 824
818 825 ctx = repo[None]
819 826 dirstate = repo.dirstate
820 827 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
821 828 full=False)
822 829 for abs, st in walkresults.iteritems():
823 830 dstate = dirstate[abs]
824 831 if dstate == '?' and audit_path.check(abs):
825 832 unknown.append(abs)
826 833 elif dstate != 'r' and not st:
827 834 deleted.append(abs)
828 835 elif dstate == 'r' and st:
829 836 forgotten.append(abs)
830 837 # for finding renames
831 838 elif dstate == 'r' and not st:
832 839 removed.append(abs)
833 840 elif dstate == 'a':
834 841 added.append(abs)
835 842
836 843 return added, unknown, deleted, removed, forgotten
837 844
838 845 def _findrenames(repo, matcher, added, removed, similarity):
839 846 '''Find renames from removed files to added ones.'''
840 847 renames = {}
841 848 if similarity > 0:
842 849 for old, new, score in similar.findrenames(repo, added, removed,
843 850 similarity):
844 851 if (repo.ui.verbose or not matcher.exact(old)
845 852 or not matcher.exact(new)):
846 853 repo.ui.status(_('recording removal of %s as rename to %s '
847 854 '(%d%% similar)\n') %
848 855 (matcher.rel(old), matcher.rel(new),
849 856 score * 100))
850 857 renames[new] = old
851 858 return renames
852 859
853 860 def _markchanges(repo, unknown, deleted, renames):
854 861 '''Marks the files in unknown as added, the files in deleted as removed,
855 862 and the files in renames as copied.'''
856 863 wctx = repo[None]
857 864 wlock = repo.wlock()
858 865 try:
859 866 wctx.forget(deleted)
860 867 wctx.add(unknown)
861 868 for new, old in renames.iteritems():
862 869 wctx.copy(old, new)
863 870 finally:
864 871 wlock.release()
865 872
866 873 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
867 874 """Update the dirstate to reflect the intent of copying src to dst. For
868 875 different reasons it might not end with dst being marked as copied from src.
869 876 """
870 877 origsrc = repo.dirstate.copied(src) or src
871 878 if dst == origsrc: # copying back a copy?
872 879 if repo.dirstate[dst] not in 'mn' and not dryrun:
873 880 repo.dirstate.normallookup(dst)
874 881 else:
875 882 if repo.dirstate[origsrc] == 'a' and origsrc == src:
876 883 if not ui.quiet:
877 884 ui.warn(_("%s has not been committed yet, so no copy "
878 885 "data will be stored for %s.\n")
879 886 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
880 887 if repo.dirstate[dst] in '?r' and not dryrun:
881 888 wctx.add([dst])
882 889 elif not dryrun:
883 890 wctx.copy(origsrc, dst)
884 891
885 892 def readrequires(opener, supported):
886 893 '''Reads and parses .hg/requires and checks if all entries found
887 894 are in the list of supported features.'''
888 895 requirements = set(opener.read("requires").splitlines())
889 896 missings = []
890 897 for r in requirements:
891 898 if r not in supported:
892 899 if not r or not r[0].isalnum():
893 900 raise error.RequirementError(_(".hg/requires file is corrupt"))
894 901 missings.append(r)
895 902 missings.sort()
896 903 if missings:
897 904 raise error.RequirementError(
898 905 _("repository requires features unknown to this Mercurial: %s")
899 906 % " ".join(missings),
900 907 hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
901 908 " for more information"))
902 909 return requirements
903 910
904 911 class filecachesubentry(object):
905 912 def __init__(self, path, stat):
906 913 self.path = path
907 914 self.cachestat = None
908 915 self._cacheable = None
909 916
910 917 if stat:
911 918 self.cachestat = filecachesubentry.stat(self.path)
912 919
913 920 if self.cachestat:
914 921 self._cacheable = self.cachestat.cacheable()
915 922 else:
916 923 # None means we don't know yet
917 924 self._cacheable = None
918 925
919 926 def refresh(self):
920 927 if self.cacheable():
921 928 self.cachestat = filecachesubentry.stat(self.path)
922 929
923 930 def cacheable(self):
924 931 if self._cacheable is not None:
925 932 return self._cacheable
926 933
927 934 # we don't know yet, assume it is for now
928 935 return True
929 936
930 937 def changed(self):
931 938 # no point in going further if we can't cache it
932 939 if not self.cacheable():
933 940 return True
934 941
935 942 newstat = filecachesubentry.stat(self.path)
936 943
937 944 # we may not know if it's cacheable yet, check again now
938 945 if newstat and self._cacheable is None:
939 946 self._cacheable = newstat.cacheable()
940 947
941 948 # check again
942 949 if not self._cacheable:
943 950 return True
944 951
945 952 if self.cachestat != newstat:
946 953 self.cachestat = newstat
947 954 return True
948 955 else:
949 956 return False
950 957
951 958 @staticmethod
952 959 def stat(path):
953 960 try:
954 961 return util.cachestat(path)
955 962 except OSError, e:
956 963 if e.errno != errno.ENOENT:
957 964 raise
958 965
959 966 class filecacheentry(object):
960 967 def __init__(self, paths, stat=True):
961 968 self._entries = []
962 969 for path in paths:
963 970 self._entries.append(filecachesubentry(path, stat))
964 971
965 972 def changed(self):
966 973 '''true if any entry has changed'''
967 974 for entry in self._entries:
968 975 if entry.changed():
969 976 return True
970 977 return False
971 978
972 979 def refresh(self):
973 980 for entry in self._entries:
974 981 entry.refresh()
975 982
976 983 class filecache(object):
977 984 '''A property-like decorator that tracks files under .hg/ for updates.
978 985
979 986 Records stat info when called in _filecache.
980 987
981 988 On subsequent calls, compares old stat info with new info, and recreates the
982 989 object when any of the files changes, updating the new stat info in
983 990 _filecache.
984 991
985 992 Mercurial either atomically renames or appends files under .hg,
986 993 so to ensure the cache is reliable we need the filesystem to be able
987 994 to tell us if a file has been replaced. If it can't, we fall back to
988 995 recreating the object on every call (essentially the same behaviour as
989 996 propertycache).
990 997
991 998 '''
992 999 def __init__(self, *paths):
993 1000 self.paths = paths
994 1001
995 1002 def join(self, obj, fname):
996 1003 """Used to compute the runtime path of a cached file.
997 1004
998 1005 Users should subclass filecache and provide their own version of this
999 1006 function to call the appropriate join function on 'obj' (an instance
1000 1007 of the class whose member function was decorated).
1001 1008 """
1002 1009 return obj.join(fname)
1003 1010
1004 1011 def __call__(self, func):
1005 1012 self.func = func
1006 1013 self.name = func.__name__
1007 1014 return self
1008 1015
1009 1016 def __get__(self, obj, type=None):
1010 1017 # do we need to check if the file changed?
1011 1018 if self.name in obj.__dict__:
1012 1019 assert self.name in obj._filecache, self.name
1013 1020 return obj.__dict__[self.name]
1014 1021
1015 1022 entry = obj._filecache.get(self.name)
1016 1023
1017 1024 if entry:
1018 1025 if entry.changed():
1019 1026 entry.obj = self.func(obj)
1020 1027 else:
1021 1028 paths = [self.join(obj, path) for path in self.paths]
1022 1029
1023 1030 # We stat -before- creating the object so our cache doesn't lie if
1024 1031 # a writer modified the file between the time we read and stat
1025 1032 entry = filecacheentry(paths, True)
1026 1033 entry.obj = self.func(obj)
1027 1034
1028 1035 obj._filecache[self.name] = entry
1029 1036
1030 1037 obj.__dict__[self.name] = entry.obj
1031 1038 return entry.obj
1032 1039
1033 1040 def __set__(self, obj, value):
1034 1041 if self.name not in obj._filecache:
1035 1042 # we add an entry for the missing value because X in __dict__
1036 1043 # implies X in _filecache
1037 1044 paths = [self.join(obj, path) for path in self.paths]
1038 1045 ce = filecacheentry(paths, False)
1039 1046 obj._filecache[self.name] = ce
1040 1047 else:
1041 1048 ce = obj._filecache[self.name]
1042 1049
1043 1050 ce.obj = value # update cached copy
1044 1051 obj.__dict__[self.name] = value # update copy returned by obj.x
1045 1052
1046 1053 def __delete__(self, obj):
1047 1054 try:
1048 1055 del obj.__dict__[self.name]
1049 1056 except KeyError:
1050 1057 raise AttributeError(self.name)
1051 1058
1052 1059 class dirs(object):
1053 1060 '''a multiset of directory names from a dirstate or manifest'''
1054 1061
1055 1062 def __init__(self, map, skip=None):
1056 1063 self._dirs = {}
1057 1064 addpath = self.addpath
1058 1065 if util.safehasattr(map, 'iteritems') and skip is not None:
1059 1066 for f, s in map.iteritems():
1060 1067 if s[0] != skip:
1061 1068 addpath(f)
1062 1069 else:
1063 1070 for f in map:
1064 1071 addpath(f)
1065 1072
1066 1073 def addpath(self, path):
1067 1074 dirs = self._dirs
1068 1075 for base in finddirs(path):
1069 1076 if base in dirs:
1070 1077 dirs[base] += 1
1071 1078 return
1072 1079 dirs[base] = 1
1073 1080
1074 1081 def delpath(self, path):
1075 1082 dirs = self._dirs
1076 1083 for base in finddirs(path):
1077 1084 if dirs[base] > 1:
1078 1085 dirs[base] -= 1
1079 1086 return
1080 1087 del dirs[base]
1081 1088
1082 1089 def __iter__(self):
1083 1090 return self._dirs.iterkeys()
1084 1091
1085 1092 def __contains__(self, d):
1086 1093 return d in self._dirs
1087 1094
1088 1095 if util.safehasattr(parsers, 'dirs'):
1089 1096 dirs = parsers.dirs
1090 1097
1091 1098 def finddirs(path):
1092 1099 pos = path.rfind('/')
1093 1100 while pos != -1:
1094 1101 yield path[:pos]
1095 1102 pos = path.rfind('/', 0, pos)
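The `filecache` decorator above is easiest to see in use. The owning object must provide a `_filecache` dict (where the decorator records stat info) and a `join()` that maps a name to an on-disk path; the decorated function is then re-run only when the tracked file appears to have changed. A hypothetical, self-contained illustration (the `config` class and its 'state' file are invented for the example):

    import os
    from mercurial import scmutil

    class config(object):
        def __init__(self, root):
            self._filecache = {}  # the decorator records stat info here
            self._root = root

        def join(self, fname):
            # called by filecache.join() to locate the tracked file
            return os.path.join(self._root, fname)

        @scmutil.filecache('state')
        def state(self):
            # re-executed only when the stat info of 'state' changes
            return open(self.join('state')).read()

    c = config('/tmp/example')
    # first access reads the file; later accesses return the cached
    # value until '/tmp/example/state' is replaced or modified:
    # value = c.state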
@@ -1,498 +1,498 @@
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from i18n import _
15 15 import os
16 16 import errno
17 17 import error, util
18 18
19 19 version = 2
20 20
21 21 def active(func):
22 22 def _active(self, *args, **kwds):
23 23 if self.count == 0:
24 24 raise error.Abort(_(
25 25 'cannot use transaction when it is already committed/aborted'))
26 26 return func(self, *args, **kwds)
27 27 return _active
28 28
29 29 def _playback(journal, report, opener, vfsmap, entries, backupentries,
30 30 unlink=True):
31 31 for f, o, _ignore in entries:
32 32 if o or not unlink:
33 33 try:
34 34 fp = opener(f, 'a')
35 35 fp.truncate(o)
36 36 fp.close()
37 37 except IOError:
38 38 report(_("failed to truncate %s\n") % f)
39 39 raise
40 40 else:
41 41 try:
42 42 opener.unlink(f)
43 43 except (IOError, OSError), inst:
44 44 if inst.errno != errno.ENOENT:
45 45 raise
46 46
47 47 backupfiles = []
48 48 for l, f, b, c in backupentries:
49 49 if l not in vfsmap and c:
50 50 report("couldn't handle %s: unknown cache location %s\n"
51 51 % (b, l))
52 52 vfs = vfsmap[l]
53 53 try:
54 54 if f and b:
55 55 filepath = vfs.join(f)
56 56 backuppath = vfs.join(b)
57 57 try:
58 58 util.copyfile(backuppath, filepath)
59 59 backupfiles.append(b)
60 60 except IOError:
61 61 report(_("failed to recover %s\n") % f)
62 62 else:
63 63 target = f or b
64 64 try:
65 65 vfs.unlink(target)
66 66 except (IOError, OSError), inst:
67 67 if inst.errno != errno.ENOENT:
68 68 raise
69 69 except (IOError, OSError, util.Abort), inst:
70 70 if not c:
71 71 raise
72 72
73 73 opener.unlink(journal)
74 74 backuppath = "%s.backupfiles" % journal
75 75 if opener.exists(backuppath):
76 76 opener.unlink(backuppath)
77 77 try:
78 78 for f in backupfiles:
79 79 if opener.exists(f):
80 80 opener.unlink(f)
81 81 except (IOError, OSError, util.Abort), inst:
82 82 # only pure backup files remain; it is safe to ignore any error
83 83 pass
84 84
85 85 class transaction(object):
86 86 def __init__(self, report, opener, vfsmap, journal, after=None,
87 87 createmode=None):
88 88 """Begin a new transaction
89 89
90 90 Begins a new transaction that allows rolling back writes in the event of
91 91 an exception.
92 92
93 93 * `after`: called after the transaction has been committed
94 94 * `createmode`: the mode of the journal file that will be created
95 95 """
96 96 self.count = 1
97 97 self.usages = 1
98 98 self.report = report
99 99 # a vfs to the store content
100 100 self.opener = opener
101 101 # a map to access files in various locations {location -> vfs}
102 102 vfsmap = vfsmap.copy()
103 103 vfsmap[''] = opener # set default value
104 104 self._vfsmap = vfsmap
105 105 self.after = after
106 106 self.entries = []
107 107 self.map = {}
108 108 self.journal = journal
109 109 self._queue = []
110 110 # a dict of arguments to be passed to hooks
111 111 self.hookargs = {}
112 112 self.file = opener.open(self.journal, "w")
113 113
114 114 # a list of ('location', 'path', 'backuppath', cache) entries.
115 115 # - if 'backuppath' is empty, no file existed at backup time
116 116 # - if 'path' is empty, this is a temporary transaction file
117 117 # - if 'location' is not empty, the path is outside the main opener's reach;
118 118 # use the 'location' value as a key in the vfsmap to find the right 'vfs'
119 119 # (cache is currently unused)
120 120 self._backupentries = []
121 121 self._backupmap = {}
122 122 self._backupjournal = "%s.backupfiles" % journal
123 123 self._backupsfile = opener.open(self._backupjournal, 'w')
124 124 self._backupsfile.write('%d\n' % version)
125 125
126 126 if createmode is not None:
127 127 opener.chmod(self.journal, createmode & 0666)
128 128 opener.chmod(self._backupjournal, createmode & 0666)
129 129
130 130 # hold file generations to be performed on commit
131 131 self._filegenerators = {}
132 132 # hold callback to write pending data for hooks
133 133 self._pendingcallback = {}
134 134 # True if any pending data has ever been written
135 135 self._anypending = False
136 136 # holds callback to call when writing the transaction
137 137 self._finalizecallback = {}
138 138 # hold callback for post transaction close
139 139 self._postclosecallback = {}
140 140
141 141 def __del__(self):
142 142 if self.journal:
143 143 self._abort()
144 144
145 145 @active
146 146 def startgroup(self):
147 147 """delay registration of file entry
148 148
149 149 This is used by strip to delay vision of strip offset. The transaction
150 150 sees either none or all of the strip actions to be done."""
151 151 self._queue.append([])
152 152
153 153 @active
154 154 def endgroup(self):
155 155 """apply delayed registration of file entry.
156 156
157 157 This is used by strip to delay vision of strip offset. The transaction
158 158 sees either none or all of the strip actions to be done."""
159 159 q = self._queue.pop()
160 160 for f, o, data in q:
161 161 self._addentry(f, o, data)
162 162
163 163 @active
164 164 def add(self, file, offset, data=None):
165 165 """record the state of an append-only file before update"""
166 166 if file in self.map or file in self._backupmap:
167 167 return
168 168 if self._queue:
169 169 self._queue[-1].append((file, offset, data))
170 170 return
171 171
172 172 self._addentry(file, offset, data)
173 173
174 174 def _addentry(self, file, offset, data):
175 175 """add a append-only entry to memory and on-disk state"""
176 176 if file in self.map or file in self._backupmap:
177 177 return
178 178 self.entries.append((file, offset, data))
179 179 self.map[file] = len(self.entries) - 1
180 180 # add enough data to the journal to do the truncate
181 181 self.file.write("%s\0%d\n" % (file, offset))
182 182 self.file.flush()
183 183
184 184 @active
185 185 def addbackup(self, file, hardlink=True, location=''):
186 186 """Adds a backup of the file to the transaction
187 187
188 188 Calling addbackup() creates a hardlink backup of the specified file
189 189 that is used to recover the file in the event of the transaction
190 190 aborting.
191 191
192 192 * `file`: the file path, relative to .hg/store
193 193 * `hardlink`: use a hardlink to quickly create the backup
194 194 """
195 195 if self._queue:
196 196 msg = 'cannot use transaction.addbackup inside "group"'
197 197 raise RuntimeError(msg)
198 198
199 199 if file in self.map or file in self._backupmap:
200 200 return
201 201 dirname, filename = os.path.split(file)
202 202 backupfilename = "%s.backup.%s" % (self.journal, filename)
203 backupfile = os.path.join(dirname, backupfilename)
204 203 vfs = self._vfsmap[location]
204 backupfile = vfs.reljoin(dirname, backupfilename)
205 205 if vfs.exists(file):
206 206 filepath = vfs.join(file)
207 207 backuppath = vfs.join(backupfile)
208 208 util.copyfiles(filepath, backuppath, hardlink=hardlink)
209 209 else:
210 210 backupfile = ''
211 211
212 212 self._addbackupentry((location, file, backupfile, False))
213 213
214 214 def _addbackupentry(self, entry):
215 215 """register a new backup entry and write it to disk"""
216 216 self._backupentries.append(entry)
217 217 self._backupmap[entry[1]] = len(self._backupentries) - 1
218 218 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
219 219 self._backupsfile.flush()
220 220
221 221 @active
222 222 def registertmp(self, tmpfile, location=''):
223 223 """register a temporary transaction file
224 224
225 225 Such files will be deleted when the transaction exits (on both
226 226 failure and success).
227 227 """
228 228 self._addbackupentry((location, '', tmpfile, False))
229 229
230 230 @active
231 231 def addfilegenerator(self, genid, filenames, genfunc, order=0,
232 232 location=''):
233 233 """add a function to generates some files at transaction commit
234 234
235 235 The `genfunc` argument is a function capable of generating the proper
236 236 content for each entry in the `filenames` tuple.
237 237
238 238 At transaction close time, `genfunc` will be called with one file
239 239 object argument per entry in `filenames`.
240 240
241 241 The transaction itself is responsible for the backup, creation and
242 242 final write of such files.
243 243
244 244 The `genid` argument is used to ensure the same set of files is only
245 245 generated once. Calling `addfilegenerator` with a `genid` already
246 246 present will overwrite the old entry.
247 247
248 248 The `order` argument may be used to control the order in which multiple
249 249 generators will be executed.
250 250
251 251 The `location` argument may be used to indicate that the files are
252 252 located outside of the standard directory for the transaction. It
253 253 should match one of the keys of the `transaction.vfsmap` dictionary.
254 254 """
255 255 # For now, we are unable to do proper backup and restore of custom vfs
256 256 # content, except for bookmarks, which are handled outside this mechanism.
257 257 self._filegenerators[genid] = (order, filenames, genfunc, location)
258 258
259 259 def _generatefiles(self, suffix=''):
260 260 # write files registered for generation
261 261 any = False
262 262 for entry in sorted(self._filegenerators.values()):
263 263 any = True
264 264 order, filenames, genfunc, location = entry
265 265 vfs = self._vfsmap[location]
266 266 files = []
267 267 try:
268 268 for name in filenames:
269 269 name += suffix
270 270 if suffix:
271 271 self.registertmp(name, location=location)
272 272 else:
273 273 self.addbackup(name, location=location)
274 274 files.append(vfs(name, 'w', atomictemp=True))
275 275 genfunc(*files)
276 276 finally:
277 277 for f in files:
278 278 f.close()
279 279 return any
280 280
281 281 @active
282 282 def find(self, file):
283 283 if file in self.map:
284 284 return self.entries[self.map[file]]
285 285 if file in self._backupmap:
286 286 return self._backupentries[self._backupmap[file]]
287 287 return None
288 288
289 289 @active
290 290 def replace(self, file, offset, data=None):
291 291 '''
292 292 replace can only replace already committed entries
293 293 that are not pending in the queue
294 294 '''
295 295
296 296 if file not in self.map:
297 297 raise KeyError(file)
298 298 index = self.map[file]
299 299 self.entries[index] = (file, offset, data)
300 300 self.file.write("%s\0%d\n" % (file, offset))
301 301 self.file.flush()
302 302
303 303 @active
304 304 def nest(self):
305 305 self.count += 1
306 306 self.usages += 1
307 307 return self
308 308
309 309 def release(self):
310 310 if self.count > 0:
311 311 self.usages -= 1
312 312 # if the transaction scopes are left without being closed, fail
313 313 if self.count > 0 and self.usages == 0:
314 314 self._abort()
315 315
316 316 def running(self):
317 317 return self.count > 0
318 318
319 319 def addpending(self, category, callback):
320 320 """add a callback to be called when the transaction is pending
321 321
322 322 The transaction will be given as callback's first argument.
323 323
324 324 Category is a unique identifier to allow overwriting an old callback
325 325 with a newer callback.
326 326 """
327 327 self._pendingcallback[category] = callback
328 328
329 329 @active
330 330 def writepending(self):
331 331 '''write pending file to temporary version
332 332
333 333 This is used to allow hooks to view a transaction before commit'''
334 334 categories = sorted(self._pendingcallback)
335 335 for cat in categories:
336 336 # remove callback since the data will have been flushed
337 337 any = self._pendingcallback.pop(cat)(self)
338 338 self._anypending = self._anypending or any
339 339 self._anypending |= self._generatefiles(suffix='.pending')
340 340 return self._anypending
341 341
342 342 @active
343 343 def addfinalize(self, category, callback):
344 344 """add a callback to be called when the transaction is closed
345 345
346 346 The transaction will be given as callback's first argument.
347 347
348 348 Category is a unique identifier to allow overwriting old callbacks with
349 349 newer callbacks.
350 350 """
351 351 self._finalizecallback[category] = callback
352 352
353 353 @active
354 354 def addpostclose(self, category, callback):
355 355 """add a callback to be called after the transaction is closed
356 356
357 357 The transaction will be given as callback's first argument.
358 358
359 359 Category is a unique identifier to allow overwriting an old callback
360 360 with a newer callback.
361 361 """
362 362 self._postclosecallback[category] = callback
363 363
364 364 @active
365 365 def close(self):
366 366 '''commit the transaction'''
367 367 if self.count == 1:
368 368 self._generatefiles()
369 369 categories = sorted(self._finalizecallback)
370 370 for cat in categories:
371 371 self._finalizecallback[cat](self)
372 372
373 373 self.count -= 1
374 374 if self.count != 0:
375 375 return
376 376 self.file.close()
377 377 self._backupsfile.close()
378 378 # cleanup temporary files
379 379 for l, f, b, c in self._backupentries:
380 380 if l not in self._vfsmap and c:
381 381 self.report("couldn't remote %s: unknown cache location %s\n"
382 382 % (b, l))
383 383 continue
384 384 vfs = self._vfsmap[l]
385 385 if not f and b and vfs.exists(b):
386 386 try:
387 387 vfs.unlink(b)
388 388 except (IOError, OSError, util.Abort), inst:
389 389 if not c:
390 390 raise
391 391 # Abort may be raised by a read-only opener
392 392 self.report("couldn't remove %s: %s\n"
393 393 % (vfs.join(b), inst))
394 394 self.entries = []
395 395 if self.after:
396 396 self.after()
397 397 if self.opener.isfile(self.journal):
398 398 self.opener.unlink(self.journal)
399 399 if self.opener.isfile(self._backupjournal):
400 400 self.opener.unlink(self._backupjournal)
401 401 for l, _f, b, c in self._backupentries:
402 402 if l not in self._vfsmap and c:
403 403 self.report("couldn't remote %s: unknown cache location"
404 404 "%s\n" % (b, l))
405 405 continue
406 406 vfs = self._vfsmap[l]
407 407 if b and vfs.exists(b):
408 408 try:
409 409 vfs.unlink(b)
410 410 except (IOError, OSError, util.Abort), inst:
411 411 if not c:
412 412 raise
413 413 # Abort may be raised by a read-only opener
414 414 self.report("couldn't remove %s: %s\n"
415 415 % (vfs.join(b), inst))
416 416 self._backupentries = []
417 417 self.journal = None
418 418 # run post close action
419 419 categories = sorted(self._postclosecallback)
420 420 for cat in categories:
421 421 self._postclosecallback[cat](self)
422 422
423 423 @active
424 424 def abort(self):
425 425 '''abort the transaction (generally called on error, or when the
426 426 transaction is not explicitly committed before going out of
427 427 scope)'''
428 428 self._abort()
429 429
430 430 def _abort(self):
431 431 self.count = 0
432 432 self.usages = 0
433 433 self.file.close()
434 434 self._backupsfile.close()
435 435
436 436 try:
437 437 if not self.entries and not self._backupentries:
438 438 if self.journal:
439 439 self.opener.unlink(self.journal)
440 440 if self._backupjournal:
441 441 self.opener.unlink(self._backupjournal)
442 442 return
443 443
444 444 self.report(_("transaction abort!\n"))
445 445
446 446 try:
447 447 _playback(self.journal, self.report, self.opener, self._vfsmap,
448 448 self.entries, self._backupentries, False)
449 449 self.report(_("rollback completed\n"))
450 450 except Exception:
451 451 self.report(_("rollback failed - please run hg recover\n"))
452 452 finally:
453 453 self.journal = None
454 454
455 455
456 456 def rollback(opener, vfsmap, file, report):
457 457 """Rolls back the transaction contained in the given file
458 458
459 459 Reads the entries in the specified file, and the corresponding
460 460 '*.backupfiles' file, to recover from an incomplete transaction.
461 461
462 462 * `file`: a file containing a list of entries, specifying where
463 463 to truncate each file. The file should contain a list of
464 464 file\0offset pairs, delimited by newlines. The corresponding
465 465 '*.backupfiles' file should contain a list of file\0backupfile
466 466 pairs, delimited by \0.
467 467 """
468 468 entries = []
469 469 backupentries = []
470 470
471 471 fp = opener.open(file)
472 472 lines = fp.readlines()
473 473 fp.close()
474 474 for l in lines:
475 475 try:
476 476 f, o = l.split('\0')
477 477 entries.append((f, int(o), None))
478 478 except ValueError:
479 479 report(_("couldn't read journal entry %r!\n") % l)
480 480
481 481 backupjournal = "%s.backupfiles" % file
482 482 if opener.exists(backupjournal):
483 483 fp = opener.open(backupjournal)
484 484 lines = fp.readlines()
485 485 if lines:
486 486 ver = lines[0][:-1]
487 487 if ver == str(version):
488 488 for line in lines[1:]:
489 489 if line:
490 490 # Shave off the trailing newline
491 491 line = line[:-1]
492 492 l, f, b, c = line.split('\0')
493 493 backupentries.append((l, f, b, bool(c)))
494 494 else:
495 495 report(_("journal was created by a different version of "
496 496 "Mercurial"))
497 497
498 498 _playback(file, report, opener, vfsmap, entries, backupentries)
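For reference, `addbackup` in transaction.py is the one caller this commit converts: the backup file name used to be assembled with a bare os.path.join before any vfs was selected, and is now assembled by `reljoin` on the vfs chosen via `location`. A side-by-side sketch of the two path computations (standalone helpers, not Mercurial code):

    import os

    def backupfile_old(journal, file):
        # before: joined eagerly, without consulting any vfs
        dirname, filename = os.path.split(file)
        return os.path.join(dirname, '%s.backup.%s' % (journal, filename))

    def backupfile_new(vfs, journal, file):
        # after: the vfs selected by 'location' performs the join, and
        # the result stays relative to that vfs's root
        dirname, filename = os.path.split(file)
        return vfs.reljoin(dirname, '%s.backup.%s' % (journal, filename))

For the default `reljoin` the two helpers return identical strings; the difference is that the join is now dispatched through the vfs, so a vfs with unusual path-encoding needs can intercept it.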