vfs: add "makelock()" and "readlock()"
FUJIWARA Katsunori
r20090:88d8e568 default
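
This changeset adds two thin wrappers, makelock() and readlock(), to abstractvfs so that lock files can be created and inspected relative to a vfs root without callers joining paths by hand. The sketch below illustrates the resulting calling pattern; the repository path and lock contents are assumptions made up for the example and are not part of this changeset.

    from mercurial import scmutil

    # Illustrative only: a vfs rooted at some repository's .hg directory.
    hgvfs = scmutil.vfs('/path/to/repo/.hg')

    # New in this changeset: write a lock file relative to the vfs root ...
    hgvfs.makelock('example.host:12345', 'wlock')   # previously: util.makelock(info, hgvfs.join('wlock'))

    # ... and read the lock holder back through the same abstraction.
    holder = hgvfs.readlock('wlock')                # previously: util.readlock(hgvfs.join('wlock'))
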
@@ -1,913 +1,919 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 11 import pathutil
12 12 import match as matchmod
13 13 import os, errno, re, glob
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
23 23 def nochangesfound(ui, repo, excluded=None):
24 24 '''Report no changes for push/pull, excluded is None or a list of
25 25 nodes excluded from the push/pull.
26 26 '''
27 27 secretlist = []
28 28 if excluded:
29 29 for n in excluded:
30 30 if n not in repo:
31 31 # discovery should not have included the filtered revision,
32 32 # we have to explicitly exclude it until discovery is cleanup.
33 33 continue
34 34 ctx = repo[n]
35 35 if ctx.phase() >= phases.secret and not ctx.extinct():
36 36 secretlist.append(n)
37 37
38 38 if secretlist:
39 39 ui.status(_("no changes found (ignored %d secret changesets)\n")
40 40 % len(secretlist))
41 41 else:
42 42 ui.status(_("no changes found\n"))
43 43
44 44 def checknewlabel(repo, lbl, kind):
45 45 # Do not use the "kind" parameter in ui output.
46 46 # It makes strings difficult to translate.
47 47 if lbl in ['tip', '.', 'null']:
48 48 raise util.Abort(_("the name '%s' is reserved") % lbl)
49 49 for c in (':', '\0', '\n', '\r'):
50 50 if c in lbl:
51 51 raise util.Abort(_("%r cannot be used in a name") % c)
52 52 try:
53 53 int(lbl)
54 54 raise util.Abort(_("cannot use an integer as a name"))
55 55 except ValueError:
56 56 pass
57 57
58 58 def checkfilename(f):
59 59 '''Check that the filename f is an acceptable filename for a tracked file'''
60 60 if '\r' in f or '\n' in f:
61 61 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
62 62
63 63 def checkportable(ui, f):
64 64 '''Check if filename f is portable and warn or abort depending on config'''
65 65 checkfilename(f)
66 66 abort, warn = checkportabilityalert(ui)
67 67 if abort or warn:
68 68 msg = util.checkwinfilename(f)
69 69 if msg:
70 70 msg = "%s: %r" % (msg, f)
71 71 if abort:
72 72 raise util.Abort(msg)
73 73 ui.warn(_("warning: %s\n") % msg)
74 74
75 75 def checkportabilityalert(ui):
76 76 '''check if the user's config requests nothing, a warning, or abort for
77 77 non-portable filenames'''
78 78 val = ui.config('ui', 'portablefilenames', 'warn')
79 79 lval = val.lower()
80 80 bval = util.parsebool(val)
81 81 abort = os.name == 'nt' or lval == 'abort'
82 82 warn = bval or lval == 'warn'
83 83 if bval is None and not (warn or abort or lval == 'ignore'):
84 84 raise error.ConfigError(
85 85 _("ui.portablefilenames value is invalid ('%s')") % val)
86 86 return abort, warn
87 87
88 88 class casecollisionauditor(object):
89 89 def __init__(self, ui, abort, dirstate):
90 90 self._ui = ui
91 91 self._abort = abort
92 92 allfiles = '\0'.join(dirstate._map)
93 93 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
94 94 self._dirstate = dirstate
95 95 # The purpose of _newfiles is so that we don't complain about
96 96 # case collisions if someone were to call this object with the
97 97 # same filename twice.
98 98 self._newfiles = set()
99 99
100 100 def __call__(self, f):
101 101 if f in self._newfiles:
102 102 return
103 103 fl = encoding.lower(f)
104 104 if fl in self._loweredfiles and f not in self._dirstate:
105 105 msg = _('possible case-folding collision for %s') % f
106 106 if self._abort:
107 107 raise util.Abort(msg)
108 108 self._ui.warn(_("warning: %s\n") % msg)
109 109 self._loweredfiles.add(fl)
110 110 self._newfiles.add(f)
111 111
112 112 class abstractvfs(object):
113 113 """Abstract base class; cannot be instantiated"""
114 114
115 115 def __init__(self, *args, **kwargs):
116 116 '''Prevent instantiation; don't call this from subclasses.'''
117 117 raise NotImplementedError('attempted instantiating ' + str(type(self)))
118 118
119 119 def tryread(self, path):
120 120 '''gracefully return an empty string for missing files'''
121 121 try:
122 122 return self.read(path)
123 123 except IOError, inst:
124 124 if inst.errno != errno.ENOENT:
125 125 raise
126 126 return ""
127 127
128 128 def open(self, path, mode="r", text=False, atomictemp=False):
129 129 self.open = self.__call__
130 130 return self.__call__(path, mode, text, atomictemp)
131 131
132 132 def read(self, path):
133 133 fp = self(path, 'rb')
134 134 try:
135 135 return fp.read()
136 136 finally:
137 137 fp.close()
138 138
139 139 def write(self, path, data):
140 140 fp = self(path, 'wb')
141 141 try:
142 142 return fp.write(data)
143 143 finally:
144 144 fp.close()
145 145
146 146 def append(self, path, data):
147 147 fp = self(path, 'ab')
148 148 try:
149 149 return fp.write(data)
150 150 finally:
151 151 fp.close()
152 152
153 153 def chmod(self, path, mode):
154 154 return os.chmod(self.join(path), mode)
155 155
156 156 def exists(self, path=None):
157 157 return os.path.exists(self.join(path))
158 158
159 159 def fstat(self, fp):
160 160 return util.fstat(fp)
161 161
162 162 def isdir(self, path=None):
163 163 return os.path.isdir(self.join(path))
164 164
165 165 def isfile(self, path=None):
166 166 return os.path.isfile(self.join(path))
167 167
168 168 def islink(self, path=None):
169 169 return os.path.islink(self.join(path))
170 170
171 171 def lstat(self, path=None):
172 172 return os.lstat(self.join(path))
173 173
174 174 def makedir(self, path=None, notindexed=True):
175 175 return util.makedir(self.join(path), notindexed)
176 176
177 177 def makedirs(self, path=None, mode=None):
178 178 return util.makedirs(self.join(path), mode)
179 179
180 def makelock(self, info, path):
181 return util.makelock(info, self.join(path))
182
180 183 def mkdir(self, path=None):
181 184 return os.mkdir(self.join(path))
182 185
183 186 def readdir(self, path=None, stat=None, skip=None):
184 187 return osutil.listdir(self.join(path), stat, skip)
185 188
189 def readlock(self, path):
190 return util.readlock(self.join(path))
191
186 192 def rename(self, src, dst):
187 193 return util.rename(self.join(src), self.join(dst))
188 194
189 195 def readlink(self, path):
190 196 return os.readlink(self.join(path))
191 197
192 198 def setflags(self, path, l, x):
193 199 return util.setflags(self.join(path), l, x)
194 200
195 201 def stat(self, path=None):
196 202 return os.stat(self.join(path))
197 203
198 204 def unlink(self, path=None):
199 205 return util.unlink(self.join(path))
200 206
201 207 def utime(self, path=None, t=None):
202 208 return os.utime(self.join(path), t)
203 209
204 210 class vfs(abstractvfs):
205 211 '''Operate files relative to a base directory
206 212
207 213 This class is used to hide the details of COW semantics and
208 214 remote file access from higher level code.
209 215 '''
210 216 def __init__(self, base, audit=True, expandpath=False, realpath=False):
211 217 if expandpath:
212 218 base = util.expandpath(base)
213 219 if realpath:
214 220 base = os.path.realpath(base)
215 221 self.base = base
216 222 self._setmustaudit(audit)
217 223 self.createmode = None
218 224 self._trustnlink = None
219 225
220 226 def _getmustaudit(self):
221 227 return self._audit
222 228
223 229 def _setmustaudit(self, onoff):
224 230 self._audit = onoff
225 231 if onoff:
226 232 self.audit = pathutil.pathauditor(self.base)
227 233 else:
228 234 self.audit = util.always
229 235
230 236 mustaudit = property(_getmustaudit, _setmustaudit)
231 237
232 238 @util.propertycache
233 239 def _cansymlink(self):
234 240 return util.checklink(self.base)
235 241
236 242 @util.propertycache
237 243 def _chmod(self):
238 244 return util.checkexec(self.base)
239 245
240 246 def _fixfilemode(self, name):
241 247 if self.createmode is None or not self._chmod:
242 248 return
243 249 os.chmod(name, self.createmode & 0666)
244 250
245 251 def __call__(self, path, mode="r", text=False, atomictemp=False):
246 252 if self._audit:
247 253 r = util.checkosfilename(path)
248 254 if r:
249 255 raise util.Abort("%s: %r" % (r, path))
250 256 self.audit(path)
251 257 f = self.join(path)
252 258
253 259 if not text and "b" not in mode:
254 260 mode += "b" # for that other OS
255 261
256 262 nlink = -1
257 263 if mode not in ('r', 'rb'):
258 264 dirname, basename = util.split(f)
259 265 # If basename is empty, then the path is malformed because it points
260 266 # to a directory. Let the posixfile() call below raise IOError.
261 267 if basename:
262 268 if atomictemp:
263 269 util.ensuredirs(dirname, self.createmode)
264 270 return util.atomictempfile(f, mode, self.createmode)
265 271 try:
266 272 if 'w' in mode:
267 273 util.unlink(f)
268 274 nlink = 0
269 275 else:
270 276 # nlinks() may behave differently for files on Windows
271 277 # shares if the file is open.
272 278 fd = util.posixfile(f)
273 279 nlink = util.nlinks(f)
274 280 if nlink < 1:
275 281 nlink = 2 # force mktempcopy (issue1922)
276 282 fd.close()
277 283 except (OSError, IOError), e:
278 284 if e.errno != errno.ENOENT:
279 285 raise
280 286 nlink = 0
281 287 util.ensuredirs(dirname, self.createmode)
282 288 if nlink > 0:
283 289 if self._trustnlink is None:
284 290 self._trustnlink = nlink > 1 or util.checknlink(f)
285 291 if nlink > 1 or not self._trustnlink:
286 292 util.rename(util.mktempcopy(f), f)
287 293 fp = util.posixfile(f, mode)
288 294 if nlink == 0:
289 295 self._fixfilemode(f)
290 296 return fp
291 297
292 298 def symlink(self, src, dst):
293 299 self.audit(dst)
294 300 linkname = self.join(dst)
295 301 try:
296 302 os.unlink(linkname)
297 303 except OSError:
298 304 pass
299 305
300 306 util.ensuredirs(os.path.dirname(linkname), self.createmode)
301 307
302 308 if self._cansymlink:
303 309 try:
304 310 os.symlink(src, linkname)
305 311 except OSError, err:
306 312 raise OSError(err.errno, _('could not symlink to %r: %s') %
307 313 (src, err.strerror), linkname)
308 314 else:
309 315 self.write(dst, src)
310 316
311 317 def join(self, path):
312 318 if path:
313 319 return os.path.join(self.base, path)
314 320 else:
315 321 return self.base
316 322
317 323 opener = vfs
318 324
319 325 class auditvfs(object):
320 326 def __init__(self, vfs):
321 327 self.vfs = vfs
322 328
323 329 def _getmustaudit(self):
324 330 return self.vfs.mustaudit
325 331
326 332 def _setmustaudit(self, onoff):
327 333 self.vfs.mustaudit = onoff
328 334
329 335 mustaudit = property(_getmustaudit, _setmustaudit)
330 336
331 337 class filtervfs(abstractvfs, auditvfs):
332 338 '''Wrapper vfs for filtering filenames with a function.'''
333 339
334 340 def __init__(self, vfs, filter):
335 341 auditvfs.__init__(self, vfs)
336 342 self._filter = filter
337 343
338 344 def __call__(self, path, *args, **kwargs):
339 345 return self.vfs(self._filter(path), *args, **kwargs)
340 346
341 347 def join(self, path):
342 348 if path:
343 349 return self.vfs.join(self._filter(path))
344 350 else:
345 351 return self.vfs.join(path)
346 352
347 353 filteropener = filtervfs
348 354
349 355 class readonlyvfs(abstractvfs, auditvfs):
350 356 '''Wrapper vfs preventing any writing.'''
351 357
352 358 def __init__(self, vfs):
353 359 auditvfs.__init__(self, vfs)
354 360
355 361 def __call__(self, path, mode='r', *args, **kw):
356 362 if mode not in ('r', 'rb'):
357 363 raise util.Abort('this vfs is read only')
358 364 return self.vfs(path, mode, *args, **kw)
359 365
360 366
361 367 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
362 368 '''yield every hg repository under path, always recursively.
363 369 The recurse flag will only control recursion into repo working dirs'''
364 370 def errhandler(err):
365 371 if err.filename == path:
366 372 raise err
367 373 samestat = getattr(os.path, 'samestat', None)
368 374 if followsym and samestat is not None:
369 375 def adddir(dirlst, dirname):
370 376 match = False
371 377 dirstat = os.stat(dirname)
372 378 for lstdirstat in dirlst:
373 379 if samestat(dirstat, lstdirstat):
374 380 match = True
375 381 break
376 382 if not match:
377 383 dirlst.append(dirstat)
378 384 return not match
379 385 else:
380 386 followsym = False
381 387
382 388 if (seen_dirs is None) and followsym:
383 389 seen_dirs = []
384 390 adddir(seen_dirs, path)
385 391 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
386 392 dirs.sort()
387 393 if '.hg' in dirs:
388 394 yield root # found a repository
389 395 qroot = os.path.join(root, '.hg', 'patches')
390 396 if os.path.isdir(os.path.join(qroot, '.hg')):
391 397 yield qroot # we have a patch queue repo here
392 398 if recurse:
393 399 # avoid recursing inside the .hg directory
394 400 dirs.remove('.hg')
395 401 else:
396 402 dirs[:] = [] # don't descend further
397 403 elif followsym:
398 404 newdirs = []
399 405 for d in dirs:
400 406 fname = os.path.join(root, d)
401 407 if adddir(seen_dirs, fname):
402 408 if os.path.islink(fname):
403 409 for hgname in walkrepos(fname, True, seen_dirs):
404 410 yield hgname
405 411 else:
406 412 newdirs.append(d)
407 413 dirs[:] = newdirs
408 414
409 415 def osrcpath():
410 416 '''return default os-specific hgrc search path'''
411 417 path = systemrcpath()
412 418 path.extend(userrcpath())
413 419 path = [os.path.normpath(f) for f in path]
414 420 return path
415 421
416 422 _rcpath = None
417 423
418 424 def rcpath():
419 425 '''return hgrc search path. if env var HGRCPATH is set, use it.
420 426 for each item in path, if directory, use files ending in .rc,
421 427 else use item.
422 428 make HGRCPATH empty to only look in .hg/hgrc of current repo.
423 429 if no HGRCPATH, use default os-specific path.'''
424 430 global _rcpath
425 431 if _rcpath is None:
426 432 if 'HGRCPATH' in os.environ:
427 433 _rcpath = []
428 434 for p in os.environ['HGRCPATH'].split(os.pathsep):
429 435 if not p:
430 436 continue
431 437 p = util.expandpath(p)
432 438 if os.path.isdir(p):
433 439 for f, kind in osutil.listdir(p):
434 440 if f.endswith('.rc'):
435 441 _rcpath.append(os.path.join(p, f))
436 442 else:
437 443 _rcpath.append(p)
438 444 else:
439 445 _rcpath = osrcpath()
440 446 return _rcpath
441 447
442 448 def revsingle(repo, revspec, default='.'):
443 449 if not revspec and revspec != 0:
444 450 return repo[default]
445 451
446 452 l = revrange(repo, [revspec])
447 453 if len(l) < 1:
448 454 raise util.Abort(_('empty revision set'))
449 455 return repo[l[-1]]
450 456
451 457 def revpair(repo, revs):
452 458 if not revs:
453 459 return repo.dirstate.p1(), None
454 460
455 461 l = revrange(repo, revs)
456 462
457 463 if len(l) == 0:
458 464 if revs:
459 465 raise util.Abort(_('empty revision range'))
460 466 return repo.dirstate.p1(), None
461 467
462 468 if len(l) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
463 469 return repo.lookup(l[0]), None
464 470
465 471 return repo.lookup(l[0]), repo.lookup(l[-1])
466 472
467 473 _revrangesep = ':'
468 474
469 475 def revrange(repo, revs):
470 476 """Yield revision as strings from a list of revision specifications."""
471 477
472 478 def revfix(repo, val, defval):
473 479 if not val and val != 0 and defval is not None:
474 480 return defval
475 481 return repo[val].rev()
476 482
477 483 seen, l = set(), []
478 484 for spec in revs:
479 485 if l and not seen:
480 486 seen = set(l)
481 487 # attempt to parse old-style ranges first to deal with
482 488 # things like old-tag which contain query metacharacters
483 489 try:
484 490 if isinstance(spec, int):
485 491 seen.add(spec)
486 492 l.append(spec)
487 493 continue
488 494
489 495 if _revrangesep in spec:
490 496 start, end = spec.split(_revrangesep, 1)
491 497 start = revfix(repo, start, 0)
492 498 end = revfix(repo, end, len(repo) - 1)
493 499 if end == nullrev and start <= 0:
494 500 start = nullrev
495 501 rangeiter = repo.changelog.revs(start, end)
496 502 if not seen and not l:
497 503 # by far the most common case: revs = ["-1:0"]
498 504 l = list(rangeiter)
499 505 # defer syncing seen until next iteration
500 506 continue
501 507 newrevs = set(rangeiter)
502 508 if seen:
503 509 newrevs.difference_update(seen)
504 510 seen.update(newrevs)
505 511 else:
506 512 seen = newrevs
507 513 l.extend(sorted(newrevs, reverse=start > end))
508 514 continue
509 515 elif spec and spec in repo: # single unquoted rev
510 516 rev = revfix(repo, spec, None)
511 517 if rev in seen:
512 518 continue
513 519 seen.add(rev)
514 520 l.append(rev)
515 521 continue
516 522 except error.RepoLookupError:
517 523 pass
518 524
519 525 # fall through to new-style queries if old-style fails
520 526 m = revset.match(repo.ui, spec)
521 527 dl = [r for r in m(repo, list(repo)) if r not in seen]
522 528 l.extend(dl)
523 529 seen.update(dl)
524 530
525 531 return l
526 532
527 533 def expandpats(pats):
528 534 if not util.expandglobs:
529 535 return list(pats)
530 536 ret = []
531 537 for p in pats:
532 538 kind, name = matchmod._patsplit(p, None)
533 539 if kind is None:
534 540 try:
535 541 globbed = glob.glob(name)
536 542 except re.error:
537 543 globbed = [name]
538 544 if globbed:
539 545 ret.extend(globbed)
540 546 continue
541 547 ret.append(p)
542 548 return ret
543 549
544 550 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
545 551 if pats == ("",):
546 552 pats = []
547 553 if not globbed and default == 'relpath':
548 554 pats = expandpats(pats or [])
549 555
550 556 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
551 557 default)
552 558 def badfn(f, msg):
553 559 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
554 560 m.bad = badfn
555 561 return m, pats
556 562
557 563 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
558 564 return matchandpats(ctx, pats, opts, globbed, default)[0]
559 565
560 566 def matchall(repo):
561 567 return matchmod.always(repo.root, repo.getcwd())
562 568
563 569 def matchfiles(repo, files):
564 570 return matchmod.exact(repo.root, repo.getcwd(), files)
565 571
566 572 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
567 573 if dry_run is None:
568 574 dry_run = opts.get('dry_run')
569 575 if similarity is None:
570 576 similarity = float(opts.get('similarity') or 0)
571 577 # we'd use status here, except handling of symlinks and ignore is tricky
572 578 m = match(repo[None], pats, opts)
573 579 rejected = []
574 580 m.bad = lambda x, y: rejected.append(x)
575 581
576 582 added, unknown, deleted, removed = _interestingfiles(repo, m)
577 583
578 584 unknownset = set(unknown)
579 585 toprint = unknownset.copy()
580 586 toprint.update(deleted)
581 587 for abs in sorted(toprint):
582 588 if repo.ui.verbose or not m.exact(abs):
583 589 rel = m.rel(abs)
584 590 if abs in unknownset:
585 591 status = _('adding %s\n') % ((pats and rel) or abs)
586 592 else:
587 593 status = _('removing %s\n') % ((pats and rel) or abs)
588 594 repo.ui.status(status)
589 595
590 596 renames = _findrenames(repo, m, added + unknown, removed + deleted,
591 597 similarity)
592 598
593 599 if not dry_run:
594 600 _markchanges(repo, unknown, deleted, renames)
595 601
596 602 for f in rejected:
597 603 if f in m.files():
598 604 return 1
599 605 return 0
600 606
601 607 def marktouched(repo, files, similarity=0.0):
602 608 '''Assert that files have somehow been operated upon. files are relative to
603 609 the repo root.'''
604 610 m = matchfiles(repo, files)
605 611 rejected = []
606 612 m.bad = lambda x, y: rejected.append(x)
607 613
608 614 added, unknown, deleted, removed = _interestingfiles(repo, m)
609 615
610 616 if repo.ui.verbose:
611 617 unknownset = set(unknown)
612 618 toprint = unknownset.copy()
613 619 toprint.update(deleted)
614 620 for abs in sorted(toprint):
615 621 if abs in unknownset:
616 622 status = _('adding %s\n') % abs
617 623 else:
618 624 status = _('removing %s\n') % abs
619 625 repo.ui.status(status)
620 626
621 627 renames = _findrenames(repo, m, added + unknown, removed + deleted,
622 628 similarity)
623 629
624 630 _markchanges(repo, unknown, deleted, renames)
625 631
626 632 for f in rejected:
627 633 if f in m.files():
628 634 return 1
629 635 return 0
630 636
631 637 def _interestingfiles(repo, matcher):
632 638 '''Walk dirstate with matcher, looking for files that addremove would care
633 639 about.
634 640
635 641 This is different from dirstate.status because it doesn't care about
636 642 whether files are modified or clean.'''
637 643 added, unknown, deleted, removed = [], [], [], []
638 644 audit_path = pathutil.pathauditor(repo.root)
639 645
640 646 ctx = repo[None]
641 647 dirstate = repo.dirstate
642 648 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
643 649 full=False)
644 650 for abs, st in walkresults.iteritems():
645 651 dstate = dirstate[abs]
646 652 if dstate == '?' and audit_path.check(abs):
647 653 unknown.append(abs)
648 654 elif dstate != 'r' and not st:
649 655 deleted.append(abs)
650 656 # for finding renames
651 657 elif dstate == 'r':
652 658 removed.append(abs)
653 659 elif dstate == 'a':
654 660 added.append(abs)
655 661
656 662 return added, unknown, deleted, removed
657 663
658 664 def _findrenames(repo, matcher, added, removed, similarity):
659 665 '''Find renames from removed files to added ones.'''
660 666 renames = {}
661 667 if similarity > 0:
662 668 for old, new, score in similar.findrenames(repo, added, removed,
663 669 similarity):
664 670 if (repo.ui.verbose or not matcher.exact(old)
665 671 or not matcher.exact(new)):
666 672 repo.ui.status(_('recording removal of %s as rename to %s '
667 673 '(%d%% similar)\n') %
668 674 (matcher.rel(old), matcher.rel(new),
669 675 score * 100))
670 676 renames[new] = old
671 677 return renames
672 678
673 679 def _markchanges(repo, unknown, deleted, renames):
674 680 '''Marks the files in unknown as added, the files in deleted as removed,
675 681 and the files in renames as copied.'''
676 682 wctx = repo[None]
677 683 wlock = repo.wlock()
678 684 try:
679 685 wctx.forget(deleted)
680 686 wctx.add(unknown)
681 687 for new, old in renames.iteritems():
682 688 wctx.copy(old, new)
683 689 finally:
684 690 wlock.release()
685 691
686 692 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
687 693 """Update the dirstate to reflect the intent of copying src to dst. For
688 694 different reasons it might not end with dst being marked as copied from src.
689 695 """
690 696 origsrc = repo.dirstate.copied(src) or src
691 697 if dst == origsrc: # copying back a copy?
692 698 if repo.dirstate[dst] not in 'mn' and not dryrun:
693 699 repo.dirstate.normallookup(dst)
694 700 else:
695 701 if repo.dirstate[origsrc] == 'a' and origsrc == src:
696 702 if not ui.quiet:
697 703 ui.warn(_("%s has not been committed yet, so no copy "
698 704 "data will be stored for %s.\n")
699 705 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
700 706 if repo.dirstate[dst] in '?r' and not dryrun:
701 707 wctx.add([dst])
702 708 elif not dryrun:
703 709 wctx.copy(origsrc, dst)
704 710
705 711 def readrequires(opener, supported):
706 712 '''Reads and parses .hg/requires and checks if all entries found
707 713 are in the list of supported features.'''
708 714 requirements = set(opener.read("requires").splitlines())
709 715 missings = []
710 716 for r in requirements:
711 717 if r not in supported:
712 718 if not r or not r[0].isalnum():
713 719 raise error.RequirementError(_(".hg/requires file is corrupt"))
714 720 missings.append(r)
715 721 missings.sort()
716 722 if missings:
717 723 raise error.RequirementError(
718 724 _("unknown repository format: requires features '%s' (upgrade "
719 725 "Mercurial)") % "', '".join(missings))
720 726 return requirements
721 727
722 728 class filecachesubentry(object):
723 729 def __init__(self, path, stat):
724 730 self.path = path
725 731 self.cachestat = None
726 732 self._cacheable = None
727 733
728 734 if stat:
729 735 self.cachestat = filecachesubentry.stat(self.path)
730 736
731 737 if self.cachestat:
732 738 self._cacheable = self.cachestat.cacheable()
733 739 else:
734 740 # None means we don't know yet
735 741 self._cacheable = None
736 742
737 743 def refresh(self):
738 744 if self.cacheable():
739 745 self.cachestat = filecachesubentry.stat(self.path)
740 746
741 747 def cacheable(self):
742 748 if self._cacheable is not None:
743 749 return self._cacheable
744 750
745 751 # we don't know yet, assume it is for now
746 752 return True
747 753
748 754 def changed(self):
749 755 # no point in going further if we can't cache it
750 756 if not self.cacheable():
751 757 return True
752 758
753 759 newstat = filecachesubentry.stat(self.path)
754 760
755 761 # we may not know if it's cacheable yet, check again now
756 762 if newstat and self._cacheable is None:
757 763 self._cacheable = newstat.cacheable()
758 764
759 765 # check again
760 766 if not self._cacheable:
761 767 return True
762 768
763 769 if self.cachestat != newstat:
764 770 self.cachestat = newstat
765 771 return True
766 772 else:
767 773 return False
768 774
769 775 @staticmethod
770 776 def stat(path):
771 777 try:
772 778 return util.cachestat(path)
773 779 except OSError, e:
774 780 if e.errno != errno.ENOENT:
775 781 raise
776 782
777 783 class filecacheentry(object):
778 784 def __init__(self, paths, stat=True):
779 785 self._entries = []
780 786 for path in paths:
781 787 self._entries.append(filecachesubentry(path, stat))
782 788
783 789 def changed(self):
784 790 '''true if any entry has changed'''
785 791 for entry in self._entries:
786 792 if entry.changed():
787 793 return True
788 794 return False
789 795
790 796 def refresh(self):
791 797 for entry in self._entries:
792 798 entry.refresh()
793 799
794 800 class filecache(object):
795 801 '''A property like decorator that tracks files under .hg/ for updates.
796 802
797 803 Records stat info when called in _filecache.
798 804
799 805 On subsequent calls, compares old stat info with new info, and recreates the
800 806 object when any of the files changes, updating the new stat info in
801 807 _filecache.
802 808
803 809 Mercurial either atomically renames or appends to files under .hg,
804 810 so to ensure the cache is reliable we need the filesystem to be able
805 811 to tell us if a file has been replaced. If it can't, we fall back to
806 812 recreating the object on every call (essentially the same behaviour as
807 813 propertycache).
808 814
809 815 '''
810 816 def __init__(self, *paths):
811 817 self.paths = paths
812 818
813 819 def join(self, obj, fname):
814 820 """Used to compute the runtime path of a cached file.
815 821
816 822 Users should subclass filecache and provide their own version of this
817 823 function to call the appropriate join function on 'obj' (an instance
818 824 of the class that its member function was decorated).
819 825 """
820 826 return obj.join(fname)
821 827
822 828 def __call__(self, func):
823 829 self.func = func
824 830 self.name = func.__name__
825 831 return self
826 832
827 833 def __get__(self, obj, type=None):
828 834 # do we need to check if the file changed?
829 835 if self.name in obj.__dict__:
830 836 assert self.name in obj._filecache, self.name
831 837 return obj.__dict__[self.name]
832 838
833 839 entry = obj._filecache.get(self.name)
834 840
835 841 if entry:
836 842 if entry.changed():
837 843 entry.obj = self.func(obj)
838 844 else:
839 845 paths = [self.join(obj, path) for path in self.paths]
840 846
841 847 # We stat -before- creating the object so our cache doesn't lie if
842 848 # a writer modified between the time we read and stat
843 849 entry = filecacheentry(paths, True)
844 850 entry.obj = self.func(obj)
845 851
846 852 obj._filecache[self.name] = entry
847 853
848 854 obj.__dict__[self.name] = entry.obj
849 855 return entry.obj
850 856
851 857 def __set__(self, obj, value):
852 858 if self.name not in obj._filecache:
853 859 # we add an entry for the missing value because X in __dict__
854 860 # implies X in _filecache
855 861 paths = [self.join(obj, path) for path in self.paths]
856 862 ce = filecacheentry(paths, False)
857 863 obj._filecache[self.name] = ce
858 864 else:
859 865 ce = obj._filecache[self.name]
860 866
861 867 ce.obj = value # update cached copy
862 868 obj.__dict__[self.name] = value # update copy returned by obj.x
863 869
864 870 def __delete__(self, obj):
865 871 try:
866 872 del obj.__dict__[self.name]
867 873 except KeyError:
868 874 raise AttributeError(self.name)
869 875
870 876 class dirs(object):
871 877 '''a multiset of directory names from a dirstate or manifest'''
872 878
873 879 def __init__(self, map, skip=None):
874 880 self._dirs = {}
875 881 addpath = self.addpath
876 882 if util.safehasattr(map, 'iteritems') and skip is not None:
877 883 for f, s in map.iteritems():
878 884 if s[0] != skip:
879 885 addpath(f)
880 886 else:
881 887 for f in map:
882 888 addpath(f)
883 889
884 890 def addpath(self, path):
885 891 dirs = self._dirs
886 892 for base in finddirs(path):
887 893 if base in dirs:
888 894 dirs[base] += 1
889 895 return
890 896 dirs[base] = 1
891 897
892 898 def delpath(self, path):
893 899 dirs = self._dirs
894 900 for base in finddirs(path):
895 901 if dirs[base] > 1:
896 902 dirs[base] -= 1
897 903 return
898 904 del dirs[base]
899 905
900 906 def __iter__(self):
901 907 return self._dirs.iterkeys()
902 908
903 909 def __contains__(self, d):
904 910 return d in self._dirs
905 911
906 912 if util.safehasattr(parsers, 'dirs'):
907 913 dirs = parsers.dirs
908 914
909 915 def finddirs(path):
910 916 pos = path.rfind('/')
911 917 while pos != -1:
912 918 yield path[:pos]
913 919 pos = path.rfind('/', 0, pos)
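
As a side note for readers of the listing above, the filecache decorator defined near the end is meant to be attached to classes that expose a join() method and a _filecache dict. A minimal sketch of such a holder class follows; the class, property, and file names are invented for illustration and are not part of this changeset.

    import os
    from mercurial import scmutil

    class confholder(object):
        '''Illustrative object: scmutil.filecache only needs join() and _filecache.'''
        def __init__(self, path):
            self.path = path
            self._filecache = {}        # filecache stores its stat entries here, keyed by property name

        def join(self, fname):
            return os.path.join(self.path, fname)

        @scmutil.filecache('settings')  # recomputed only when the 'settings' file changes on disk
        def settings(self):
            try:
                return open(self.join('settings')).read()
            except IOError:
                return ''
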