vfs: add "chmod()"
FUJIWARA Katsunori
r20086:f3df2612 default
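
This changeset adds a chmod() helper to abstractvfs, so callers can change file permissions through a vfs by relative path, alongside the existing exists(), stat(), setflags() and unlink() wrappers. The sketch below shows how the new method might be used; it is illustrative only, and the repository path and file name are assumptions, not part of the changeset.

    # Python 2 sketch against the vfs class defined in scmutil.py below.
    from mercurial import scmutil

    v = scmutil.vfs('/tmp/example-repo', audit=False)  # hypothetical base dir
    v.write('hook.sh', '#!/bin/sh\necho updated\n')
    # chmod() joins the relative path against the vfs base and delegates to
    # os.chmod(), mirroring the other thin wrappers such as stat() and utime().
    v.chmod('hook.sh', 0755)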
@@ -1,910 +1,913 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 11 import pathutil
12 12 import match as matchmod
13 13 import os, errno, re, glob
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
23 23 def nochangesfound(ui, repo, excluded=None):
24 24 '''Report no changes for push/pull, excluded is None or a list of
25 25 nodes excluded from the push/pull.
26 26 '''
27 27 secretlist = []
28 28 if excluded:
29 29 for n in excluded:
30 30 if n not in repo:
31 31 # discovery should not have included the filtered revision,
32 32 # we have to explicitly exclude it until discovery is cleaned up.
33 33 continue
34 34 ctx = repo[n]
35 35 if ctx.phase() >= phases.secret and not ctx.extinct():
36 36 secretlist.append(n)
37 37
38 38 if secretlist:
39 39 ui.status(_("no changes found (ignored %d secret changesets)\n")
40 40 % len(secretlist))
41 41 else:
42 42 ui.status(_("no changes found\n"))
43 43
44 44 def checknewlabel(repo, lbl, kind):
45 45 # Do not use the "kind" parameter in ui output.
46 46 # It makes strings difficult to translate.
47 47 if lbl in ['tip', '.', 'null']:
48 48 raise util.Abort(_("the name '%s' is reserved") % lbl)
49 49 for c in (':', '\0', '\n', '\r'):
50 50 if c in lbl:
51 51 raise util.Abort(_("%r cannot be used in a name") % c)
52 52 try:
53 53 int(lbl)
54 54 raise util.Abort(_("cannot use an integer as a name"))
55 55 except ValueError:
56 56 pass
57 57
58 58 def checkfilename(f):
59 59 '''Check that the filename f is an acceptable filename for a tracked file'''
60 60 if '\r' in f or '\n' in f:
61 61 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
62 62
63 63 def checkportable(ui, f):
64 64 '''Check if filename f is portable and warn or abort depending on config'''
65 65 checkfilename(f)
66 66 abort, warn = checkportabilityalert(ui)
67 67 if abort or warn:
68 68 msg = util.checkwinfilename(f)
69 69 if msg:
70 70 msg = "%s: %r" % (msg, f)
71 71 if abort:
72 72 raise util.Abort(msg)
73 73 ui.warn(_("warning: %s\n") % msg)
74 74
75 75 def checkportabilityalert(ui):
76 76 '''check if the user's config requests nothing, a warning, or abort for
77 77 non-portable filenames'''
78 78 val = ui.config('ui', 'portablefilenames', 'warn')
79 79 lval = val.lower()
80 80 bval = util.parsebool(val)
81 81 abort = os.name == 'nt' or lval == 'abort'
82 82 warn = bval or lval == 'warn'
83 83 if bval is None and not (warn or abort or lval == 'ignore'):
84 84 raise error.ConfigError(
85 85 _("ui.portablefilenames value is invalid ('%s')") % val)
86 86 return abort, warn
87 87
88 88 class casecollisionauditor(object):
89 89 def __init__(self, ui, abort, dirstate):
90 90 self._ui = ui
91 91 self._abort = abort
92 92 allfiles = '\0'.join(dirstate._map)
93 93 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
94 94 self._dirstate = dirstate
95 95 # The purpose of _newfiles is so that we don't complain about
96 96 # case collisions if someone were to call this object with the
97 97 # same filename twice.
98 98 self._newfiles = set()
99 99
100 100 def __call__(self, f):
101 101 if f in self._newfiles:
102 102 return
103 103 fl = encoding.lower(f)
104 104 if fl in self._loweredfiles and f not in self._dirstate:
105 105 msg = _('possible case-folding collision for %s') % f
106 106 if self._abort:
107 107 raise util.Abort(msg)
108 108 self._ui.warn(_("warning: %s\n") % msg)
109 109 self._loweredfiles.add(fl)
110 110 self._newfiles.add(f)
111 111
112 112 class abstractvfs(object):
113 113 """Abstract base class; cannot be instantiated"""
114 114
115 115 def __init__(self, *args, **kwargs):
116 116 '''Prevent instantiation; don't call this from subclasses.'''
117 117 raise NotImplementedError('attempted instantiating ' + str(type(self)))
118 118
119 119 def tryread(self, path):
120 120 '''gracefully return an empty string for missing files'''
121 121 try:
122 122 return self.read(path)
123 123 except IOError, inst:
124 124 if inst.errno != errno.ENOENT:
125 125 raise
126 126 return ""
127 127
128 128 def open(self, path, mode="r", text=False, atomictemp=False):
129 129 self.open = self.__call__
130 130 return self.__call__(path, mode, text, atomictemp)
131 131
132 132 def read(self, path):
133 133 fp = self(path, 'rb')
134 134 try:
135 135 return fp.read()
136 136 finally:
137 137 fp.close()
138 138
139 139 def write(self, path, data):
140 140 fp = self(path, 'wb')
141 141 try:
142 142 return fp.write(data)
143 143 finally:
144 144 fp.close()
145 145
146 146 def append(self, path, data):
147 147 fp = self(path, 'ab')
148 148 try:
149 149 return fp.write(data)
150 150 finally:
151 151 fp.close()
152 152
153 def chmod(self, path, mode):
154 return os.chmod(self.join(path), mode)
155
153 156 def exists(self, path=None):
154 157 return os.path.exists(self.join(path))
155 158
156 159 def fstat(self, fp):
157 160 return util.fstat(fp)
158 161
159 162 def isdir(self, path=None):
160 163 return os.path.isdir(self.join(path))
161 164
162 165 def isfile(self, path=None):
163 166 return os.path.isfile(self.join(path))
164 167
165 168 def islink(self, path=None):
166 169 return os.path.islink(self.join(path))
167 170
168 171 def lstat(self, path=None):
169 172 return os.lstat(self.join(path))
170 173
171 174 def makedir(self, path=None, notindexed=True):
172 175 return util.makedir(self.join(path), notindexed)
173 176
174 177 def makedirs(self, path=None, mode=None):
175 178 return util.makedirs(self.join(path), mode)
176 179
177 180 def mkdir(self, path=None):
178 181 return os.mkdir(self.join(path))
179 182
180 183 def readdir(self, path=None, stat=None, skip=None):
181 184 return osutil.listdir(self.join(path), stat, skip)
182 185
183 186 def rename(self, src, dst):
184 187 return util.rename(self.join(src), self.join(dst))
185 188
186 189 def readlink(self, path):
187 190 return os.readlink(self.join(path))
188 191
189 192 def setflags(self, path, l, x):
190 193 return util.setflags(self.join(path), l, x)
191 194
192 195 def stat(self, path=None):
193 196 return os.stat(self.join(path))
194 197
195 198 def unlink(self, path=None):
196 199 return util.unlink(self.join(path))
197 200
198 201 def utime(self, path=None, t=None):
199 202 return os.utime(self.join(path), t)
200 203
201 204 class vfs(abstractvfs):
202 205 '''Operate files relative to a base directory
203 206
204 207 This class is used to hide the details of COW semantics and
205 208 remote file access from higher level code.
206 209 '''
207 210 def __init__(self, base, audit=True, expandpath=False, realpath=False):
208 211 if expandpath:
209 212 base = util.expandpath(base)
210 213 if realpath:
211 214 base = os.path.realpath(base)
212 215 self.base = base
213 216 self._setmustaudit(audit)
214 217 self.createmode = None
215 218 self._trustnlink = None
216 219
217 220 def _getmustaudit(self):
218 221 return self._audit
219 222
220 223 def _setmustaudit(self, onoff):
221 224 self._audit = onoff
222 225 if onoff:
223 226 self.audit = pathutil.pathauditor(self.base)
224 227 else:
225 228 self.audit = util.always
226 229
227 230 mustaudit = property(_getmustaudit, _setmustaudit)
228 231
229 232 @util.propertycache
230 233 def _cansymlink(self):
231 234 return util.checklink(self.base)
232 235
233 236 @util.propertycache
234 237 def _chmod(self):
235 238 return util.checkexec(self.base)
236 239
237 240 def _fixfilemode(self, name):
238 241 if self.createmode is None or not self._chmod:
239 242 return
240 243 os.chmod(name, self.createmode & 0666)
241 244
242 245 def __call__(self, path, mode="r", text=False, atomictemp=False):
243 246 if self._audit:
244 247 r = util.checkosfilename(path)
245 248 if r:
246 249 raise util.Abort("%s: %r" % (r, path))
247 250 self.audit(path)
248 251 f = self.join(path)
249 252
250 253 if not text and "b" not in mode:
251 254 mode += "b" # for that other OS
252 255
253 256 nlink = -1
254 257 if mode not in ('r', 'rb'):
255 258 dirname, basename = util.split(f)
256 259 # If basename is empty, then the path is malformed because it points
257 260 # to a directory. Let the posixfile() call below raise IOError.
258 261 if basename:
259 262 if atomictemp:
260 263 util.ensuredirs(dirname, self.createmode)
261 264 return util.atomictempfile(f, mode, self.createmode)
262 265 try:
263 266 if 'w' in mode:
264 267 util.unlink(f)
265 268 nlink = 0
266 269 else:
267 270 # nlinks() may behave differently for files on Windows
268 271 # shares if the file is open.
269 272 fd = util.posixfile(f)
270 273 nlink = util.nlinks(f)
271 274 if nlink < 1:
272 275 nlink = 2 # force mktempcopy (issue1922)
273 276 fd.close()
274 277 except (OSError, IOError), e:
275 278 if e.errno != errno.ENOENT:
276 279 raise
277 280 nlink = 0
278 281 util.ensuredirs(dirname, self.createmode)
279 282 if nlink > 0:
280 283 if self._trustnlink is None:
281 284 self._trustnlink = nlink > 1 or util.checknlink(f)
282 285 if nlink > 1 or not self._trustnlink:
283 286 util.rename(util.mktempcopy(f), f)
284 287 fp = util.posixfile(f, mode)
285 288 if nlink == 0:
286 289 self._fixfilemode(f)
287 290 return fp
288 291
289 292 def symlink(self, src, dst):
290 293 self.audit(dst)
291 294 linkname = self.join(dst)
292 295 try:
293 296 os.unlink(linkname)
294 297 except OSError:
295 298 pass
296 299
297 300 util.ensuredirs(os.path.dirname(linkname), self.createmode)
298 301
299 302 if self._cansymlink:
300 303 try:
301 304 os.symlink(src, linkname)
302 305 except OSError, err:
303 306 raise OSError(err.errno, _('could not symlink to %r: %s') %
304 307 (src, err.strerror), linkname)
305 308 else:
306 309 self.write(dst, src)
307 310
308 311 def join(self, path):
309 312 if path:
310 313 return os.path.join(self.base, path)
311 314 else:
312 315 return self.base
313 316
314 317 opener = vfs
315 318
316 319 class auditvfs(object):
317 320 def __init__(self, vfs):
318 321 self.vfs = vfs
319 322
320 323 def _getmustaudit(self):
321 324 return self.vfs.mustaudit
322 325
323 326 def _setmustaudit(self, onoff):
324 327 self.vfs.mustaudit = onoff
325 328
326 329 mustaudit = property(_getmustaudit, _setmustaudit)
327 330
328 331 class filtervfs(abstractvfs, auditvfs):
329 332 '''Wrapper vfs for filtering filenames with a function.'''
330 333
331 334 def __init__(self, vfs, filter):
332 335 auditvfs.__init__(self, vfs)
333 336 self._filter = filter
334 337
335 338 def __call__(self, path, *args, **kwargs):
336 339 return self.vfs(self._filter(path), *args, **kwargs)
337 340
338 341 def join(self, path):
339 342 if path:
340 343 return self.vfs.join(self._filter(path))
341 344 else:
342 345 return self.vfs.join(path)
343 346
344 347 filteropener = filtervfs
345 348
346 349 class readonlyvfs(abstractvfs, auditvfs):
347 350 '''Wrapper vfs preventing any writing.'''
348 351
349 352 def __init__(self, vfs):
350 353 auditvfs.__init__(self, vfs)
351 354
352 355 def __call__(self, path, mode='r', *args, **kw):
353 356 if mode not in ('r', 'rb'):
354 357 raise util.Abort('this vfs is read only')
355 358 return self.vfs(path, mode, *args, **kw)
356 359
357 360
358 361 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
359 362 '''yield every hg repository under path, always recursively.
360 363 The recurse flag will only control recursion into repo working dirs'''
361 364 def errhandler(err):
362 365 if err.filename == path:
363 366 raise err
364 367 samestat = getattr(os.path, 'samestat', None)
365 368 if followsym and samestat is not None:
366 369 def adddir(dirlst, dirname):
367 370 match = False
368 371 dirstat = os.stat(dirname)
369 372 for lstdirstat in dirlst:
370 373 if samestat(dirstat, lstdirstat):
371 374 match = True
372 375 break
373 376 if not match:
374 377 dirlst.append(dirstat)
375 378 return not match
376 379 else:
377 380 followsym = False
378 381
379 382 if (seen_dirs is None) and followsym:
380 383 seen_dirs = []
381 384 adddir(seen_dirs, path)
382 385 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
383 386 dirs.sort()
384 387 if '.hg' in dirs:
385 388 yield root # found a repository
386 389 qroot = os.path.join(root, '.hg', 'patches')
387 390 if os.path.isdir(os.path.join(qroot, '.hg')):
388 391 yield qroot # we have a patch queue repo here
389 392 if recurse:
390 393 # avoid recursing inside the .hg directory
391 394 dirs.remove('.hg')
392 395 else:
393 396 dirs[:] = [] # don't descend further
394 397 elif followsym:
395 398 newdirs = []
396 399 for d in dirs:
397 400 fname = os.path.join(root, d)
398 401 if adddir(seen_dirs, fname):
399 402 if os.path.islink(fname):
400 403 for hgname in walkrepos(fname, True, seen_dirs):
401 404 yield hgname
402 405 else:
403 406 newdirs.append(d)
404 407 dirs[:] = newdirs
405 408
406 409 def osrcpath():
407 410 '''return default os-specific hgrc search path'''
408 411 path = systemrcpath()
409 412 path.extend(userrcpath())
410 413 path = [os.path.normpath(f) for f in path]
411 414 return path
412 415
413 416 _rcpath = None
414 417
415 418 def rcpath():
416 419 '''return hgrc search path. if env var HGRCPATH is set, use it.
417 420 for each item in path, if directory, use files ending in .rc,
418 421 else use item.
419 422 make HGRCPATH empty to only look in .hg/hgrc of current repo.
420 423 if no HGRCPATH, use default os-specific path.'''
421 424 global _rcpath
422 425 if _rcpath is None:
423 426 if 'HGRCPATH' in os.environ:
424 427 _rcpath = []
425 428 for p in os.environ['HGRCPATH'].split(os.pathsep):
426 429 if not p:
427 430 continue
428 431 p = util.expandpath(p)
429 432 if os.path.isdir(p):
430 433 for f, kind in osutil.listdir(p):
431 434 if f.endswith('.rc'):
432 435 _rcpath.append(os.path.join(p, f))
433 436 else:
434 437 _rcpath.append(p)
435 438 else:
436 439 _rcpath = osrcpath()
437 440 return _rcpath
438 441
439 442 def revsingle(repo, revspec, default='.'):
440 443 if not revspec and revspec != 0:
441 444 return repo[default]
442 445
443 446 l = revrange(repo, [revspec])
444 447 if len(l) < 1:
445 448 raise util.Abort(_('empty revision set'))
446 449 return repo[l[-1]]
447 450
448 451 def revpair(repo, revs):
449 452 if not revs:
450 453 return repo.dirstate.p1(), None
451 454
452 455 l = revrange(repo, revs)
453 456
454 457 if len(l) == 0:
455 458 if revs:
456 459 raise util.Abort(_('empty revision range'))
457 460 return repo.dirstate.p1(), None
458 461
459 462 if len(l) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
460 463 return repo.lookup(l[0]), None
461 464
462 465 return repo.lookup(l[0]), repo.lookup(l[-1])
463 466
464 467 _revrangesep = ':'
465 468
466 469 def revrange(repo, revs):
467 470 """Yield revision as strings from a list of revision specifications."""
468 471
469 472 def revfix(repo, val, defval):
470 473 if not val and val != 0 and defval is not None:
471 474 return defval
472 475 return repo[val].rev()
473 476
474 477 seen, l = set(), []
475 478 for spec in revs:
476 479 if l and not seen:
477 480 seen = set(l)
478 481 # attempt to parse old-style ranges first to deal with
479 482 # things like old-tag which contain query metacharacters
480 483 try:
481 484 if isinstance(spec, int):
482 485 seen.add(spec)
483 486 l.append(spec)
484 487 continue
485 488
486 489 if _revrangesep in spec:
487 490 start, end = spec.split(_revrangesep, 1)
488 491 start = revfix(repo, start, 0)
489 492 end = revfix(repo, end, len(repo) - 1)
490 493 if end == nullrev and start <= 0:
491 494 start = nullrev
492 495 rangeiter = repo.changelog.revs(start, end)
493 496 if not seen and not l:
494 497 # by far the most common case: revs = ["-1:0"]
495 498 l = list(rangeiter)
496 499 # defer syncing seen until next iteration
497 500 continue
498 501 newrevs = set(rangeiter)
499 502 if seen:
500 503 newrevs.difference_update(seen)
501 504 seen.update(newrevs)
502 505 else:
503 506 seen = newrevs
504 507 l.extend(sorted(newrevs, reverse=start > end))
505 508 continue
506 509 elif spec and spec in repo: # single unquoted rev
507 510 rev = revfix(repo, spec, None)
508 511 if rev in seen:
509 512 continue
510 513 seen.add(rev)
511 514 l.append(rev)
512 515 continue
513 516 except error.RepoLookupError:
514 517 pass
515 518
516 519 # fall through to new-style queries if old-style fails
517 520 m = revset.match(repo.ui, spec)
518 521 dl = [r for r in m(repo, list(repo)) if r not in seen]
519 522 l.extend(dl)
520 523 seen.update(dl)
521 524
522 525 return l
523 526
524 527 def expandpats(pats):
525 528 if not util.expandglobs:
526 529 return list(pats)
527 530 ret = []
528 531 for p in pats:
529 532 kind, name = matchmod._patsplit(p, None)
530 533 if kind is None:
531 534 try:
532 535 globbed = glob.glob(name)
533 536 except re.error:
534 537 globbed = [name]
535 538 if globbed:
536 539 ret.extend(globbed)
537 540 continue
538 541 ret.append(p)
539 542 return ret
540 543
541 544 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
542 545 if pats == ("",):
543 546 pats = []
544 547 if not globbed and default == 'relpath':
545 548 pats = expandpats(pats or [])
546 549
547 550 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
548 551 default)
549 552 def badfn(f, msg):
550 553 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
551 554 m.bad = badfn
552 555 return m, pats
553 556
554 557 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
555 558 return matchandpats(ctx, pats, opts, globbed, default)[0]
556 559
557 560 def matchall(repo):
558 561 return matchmod.always(repo.root, repo.getcwd())
559 562
560 563 def matchfiles(repo, files):
561 564 return matchmod.exact(repo.root, repo.getcwd(), files)
562 565
563 566 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
564 567 if dry_run is None:
565 568 dry_run = opts.get('dry_run')
566 569 if similarity is None:
567 570 similarity = float(opts.get('similarity') or 0)
568 571 # we'd use status here, except handling of symlinks and ignore is tricky
569 572 m = match(repo[None], pats, opts)
570 573 rejected = []
571 574 m.bad = lambda x, y: rejected.append(x)
572 575
573 576 added, unknown, deleted, removed = _interestingfiles(repo, m)
574 577
575 578 unknownset = set(unknown)
576 579 toprint = unknownset.copy()
577 580 toprint.update(deleted)
578 581 for abs in sorted(toprint):
579 582 if repo.ui.verbose or not m.exact(abs):
580 583 rel = m.rel(abs)
581 584 if abs in unknownset:
582 585 status = _('adding %s\n') % ((pats and rel) or abs)
583 586 else:
584 587 status = _('removing %s\n') % ((pats and rel) or abs)
585 588 repo.ui.status(status)
586 589
587 590 renames = _findrenames(repo, m, added + unknown, removed + deleted,
588 591 similarity)
589 592
590 593 if not dry_run:
591 594 _markchanges(repo, unknown, deleted, renames)
592 595
593 596 for f in rejected:
594 597 if f in m.files():
595 598 return 1
596 599 return 0
597 600
598 601 def marktouched(repo, files, similarity=0.0):
599 602 '''Assert that files have somehow been operated upon. files are relative to
600 603 the repo root.'''
601 604 m = matchfiles(repo, files)
602 605 rejected = []
603 606 m.bad = lambda x, y: rejected.append(x)
604 607
605 608 added, unknown, deleted, removed = _interestingfiles(repo, m)
606 609
607 610 if repo.ui.verbose:
608 611 unknownset = set(unknown)
609 612 toprint = unknownset.copy()
610 613 toprint.update(deleted)
611 614 for abs in sorted(toprint):
612 615 if abs in unknownset:
613 616 status = _('adding %s\n') % abs
614 617 else:
615 618 status = _('removing %s\n') % abs
616 619 repo.ui.status(status)
617 620
618 621 renames = _findrenames(repo, m, added + unknown, removed + deleted,
619 622 similarity)
620 623
621 624 _markchanges(repo, unknown, deleted, renames)
622 625
623 626 for f in rejected:
624 627 if f in m.files():
625 628 return 1
626 629 return 0
627 630
628 631 def _interestingfiles(repo, matcher):
629 632 '''Walk dirstate with matcher, looking for files that addremove would care
630 633 about.
631 634
632 635 This is different from dirstate.status because it doesn't care about
633 636 whether files are modified or clean.'''
634 637 added, unknown, deleted, removed = [], [], [], []
635 638 audit_path = pathutil.pathauditor(repo.root)
636 639
637 640 ctx = repo[None]
638 641 dirstate = repo.dirstate
639 642 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
640 643 full=False)
641 644 for abs, st in walkresults.iteritems():
642 645 dstate = dirstate[abs]
643 646 if dstate == '?' and audit_path.check(abs):
644 647 unknown.append(abs)
645 648 elif dstate != 'r' and not st:
646 649 deleted.append(abs)
647 650 # for finding renames
648 651 elif dstate == 'r':
649 652 removed.append(abs)
650 653 elif dstate == 'a':
651 654 added.append(abs)
652 655
653 656 return added, unknown, deleted, removed
654 657
655 658 def _findrenames(repo, matcher, added, removed, similarity):
656 659 '''Find renames from removed files to added ones.'''
657 660 renames = {}
658 661 if similarity > 0:
659 662 for old, new, score in similar.findrenames(repo, added, removed,
660 663 similarity):
661 664 if (repo.ui.verbose or not matcher.exact(old)
662 665 or not matcher.exact(new)):
663 666 repo.ui.status(_('recording removal of %s as rename to %s '
664 667 '(%d%% similar)\n') %
665 668 (matcher.rel(old), matcher.rel(new),
666 669 score * 100))
667 670 renames[new] = old
668 671 return renames
669 672
670 673 def _markchanges(repo, unknown, deleted, renames):
671 674 '''Marks the files in unknown as added, the files in deleted as removed,
672 675 and the files in renames as copied.'''
673 676 wctx = repo[None]
674 677 wlock = repo.wlock()
675 678 try:
676 679 wctx.forget(deleted)
677 680 wctx.add(unknown)
678 681 for new, old in renames.iteritems():
679 682 wctx.copy(old, new)
680 683 finally:
681 684 wlock.release()
682 685
683 686 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
684 687 """Update the dirstate to reflect the intent of copying src to dst. For
685 688 different reasons it might not end with dst being marked as copied from src.
686 689 """
687 690 origsrc = repo.dirstate.copied(src) or src
688 691 if dst == origsrc: # copying back a copy?
689 692 if repo.dirstate[dst] not in 'mn' and not dryrun:
690 693 repo.dirstate.normallookup(dst)
691 694 else:
692 695 if repo.dirstate[origsrc] == 'a' and origsrc == src:
693 696 if not ui.quiet:
694 697 ui.warn(_("%s has not been committed yet, so no copy "
695 698 "data will be stored for %s.\n")
696 699 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
697 700 if repo.dirstate[dst] in '?r' and not dryrun:
698 701 wctx.add([dst])
699 702 elif not dryrun:
700 703 wctx.copy(origsrc, dst)
701 704
702 705 def readrequires(opener, supported):
703 706 '''Reads and parses .hg/requires and checks if all entries found
704 707 are in the list of supported features.'''
705 708 requirements = set(opener.read("requires").splitlines())
706 709 missings = []
707 710 for r in requirements:
708 711 if r not in supported:
709 712 if not r or not r[0].isalnum():
710 713 raise error.RequirementError(_(".hg/requires file is corrupt"))
711 714 missings.append(r)
712 715 missings.sort()
713 716 if missings:
714 717 raise error.RequirementError(
715 718 _("unknown repository format: requires features '%s' (upgrade "
716 719 "Mercurial)") % "', '".join(missings))
717 720 return requirements
718 721
719 722 class filecachesubentry(object):
720 723 def __init__(self, path, stat):
721 724 self.path = path
722 725 self.cachestat = None
723 726 self._cacheable = None
724 727
725 728 if stat:
726 729 self.cachestat = filecachesubentry.stat(self.path)
727 730
728 731 if self.cachestat:
729 732 self._cacheable = self.cachestat.cacheable()
730 733 else:
731 734 # None means we don't know yet
732 735 self._cacheable = None
733 736
734 737 def refresh(self):
735 738 if self.cacheable():
736 739 self.cachestat = filecachesubentry.stat(self.path)
737 740
738 741 def cacheable(self):
739 742 if self._cacheable is not None:
740 743 return self._cacheable
741 744
742 745 # we don't know yet, assume it is for now
743 746 return True
744 747
745 748 def changed(self):
746 749 # no point in going further if we can't cache it
747 750 if not self.cacheable():
748 751 return True
749 752
750 753 newstat = filecachesubentry.stat(self.path)
751 754
752 755 # we may not know if it's cacheable yet, check again now
753 756 if newstat and self._cacheable is None:
754 757 self._cacheable = newstat.cacheable()
755 758
756 759 # check again
757 760 if not self._cacheable:
758 761 return True
759 762
760 763 if self.cachestat != newstat:
761 764 self.cachestat = newstat
762 765 return True
763 766 else:
764 767 return False
765 768
766 769 @staticmethod
767 770 def stat(path):
768 771 try:
769 772 return util.cachestat(path)
770 773 except OSError, e:
771 774 if e.errno != errno.ENOENT:
772 775 raise
773 776
774 777 class filecacheentry(object):
775 778 def __init__(self, paths, stat=True):
776 779 self._entries = []
777 780 for path in paths:
778 781 self._entries.append(filecachesubentry(path, stat))
779 782
780 783 def changed(self):
781 784 '''true if any entry has changed'''
782 785 for entry in self._entries:
783 786 if entry.changed():
784 787 return True
785 788 return False
786 789
787 790 def refresh(self):
788 791 for entry in self._entries:
789 792 entry.refresh()
790 793
791 794 class filecache(object):
792 795 '''A property like decorator that tracks files under .hg/ for updates.
793 796
794 797 Records stat info when called in _filecache.
795 798
796 799 On subsequent calls, compares old stat info with new info, and recreates the
797 800 object when any of the files changes, updating the new stat info in
798 801 _filecache.
799 802
800 803 Mercurial either atomically renames or appends files under .hg,
801 804 so to ensure the cache is reliable we need the filesystem to be able
802 805 to tell us if a file has been replaced. If it can't, we fall back to
803 806 recreating the object on every call (essentially the same behaviour as
804 807 propertycache).
805 808
806 809 '''
807 810 def __init__(self, *paths):
808 811 self.paths = paths
809 812
810 813 def join(self, obj, fname):
811 814 """Used to compute the runtime path of a cached file.
812 815
813 816 Users should subclass filecache and provide their own version of this
814 817 function to call the appropriate join function on 'obj' (an instance
815 818 of the class that its member function was decorated).
816 819 """
817 820 return obj.join(fname)
818 821
819 822 def __call__(self, func):
820 823 self.func = func
821 824 self.name = func.__name__
822 825 return self
823 826
824 827 def __get__(self, obj, type=None):
825 828 # do we need to check if the file changed?
826 829 if self.name in obj.__dict__:
827 830 assert self.name in obj._filecache, self.name
828 831 return obj.__dict__[self.name]
829 832
830 833 entry = obj._filecache.get(self.name)
831 834
832 835 if entry:
833 836 if entry.changed():
834 837 entry.obj = self.func(obj)
835 838 else:
836 839 paths = [self.join(obj, path) for path in self.paths]
837 840
838 841 # We stat -before- creating the object so our cache doesn't lie if
839 842 # a writer modified between the time we read and stat
840 843 entry = filecacheentry(paths, True)
841 844 entry.obj = self.func(obj)
842 845
843 846 obj._filecache[self.name] = entry
844 847
845 848 obj.__dict__[self.name] = entry.obj
846 849 return entry.obj
847 850
848 851 def __set__(self, obj, value):
849 852 if self.name not in obj._filecache:
850 853 # we add an entry for the missing value because X in __dict__
851 854 # implies X in _filecache
852 855 paths = [self.join(obj, path) for path in self.paths]
853 856 ce = filecacheentry(paths, False)
854 857 obj._filecache[self.name] = ce
855 858 else:
856 859 ce = obj._filecache[self.name]
857 860
858 861 ce.obj = value # update cached copy
859 862 obj.__dict__[self.name] = value # update copy returned by obj.x
860 863
861 864 def __delete__(self, obj):
862 865 try:
863 866 del obj.__dict__[self.name]
864 867 except KeyError:
865 868 raise AttributeError(self.name)
866 869
867 870 class dirs(object):
868 871 '''a multiset of directory names from a dirstate or manifest'''
869 872
870 873 def __init__(self, map, skip=None):
871 874 self._dirs = {}
872 875 addpath = self.addpath
873 876 if util.safehasattr(map, 'iteritems') and skip is not None:
874 877 for f, s in map.iteritems():
875 878 if s[0] != skip:
876 879 addpath(f)
877 880 else:
878 881 for f in map:
879 882 addpath(f)
880 883
881 884 def addpath(self, path):
882 885 dirs = self._dirs
883 886 for base in finddirs(path):
884 887 if base in dirs:
885 888 dirs[base] += 1
886 889 return
887 890 dirs[base] = 1
888 891
889 892 def delpath(self, path):
890 893 dirs = self._dirs
891 894 for base in finddirs(path):
892 895 if dirs[base] > 1:
893 896 dirs[base] -= 1
894 897 return
895 898 del dirs[base]
896 899
897 900 def __iter__(self):
898 901 return self._dirs.iterkeys()
899 902
900 903 def __contains__(self, d):
901 904 return d in self._dirs
902 905
903 906 if util.safehasattr(parsers, 'dirs'):
904 907 dirs = parsers.dirs
905 908
906 909 def finddirs(path):
907 910 pos = path.rfind('/')
908 911 while pos != -1:
909 912 yield path[:pos]
910 913 pos = path.rfind('/', 0, pos)