##// END OF EJS Templates
scmutil: rename filecacheentry to filecachesubentry...
Siddharth Agarwal -
r20043:88bd8df0 default
parent child Browse files
Show More
@@ -1,886 +1,886 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 11 import pathutil
12 12 import match as matchmod
13 13 import os, errno, re, glob
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for node in excluded or []:
        if node not in repo:
            # discovery should not have included the filtered revision,
            # we have to explicitly exclude it until discovery is cleanup.
            continue
        ctx = repo[node]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(node)

    if not secretlist:
        ui.status(_("no changes found\n"))
    else:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
43 43
def checknewlabel(repo, lbl, kind):
    '''Abort if lbl is not a valid new label (branch/bookmark/tag) name.'''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise util.Abort(_("the name '%s' is reserved") % lbl)
    for forbidden in (':', '\0', '\n', '\r'):
        if forbidden in lbl:
            raise util.Abort(_("%r cannot be used in a name") % forbidden)
    try:
        int(lbl)
    except ValueError:
        return
    raise util.Abort(_("cannot use an integer as a name"))
57 57
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    for badchar in ('\r', '\n'):
        if badchar in f:
            raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
62 62
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise util.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
74 74
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    # Windows always aborts; elsewhere only an explicit 'abort' setting does
    abort = lval == 'abort' or os.name == 'nt'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
87 87
class casecollisionauditor(object):
    '''Warn (or abort) when a file being added collides case-insensitively
    with a file already tracked in the dirstate or previously audited.'''
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        # abort: raise instead of warning when a collision is found
        self._abort = abort
        # join with NUL so a single encoding.lower() call folds every name
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        # audit filename f; report each potential collision at most once
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
111 111
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def open(self, path, mode="r", text=False, atomictemp=False):
        # replace this bound method with __call__ on first use so that
        # subsequent opens skip the extra indirection
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp)

    def read(self, path):
        '''return the entire contents of path (binary mode)'''
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def write(self, path, data):
        '''replace the contents of path with data (binary mode)'''
        fp = self(path, 'wb')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def append(self, path, data):
        '''append data to path (binary mode)'''
        fp = self(path, 'ab')
        try:
            return fp.write(data)
        finally:
            fp.close()

    # The helpers below forward to os/os.path/util equivalents applied to
    # the joined path; path=None operates on the vfs base directory.
    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def rename(self, src, dst):
        return util.rename(self.join(src), self.join(dst))

    def readlink(self, path):
        return os.readlink(self.join(path))

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)
197 197
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        # base: directory all relative paths are resolved against
        # audit: validate paths via pathutil.pathauditor before use
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self._setmustaudit(audit)
        # mode bits applied to newly created files, or None for default
        self.createmode = None
        # whether nlink counts are trustworthy on this FS; None = unknown
        self._trustnlink = None

    def _getmustaudit(self):
        return self._audit

    def _setmustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            self.audit = util.always

    mustaudit = property(_getmustaudit, _setmustaudit)

    @util.propertycache
    def _cansymlink(self):
        # does the filesystem under base support symlinks?
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        # does the filesystem under base honor the exec bit?
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        # apply createmode (minus exec-irrelevant bits) to a new file
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False):
        '''open path relative to base, breaking hardlinks for writes so
        copy-on-write clones are not corrupted'''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
            self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.ensuredirs(dirname, self.createmode)
                    return util.atomictempfile(f, mode, self.createmode)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        fd = util.posixfile(f)
                        nlink = util.nlinks(f)
                        if nlink < 1:
                            nlink = 2 # force mktempcopy (issue1922)
                        fd.close()
                except (OSError, IOError), e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                    util.ensuredirs(dirname, self.createmode)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break the hardlink by replacing f with a copy
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        '''create a symlink dst -> src; falls back to writing src as the
        file contents when the filesystem does not support symlinks'''
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.ensuredirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            self.write(dst, src)

    def join(self, path):
        '''return path joined onto base; base itself when path is falsy'''
        if path:
            return os.path.join(self.base, path)
        else:
            return self.base

# backwards-compatibility alias
opener = vfs
312 312
class auditvfs(object):
    '''Mixin wrapping another vfs and forwarding its mustaudit flag.'''
    def __init__(self, vfs):
        self.vfs = vfs

    def _getmustaudit(self):
        return self.vfs.mustaudit

    def _setmustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    mustaudit = property(_getmustaudit, _setmustaudit)
324 324
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        # filter: callable mapping a path to the path actually used
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path):
        if path:
            return self.vfs.join(self._filter(path))
        else:
            return self.vfs.join(path)

# backwards-compatibility alias
filteropener = filtervfs
342 342
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only read modes are allowed through to the wrapped vfs
        if mode in ('r', 'rb'):
            return self.vfs(path, mode, *args, **kw)
        raise util.Abort('this vfs is read only')
353 353
354 354
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only propagate errors for the root path itself
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat in dirlst; return True if it was new
            # (used to break cycles created by symlinks)
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat we cannot detect symlink loops safely
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # recurse through the symlink ourselves since
                        # os.walk will not follow it
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
402 402
def osrcpath():
    '''return default os-specific hgrc search path'''
    rcs = systemrcpath()
    rcs.extend(userrcpath())
    return [os.path.normpath(p) for p in rcs]
409 409
_rcpath = None  # cached result of rcpath(); computed lazily

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    # a directory entry contributes every *.rc file inside it
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath
435 435
def revsingle(repo, revspec, default='.'):
    '''resolve revspec to a single changectx, using default when empty'''
    if not revspec and revspec != 0:
        return repo[default]

    revs = revrange(repo, [revspec])
    if not revs:
        raise util.Abort(_('empty revision set'))
    return repo[revs[-1]]
444 444
def revpair(repo, revs):
    '''return a pair of nodes for the given revision specs: (rev, None)
    for a single rev, or (first, last) for a range'''
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    if len(l) == 0:
        # NOTE(review): `revs` is always truthy here (the empty case
        # returned above), so the fallback return looks unreachable
        if revs:
            raise util.Abort(_('empty revision range'))
        return repo.dirstate.p1(), None

    # a single spec without a range separator resolves to (rev, None)
    if len(l) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
        return repo.lookup(l[0]), None

    return repo.lookup(l[0]), repo.lookup(l[-1])
460 460
# separator for old-style revision ranges ("a:b")
_revrangesep = ':'

def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""
    # NOTE(review): despite the docstring this returns a list of revision
    # numbers (ints), not a generator of strings -- confirm before relying
    # on the documented contract.

    def revfix(repo, val, defval):
        # map an empty spec to defval (e.g. ":x" -> 0, "x:" -> tip)
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    # l accumulates results in order; seen deduplicates across specs.
    # seen is synced lazily from l to keep the common single-range case fast.
    seen, l = set(), []
    for spec in revs:
        if l and not seen:
            seen = set(l)
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            if isinstance(spec, int):
                seen.add(spec)
                l.append(spec)
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start <= 0:
                    start = nullrev
                rangeiter = repo.changelog.revs(start, end)
                if not seen and not l:
                    # by far the most common case: revs = ["-1:0"]
                    l = list(rangeiter)
                    # defer syncing seen until next iteration
                    continue
                newrevs = set(rangeiter)
                if seen:
                    newrevs.difference_update(seen)
                    seen.update(newrevs)
                else:
                    seen = newrevs
                l.extend(sorted(newrevs, reverse=start > end))
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l.append(rev)
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec)
        dl = [r for r in m(repo, list(repo)) if r not in seen]
        l.extend(dl)
        seen.update(dl)

    return l
520 520
def expandpats(pats):
    '''expand plain (kind-less) patterns with glob, Windows-style'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for pat in pats:
        kind, name = matchmod._patsplit(pat, None)
        if kind is not None:
            # explicit kind prefix: leave the pattern untouched
            expanded.append(pat)
            continue
        try:
            matches = glob.glob(name)
        except re.error:
            matches = [name]
        if matches:
            expanded.extend(matches)
        else:
            expanded.append(pat)
    return expanded
537 537
def matchandpats(ctx, pats=None, opts=None, globbed=False, default='relpath'):
    '''Build a matcher for pats/opts against ctx.

    Returns a (match, pats) pair where pats reflects any glob expansion
    applied. The matcher's bad-file hook warns via the repo ui.'''
    # None sentinels instead of mutable defaults ([]/{}): a shared default
    # list/dict would be visible across every call of this function
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default)
    def badfn(f, msg):
        ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    return m, pats
550 550
def match(ctx, pats=None, opts=None, globbed=False, default='relpath'):
    '''Build a matcher for pats/opts against ctx (see matchandpats).'''
    # None sentinels instead of mutable defaults ([]/{}); normalize here so
    # downstream code always receives concrete containers
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    return matchandpats(ctx, pats, opts, globbed, default)[0]
553 553
def matchall(repo):
    '''return a matcher that matches every file in the repo'''
    return matchmod.always(repo.root, repo.getcwd())
556 556
def matchfiles(repo, files):
    '''return a matcher that matches exactly the given files'''
    return matchmod.exact(repo.root, repo.getcwd(), files)
559 559
def addremove(repo, pats=None, opts=None, dry_run=None, similarity=None):
    '''Add new files and forget missing ones, recording renames when
    similarity > 0.

    Returns 1 if any explicitly matched file was rejected, 0 otherwise.'''
    # None sentinels instead of mutable defaults ([]/{}): a shared default
    # list/dict would persist across calls
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)
    # we'd use status here, except handling of symlinks and ignore is tricky
    m = match(repo[None], pats, opts)
    rejected = []
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed = _interestingfiles(repo, m)

    unknownset = set(unknown)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            rel = m.rel(abs)
            if abs in unknownset:
                status = _('adding %s\n') % ((pats and rel) or abs)
            else:
                status = _('removing %s\n') % ((pats and rel) or abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
594 594
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.

    Returns 1 if any of the given files was rejected by the matcher,
    0 otherwise.'''
    m = matchfiles(repo, files)
    rejected = []
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed = _interestingfiles(repo, m)

    if repo.ui.verbose:
        # in verbose mode, report each file being added/removed
        unknownset = set(unknown)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
624 624
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns a tuple of lists: (added, unknown, deleted, removed).'''
    added, unknown, deleted, removed = [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        # dirstate states: '?' unknown, 'r' removed, 'a' added
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            # tracked but missing on disk
            deleted.append(abs)
        # for finding renames
        elif dstate == 'r':
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed
651 651
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns a dict mapping new name -> old name. Empty unless
    similarity > 0.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames
666 666
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    wlock = repo.wlock()
    try:
        wctx.forget(deleted)
        wctx.add(unknown)
        for dst, src in renames.iteritems():
            wctx.copy(src, dst)
    finally:
        wlock.release()
679 679
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # copying an uncommitted add: warn, since no copy data is kept
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
698 698
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missing = []
    for name in requirements:
        if name in supported:
            continue
        # entries that are empty or start with a non-alphanumeric
        # character indicate a corrupt requires file
        if not name or not name[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missing.append(name)
    if missing:
        missing.sort()
        raise error.RequirementError(
            _("unknown repository format: requires features '%s' (upgrade "
              "Mercurial)") % "', '".join(missing))
    return requirements
715 715
716 class filecacheentry(object):
716 class filecachesubentry(object):
717 717 def __init__(self, path, stat):
718 718 self.path = path
719 719 self.cachestat = None
720 720 self._cacheable = None
721 721
722 722 if stat:
723 self.cachestat = filecacheentry.stat(self.path)
723 self.cachestat = filecachesubentry.stat(self.path)
724 724
725 725 if self.cachestat:
726 726 self._cacheable = self.cachestat.cacheable()
727 727 else:
728 728 # None means we don't know yet
729 729 self._cacheable = None
730 730
731 731 def refresh(self):
732 732 if self.cacheable():
733 self.cachestat = filecacheentry.stat(self.path)
733 self.cachestat = filecachesubentry.stat(self.path)
734 734
735 735 def cacheable(self):
736 736 if self._cacheable is not None:
737 737 return self._cacheable
738 738
739 739 # we don't know yet, assume it is for now
740 740 return True
741 741
742 742 def changed(self):
743 743 # no point in going further if we can't cache it
744 744 if not self.cacheable():
745 745 return True
746 746
747 newstat = filecacheentry.stat(self.path)
747 newstat = filecachesubentry.stat(self.path)
748 748
749 749 # we may not know if it's cacheable yet, check again now
750 750 if newstat and self._cacheable is None:
751 751 self._cacheable = newstat.cacheable()
752 752
753 753 # check again
754 754 if not self._cacheable:
755 755 return True
756 756
757 757 if self.cachestat != newstat:
758 758 self.cachestat = newstat
759 759 return True
760 760 else:
761 761 return False
762 762
763 763 @staticmethod
764 764 def stat(path):
765 765 try:
766 766 return util.cachestat(path)
767 767 except OSError, e:
768 768 if e.errno != errno.ENOENT:
769 769 raise
770 770
class filecache(object):
    '''A property like decorator that tracks a file under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates
    the object when needed, updating the new stat info in _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).

    (The source as pasted contained both `filecacheentry` and the renamed
    `filecachesubentry` constructor lines from a diff; this keeps only the
    renamed version.)'''
    def __init__(self, path):
        self.path = path

    def join(self, obj, fname):
        """Used to compute the runtime path of the cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            path = self.join(obj, self.path)

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecachesubentry(path, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            ce = filecachesubentry(self.join(obj, self.path), False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
842 842
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        addpath = self.addpath
        # a dirstate-like map (has iteritems) maps file -> state tuple;
        # entries whose state equals skip are excluded
        if util.safehasattr(map, 'iteritems') and skip is not None:
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        # bump the refcount of every ancestor directory of path; stop at
        # the first one already present (its own ancestors are counted too)
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        # decrement ancestor refcounts, removing entries that reach zero
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs

# prefer the C implementation when the parsers extension provides one
if util.safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
881 881
def finddirs(path):
    '''yield the ancestor directories of path, deepest first'''
    remainder = path
    while True:
        cut = remainder.rfind('/')
        if cut == -1:
            return
        remainder = remainder[:cut]
        yield remainder
General Comments 0
You need to be logged in to leave comments. Login now