vfs: add "writelines"...
FUJIWARA Katsunori
r23371:1df6519e default
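The change below adds a writelines helper to abstractvfs, so callers can open a file through the vfs layer, write a sequence of lines, and close it in a single call. A minimal usage sketch, assuming a Mercurial checkout of this era; the base directory, file name, and contents are illustrative only:

    from mercurial import scmutil

    myvfs = scmutil.vfs('/tmp/demo')    # hypothetical base directory
    # open/writelines/close in one call; mode defaults to 'wb'
    myvfs.writelines('notes.txt', ['first line\n', 'second line\n'])
    # mode and notindexed are passed straight through to vfs.__call__,
    # so appending works the same way
    myvfs.writelines('notes.txt', ['third line\n'], mode='ab')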
@@ -1,1060 +1,1067
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 11 import pathutil
12 12 import match as matchmod
13 13 import os, errno, re, glob, tempfile
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
23 23 class status(tuple):
24 24 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
25 25 and 'ignored' properties are only relevant to the working copy.
26 26 '''
27 27
28 28 __slots__ = ()
29 29
30 30 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
31 31 clean):
32 32 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
33 33 ignored, clean))
34 34
35 35 @property
36 36 def modified(self):
37 37 '''files that have been modified'''
38 38 return self[0]
39 39
40 40 @property
41 41 def added(self):
42 42 '''files that have been added'''
43 43 return self[1]
44 44
45 45 @property
46 46 def removed(self):
47 47 '''files that have been removed'''
48 48 return self[2]
49 49
50 50 @property
51 51 def deleted(self):
52 52 '''files that are in the dirstate, but have been deleted from the
53 53 working copy (aka "missing")
54 54 '''
55 55 return self[3]
56 56
57 57 @property
58 58 def unknown(self):
59 59 '''files not in the dirstate that are not ignored'''
60 60 return self[4]
61 61
62 62 @property
63 63 def ignored(self):
64 64 '''files not in the dirstate that are ignored (by _dirignore())'''
65 65 return self[5]
66 66
67 67 @property
68 68 def clean(self):
69 69 '''files that have not been modified'''
70 70 return self[6]
71 71
72 72 def __repr__(self, *args, **kwargs):
73 73 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
74 74 'unknown=%r, ignored=%r, clean=%r>') % self)
75 75
76 76 def itersubrepos(ctx1, ctx2):
77 77 """find subrepos in ctx1 or ctx2"""
78 78 # Create a (subpath, ctx) mapping where we prefer subpaths from
79 79 # ctx1. The subpaths from ctx2 are important when the .hgsub file
80 80 # has been modified (in ctx2) but not yet committed (in ctx1).
81 81 subpaths = dict.fromkeys(ctx2.substate, ctx2)
82 82 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
83 83 for subpath, ctx in sorted(subpaths.iteritems()):
84 84 yield subpath, ctx.sub(subpath)
85 85
86 86 def nochangesfound(ui, repo, excluded=None):
87 87 '''Report no changes for push/pull, excluded is None or a list of
88 88 nodes excluded from the push/pull.
89 89 '''
90 90 secretlist = []
91 91 if excluded:
92 92 for n in excluded:
93 93 if n not in repo:
94 94 # discovery should not have included the filtered revision,
95 95 # we have to explicitly exclude it until discovery is cleaned up.
96 96 continue
97 97 ctx = repo[n]
98 98 if ctx.phase() >= phases.secret and not ctx.extinct():
99 99 secretlist.append(n)
100 100
101 101 if secretlist:
102 102 ui.status(_("no changes found (ignored %d secret changesets)\n")
103 103 % len(secretlist))
104 104 else:
105 105 ui.status(_("no changes found\n"))
106 106
107 107 def checknewlabel(repo, lbl, kind):
108 108 # Do not use the "kind" parameter in ui output.
109 109 # It makes strings difficult to translate.
110 110 if lbl in ['tip', '.', 'null']:
111 111 raise util.Abort(_("the name '%s' is reserved") % lbl)
112 112 for c in (':', '\0', '\n', '\r'):
113 113 if c in lbl:
114 114 raise util.Abort(_("%r cannot be used in a name") % c)
115 115 try:
116 116 int(lbl)
117 117 raise util.Abort(_("cannot use an integer as a name"))
118 118 except ValueError:
119 119 pass
120 120
121 121 def checkfilename(f):
122 122 '''Check that the filename f is an acceptable filename for a tracked file'''
123 123 if '\r' in f or '\n' in f:
124 124 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
125 125
126 126 def checkportable(ui, f):
127 127 '''Check if filename f is portable and warn or abort depending on config'''
128 128 checkfilename(f)
129 129 abort, warn = checkportabilityalert(ui)
130 130 if abort or warn:
131 131 msg = util.checkwinfilename(f)
132 132 if msg:
133 133 msg = "%s: %r" % (msg, f)
134 134 if abort:
135 135 raise util.Abort(msg)
136 136 ui.warn(_("warning: %s\n") % msg)
137 137
138 138 def checkportabilityalert(ui):
139 139 '''check if the user's config requests nothing, a warning, or abort for
140 140 non-portable filenames'''
141 141 val = ui.config('ui', 'portablefilenames', 'warn')
142 142 lval = val.lower()
143 143 bval = util.parsebool(val)
144 144 abort = os.name == 'nt' or lval == 'abort'
145 145 warn = bval or lval == 'warn'
146 146 if bval is None and not (warn or abort or lval == 'ignore'):
147 147 raise error.ConfigError(
148 148 _("ui.portablefilenames value is invalid ('%s')") % val)
149 149 return abort, warn
150 150
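# Editorial note (not part of this file): checkportabilityalert above reads
# ui.portablefilenames, so a user can escalate the default warning to a hard
# abort with a hypothetical hgrc snippet like:
#
#   [ui]
#   portablefilenames = abort
#
# Accepted values are a boolean, 'warn' (the default), 'abort' or 'ignore';
# on Windows, non-portable names always abort regardless of the setting.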
151 151 class casecollisionauditor(object):
152 152 def __init__(self, ui, abort, dirstate):
153 153 self._ui = ui
154 154 self._abort = abort
155 155 allfiles = '\0'.join(dirstate._map)
156 156 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
157 157 self._dirstate = dirstate
158 158 # The purpose of _newfiles is so that we don't complain about
159 159 # case collisions if someone were to call this object with the
160 160 # same filename twice.
161 161 self._newfiles = set()
162 162
163 163 def __call__(self, f):
164 164 if f in self._newfiles:
165 165 return
166 166 fl = encoding.lower(f)
167 167 if fl in self._loweredfiles and f not in self._dirstate:
168 168 msg = _('possible case-folding collision for %s') % f
169 169 if self._abort:
170 170 raise util.Abort(msg)
171 171 self._ui.warn(_("warning: %s\n") % msg)
172 172 self._loweredfiles.add(fl)
173 173 self._newfiles.add(f)
174 174
175 175 class abstractvfs(object):
176 176 """Abstract base class; cannot be instantiated"""
177 177
178 178 def __init__(self, *args, **kwargs):
179 179 '''Prevent instantiation; don't call this from subclasses.'''
180 180 raise NotImplementedError('attempted instantiating ' + str(type(self)))
181 181
182 182 def tryread(self, path):
183 183 '''gracefully return an empty string for missing files'''
184 184 try:
185 185 return self.read(path)
186 186 except IOError, inst:
187 187 if inst.errno != errno.ENOENT:
188 188 raise
189 189 return ""
190 190
191 191 def tryreadlines(self, path, mode='rb'):
192 192 '''gracefully return an empty array for missing files'''
193 193 try:
194 194 return self.readlines(path, mode=mode)
195 195 except IOError, inst:
196 196 if inst.errno != errno.ENOENT:
197 197 raise
198 198 return []
199 199
200 200 def open(self, path, mode="r", text=False, atomictemp=False,
201 201 notindexed=False):
202 202 '''Open ``path`` file, which is relative to vfs root.
203 203
204 204 Newly created directories are marked as "not to be indexed by
205 205 the content indexing service", if ``notindexed`` is specified
206 206 for "write" mode access.
207 207 '''
208 208 self.open = self.__call__
209 209 return self.__call__(path, mode, text, atomictemp, notindexed)
210 210
211 211 def read(self, path):
212 212 fp = self(path, 'rb')
213 213 try:
214 214 return fp.read()
215 215 finally:
216 216 fp.close()
217 217
218 218 def readlines(self, path, mode='rb'):
219 219 fp = self(path, mode=mode)
220 220 try:
221 221 return fp.readlines()
222 222 finally:
223 223 fp.close()
224 224
225 225 def write(self, path, data):
226 226 fp = self(path, 'wb')
227 227 try:
228 228 return fp.write(data)
229 229 finally:
230 230 fp.close()
231 231
232 def writelines(self, path, data, mode='wb', notindexed=False):
233 fp = self(path, mode=mode, notindexed=notindexed)
234 try:
235 return fp.writelines(data)
236 finally:
237 fp.close()
238
232 239 def append(self, path, data):
233 240 fp = self(path, 'ab')
234 241 try:
235 242 return fp.write(data)
236 243 finally:
237 244 fp.close()
238 245
239 246 def chmod(self, path, mode):
240 247 return os.chmod(self.join(path), mode)
241 248
242 249 def exists(self, path=None):
243 250 return os.path.exists(self.join(path))
244 251
245 252 def fstat(self, fp):
246 253 return util.fstat(fp)
247 254
248 255 def isdir(self, path=None):
249 256 return os.path.isdir(self.join(path))
250 257
251 258 def isfile(self, path=None):
252 259 return os.path.isfile(self.join(path))
253 260
254 261 def islink(self, path=None):
255 262 return os.path.islink(self.join(path))
256 263
257 264 def lexists(self, path=None):
258 265 return os.path.lexists(self.join(path))
259 266
260 267 def lstat(self, path=None):
261 268 return os.lstat(self.join(path))
262 269
263 270 def listdir(self, path=None):
264 271 return os.listdir(self.join(path))
265 272
266 273 def makedir(self, path=None, notindexed=True):
267 274 return util.makedir(self.join(path), notindexed)
268 275
269 276 def makedirs(self, path=None, mode=None):
270 277 return util.makedirs(self.join(path), mode)
271 278
272 279 def makelock(self, info, path):
273 280 return util.makelock(info, self.join(path))
274 281
275 282 def mkdir(self, path=None):
276 283 return os.mkdir(self.join(path))
277 284
278 285 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
279 286 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
280 287 dir=self.join(dir), text=text)
281 288 dname, fname = util.split(name)
282 289 if dir:
283 290 return fd, os.path.join(dir, fname)
284 291 else:
285 292 return fd, fname
286 293
287 294 def readdir(self, path=None, stat=None, skip=None):
288 295 return osutil.listdir(self.join(path), stat, skip)
289 296
290 297 def readlock(self, path):
291 298 return util.readlock(self.join(path))
292 299
293 300 def rename(self, src, dst):
294 301 return util.rename(self.join(src), self.join(dst))
295 302
296 303 def readlink(self, path):
297 304 return os.readlink(self.join(path))
298 305
299 306 def setflags(self, path, l, x):
300 307 return util.setflags(self.join(path), l, x)
301 308
302 309 def stat(self, path=None):
303 310 return os.stat(self.join(path))
304 311
305 312 def unlink(self, path=None):
306 313 return util.unlink(self.join(path))
307 314
308 315 def unlinkpath(self, path=None, ignoremissing=False):
309 316 return util.unlinkpath(self.join(path), ignoremissing)
310 317
311 318 def utime(self, path=None, t=None):
312 319 return os.utime(self.join(path), t)
313 320
314 321 class vfs(abstractvfs):
315 322 '''Operate files relative to a base directory
316 323
317 324 This class is used to hide the details of COW semantics and
318 325 remote file access from higher level code.
319 326 '''
320 327 def __init__(self, base, audit=True, expandpath=False, realpath=False):
321 328 if expandpath:
322 329 base = util.expandpath(base)
323 330 if realpath:
324 331 base = os.path.realpath(base)
325 332 self.base = base
326 333 self._setmustaudit(audit)
327 334 self.createmode = None
328 335 self._trustnlink = None
329 336
330 337 def _getmustaudit(self):
331 338 return self._audit
332 339
333 340 def _setmustaudit(self, onoff):
334 341 self._audit = onoff
335 342 if onoff:
336 343 self.audit = pathutil.pathauditor(self.base)
337 344 else:
338 345 self.audit = util.always
339 346
340 347 mustaudit = property(_getmustaudit, _setmustaudit)
341 348
342 349 @util.propertycache
343 350 def _cansymlink(self):
344 351 return util.checklink(self.base)
345 352
346 353 @util.propertycache
347 354 def _chmod(self):
348 355 return util.checkexec(self.base)
349 356
350 357 def _fixfilemode(self, name):
351 358 if self.createmode is None or not self._chmod:
352 359 return
353 360 os.chmod(name, self.createmode & 0666)
354 361
355 362 def __call__(self, path, mode="r", text=False, atomictemp=False,
356 363 notindexed=False):
357 364 '''Open ``path`` file, which is relative to vfs root.
358 365
359 366 Newly created directories are marked as "not to be indexed by
360 367 the content indexing service", if ``notindexed`` is specified
361 368 for "write" mode access.
362 369 '''
363 370 if self._audit:
364 371 r = util.checkosfilename(path)
365 372 if r:
366 373 raise util.Abort("%s: %r" % (r, path))
367 374 self.audit(path)
368 375 f = self.join(path)
369 376
370 377 if not text and "b" not in mode:
371 378 mode += "b" # for that other OS
372 379
373 380 nlink = -1
374 381 if mode not in ('r', 'rb'):
375 382 dirname, basename = util.split(f)
376 383 # If basename is empty, then the path is malformed because it points
377 384 # to a directory. Let the posixfile() call below raise IOError.
378 385 if basename:
379 386 if atomictemp:
380 387 util.ensuredirs(dirname, self.createmode, notindexed)
381 388 return util.atomictempfile(f, mode, self.createmode)
382 389 try:
383 390 if 'w' in mode:
384 391 util.unlink(f)
385 392 nlink = 0
386 393 else:
387 394 # nlinks() may behave differently for files on Windows
388 395 # shares if the file is open.
389 396 fd = util.posixfile(f)
390 397 nlink = util.nlinks(f)
391 398 if nlink < 1:
392 399 nlink = 2 # force mktempcopy (issue1922)
393 400 fd.close()
394 401 except (OSError, IOError), e:
395 402 if e.errno != errno.ENOENT:
396 403 raise
397 404 nlink = 0
398 405 util.ensuredirs(dirname, self.createmode, notindexed)
399 406 if nlink > 0:
400 407 if self._trustnlink is None:
401 408 self._trustnlink = nlink > 1 or util.checknlink(f)
402 409 if nlink > 1 or not self._trustnlink:
403 410 util.rename(util.mktempcopy(f), f)
404 411 fp = util.posixfile(f, mode)
405 412 if nlink == 0:
406 413 self._fixfilemode(f)
407 414 return fp
408 415
409 416 def symlink(self, src, dst):
410 417 self.audit(dst)
411 418 linkname = self.join(dst)
412 419 try:
413 420 os.unlink(linkname)
414 421 except OSError:
415 422 pass
416 423
417 424 util.ensuredirs(os.path.dirname(linkname), self.createmode)
418 425
419 426 if self._cansymlink:
420 427 try:
421 428 os.symlink(src, linkname)
422 429 except OSError, err:
423 430 raise OSError(err.errno, _('could not symlink to %r: %s') %
424 431 (src, err.strerror), linkname)
425 432 else:
426 433 self.write(dst, src)
427 434
428 435 def join(self, path):
429 436 if path:
430 437 return os.path.join(self.base, path)
431 438 else:
432 439 return self.base
433 440
434 441 opener = vfs
435 442
436 443 class auditvfs(object):
437 444 def __init__(self, vfs):
438 445 self.vfs = vfs
439 446
440 447 def _getmustaudit(self):
441 448 return self.vfs.mustaudit
442 449
443 450 def _setmustaudit(self, onoff):
444 451 self.vfs.mustaudit = onoff
445 452
446 453 mustaudit = property(_getmustaudit, _setmustaudit)
447 454
448 455 class filtervfs(abstractvfs, auditvfs):
449 456 '''Wrapper vfs for filtering filenames with a function.'''
450 457
451 458 def __init__(self, vfs, filter):
452 459 auditvfs.__init__(self, vfs)
453 460 self._filter = filter
454 461
455 462 def __call__(self, path, *args, **kwargs):
456 463 return self.vfs(self._filter(path), *args, **kwargs)
457 464
458 465 def join(self, path):
459 466 if path:
460 467 return self.vfs.join(self._filter(path))
461 468 else:
462 469 return self.vfs.join(path)
463 470
464 471 filteropener = filtervfs
465 472
466 473 class readonlyvfs(abstractvfs, auditvfs):
467 474 '''Wrapper vfs preventing any writing.'''
468 475
469 476 def __init__(self, vfs):
470 477 auditvfs.__init__(self, vfs)
471 478
472 479 def __call__(self, path, mode='r', *args, **kw):
473 480 if mode not in ('r', 'rb'):
474 481 raise util.Abort('this vfs is read only')
475 482 return self.vfs(path, mode, *args, **kw)
476 483
477 484
478 485 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
479 486 '''yield every hg repository under path, always recursively.
480 487 The recurse flag will only control recursion into repo working dirs'''
481 488 def errhandler(err):
482 489 if err.filename == path:
483 490 raise err
484 491 samestat = getattr(os.path, 'samestat', None)
485 492 if followsym and samestat is not None:
486 493 def adddir(dirlst, dirname):
487 494 match = False
488 495 dirstat = os.stat(dirname)
489 496 for lstdirstat in dirlst:
490 497 if samestat(dirstat, lstdirstat):
491 498 match = True
492 499 break
493 500 if not match:
494 501 dirlst.append(dirstat)
495 502 return not match
496 503 else:
497 504 followsym = False
498 505
499 506 if (seen_dirs is None) and followsym:
500 507 seen_dirs = []
501 508 adddir(seen_dirs, path)
502 509 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
503 510 dirs.sort()
504 511 if '.hg' in dirs:
505 512 yield root # found a repository
506 513 qroot = os.path.join(root, '.hg', 'patches')
507 514 if os.path.isdir(os.path.join(qroot, '.hg')):
508 515 yield qroot # we have a patch queue repo here
509 516 if recurse:
510 517 # avoid recursing inside the .hg directory
511 518 dirs.remove('.hg')
512 519 else:
513 520 dirs[:] = [] # don't descend further
514 521 elif followsym:
515 522 newdirs = []
516 523 for d in dirs:
517 524 fname = os.path.join(root, d)
518 525 if adddir(seen_dirs, fname):
519 526 if os.path.islink(fname):
520 527 for hgname in walkrepos(fname, True, seen_dirs):
521 528 yield hgname
522 529 else:
523 530 newdirs.append(d)
524 531 dirs[:] = newdirs
525 532
526 533 def osrcpath():
527 534 '''return default os-specific hgrc search path'''
528 535 path = []
529 536 defaultpath = os.path.join(util.datapath, 'default.d')
530 537 if os.path.isdir(defaultpath):
531 538 for f, kind in osutil.listdir(defaultpath):
532 539 if f.endswith('.rc'):
533 540 path.append(os.path.join(defaultpath, f))
534 541 path.extend(systemrcpath())
535 542 path.extend(userrcpath())
536 543 path = [os.path.normpath(f) for f in path]
537 544 return path
538 545
539 546 _rcpath = None
540 547
541 548 def rcpath():
542 549 '''return hgrc search path. if env var HGRCPATH is set, use it.
543 550 for each item in path, if directory, use files ending in .rc,
544 551 else use item.
545 552 make HGRCPATH empty to only look in .hg/hgrc of current repo.
546 553 if no HGRCPATH, use default os-specific path.'''
547 554 global _rcpath
548 555 if _rcpath is None:
549 556 if 'HGRCPATH' in os.environ:
550 557 _rcpath = []
551 558 for p in os.environ['HGRCPATH'].split(os.pathsep):
552 559 if not p:
553 560 continue
554 561 p = util.expandpath(p)
555 562 if os.path.isdir(p):
556 563 for f, kind in osutil.listdir(p):
557 564 if f.endswith('.rc'):
558 565 _rcpath.append(os.path.join(p, f))
559 566 else:
560 567 _rcpath.append(p)
561 568 else:
562 569 _rcpath = osrcpath()
563 570 return _rcpath
564 571
565 572 def revsingle(repo, revspec, default='.'):
566 573 if not revspec and revspec != 0:
567 574 return repo[default]
568 575
569 576 l = revrange(repo, [revspec])
570 577 if not l:
571 578 raise util.Abort(_('empty revision set'))
572 579 return repo[l.last()]
573 580
574 581 def revpair(repo, revs):
575 582 if not revs:
576 583 return repo.dirstate.p1(), None
577 584
578 585 l = revrange(repo, revs)
579 586
580 587 if not l:
581 588 first = second = None
582 589 elif l.isascending():
583 590 first = l.min()
584 591 second = l.max()
585 592 elif l.isdescending():
586 593 first = l.max()
587 594 second = l.min()
588 595 else:
589 596 first = l.first()
590 597 second = l.last()
591 598
592 599 if first is None:
593 600 raise util.Abort(_('empty revision range'))
594 601
595 602 if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
596 603 return repo.lookup(first), None
597 604
598 605 return repo.lookup(first), repo.lookup(second)
599 606
600 607 _revrangesep = ':'
601 608
602 609 def revrange(repo, revs):
603 610 """Yield revision as strings from a list of revision specifications."""
604 611
605 612 def revfix(repo, val, defval):
606 613 if not val and val != 0 and defval is not None:
607 614 return defval
608 615 return repo[val].rev()
609 616
610 617 seen, l = set(), revset.baseset([])
611 618 for spec in revs:
612 619 if l and not seen:
613 620 seen = set(l)
614 621 # attempt to parse old-style ranges first to deal with
615 622 # things like old-tag which contain query metacharacters
616 623 try:
617 624 if isinstance(spec, int):
618 625 seen.add(spec)
619 626 l = l + revset.baseset([spec])
620 627 continue
621 628
622 629 if _revrangesep in spec:
623 630 start, end = spec.split(_revrangesep, 1)
624 631 start = revfix(repo, start, 0)
625 632 end = revfix(repo, end, len(repo) - 1)
626 633 if end == nullrev and start < 0:
627 634 start = nullrev
628 635 rangeiter = repo.changelog.revs(start, end)
629 636 if not seen and not l:
630 637 # by far the most common case: revs = ["-1:0"]
631 638 l = revset.baseset(rangeiter)
632 639 # defer syncing seen until next iteration
633 640 continue
634 641 newrevs = set(rangeiter)
635 642 if seen:
636 643 newrevs.difference_update(seen)
637 644 seen.update(newrevs)
638 645 else:
639 646 seen = newrevs
640 647 l = l + revset.baseset(sorted(newrevs, reverse=start > end))
641 648 continue
642 649 elif spec and spec in repo: # single unquoted rev
643 650 rev = revfix(repo, spec, None)
644 651 if rev in seen:
645 652 continue
646 653 seen.add(rev)
647 654 l = l + revset.baseset([rev])
648 655 continue
649 656 except error.RepoLookupError:
650 657 pass
651 658
652 659 # fall through to new-style queries if old-style fails
653 660 m = revset.match(repo.ui, spec, repo)
654 661 if seen or l:
655 662 dl = [r for r in m(repo, revset.spanset(repo)) if r not in seen]
656 663 l = l + revset.baseset(dl)
657 664 seen.update(dl)
658 665 else:
659 666 l = m(repo, revset.spanset(repo))
660 667
661 668 return l
662 669
663 670 def expandpats(pats):
664 671 '''Expand bare globs when running on windows.
665 672 On posix we assume it has already been done by sh.'''
666 673 if not util.expandglobs:
667 674 return list(pats)
668 675 ret = []
669 676 for kindpat in pats:
670 677 kind, pat = matchmod._patsplit(kindpat, None)
671 678 if kind is None:
672 679 try:
673 680 globbed = glob.glob(pat)
674 681 except re.error:
675 682 globbed = [pat]
676 683 if globbed:
677 684 ret.extend(globbed)
678 685 continue
679 686 ret.append(kindpat)
680 687 return ret
681 688
682 689 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
683 690 '''Return a matcher and the patterns that were used.
684 691 The matcher will warn about bad matches.'''
685 692 if pats == ("",):
686 693 pats = []
687 694 if not globbed and default == 'relpath':
688 695 pats = expandpats(pats or [])
689 696
690 697 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
691 698 default)
692 699 def badfn(f, msg):
693 700 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
694 701 m.bad = badfn
695 702 return m, pats
696 703
697 704 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
698 705 '''Return a matcher that will warn about bad matches.'''
699 706 return matchandpats(ctx, pats, opts, globbed, default)[0]
700 707
701 708 def matchall(repo):
702 709 '''Return a matcher that will efficiently match everything.'''
703 710 return matchmod.always(repo.root, repo.getcwd())
704 711
705 712 def matchfiles(repo, files):
706 713 '''Return a matcher that will efficiently match exactly these files.'''
707 714 return matchmod.exact(repo.root, repo.getcwd(), files)
708 715
709 716 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
710 717 if dry_run is None:
711 718 dry_run = opts.get('dry_run')
712 719 if similarity is None:
713 720 similarity = float(opts.get('similarity') or 0)
714 721 # we'd use status here, except handling of symlinks and ignore is tricky
715 722 m = match(repo[None], pats, opts)
716 723 rejected = []
717 724 m.bad = lambda x, y: rejected.append(x)
718 725
719 726 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
720 727
721 728 unknownset = set(unknown + forgotten)
722 729 toprint = unknownset.copy()
723 730 toprint.update(deleted)
724 731 for abs in sorted(toprint):
725 732 if repo.ui.verbose or not m.exact(abs):
726 733 rel = m.rel(abs)
727 734 if abs in unknownset:
728 735 status = _('adding %s\n') % ((pats and rel) or abs)
729 736 else:
730 737 status = _('removing %s\n') % ((pats and rel) or abs)
731 738 repo.ui.status(status)
732 739
733 740 renames = _findrenames(repo, m, added + unknown, removed + deleted,
734 741 similarity)
735 742
736 743 if not dry_run:
737 744 _markchanges(repo, unknown + forgotten, deleted, renames)
738 745
739 746 for f in rejected:
740 747 if f in m.files():
741 748 return 1
742 749 return 0
743 750
744 751 def marktouched(repo, files, similarity=0.0):
745 752 '''Assert that files have somehow been operated upon. files are relative to
746 753 the repo root.'''
747 754 m = matchfiles(repo, files)
748 755 rejected = []
749 756 m.bad = lambda x, y: rejected.append(x)
750 757
751 758 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
752 759
753 760 if repo.ui.verbose:
754 761 unknownset = set(unknown + forgotten)
755 762 toprint = unknownset.copy()
756 763 toprint.update(deleted)
757 764 for abs in sorted(toprint):
758 765 if abs in unknownset:
759 766 status = _('adding %s\n') % abs
760 767 else:
761 768 status = _('removing %s\n') % abs
762 769 repo.ui.status(status)
763 770
764 771 renames = _findrenames(repo, m, added + unknown, removed + deleted,
765 772 similarity)
766 773
767 774 _markchanges(repo, unknown + forgotten, deleted, renames)
768 775
769 776 for f in rejected:
770 777 if f in m.files():
771 778 return 1
772 779 return 0
773 780
774 781 def _interestingfiles(repo, matcher):
775 782 '''Walk dirstate with matcher, looking for files that addremove would care
776 783 about.
777 784
778 785 This is different from dirstate.status because it doesn't care about
779 786 whether files are modified or clean.'''
780 787 added, unknown, deleted, removed, forgotten = [], [], [], [], []
781 788 audit_path = pathutil.pathauditor(repo.root)
782 789
783 790 ctx = repo[None]
784 791 dirstate = repo.dirstate
785 792 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
786 793 full=False)
787 794 for abs, st in walkresults.iteritems():
788 795 dstate = dirstate[abs]
789 796 if dstate == '?' and audit_path.check(abs):
790 797 unknown.append(abs)
791 798 elif dstate != 'r' and not st:
792 799 deleted.append(abs)
793 800 elif dstate == 'r' and st:
794 801 forgotten.append(abs)
795 802 # for finding renames
796 803 elif dstate == 'r' and not st:
797 804 removed.append(abs)
798 805 elif dstate == 'a':
799 806 added.append(abs)
800 807
801 808 return added, unknown, deleted, removed, forgotten
802 809
803 810 def _findrenames(repo, matcher, added, removed, similarity):
804 811 '''Find renames from removed files to added ones.'''
805 812 renames = {}
806 813 if similarity > 0:
807 814 for old, new, score in similar.findrenames(repo, added, removed,
808 815 similarity):
809 816 if (repo.ui.verbose or not matcher.exact(old)
810 817 or not matcher.exact(new)):
811 818 repo.ui.status(_('recording removal of %s as rename to %s '
812 819 '(%d%% similar)\n') %
813 820 (matcher.rel(old), matcher.rel(new),
814 821 score * 100))
815 822 renames[new] = old
816 823 return renames
817 824
818 825 def _markchanges(repo, unknown, deleted, renames):
819 826 '''Marks the files in unknown as added, the files in deleted as removed,
820 827 and the files in renames as copied.'''
821 828 wctx = repo[None]
822 829 wlock = repo.wlock()
823 830 try:
824 831 wctx.forget(deleted)
825 832 wctx.add(unknown)
826 833 for new, old in renames.iteritems():
827 834 wctx.copy(old, new)
828 835 finally:
829 836 wlock.release()
830 837
831 838 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
832 839 """Update the dirstate to reflect the intent of copying src to dst. For
833 840 different reasons it might not end with dst being marked as copied from src.
834 841 """
835 842 origsrc = repo.dirstate.copied(src) or src
836 843 if dst == origsrc: # copying back a copy?
837 844 if repo.dirstate[dst] not in 'mn' and not dryrun:
838 845 repo.dirstate.normallookup(dst)
839 846 else:
840 847 if repo.dirstate[origsrc] == 'a' and origsrc == src:
841 848 if not ui.quiet:
842 849 ui.warn(_("%s has not been committed yet, so no copy "
843 850 "data will be stored for %s.\n")
844 851 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
845 852 if repo.dirstate[dst] in '?r' and not dryrun:
846 853 wctx.add([dst])
847 854 elif not dryrun:
848 855 wctx.copy(origsrc, dst)
849 856
850 857 def readrequires(opener, supported):
851 858 '''Reads and parses .hg/requires and checks if all entries found
852 859 are in the list of supported features.'''
853 860 requirements = set(opener.read("requires").splitlines())
854 861 missings = []
855 862 for r in requirements:
856 863 if r not in supported:
857 864 if not r or not r[0].isalnum():
858 865 raise error.RequirementError(_(".hg/requires file is corrupt"))
859 866 missings.append(r)
860 867 missings.sort()
861 868 if missings:
862 869 raise error.RequirementError(
863 870 _("repository requires features unknown to this Mercurial: %s")
864 871 % " ".join(missings),
865 872 hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
866 873 " for more information"))
867 874 return requirements
868 875
869 876 class filecachesubentry(object):
870 877 def __init__(self, path, stat):
871 878 self.path = path
872 879 self.cachestat = None
873 880 self._cacheable = None
874 881
875 882 if stat:
876 883 self.cachestat = filecachesubentry.stat(self.path)
877 884
878 885 if self.cachestat:
879 886 self._cacheable = self.cachestat.cacheable()
880 887 else:
881 888 # None means we don't know yet
882 889 self._cacheable = None
883 890
884 891 def refresh(self):
885 892 if self.cacheable():
886 893 self.cachestat = filecachesubentry.stat(self.path)
887 894
888 895 def cacheable(self):
889 896 if self._cacheable is not None:
890 897 return self._cacheable
891 898
892 899 # we don't know yet, assume it is for now
893 900 return True
894 901
895 902 def changed(self):
896 903 # no point in going further if we can't cache it
897 904 if not self.cacheable():
898 905 return True
899 906
900 907 newstat = filecachesubentry.stat(self.path)
901 908
902 909 # we may not know if it's cacheable yet, check again now
903 910 if newstat and self._cacheable is None:
904 911 self._cacheable = newstat.cacheable()
905 912
906 913 # check again
907 914 if not self._cacheable:
908 915 return True
909 916
910 917 if self.cachestat != newstat:
911 918 self.cachestat = newstat
912 919 return True
913 920 else:
914 921 return False
915 922
916 923 @staticmethod
917 924 def stat(path):
918 925 try:
919 926 return util.cachestat(path)
920 927 except OSError, e:
921 928 if e.errno != errno.ENOENT:
922 929 raise
923 930
924 931 class filecacheentry(object):
925 932 def __init__(self, paths, stat=True):
926 933 self._entries = []
927 934 for path in paths:
928 935 self._entries.append(filecachesubentry(path, stat))
929 936
930 937 def changed(self):
931 938 '''true if any entry has changed'''
932 939 for entry in self._entries:
933 940 if entry.changed():
934 941 return True
935 942 return False
936 943
937 944 def refresh(self):
938 945 for entry in self._entries:
939 946 entry.refresh()
940 947
941 948 class filecache(object):
942 949 '''A property like decorator that tracks files under .hg/ for updates.
943 950
944 951 Records stat info when called in _filecache.
945 952
946 953 On subsequent calls, compares old stat info with new info, and recreates the
947 954 object when any of the files changes, updating the new stat info in
948 955 _filecache.
949 956
950 957 Mercurial either atomically renames or appends to files under .hg,
951 958 so to ensure the cache is reliable we need the filesystem to be able
952 959 to tell us if a file has been replaced. If it can't, we fallback to
953 960 recreating the object on every call (essentially the same behaviour as
954 961 propertycache).
955 962
956 963 '''
957 964 def __init__(self, *paths):
958 965 self.paths = paths
959 966
960 967 def join(self, obj, fname):
961 968 """Used to compute the runtime path of a cached file.
962 969
963 970 Users should subclass filecache and provide their own version of this
964 971 function to call the appropriate join function on 'obj' (an instance
965 972 of the class whose member function was decorated).
966 973 """
967 974 return obj.join(fname)
968 975
969 976 def __call__(self, func):
970 977 self.func = func
971 978 self.name = func.__name__
972 979 return self
973 980
974 981 def __get__(self, obj, type=None):
975 982 # do we need to check if the file changed?
976 983 if self.name in obj.__dict__:
977 984 assert self.name in obj._filecache, self.name
978 985 return obj.__dict__[self.name]
979 986
980 987 entry = obj._filecache.get(self.name)
981 988
982 989 if entry:
983 990 if entry.changed():
984 991 entry.obj = self.func(obj)
985 992 else:
986 993 paths = [self.join(obj, path) for path in self.paths]
987 994
988 995 # We stat -before- creating the object so our cache doesn't lie if
989 996 # a writer modified between the time we read and stat
990 997 entry = filecacheentry(paths, True)
991 998 entry.obj = self.func(obj)
992 999
993 1000 obj._filecache[self.name] = entry
994 1001
995 1002 obj.__dict__[self.name] = entry.obj
996 1003 return entry.obj
997 1004
998 1005 def __set__(self, obj, value):
999 1006 if self.name not in obj._filecache:
1000 1007 # we add an entry for the missing value because X in __dict__
1001 1008 # implies X in _filecache
1002 1009 paths = [self.join(obj, path) for path in self.paths]
1003 1010 ce = filecacheentry(paths, False)
1004 1011 obj._filecache[self.name] = ce
1005 1012 else:
1006 1013 ce = obj._filecache[self.name]
1007 1014
1008 1015 ce.obj = value # update cached copy
1009 1016 obj.__dict__[self.name] = value # update copy returned by obj.x
1010 1017
1011 1018 def __delete__(self, obj):
1012 1019 try:
1013 1020 del obj.__dict__[self.name]
1014 1021 except KeyError:
1015 1022 raise AttributeError(self.name)
1016 1023
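# Editorial sketch (not part of this changeset): minimal use of the filecache
# decorator defined above. The class and file name are hypothetical; real
# callers such as localrepo subclass filecache to join paths through a vfs,
# but the caching behaviour is the same.
class _filecacheexample(object):
    def __init__(self, base):
        self._base = base
        self._filecache = {}            # filecache keeps its entries here

    def join(self, fname):              # the default filecache.join() calls this
        return os.path.join(self._base, fname)

    @filecache('bookmarks')
    def bookmarks(self):
        # run on first attribute access, then served from the instance
        # __dict__; after invalidation (deleting the attribute) the stat
        # info recorded in _filecache decides whether it runs again
        fp = open(self.join('bookmarks'), 'rb')
        try:
            return fp.read().splitlines()
        finally:
            fp.close()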
1017 1024 class dirs(object):
1018 1025 '''a multiset of directory names from a dirstate or manifest'''
1019 1026
1020 1027 def __init__(self, map, skip=None):
1021 1028 self._dirs = {}
1022 1029 addpath = self.addpath
1023 1030 if util.safehasattr(map, 'iteritems') and skip is not None:
1024 1031 for f, s in map.iteritems():
1025 1032 if s[0] != skip:
1026 1033 addpath(f)
1027 1034 else:
1028 1035 for f in map:
1029 1036 addpath(f)
1030 1037
1031 1038 def addpath(self, path):
1032 1039 dirs = self._dirs
1033 1040 for base in finddirs(path):
1034 1041 if base in dirs:
1035 1042 dirs[base] += 1
1036 1043 return
1037 1044 dirs[base] = 1
1038 1045
1039 1046 def delpath(self, path):
1040 1047 dirs = self._dirs
1041 1048 for base in finddirs(path):
1042 1049 if dirs[base] > 1:
1043 1050 dirs[base] -= 1
1044 1051 return
1045 1052 del dirs[base]
1046 1053
1047 1054 def __iter__(self):
1048 1055 return self._dirs.iterkeys()
1049 1056
1050 1057 def __contains__(self, d):
1051 1058 return d in self._dirs
1052 1059
1053 1060 if util.safehasattr(parsers, 'dirs'):
1054 1061 dirs = parsers.dirs
1055 1062
1056 1063 def finddirs(path):
1057 1064 pos = path.rfind('/')
1058 1065 while pos != -1:
1059 1066 yield path[:pos]
1060 1067 pos = path.rfind('/', 0, pos)
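A brief worked example of the pure-Python helpers above (editorial, derived from the code as listed; the parsers.dirs C replacement, when present, is intended to behave the same way):

    >>> list(finddirs('a/b/c'))
    ['a/b', 'a']
    >>> d = dirs(['a/b/c', 'a/d'])   # multiset: 'a' is referenced twice
    >>> sorted(d)
    ['a', 'a/b']
    >>> d.delpath('a/d')             # 'a' survives, still referenced by 'a/b/c'
    >>> 'a' in d
    True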