##// END OF EJS Templates
rename util.set_flags to setflags
Adrian Buehlmann -
r14232:df239966 default
parent child Browse files
Show More
@@ -1,1175 +1,1175 b''
1 1 # Subversion 1.4/1.5 Python API backend
2 2 #
3 3 # Copyright(C) 2007 Daniel Holth et al
4 4
5 5 import os
6 6 import re
7 7 import sys
8 8 import cPickle as pickle
9 9 import tempfile
10 10 import urllib
11 11 import urllib2
12 12
13 13 from mercurial import strutil, scmutil, util, encoding
14 14 from mercurial.i18n import _
15 15
16 16 # Subversion stuff. Works best with very recent Python SVN bindings
17 17 # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
18 18 # these bindings.
19 19
20 20 from cStringIO import StringIO
21 21
22 22 from common import NoRepo, MissingTool, commit, encodeargs, decodeargs
23 23 from common import commandline, converter_source, converter_sink, mapfile
24 24
# Optional Subversion bindings: if the import fails, "svn" is left as
# None and svn_source.__init__ raises MissingTool when a conversion is
# actually attempted.
try:
    from svn.core import SubversionException, Pool
    import svn
    import svn.client
    import svn.core
    import svn.ra
    import svn.delta
    import transport
    import warnings
    # The SWIG bindings trigger DeprecationWarnings on recent Pythons;
    # silence them, they are pure noise for converter users.
    warnings.filterwarnings('ignore',
            module='svn.core',
            category=DeprecationWarning)

except ImportError:
    svn = None
40 40
class SvnPathNotFound(Exception):
    """Raised when a path cannot be found in the repository up to a
    given revision (see svn_source.latest())."""
    pass
43 43
def revsplit(rev):
    """Parse a revision string and return (uuid, path, revnum).

    ``rev`` has the shape ``svn:<uuid>[/<module>]@<revnum>``; when the
    module part is absent an empty path is returned.
    """
    prefix, revnum = rev.rsplit('@', 1)
    # prefix is "svn:<uuid>[/<module>]"; keep the module's leading slash
    pieces = prefix.split('/', 1)
    module = ''
    if len(pieces) > 1:
        module = '/' + pieces[1]
    # drop the leading "svn:" scheme marker from the uuid
    return pieces[0][4:], module, int(revnum)
52 52
def geturl(path):
    """Normalize a repository path or URL into a Subversion URL.

    Try the svn API first; for local directories fall back to building
    a file:// URL by hand.  Anything else is returned unchanged.
    """
    try:
        return svn.client.url_from_path(svn.core.svn_path_canonicalize(path))
    except SubversionException:
        # svn could not make sense of the path, handle it manually below
        pass
    if os.path.isdir(path):
        path = os.path.normpath(os.path.abspath(path))
        if os.name == 'nt':
            # drive letters need a leading slash in file:// URLs
            path = '/' + util.normpath(path)
        # Module URL is later compared with the repository URL returned
        # by svn API, which is UTF-8.
        path = encoding.tolocal(path)
        return 'file://%s' % urllib.quote(path)
    return path
67 67
def optrev(number):
    """Wrap a plain revision number into an svn_opt_revision_t object,
    as expected by the svn client API."""
    rev = svn.core.svn_opt_revision_t()
    rev.kind = svn.core.svn_opt_revision_number
    rev.value.number = number
    return rev
73 73
class changedpath(object):
    """Plain copy of the interesting fields of an svn changed-path entry.

    get_log_child() rewraps the native svn log objects into these before
    pickling them across the child/parent pipe.
    """
    def __init__(self, p):
        self.copyfrom_path = p.copyfrom_path
        self.copyfrom_rev = p.copyfrom_rev
        self.action = p.action
79 79
def get_log_child(fp, url, paths, start, end, limit=0, discover_changed_paths=True,
                  strict_node_history=False):
    """Child-process entry point: stream svn log entries to fp as pickles.

    Each entry is dumped as a (paths, revnum, author, date, message)
    tuple; a final non-tuple value (an svn error number, or None for a
    clean end of stream) terminates the stream.  See logstream for the
    reader side.
    """
    protocol = -1  # pickle: always use the highest available protocol
    def receiver(orig_paths, revnum, author, date, message, pool):
        if orig_paths is not None:
            for k, v in orig_paths.iteritems():
                # rewrap native svn objects into picklable copies
                orig_paths[k] = changedpath(v)
        pickle.dump((orig_paths, revnum, author, date, message),
                    fp, protocol)

    try:
        # Use an ra of our own so that our parent can consume
        # our results without confusing the server.
        t = transport.SvnRaTransport(url=url)
        svn.ra.get_log(t.ra, paths, start, end, limit,
                       discover_changed_paths,
                       strict_node_history,
                       receiver)
    except SubversionException, (inst, num):
        # forward the svn error number to the parent as the sentinel
        pickle.dump(num, fp, protocol)
    except IOError:
        # Caller may interrupt the iteration
        pickle.dump(None, fp, protocol)
    else:
        pickle.dump(None, fp, protocol)
    fp.close()
    # With large history, cleanup process goes crazy and suddenly
    # consumes *huge* amount of memory. The output file being closed,
    # there is no need for clean termination.
    os._exit(0)
110 110
def debugsvnlog(ui, **opts):
    """Fetch SVN log in a subprocess and channel them back to parent to
    avoid memory collection issues.
    """
    # binary mode matters on Windows: pickles travel over stdin/stdout
    util.set_binary(sys.stdin)
    util.set_binary(sys.stdout)
    # arguments for get_log_child() arrive encoded on stdin
    args = decodeargs(sys.stdin.read())
    get_log_child(sys.stdout, *args)
119 119
class logstream(object):
    """Interruptible revision log iterator.

    Reads the pickle stream produced by get_log_child() in a child
    process and yields (paths, revnum, author, date, message) tuples.
    """
    def __init__(self, stdout):
        # pipe connected to the child process spawned via debugsvnlog
        self._stdout = stdout

    def __iter__(self):
        while True:
            try:
                entry = pickle.load(self._stdout)
            except EOFError:
                raise util.Abort(_('Mercurial failed to run itself, check'
                                   ' hg executable is in PATH'))
            try:
                orig_paths, revnum, author, date, message = entry
            except (TypeError, ValueError):
                # Not a log tuple but one of the child's sentinels: None
                # marks a clean end of stream, anything else is an svn
                # error number forwarded by the child.  (Previously a
                # bare "except:", which also swallowed KeyboardInterrupt
                # and SystemExit; unpacking a sentinel can only raise
                # TypeError or ValueError.)
                if entry is None:
                    break
                raise SubversionException("child raised exception", entry)
            yield entry

    def close(self):
        if self._stdout:
            self._stdout.close()
            self._stdout = None
144 144
145 145
146 146 # Check to see if the given path is a local Subversion repo. Verify this by
147 147 # looking for several svn-specific files and directories in the given
148 148 # directory.
def filecheck(ui, path, proto):
    """Return True if path looks like the root of a local Subversion
    repository, i.e. it contains the files/directories svn always
    creates (ui and proto are accepted for protomap compatibility)."""
    required = ('locks', 'hooks', 'format', 'db')
    return all(os.path.exists(os.path.join(path, name))
               for name in required)
154 154
155 155 # Check to see if a given path is the root of an svn repo over http. We verify
156 156 # this by requesting a version-controlled URL we know can't exist and looking
157 157 # for the svn-specific "not found" XML.
158 158 def httpcheck(ui, path, proto):
159 159 try:
160 160 opener = urllib2.build_opener()
161 161 rsp = opener.open('%s://%s/!svn/ver/0/.svn' % (proto, path))
162 162 data = rsp.read()
163 163 except urllib2.HTTPError, inst:
164 164 if inst.code != 404:
165 165 # Except for 404 we cannot know for sure this is not an svn repo
166 166 ui.warn(_('svn: cannot probe remote repository, assume it could '
167 167 'be a subversion repository. Use --source-type if you '
168 168 'know better.\n'))
169 169 return True
170 170 data = inst.fp.read()
171 171 except:
172 172 # Could be urllib2.URLError if the URL is invalid or anything else.
173 173 return False
174 174 return '<m:human-readable errcode="160013">' in data
175 175
# Map a URL scheme to the probe used to decide whether a URL points at
# a Subversion repository (consulted by issvnurl).
protomap = {'http': httpcheck,
            'https': httpcheck,
            'file': filecheck,
            }
def issvnurl(ui, url):
    """Return True if url (or one of its parent directories) passes the
    repository probe registered for its URL scheme in protomap."""
    try:
        scheme, location = url.split('://', 1)
        if scheme == 'file':
            location = urllib.url2pathname(location)
    except ValueError:
        # no scheme separator: treat the argument as a filesystem path
        scheme = 'file'
        location = os.path.abspath(url)
    if scheme == 'file':
        location = location.replace(os.sep, '/')
    probe = protomap.get(scheme, lambda *args: False)
    # walk up the path until some ancestor answers the probe
    while '/' in location:
        if probe(ui, location, scheme):
            return True
        location = location.rsplit('/', 1)[0]
    return False
196 196
197 197 # SVN conversion code stolen from bzr-svn and tailor
198 198 #
199 199 # Subversion looks like a versioned filesystem, branches structures
200 200 # are defined by conventions and not enforced by the tool. First,
201 201 # we define the potential branches (modules) as "trunk" and "branches"
202 202 # children directories. Revisions are then identified by their
203 203 # module and revision number (and a repository identifier).
204 204 #
205 205 # The revision graph is really a tree (or a forest). By default, a
206 206 # revision parent is the previous revision in the same module. If the
207 207 # module directory is copied/moved from another module then the
208 208 # revision is the module root and its parent the source revision in
209 209 # the parent module. A revision has at most one parent.
210 210 #
211 211 class svn_source(converter_source):
    def __init__(self, ui, url, rev=None):
        """Open url as a Subversion conversion source.

        rev, when given, must be an integer revision number and caps the
        conversion at that revision.  Raises NoRepo if url does not look
        like a Subversion repository and MissingTool if usable svn
        bindings are not available.
        """
        super(svn_source, self).__init__(ui, url, rev=rev)

        # Cheap checks first: accept svn:// and svn+ssh:// URLs, local
        # working copies, and anything the issvnurl() probe recognizes.
        if not (url.startswith('svn://') or url.startswith('svn+ssh://') or
                (os.path.exists(url) and
                 os.path.exists(os.path.join(url, '.svn'))) or
                issvnurl(ui, url)):
            raise NoRepo(_("%s does not look like a Subversion repository")
                         % url)
        if svn is None:
            raise MissingTool(_('Could not load Subversion python bindings'))

        try:
            version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
            if version < (1, 4):
                raise MissingTool(_('Subversion python bindings %d.%d found, '
                                    '1.4 or later required') % version)
        except AttributeError:
            # bindings too old to even expose their version constants
            raise MissingTool(_('Subversion python bindings are too old, 1.4 '
                                'or later required'))

        # per-module highest already-converted revnum (see setrevmap)
        self.lastrevs = {}

        latest = None
        try:
            # Support file://path@rev syntax. Useful e.g. to convert
            # deleted branches.
            at = url.rfind('@')
            if at >= 0:
                latest = int(url[at + 1:])
                url = url[:at]
        except ValueError:
            pass
        self.url = geturl(url)
        self.encoding = 'UTF-8' # Subversion is always nominal UTF-8
        try:
            self.transport = transport.SvnRaTransport(url=self.url)
            self.ra = self.transport.ra
            self.ctx = self.transport.client
            self.baseurl = svn.ra.get_repos_root(self.ra)
            # Module is either empty or a repository path starting with
            # a slash and not ending with a slash.
            self.module = urllib.unquote(self.url[len(self.baseurl):])
            self.prevmodule = None
            self.rootmodule = self.module
            self.commits = {}
            self.paths = {}
            self.uuid = svn.ra.get_uuid(self.ra)
        except SubversionException:
            ui.traceback()
            raise NoRepo(_("%s does not look like a Subversion repository")
                         % self.url)

        # an explicit --rev overrides any @rev found in the URL above
        if rev:
            try:
                latest = int(rev)
            except ValueError:
                raise util.Abort(_('svn: revision %s is not an integer') % rev)

        self.trunkname = self.ui.config('convert', 'svn.trunk', 'trunk').strip('/')
        self.startrev = self.ui.config('convert', 'svn.startrev', default=0)
        try:
            self.startrev = int(self.startrev)
            if self.startrev < 0:
                self.startrev = 0
        except ValueError:
            raise util.Abort(_('svn: start revision %s is not an integer')
                             % self.startrev)

        try:
            self.head = self.latest(self.module, latest)
        except SvnPathNotFound:
            self.head = None
        if not self.head:
            raise util.Abort(_('no revision found in module %s')
                             % self.module)
        self.last_changed = self.revnum(self.head)

        # cache of the (rev, changes) pair kept by getchangedfiles()
        self._changescache = None

        if os.path.exists(os.path.join(url, '.svn/entries')):
            # converting from a checked-out working copy: remember it so
            # converted() can write a shamap next to its metadata
            self.wc = url
        else:
            self.wc = None
        self.convertfp = None
297 297
298 298 def setrevmap(self, revmap):
299 299 lastrevs = {}
300 300 for revid in revmap.iterkeys():
301 301 uuid, module, revnum = revsplit(revid)
302 302 lastrevnum = lastrevs.setdefault(module, revnum)
303 303 if revnum > lastrevnum:
304 304 lastrevs[module] = revnum
305 305 self.lastrevs = lastrevs
306 306
    def exists(self, path, optrev):
        """Return True if path exists in the repository at revision
        optrev (an svn_opt_revision_t)."""
        try:
            svn.client.ls(self.url.rstrip('/') + '/' + urllib.quote(path),
                          optrev, False, self.ctx)
            return True
        except SubversionException:
            return False
314 314
    def getheads(self):
        """Return the list of revids to convert from: the module head
        first, then one head per non-empty branch.  Also resolves the
        trunk/branches/tags layout into self.module/self.tags."""

        def isdir(path, revnum):
            # True if path is a directory in the repository at revnum
            kind = self._checkpath(path, revnum)
            return kind == svn.core.svn_node_dir

        def getcfgpath(name, rev):
            # Resolve the convert.svn.<name> layout option, checking the
            # path exists at rev.  Returns None when the user disabled it
            # (empty value) or when the default directory is absent.
            cfgpath = self.ui.config('convert', 'svn.' + name)
            if cfgpath is not None and cfgpath.strip() == '':
                return None
            path = (cfgpath or name).strip('/')
            if not self.exists(path, rev):
                if self.module.endswith(path) and name == 'trunk':
                    # we are converting from inside this directory
                    return None
                if cfgpath:
                    raise util.Abort(_('expected %s to be at %r, but not found')
                                     % (name, path))
                return None
            self.ui.note(_('found %s at %r\n') % (name, path))
            return path

        rev = optrev(self.last_changed)
        oldmodule = ''
        trunk = getcfgpath('trunk', rev)
        self.tags = getcfgpath('tags', rev)
        branches = getcfgpath('branches', rev)

        # If the project has a trunk or branches, we will extract heads
        # from them. We keep the project root otherwise.
        if trunk:
            oldmodule = self.module or ''
            self.module += '/' + trunk
            self.head = self.latest(self.module, self.last_changed)
            if not self.head:
                raise util.Abort(_('no revision found in module %s')
                                 % self.module)

        # First head in the list is the module's head
        self.heads = [self.head]
        if self.tags is not None:
            self.tags = '%s/%s' % (oldmodule , (self.tags or 'tags'))

        # Check if branches bring a few more heads to the list
        if branches:
            rpath = self.url.strip('/')
            branchnames = svn.client.ls(rpath + '/' + urllib.quote(branches),
                                        rev, False, self.ctx)
            for branch in branchnames.keys():
                module = '%s/%s/%s' % (oldmodule, branches, branch)
                if not isdir(module, self.last_changed):
                    # stray file under branches/, not a branch
                    continue
                brevid = self.latest(module, self.last_changed)
                if not brevid:
                    self.ui.note(_('ignoring empty branch %s\n') % branch)
                    continue
                self.ui.note(_('found branch %s at %d\n') %
                             (branch, self.revnum(brevid)))
                self.heads.append(brevid)

        # --config convert.svn.startrev only makes sense with one head
        if self.startrev and self.heads:
            if len(self.heads) > 1:
                raise util.Abort(_('svn: start revision is not supported '
                                   'with more than one branch'))
            revnum = self.revnum(self.heads[0])
            if revnum < self.startrev:
                raise util.Abort(
                    _('svn: no revision found after start revision %d')
                    % self.startrev)

        return self.heads
386 386
    def getchanges(self, rev):
        """Return (files, copies) for rev, where files is a sorted list
        of (path, rev) pairs and copies maps destination paths to their
        copy sources.  Removed paths are recorded in self.removed."""
        if self._changescache and self._changescache[0] == rev:
            # result cached by the preceding getchangedfiles() call
            return self._changescache[1]
        self._changescache = None
        (paths, parents) = self.paths[rev]
        if parents:
            files, self.removed, copies = self.expandpaths(rev, paths, parents)
        else:
            # Perform a full checkout on roots
            uuid, module, revnum = revsplit(rev)
            entries = svn.client.ls(self.baseurl + urllib.quote(module),
                                    optrev(revnum), True, self.ctx)
            files = [n for n, e in entries.iteritems()
                     if e.kind == svn.core.svn_node_file]
            copies = {}
            self.removed = set()

        files.sort()
        files = zip(files, [rev] * len(files))

        # caller caches the result, so free it here to release memory
        del self.paths[rev]
        return (files, copies)
410 410
411 411 def getchangedfiles(self, rev, i):
412 412 changes = self.getchanges(rev)
413 413 self._changescache = (rev, changes)
414 414 return [f[0] for f in changes[0]]
415 415
    def getcommit(self, rev):
        """Return (and forget) the commit object for rev, fetching and
        parsing the surrounding log range on a cache miss."""
        if rev not in self.commits:
            uuid, module, revnum = revsplit(rev)
            self.module = module
            self.reparent(module)
            # We assume that:
            # - requests for revisions after "stop" come from the
            # revision graph backward traversal. Cache all of them
            # down to stop, they will be used eventually.
            # - requests for revisions before "stop" come to get
            # isolated branches parents. Just fetch what is needed.
            stop = self.lastrevs.get(module, 0)
            if revnum < stop:
                stop = revnum + 1
            self._fetch_revisions(revnum, stop)
        commit = self.commits[rev]
        # caller caches the result, so free it here to release memory
        del self.commits[rev]
        return commit
435 435
    def gettags(self):
        """Return a {tagname: revid} map built by walking the tags
        directory history backwards from the latest revision."""
        tags = {}
        if self.tags is None:
            return tags

        # svn tags are just a convention, project branches left in a
        # 'tags' directory. There is no other relationship than
        # ancestry, which is expensive to discover and makes them hard
        # to update incrementally. Worse, past revisions may be
        # referenced by tags far away in the future, requiring a deep
        # history traversal on every calculation. Current code
        # performs a single backward traversal, tracking moves within
        # the tags directory (tag renaming) and recording a new tag
        # everytime a project is copied from outside the tags
        # directory. It also lists deleted tags, this behaviour may
        # change in the future.
        pendings = []
        tagspath = self.tags
        start = svn.ra.get_latest_revnum(self.ra)
        stream = self._getlog([self.tags], start, self.startrev)
        try:
            for entry in stream:
                origpaths, revnum, author, date, message = entry
                # (source, source_rev, destination) triples for this rev
                copies = [(e.copyfrom_path, e.copyfrom_rev, p) for p, e
                          in origpaths.iteritems() if e.copyfrom_path]
                # Apply moves/copies from more specific to general
                copies.sort(reverse=True)

                srctagspath = tagspath
                if copies and copies[-1][2] == tagspath:
                    # Track tags directory moves
                    srctagspath = copies.pop()[0]

                for source, sourcerev, dest in copies:
                    if not dest.startswith(tagspath + '/'):
                        continue
                    for tag in pendings:
                        if tag[0].startswith(dest):
                            # a pending tag was moved: update its origin
                            tagpath = source + tag[0][len(dest):]
                            tag[:2] = [tagpath, sourcerev]
                            break
                    else:
                        pendings.append([source, sourcerev, dest])

                # Filter out tags with children coming from different
                # parts of the repository like:
                # /tags/tag.1 (from /trunk:10)
                # /tags/tag.1/foo (from /branches/foo:12)
                # Here/tags/tag.1 discarded as well as its children.
                # It happens with tools like cvs2svn. Such tags cannot
                # be represented in mercurial.
                addeds = dict((p, e.copyfrom_path) for p, e
                              in origpaths.iteritems()
                              if e.action == 'A' and e.copyfrom_path)
                badroots = set()
                for destroot in addeds:
                    for source, sourcerev, dest in pendings:
                        if (not dest.startswith(destroot + '/')
                            or source.startswith(addeds[destroot] + '/')):
                            continue
                        badroots.add(destroot)
                        break

                for badroot in badroots:
                    pendings = [p for p in pendings if p[2] != badroot
                                and not p[2].startswith(badroot + '/')]

                # Tell tag renamings from tag creations
                remainings = []
                for source, sourcerev, dest in pendings:
                    tagname = dest.split('/')[-1]
                    if source.startswith(srctagspath):
                        # renamed within the tags directory: keep pending
                        remainings.append([source, sourcerev, tagname])
                        continue
                    if tagname in tags:
                        # Keep the latest tag value
                        continue
                    # From revision may be fake, get one with changes
                    try:
                        tagid = self.latest(source, sourcerev)
                        if tagid and tagname not in tags:
                            tags[tagname] = tagid
                    except SvnPathNotFound:
                        # It happens when we are following directories
                        # we assumed were copied with their parents
                        # but were really created in the tag
                        # directory.
                        pass
                pendings = remainings
                tagspath = srctagspath
        finally:
            stream.close()
        return tags
529 529
530 530 def converted(self, rev, destrev):
531 531 if not self.wc:
532 532 return
533 533 if self.convertfp is None:
534 534 self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'),
535 535 'a')
536 536 self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev)))
537 537 self.convertfp.flush()
538 538
539 539 def revid(self, revnum, module=None):
540 540 return 'svn:%s%s@%s' % (self.uuid, module or self.module, revnum)
541 541
542 542 def revnum(self, rev):
543 543 return int(rev.split('@')[-1])
544 544
    def latest(self, path, stop=0):
        """Find the latest revid affecting path, up to stop. It may return
        a revision in a different module, since a branch may be moved without
        a change being reported. Return None if computed module does not
        belong to rootmodule subtree.

        Raises SvnPathNotFound when path does not exist up to stop.
        """
        if not path.startswith(self.rootmodule):
            # Requests on foreign branches may be forbidden at server level
            self.ui.debug('ignoring foreign branch %r\n' % path)
            return None

        if not stop:
            stop = svn.ra.get_latest_revnum(self.ra)
        try:
            # stat from the repository root so path is absolute
            prevmodule = self.reparent('')
            dirent = svn.ra.stat(self.ra, path.strip('/'), stop)
            self.reparent(prevmodule)
        except SubversionException:
            dirent = None
        if not dirent:
            raise SvnPathNotFound(_('%s not found up to revision %d')
                                  % (path, stop))

        # stat() gives us the previous revision on this line of
        # development, but it might be in *another module*. Fetch the
        # log and detect renames down to the latest revision.
        stream = self._getlog([path], stop, dirent.created_rev)
        try:
            for entry in stream:
                paths, revnum, author, date, message = entry
                if revnum <= dirent.created_rev:
                    break

                for p in paths:
                    if not path.startswith(p) or not paths[p].copyfrom_path:
                        continue
                    # follow the rename backwards to the source path
                    newpath = paths[p].copyfrom_path + path[len(p):]
                    self.ui.debug("branch renamed from %s to %s at %d\n" %
                                  (path, newpath, revnum))
                    path = newpath
                    break
        finally:
            stream.close()

        if not path.startswith(self.rootmodule):
            self.ui.debug('ignoring foreign branch %r\n' % path)
            return None
        return self.revid(dirent.created_rev, path)
593 593
594 594 def reparent(self, module):
595 595 """Reparent the svn transport and return the previous parent."""
596 596 if self.prevmodule == module:
597 597 return module
598 598 svnurl = self.baseurl + urllib.quote(module)
599 599 prevmodule = self.prevmodule
600 600 if prevmodule is None:
601 601 prevmodule = ''
602 602 self.ui.debug("reparent to %s\n" % svnurl)
603 603 svn.ra.reparent(self.ra, svnurl)
604 604 self.prevmodule = module
605 605 return prevmodule
606 606
    def expandpaths(self, rev, paths, parents):
        """Expand rev's changed paths into (changed, removed, copies)
        relative to the current module, recursing into directory events
        so that only file paths are reported."""
        changed, removed = set(), set()
        copies = {}

        new_module, revnum = revsplit(rev)[1:]
        if new_module != self.module:
            self.module = new_module
            self.reparent(self.module)

        for i, (path, ent) in enumerate(paths):
            self.ui.progress(_('scanning paths'), i, item=path,
                             total=len(paths))
            entrypath = self.getrelpath(path)

            kind = self._checkpath(entrypath, revnum)
            if kind == svn.core.svn_node_file:
                changed.add(self.recode(entrypath))
                if not ent.copyfrom_path or not parents:
                    continue
                # Copy sources not in parent revisions cannot be
                # represented, ignore their origin for now
                pmodule, prevnum = revsplit(parents[0])[1:]
                if ent.copyfrom_rev < prevnum:
                    continue
                copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule)
                if not copyfrom_path:
                    continue
                self.ui.debug("copied to %s from %s@%s\n" %
                              (entrypath, copyfrom_path, ent.copyfrom_rev))
                copies[self.recode(entrypath)] = self.recode(copyfrom_path)
            elif kind == 0: # gone, but had better be a deleted *file*
                self.ui.debug("gone from %s\n" % ent.copyfrom_rev)
                pmodule, prevnum = revsplit(parents[0])[1:]
                parentpath = pmodule + "/" + entrypath
                # what was this path in the parent revision?
                fromkind = self._checkpath(entrypath, prevnum, pmodule)

                if fromkind == svn.core.svn_node_file:
                    removed.add(self.recode(entrypath))
                elif fromkind == svn.core.svn_node_dir:
                    # a deleted directory: remove every file it contained
                    oroot = parentpath.strip('/')
                    nroot = path.strip('/')
                    children = self._iterfiles(oroot, prevnum)
                    for childpath in children:
                        childpath = childpath.replace(oroot, nroot)
                        childpath = self.getrelpath("/" + childpath, pmodule)
                        if childpath:
                            removed.add(self.recode(childpath))
                else:
                    self.ui.debug('unknown path in revision %d: %s\n' % \
                                  (revnum, path))
            elif kind == svn.core.svn_node_dir:
                if ent.action == 'M':
                    # If the directory just had a prop change,
                    # then we shouldn't need to look for its children.
                    continue
                if ent.action == 'R' and parents:
                    # If a directory is replacing a file, mark the previous
                    # file as deleted
                    pmodule, prevnum = revsplit(parents[0])[1:]
                    pkind = self._checkpath(entrypath, prevnum, pmodule)
                    if pkind == svn.core.svn_node_file:
                        removed.add(self.recode(entrypath))
                    elif pkind == svn.core.svn_node_dir:
                        # We do not know what files were kept or removed,
                        # mark them all as changed.
                        for childpath in self._iterfiles(pmodule, prevnum):
                            childpath = self.getrelpath("/" + childpath)
                            if childpath:
                                changed.add(self.recode(childpath))

                # every file below an added/replaced directory changed
                for childpath in self._iterfiles(path, revnum):
                    childpath = self.getrelpath("/" + childpath)
                    if childpath:
                        changed.add(self.recode(childpath))

                # Handle directory copies
                if not ent.copyfrom_path or not parents:
                    continue
                # Copy sources not in parent revisions cannot be
                # represented, ignore their origin for now
                pmodule, prevnum = revsplit(parents[0])[1:]
                if ent.copyfrom_rev < prevnum:
                    continue
                copyfrompath = self.getrelpath(ent.copyfrom_path, pmodule)
                if not copyfrompath:
                    continue
                self.ui.debug("mark %s came from %s:%d\n"
                              % (path, copyfrompath, ent.copyfrom_rev))
                children = self._iterfiles(ent.copyfrom_path, ent.copyfrom_rev)
                for childpath in children:
                    childpath = self.getrelpath("/" + childpath, pmodule)
                    if not childpath:
                        continue
                    copytopath = path + childpath[len(copyfrompath):]
                    copytopath = self.getrelpath(copytopath)
                    copies[self.recode(copytopath)] = self.recode(childpath)

        self.ui.progress(_('scanning paths'), None)
        changed.update(removed)
        return (list(changed), removed, copies)
707 707
    def _fetch_revisions(self, from_revnum, to_revnum):
        """Fetch the svn log over [to_revnum, from_revnum] and populate
        self.commits and self.paths with the parsed revisions."""
        if from_revnum < to_revnum:
            from_revnum, to_revnum = to_revnum, from_revnum

        # last parsed commit; its (empty) parent list is patched once the
        # next-older revision is parsed, see end of parselogentry()
        self.child_cset = None

        def parselogentry(orig_paths, revnum, author, date, message):
            """Return the parsed commit object or None, and True if
            the revision is a branch root.
            """
            self.ui.debug("parsing revision %d (%d changes)\n" %
                          (revnum, len(orig_paths)))

            branched = False
            rev = self.revid(revnum)
            # branch log might return entries for a parent we already have

            if rev in self.commits or revnum < to_revnum:
                return None, branched

            parents = []
            # check whether this revision is the start of a branch or part
            # of a branch renaming
            orig_paths = sorted(orig_paths.iteritems())
            root_paths = [(p, e) for p, e in orig_paths
                          if self.module.startswith(p)]
            if root_paths:
                path, ent = root_paths[-1]
                if ent.copyfrom_path:
                    branched = True
                    newpath = ent.copyfrom_path + self.module[len(path):]
                    # ent.copyfrom_rev may not be the actual last revision
                    previd = self.latest(newpath, ent.copyfrom_rev)
                    if previd is not None:
                        prevmodule, prevnum = revsplit(previd)[1:]
                        if prevnum >= self.startrev:
                            parents = [previd]
                            self.ui.note(
                                _('found parent of branch %s at %d: %s\n') %
                                (self.module, prevnum, prevmodule))
                else:
                    self.ui.debug("no copyfrom path, don't know what to do.\n")

            paths = []
            # filter out unrelated paths
            for path, ent in orig_paths:
                if self.getrelpath(path) is None:
                    continue
                paths.append((path, ent))

            # Example SVN datetime. Includes microseconds.
            # ISO-8601 conformant
            # '2007-01-04T17:35:00.902377Z'
            date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])

            log = message and self.recode(message) or ''
            author = author and self.recode(author) or ''
            try:
                branch = self.module.split("/")[-1]
                if branch == self.trunkname:
                    # trunk maps to mercurial's default (unnamed) branch
                    branch = None
            except IndexError:
                branch = None

            cset = commit(author=author,
                          date=util.datestr(date),
                          desc=log,
                          parents=parents,
                          branch=branch,
                          rev=rev)

            self.commits[rev] = cset
            # The parents list is *shared* among self.paths and the
            # commit object. Both will be updated below.
            self.paths[rev] = (paths, cset.parents)
            if self.child_cset and not self.child_cset.parents:
                self.child_cset.parents[:] = [rev]
            self.child_cset = cset
            return cset, branched

        self.ui.note(_('fetching revision log for "%s" from %d to %d\n') %
                     (self.module, from_revnum, to_revnum))

        try:
            firstcset = None
            lastonbranch = False
            stream = self._getlog([self.module], from_revnum, to_revnum)
            try:
                for entry in stream:
                    paths, revnum, author, date, message = entry
                    if revnum < self.startrev:
                        # past the user-requested start revision: stop
                        lastonbranch = True
                        break
                    if not paths:
                        self.ui.debug('revision %d has no entries\n' % revnum)
                        # If we ever leave the loop on an empty
                        # revision, do not try to get a parent branch
                        lastonbranch = lastonbranch or revnum == 0
                        continue
                    cset, lastonbranch = parselogentry(paths, revnum, author,
                                                       date, message)
                    if cset:
                        firstcset = cset
                    if lastonbranch:
                        break
            finally:
                stream.close()

            if not lastonbranch and firstcset and not firstcset.parents:
                # The first revision of the sequence (the last fetched one)
                # has invalid parents if not a branch root. Find the parent
                # revision now, if any.
                try:
                    firstrevnum = self.revnum(firstcset.rev)
                    if firstrevnum > 1:
                        latest = self.latest(self.module, firstrevnum - 1)
                        if latest:
                            firstcset.parents.append(latest)
                except SvnPathNotFound:
                    pass
        except SubversionException, (inst, num):
            if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION:
                raise util.Abort(_('svn: branch has no revision %s') % to_revnum)
            raise
832 832
    def getfile(self, file, rev):
        """Return (data, mode) for file at rev, where mode is '' for a
        regular file, 'x' for executable, 'l' for a symlink.

        Raises IOError when the file was removed in rev or not found.
        """
        # TODO: ra.get_file transmits the whole file instead of diffs.
        if file in self.removed:
            raise IOError()
        mode = ''
        try:
            new_module, revnum = revsplit(rev)[1:]
            if self.module != new_module:
                self.module = new_module
                self.reparent(self.module)
            io = StringIO()
            info = svn.ra.get_file(self.ra, file, revnum, io)
            data = io.getvalue()
            # ra.get_files() seems to keep a reference on the input buffer
            # preventing collection. Release it explicitely.
            io.close()
            if isinstance(info, list):
                info = info[-1]
            # map svn properties onto a mercurial mode flag
            mode = ("svn:executable" in info) and 'x' or ''
            mode = ("svn:special" in info) and 'l' or mode
        except SubversionException, e:
            notfound = (svn.core.SVN_ERR_FS_NOT_FOUND,
                        svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND)
            if e.apr_err in notfound: # File not found
                raise IOError()
            raise
        if mode == 'l':
            # svn stores symlink targets as "link <target>"
            link_prefix = "link "
            if data.startswith(link_prefix):
                data = data[len(link_prefix):]
        return data, mode
864 864
865 865 def _iterfiles(self, path, revnum):
866 866 """Enumerate all files in path at revnum, recursively."""
867 867 path = path.strip('/')
868 868 pool = Pool()
869 869 rpath = '/'.join([self.baseurl, urllib.quote(path)]).strip('/')
870 870 entries = svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool)
871 871 if path:
872 872 path += '/'
873 873 return ((path + p) for p, e in entries.iteritems()
874 874 if e.kind == svn.core.svn_node_file)
875 875
876 876 def getrelpath(self, path, module=None):
877 877 if module is None:
878 878 module = self.module
879 879 # Given the repository url of this wc, say
880 880 # "http://server/plone/CMFPlone/branches/Plone-2_0-branch"
881 881 # extract the "entry" portion (a relative path) from what
882 882 # svn log --xml says, ie
883 883 # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py"
884 884 # that is to say "tests/PloneTestCase.py"
885 885 if path.startswith(module):
886 886 relative = path.rstrip('/')[len(module):]
887 887 if relative.startswith('/'):
888 888 return relative[1:]
889 889 elif relative == '':
890 890 return relative
891 891
892 892 # The path is outside our tracked tree...
893 893 self.ui.debug('%r is not under %r, ignoring\n' % (path, module))
894 894 return None
895 895
896 896 def _checkpath(self, path, revnum, module=None):
897 897 if module is not None:
898 898 prevmodule = self.reparent('')
899 899 path = module + '/' + path
900 900 try:
901 901 # ra.check_path does not like leading slashes very much, it leads
902 902 # to PROPFIND subversion errors
903 903 return svn.ra.check_path(self.ra, path.strip('/'), revnum)
904 904 finally:
905 905 if module is not None:
906 906 self.reparent(prevmodule)
907 907
908 908 def _getlog(self, paths, start, end, limit=0, discover_changed_paths=True,
909 909 strict_node_history=False):
910 910 # Normalize path names, svn >= 1.5 only wants paths relative to
911 911 # supplied URL
912 912 relpaths = []
913 913 for p in paths:
914 914 if not p.startswith('/'):
915 915 p = self.module + '/' + p
916 916 relpaths.append(p.strip('/'))
917 917 args = [self.baseurl, relpaths, start, end, limit, discover_changed_paths,
918 918 strict_node_history]
919 919 arg = encodeargs(args)
920 920 hgexe = util.hgexecutable()
921 921 cmd = '%s debugsvnlog' % util.shellquote(hgexe)
922 922 stdin, stdout = util.popen2(util.quotecommand(cmd))
923 923 stdin.write(arg)
924 924 try:
925 925 stdin.close()
926 926 except IOError:
927 927 raise util.Abort(_('Mercurial failed to run itself, check'
928 928 ' hg executable is in PATH'))
929 929 return logstream(stdout)
930 930
931 931 pre_revprop_change = '''#!/bin/sh
932 932
933 933 REPOS="$1"
934 934 REV="$2"
935 935 USER="$3"
936 936 PROPNAME="$4"
937 937 ACTION="$5"
938 938
939 939 if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
940 940 if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi
941 941 if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi
942 942
943 943 echo "Changing prohibited revision property" >&2
944 944 exit 1
945 945 '''
946 946
947 947 class svn_sink(converter_sink, commandline):
948 948 commit_re = re.compile(r'Committed revision (\d+).', re.M)
949 949 uuid_re = re.compile(r'Repository UUID:\s*(\S+)', re.M)
950 950
951 951 def prerun(self):
952 952 if self.wc:
953 953 os.chdir(self.wc)
954 954
955 955 def postrun(self):
956 956 if self.wc:
957 957 os.chdir(self.cwd)
958 958
959 959 def join(self, name):
960 960 return os.path.join(self.wc, '.svn', name)
961 961
962 962 def revmapfile(self):
963 963 return self.join('hg-shamap')
964 964
965 965 def authorfile(self):
966 966 return self.join('hg-authormap')
967 967
968 968 def __init__(self, ui, path):
969 969
970 970 converter_sink.__init__(self, ui, path)
971 971 commandline.__init__(self, ui, 'svn')
972 972 self.delete = []
973 973 self.setexec = []
974 974 self.delexec = []
975 975 self.copies = []
976 976 self.wc = None
977 977 self.cwd = os.getcwd()
978 978
979 979 path = os.path.realpath(path)
980 980
981 981 created = False
982 982 if os.path.isfile(os.path.join(path, '.svn', 'entries')):
983 983 self.wc = path
984 984 self.run0('update')
985 985 else:
986 986 wcpath = os.path.join(os.getcwd(), os.path.basename(path) + '-wc')
987 987
988 988 if os.path.isdir(os.path.dirname(path)):
989 989 if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
990 990 ui.status(_('initializing svn repository %r\n') %
991 991 os.path.basename(path))
992 992 commandline(ui, 'svnadmin').run0('create', path)
993 993 created = path
994 994 path = util.normpath(path)
995 995 if not path.startswith('/'):
996 996 path = '/' + path
997 997 path = 'file://' + path
998 998
999 999 ui.status(_('initializing svn working copy %r\n')
1000 1000 % os.path.basename(wcpath))
1001 1001 self.run0('checkout', path, wcpath)
1002 1002
1003 1003 self.wc = wcpath
1004 1004 self.opener = scmutil.opener(self.wc)
1005 1005 self.wopener = scmutil.opener(self.wc)
1006 1006 self.childmap = mapfile(ui, self.join('hg-childmap'))
1007 1007 self.is_exec = util.checkexec(self.wc) and util.is_exec or None
1008 1008
1009 1009 if created:
1010 1010 hook = os.path.join(created, 'hooks', 'pre-revprop-change')
1011 1011 fp = open(hook, 'w')
1012 1012 fp.write(pre_revprop_change)
1013 1013 fp.close()
1014 util.set_flags(hook, False, True)
1014 util.setflags(hook, False, True)
1015 1015
1016 1016 output = self.run0('info')
1017 1017 self.uuid = self.uuid_re.search(output).group(1).strip()
1018 1018
1019 1019 def wjoin(self, *names):
1020 1020 return os.path.join(self.wc, *names)
1021 1021
1022 1022 def putfile(self, filename, flags, data):
1023 1023 if 'l' in flags:
1024 1024 self.wopener.symlink(data, filename)
1025 1025 else:
1026 1026 try:
1027 1027 if os.path.islink(self.wjoin(filename)):
1028 1028 os.unlink(filename)
1029 1029 except OSError:
1030 1030 pass
1031 1031 self.wopener.write(filename, data)
1032 1032
1033 1033 if self.is_exec:
1034 1034 was_exec = self.is_exec(self.wjoin(filename))
1035 1035 else:
1036 1036 # On filesystems not supporting execute-bit, there is no way
1037 1037 # to know if it is set but asking subversion. Setting it
1038 1038 # systematically is just as expensive and much simpler.
1039 1039 was_exec = 'x' not in flags
1040 1040
1041 util.set_flags(self.wjoin(filename), False, 'x' in flags)
1041 util.setflags(self.wjoin(filename), False, 'x' in flags)
1042 1042 if was_exec:
1043 1043 if 'x' not in flags:
1044 1044 self.delexec.append(filename)
1045 1045 else:
1046 1046 if 'x' in flags:
1047 1047 self.setexec.append(filename)
1048 1048
1049 1049 def _copyfile(self, source, dest):
1050 1050 # SVN's copy command pukes if the destination file exists, but
1051 1051 # our copyfile method expects to record a copy that has
1052 1052 # already occurred. Cross the semantic gap.
1053 1053 wdest = self.wjoin(dest)
1054 1054 exists = os.path.lexists(wdest)
1055 1055 if exists:
1056 1056 fd, tempname = tempfile.mkstemp(
1057 1057 prefix='hg-copy-', dir=os.path.dirname(wdest))
1058 1058 os.close(fd)
1059 1059 os.unlink(tempname)
1060 1060 os.rename(wdest, tempname)
1061 1061 try:
1062 1062 self.run0('copy', source, dest)
1063 1063 finally:
1064 1064 if exists:
1065 1065 try:
1066 1066 os.unlink(wdest)
1067 1067 except OSError:
1068 1068 pass
1069 1069 os.rename(tempname, wdest)
1070 1070
1071 1071 def dirs_of(self, files):
1072 1072 dirs = set()
1073 1073 for f in files:
1074 1074 if os.path.isdir(self.wjoin(f)):
1075 1075 dirs.add(f)
1076 1076 for i in strutil.rfindall(f, '/'):
1077 1077 dirs.add(f[:i])
1078 1078 return dirs
1079 1079
1080 1080 def add_dirs(self, files):
1081 1081 add_dirs = [d for d in sorted(self.dirs_of(files))
1082 1082 if not os.path.exists(self.wjoin(d, '.svn', 'entries'))]
1083 1083 if add_dirs:
1084 1084 self.xargs(add_dirs, 'add', non_recursive=True, quiet=True)
1085 1085 return add_dirs
1086 1086
1087 1087 def add_files(self, files):
1088 1088 if files:
1089 1089 self.xargs(files, 'add', quiet=True)
1090 1090 return files
1091 1091
1092 1092 def tidy_dirs(self, names):
1093 1093 deleted = []
1094 1094 for d in sorted(self.dirs_of(names), reverse=True):
1095 1095 wd = self.wjoin(d)
1096 1096 if os.listdir(wd) == '.svn':
1097 1097 self.run0('delete', d)
1098 1098 deleted.append(d)
1099 1099 return deleted
1100 1100
1101 1101 def addchild(self, parent, child):
1102 1102 self.childmap[parent] = child
1103 1103
1104 1104 def revid(self, rev):
1105 1105 return u"svn:%s@%s" % (self.uuid, rev)
1106 1106
1107 1107 def putcommit(self, files, copies, parents, commit, source, revmap):
1108 1108 # Apply changes to working copy
1109 1109 for f, v in files:
1110 1110 try:
1111 1111 data, mode = source.getfile(f, v)
1112 1112 except IOError:
1113 1113 self.delete.append(f)
1114 1114 else:
1115 1115 self.putfile(f, mode, data)
1116 1116 if f in copies:
1117 1117 self.copies.append([copies[f], f])
1118 1118 files = [f[0] for f in files]
1119 1119
1120 1120 for parent in parents:
1121 1121 try:
1122 1122 return self.revid(self.childmap[parent])
1123 1123 except KeyError:
1124 1124 pass
1125 1125 entries = set(self.delete)
1126 1126 files = frozenset(files)
1127 1127 entries.update(self.add_dirs(files.difference(entries)))
1128 1128 if self.copies:
1129 1129 for s, d in self.copies:
1130 1130 self._copyfile(s, d)
1131 1131 self.copies = []
1132 1132 if self.delete:
1133 1133 self.xargs(self.delete, 'delete')
1134 1134 self.delete = []
1135 1135 entries.update(self.add_files(files.difference(entries)))
1136 1136 entries.update(self.tidy_dirs(entries))
1137 1137 if self.delexec:
1138 1138 self.xargs(self.delexec, 'propdel', 'svn:executable')
1139 1139 self.delexec = []
1140 1140 if self.setexec:
1141 1141 self.xargs(self.setexec, 'propset', 'svn:executable', '*')
1142 1142 self.setexec = []
1143 1143
1144 1144 fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
1145 1145 fp = os.fdopen(fd, 'w')
1146 1146 fp.write(commit.desc)
1147 1147 fp.close()
1148 1148 try:
1149 1149 output = self.run0('commit',
1150 1150 username=util.shortuser(commit.author),
1151 1151 file=messagefile,
1152 1152 encoding='utf-8')
1153 1153 try:
1154 1154 rev = self.commit_re.search(output).group(1)
1155 1155 except AttributeError:
1156 1156 if not files:
1157 1157 return parents[0]
1158 1158 self.ui.warn(_('unexpected svn output:\n'))
1159 1159 self.ui.warn(output)
1160 1160 raise util.Abort(_('unable to cope with svn output'))
1161 1161 if commit.rev:
1162 1162 self.run('propset', 'hg:convert-rev', commit.rev,
1163 1163 revprop=True, revision=rev)
1164 1164 if commit.branch and commit.branch != 'default':
1165 1165 self.run('propset', 'hg:convert-branch', commit.branch,
1166 1166 revprop=True, revision=rev)
1167 1167 for parent in parents:
1168 1168 self.addchild(parent, rev)
1169 1169 return self.revid(rev)
1170 1170 finally:
1171 1171 os.unlink(messagefile)
1172 1172
1173 1173 def puttags(self, tags):
1174 1174 self.ui.warn(_('writing Subversion tags is not yet implemented\n'))
1175 1175 return None, None
@@ -1,328 +1,328 b''
1 1 # extdiff.py - external diff program support for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''command to allow external programs to compare revisions
9 9
10 10 The extdiff Mercurial extension allows you to use external programs
11 11 to compare revisions, or revision with working directory. The external
12 12 diff programs are called with a configurable set of options and two
13 13 non-option arguments: paths to directories containing snapshots of
14 14 files to compare.
15 15
16 16 The extdiff extension also allows to configure new diff commands, so
17 17 you do not need to type :hg:`extdiff -p kdiff3` always. ::
18 18
19 19 [extdiff]
20 20 # add new command that runs GNU diff(1) in 'context diff' mode
21 21 cdiff = gdiff -Nprc5
22 22 ## or the old way:
23 23 #cmd.cdiff = gdiff
24 24 #opts.cdiff = -Nprc5
25 25
26 26 # add new command called vdiff, runs kdiff3
27 27 vdiff = kdiff3
28 28
29 29 # add new command called meld, runs meld (no need to name twice)
30 30 meld =
31 31
32 32 # add new command called vimdiff, runs gvimdiff with DirDiff plugin
33 33 # (see http://www.vim.org/scripts/script.php?script_id=102) Non
34 34 # English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in
35 35 # your .vimrc
36 36 vimdiff = gvim -f '+next' '+execute "DirDiff" argv(0) argv(1)'
37 37
38 38 Tool arguments can include variables that are expanded at runtime::
39 39
40 40 $parent1, $plabel1 - filename, descriptive label of first parent
41 41 $child, $clabel - filename, descriptive label of child revision
42 42 $parent2, $plabel2 - filename, descriptive label of second parent
43 43 $root - repository root
44 44 $parent is an alias for $parent1.
45 45
46 46 The extdiff extension will look in your [diff-tools] and [merge-tools]
47 47 sections for diff tool arguments, when none are specified in [extdiff].
48 48
49 49 ::
50 50
51 51 [extdiff]
52 52 kdiff3 =
53 53
54 54 [diff-tools]
55 55 kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
56 56
57 57 You can use -I/-X and list of file or directory names like normal
58 58 :hg:`diff` command. The extdiff extension makes snapshots of only
59 59 needed files, so running the external diff program will actually be
60 60 pretty fast (at least faster than having to compare the entire tree).
61 61 '''
62 62
63 63 from mercurial.i18n import _
64 64 from mercurial.node import short, nullid
65 65 from mercurial import cmdutil, scmutil, util, commands, encoding
66 66 import os, shlex, shutil, tempfile, re
67 67
68 68 def snapshot(ui, repo, files, node, tmproot):
69 69 '''snapshot files as of some revision
70 70 if not using snapshot, -I/-X does not work and recursive diff
71 71 in tools like kdiff3 and meld displays too many files.'''
72 72 dirname = os.path.basename(repo.root)
73 73 if dirname == "":
74 74 dirname = "root"
75 75 if node is not None:
76 76 dirname = '%s.%s' % (dirname, short(node))
77 77 base = os.path.join(tmproot, dirname)
78 78 os.mkdir(base)
79 79 if node is not None:
80 80 ui.note(_('making snapshot of %d files from rev %s\n') %
81 81 (len(files), short(node)))
82 82 else:
83 83 ui.note(_('making snapshot of %d files from working directory\n') %
84 84 (len(files)))
85 85 wopener = scmutil.opener(base)
86 86 fns_and_mtime = []
87 87 ctx = repo[node]
88 88 for fn in files:
89 89 wfn = util.pconvert(fn)
90 90 if not wfn in ctx:
91 91 # File doesn't exist; could be a bogus modify
92 92 continue
93 93 ui.note(' %s\n' % wfn)
94 94 dest = os.path.join(base, wfn)
95 95 fctx = ctx[wfn]
96 96 data = repo.wwritedata(wfn, fctx.data())
97 97 if 'l' in fctx.flags():
98 98 wopener.symlink(data, wfn)
99 99 else:
100 100 wopener.write(wfn, data)
101 101 if 'x' in fctx.flags():
102 util.set_flags(dest, False, True)
102 util.setflags(dest, False, True)
103 103 if node is None:
104 104 fns_and_mtime.append((dest, repo.wjoin(fn),
105 105 os.lstat(dest).st_mtime))
106 106 return dirname, fns_and_mtime
107 107
108 108 def dodiff(ui, repo, diffcmd, diffopts, pats, opts):
109 109 '''Do the actuall diff:
110 110
111 111 - copy to a temp structure if diffing 2 internal revisions
112 112 - copy to a temp structure if diffing working revision with
113 113 another one and more than 1 file is changed
114 114 - just invoke the diff for a single file in the working dir
115 115 '''
116 116
117 117 revs = opts.get('rev')
118 118 change = opts.get('change')
119 119 args = ' '.join(diffopts)
120 120 do3way = '$parent2' in args
121 121
122 122 if revs and change:
123 123 msg = _('cannot specify --rev and --change at the same time')
124 124 raise util.Abort(msg)
125 125 elif change:
126 126 node2 = cmdutil.revsingle(repo, change, None).node()
127 127 node1a, node1b = repo.changelog.parents(node2)
128 128 else:
129 129 node1a, node2 = cmdutil.revpair(repo, revs)
130 130 if not revs:
131 131 node1b = repo.dirstate.p2()
132 132 else:
133 133 node1b = nullid
134 134
135 135 # Disable 3-way merge if there is only one parent
136 136 if do3way:
137 137 if node1b == nullid:
138 138 do3way = False
139 139
140 140 matcher = cmdutil.match(repo, pats, opts)
141 141 mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher)[:3])
142 142 if do3way:
143 143 mod_b, add_b, rem_b = map(set, repo.status(node1b, node2, matcher)[:3])
144 144 else:
145 145 mod_b, add_b, rem_b = set(), set(), set()
146 146 modadd = mod_a | add_a | mod_b | add_b
147 147 common = modadd | rem_a | rem_b
148 148 if not common:
149 149 return 0
150 150
151 151 tmproot = tempfile.mkdtemp(prefix='extdiff.')
152 152 try:
153 153 # Always make a copy of node1a (and node1b, if applicable)
154 154 dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
155 155 dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot)[0]
156 156 rev1a = '@%d' % repo[node1a].rev()
157 157 if do3way:
158 158 dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
159 159 dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot)[0]
160 160 rev1b = '@%d' % repo[node1b].rev()
161 161 else:
162 162 dir1b = None
163 163 rev1b = ''
164 164
165 165 fns_and_mtime = []
166 166
167 167 # If node2 in not the wc or there is >1 change, copy it
168 168 dir2root = ''
169 169 rev2 = ''
170 170 if node2:
171 171 dir2 = snapshot(ui, repo, modadd, node2, tmproot)[0]
172 172 rev2 = '@%d' % repo[node2].rev()
173 173 elif len(common) > 1:
174 174 #we only actually need to get the files to copy back to
175 175 #the working dir in this case (because the other cases
176 176 #are: diffing 2 revisions or single file -- in which case
177 177 #the file is already directly passed to the diff tool).
178 178 dir2, fns_and_mtime = snapshot(ui, repo, modadd, None, tmproot)
179 179 else:
180 180 # This lets the diff tool open the changed file directly
181 181 dir2 = ''
182 182 dir2root = repo.root
183 183
184 184 label1a = rev1a
185 185 label1b = rev1b
186 186 label2 = rev2
187 187
188 188 # If only one change, diff the files instead of the directories
189 189 # Handle bogus modifies correctly by checking if the files exist
190 190 if len(common) == 1:
191 191 common_file = util.localpath(common.pop())
192 192 dir1a = os.path.join(tmproot, dir1a, common_file)
193 193 label1a = common_file + rev1a
194 194 if not os.path.isfile(dir1a):
195 195 dir1a = os.devnull
196 196 if do3way:
197 197 dir1b = os.path.join(tmproot, dir1b, common_file)
198 198 label1b = common_file + rev1b
199 199 if not os.path.isfile(dir1b):
200 200 dir1b = os.devnull
201 201 dir2 = os.path.join(dir2root, dir2, common_file)
202 202 label2 = common_file + rev2
203 203
204 204 # Function to quote file/dir names in the argument string.
205 205 # When not operating in 3-way mode, an empty string is
206 206 # returned for parent2
207 207 replace = dict(parent=dir1a, parent1=dir1a, parent2=dir1b,
208 208 plabel1=label1a, plabel2=label1b,
209 209 clabel=label2, child=dir2,
210 210 root=repo.root)
211 211 def quote(match):
212 212 key = match.group()[1:]
213 213 if not do3way and key == 'parent2':
214 214 return ''
215 215 return util.shellquote(replace[key])
216 216
217 217 # Match parent2 first, so 'parent1?' will match both parent1 and parent
218 218 regex = '\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)'
219 219 if not do3way and not re.search(regex, args):
220 220 args += ' $parent1 $child'
221 221 args = re.sub(regex, quote, args)
222 222 cmdline = util.shellquote(diffcmd) + ' ' + args
223 223
224 224 ui.debug('running %r in %s\n' % (cmdline, tmproot))
225 225 util.system(cmdline, cwd=tmproot)
226 226
227 227 for copy_fn, working_fn, mtime in fns_and_mtime:
228 228 if os.lstat(copy_fn).st_mtime != mtime:
229 229 ui.debug('file changed while diffing. '
230 230 'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
231 231 util.copyfile(copy_fn, working_fn)
232 232
233 233 return 1
234 234 finally:
235 235 ui.note(_('cleaning up temp directory\n'))
236 236 shutil.rmtree(tmproot)
237 237
238 238 def extdiff(ui, repo, *pats, **opts):
239 239 '''use external program to diff repository (or selected files)
240 240
241 241 Show differences between revisions for the specified files, using
242 242 an external program. The default program used is diff, with
243 243 default options "-Npru".
244 244
245 245 To select a different program, use the -p/--program option. The
246 246 program will be passed the names of two directories to compare. To
247 247 pass additional options to the program, use -o/--option. These
248 248 will be passed before the names of the directories to compare.
249 249
250 250 When two revision arguments are given, then changes are shown
251 251 between those revisions. If only one revision is specified then
252 252 that revision is compared to the working directory, and, when no
253 253 revisions are specified, the working directory files are compared
254 254 to its parent.'''
255 255 program = opts.get('program')
256 256 option = opts.get('option')
257 257 if not program:
258 258 program = 'diff'
259 259 option = option or ['-Npru']
260 260 return dodiff(ui, repo, program, option, pats, opts)
261 261
262 262 cmdtable = {
263 263 "extdiff":
264 264 (extdiff,
265 265 [('p', 'program', '',
266 266 _('comparison program to run'), _('CMD')),
267 267 ('o', 'option', [],
268 268 _('pass option to comparison program'), _('OPT')),
269 269 ('r', 'rev', [],
270 270 _('revision'), _('REV')),
271 271 ('c', 'change', '',
272 272 _('change made by revision'), _('REV')),
273 273 ] + commands.walkopts,
274 274 _('hg extdiff [OPT]... [FILE]...')),
275 275 }
276 276
277 277 def uisetup(ui):
278 278 for cmd, path in ui.configitems('extdiff'):
279 279 if cmd.startswith('cmd.'):
280 280 cmd = cmd[4:]
281 281 if not path:
282 282 path = cmd
283 283 diffopts = ui.config('extdiff', 'opts.' + cmd, '')
284 284 diffopts = diffopts and [diffopts] or []
285 285 elif cmd.startswith('opts.'):
286 286 continue
287 287 else:
288 288 # command = path opts
289 289 if path:
290 290 diffopts = shlex.split(path)
291 291 path = diffopts.pop(0)
292 292 else:
293 293 path, diffopts = cmd, []
294 294 # look for diff arguments in [diff-tools] then [merge-tools]
295 295 if diffopts == []:
296 296 args = ui.config('diff-tools', cmd+'.diffargs') or \
297 297 ui.config('merge-tools', cmd+'.diffargs')
298 298 if args:
299 299 diffopts = shlex.split(args)
300 300 def save(cmd, path, diffopts):
301 301 '''use closure to save diff command to use'''
302 302 def mydiff(ui, repo, *pats, **opts):
303 303 return dodiff(ui, repo, path, diffopts + opts['option'],
304 304 pats, opts)
305 305 doc = _('''\
306 306 use %(path)s to diff repository (or selected files)
307 307
308 308 Show differences between revisions for the specified files, using
309 309 the %(path)s program.
310 310
311 311 When two revision arguments are given, then changes are shown
312 312 between those revisions. If only one revision is specified then
313 313 that revision is compared to the working directory, and, when no
314 314 revisions are specified, the working directory files are compared
315 315 to its parent.\
316 316 ''') % dict(path=util.uirepr(path))
317 317
318 318 # We must translate the docstring right away since it is
319 319 # used as a format string. The string will unfortunately
320 320 # be translated again in commands.helpcmd and this will
321 321 # fail when the docstring contains non-ASCII characters.
322 322 # Decoding the string to a Unicode string here (using the
323 323 # right encoding) prevents that.
324 324 mydiff.__doc__ = doc.decode(encoding.encoding)
325 325 return mydiff
326 326 cmdtable[cmd] = (save(cmd, path, diffopts),
327 327 cmdtable['extdiff'][1][1:],
328 328 _('hg %s [OPTION]... [FILE]...') % cmd)
@@ -1,1397 +1,1397 b''
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import os, sys, errno, re, glob, tempfile
11 11 import util, scmutil, templater, patch, error, templatekw
12 12 import match as matchmod
13 13 import similar, revset, subrepo
14 14
15 15 revrangesep = ':'
16 16
17 17 def parsealiases(cmd):
18 18 return cmd.lstrip("^").split("|")
19 19
20 20 def findpossible(cmd, table, strict=False):
21 21 """
22 22 Return cmd -> (aliases, command table entry)
23 23 for each matching command.
24 24 Return debug commands (or their aliases) only if no normal command matches.
25 25 """
26 26 choice = {}
27 27 debugchoice = {}
28 28 for e in table.keys():
29 29 aliases = parsealiases(e)
30 30 found = None
31 31 if cmd in aliases:
32 32 found = cmd
33 33 elif not strict:
34 34 for a in aliases:
35 35 if a.startswith(cmd):
36 36 found = a
37 37 break
38 38 if found is not None:
39 39 if aliases[0].startswith("debug") or found.startswith("debug"):
40 40 debugchoice[found] = (aliases, table[e])
41 41 else:
42 42 choice[found] = (aliases, table[e])
43 43
44 44 if not choice and debugchoice:
45 45 choice = debugchoice
46 46
47 47 return choice
48 48
49 49 def findcmd(cmd, table, strict=True):
50 50 """Return (aliases, command table entry) for command string."""
51 51 choice = findpossible(cmd, table, strict)
52 52
53 53 if cmd in choice:
54 54 return choice[cmd]
55 55
56 56 if len(choice) > 1:
57 57 clist = choice.keys()
58 58 clist.sort()
59 59 raise error.AmbiguousCommand(cmd, clist)
60 60
61 61 if choice:
62 62 return choice.values()[0]
63 63
64 64 raise error.UnknownCommand(cmd)
65 65
66 66 def findrepo(p):
67 67 while not os.path.isdir(os.path.join(p, ".hg")):
68 68 oldp, p = p, os.path.dirname(p)
69 69 if p == oldp:
70 70 return None
71 71
72 72 return p
73 73
74 74 def bail_if_changed(repo):
75 75 if repo.dirstate.p2() != nullid:
76 76 raise util.Abort(_('outstanding uncommitted merge'))
77 77 modified, added, removed, deleted = repo.status()[:4]
78 78 if modified or added or removed or deleted:
79 79 raise util.Abort(_("outstanding uncommitted changes"))
80 80
81 81 def logmessage(opts):
82 82 """ get the log message according to -m and -l option """
83 83 message = opts.get('message')
84 84 logfile = opts.get('logfile')
85 85
86 86 if message and logfile:
87 87 raise util.Abort(_('options --message and --logfile are mutually '
88 88 'exclusive'))
89 89 if not message and logfile:
90 90 try:
91 91 if logfile == '-':
92 92 message = sys.stdin.read()
93 93 else:
94 94 message = util.readfile(logfile)
95 95 except IOError, inst:
96 96 raise util.Abort(_("can't read commit message '%s': %s") %
97 97 (logfile, inst.strerror))
98 98 return message
99 99
100 100 def loglimit(opts):
101 101 """get the log limit according to option -l/--limit"""
102 102 limit = opts.get('limit')
103 103 if limit:
104 104 try:
105 105 limit = int(limit)
106 106 except ValueError:
107 107 raise util.Abort(_('limit must be a positive integer'))
108 108 if limit <= 0:
109 109 raise util.Abort(_('limit must be positive'))
110 110 else:
111 111 limit = None
112 112 return limit
113 113
114 114 def revsingle(repo, revspec, default='.'):
115 115 if not revspec:
116 116 return repo[default]
117 117
118 118 l = revrange(repo, [revspec])
119 119 if len(l) < 1:
120 120 raise util.Abort(_('empty revision set'))
121 121 return repo[l[-1]]
122 122
123 123 def revpair(repo, revs):
124 124 if not revs:
125 125 return repo.dirstate.p1(), None
126 126
127 127 l = revrange(repo, revs)
128 128
129 129 if len(l) == 0:
130 130 return repo.dirstate.p1(), None
131 131
132 132 if len(l) == 1:
133 133 return repo.lookup(l[0]), None
134 134
135 135 return repo.lookup(l[0]), repo.lookup(l[-1])
136 136
137 137 def revrange(repo, revs):
138 138 """Yield revision as strings from a list of revision specifications."""
139 139
140 140 def revfix(repo, val, defval):
141 141 if not val and val != 0 and defval is not None:
142 142 return defval
143 143 return repo.changelog.rev(repo.lookup(val))
144 144
145 145 seen, l = set(), []
146 146 for spec in revs:
147 147 # attempt to parse old-style ranges first to deal with
148 148 # things like old-tag which contain query metacharacters
149 149 try:
150 150 if isinstance(spec, int):
151 151 seen.add(spec)
152 152 l.append(spec)
153 153 continue
154 154
155 155 if revrangesep in spec:
156 156 start, end = spec.split(revrangesep, 1)
157 157 start = revfix(repo, start, 0)
158 158 end = revfix(repo, end, len(repo) - 1)
159 159 step = start > end and -1 or 1
160 160 for rev in xrange(start, end + step, step):
161 161 if rev in seen:
162 162 continue
163 163 seen.add(rev)
164 164 l.append(rev)
165 165 continue
166 166 elif spec and spec in repo: # single unquoted rev
167 167 rev = revfix(repo, spec, None)
168 168 if rev in seen:
169 169 continue
170 170 seen.add(rev)
171 171 l.append(rev)
172 172 continue
173 173 except error.RepoLookupError:
174 174 pass
175 175
176 176 # fall through to new-style queries if old-style fails
177 177 m = revset.match(repo.ui, spec)
178 178 for r in m(repo, range(len(repo))):
179 179 if r not in seen:
180 180 l.append(r)
181 181 seen.update(l)
182 182
183 183 return l
184 184
185 185 def make_filename(repo, pat, node,
186 186 total=None, seqno=None, revwidth=None, pathname=None):
187 187 node_expander = {
188 188 'H': lambda: hex(node),
189 189 'R': lambda: str(repo.changelog.rev(node)),
190 190 'h': lambda: short(node),
191 191 }
192 192 expander = {
193 193 '%': lambda: '%',
194 194 'b': lambda: os.path.basename(repo.root),
195 195 }
196 196
197 197 try:
198 198 if node:
199 199 expander.update(node_expander)
200 200 if node:
201 201 expander['r'] = (lambda:
202 202 str(repo.changelog.rev(node)).zfill(revwidth or 0))
203 203 if total is not None:
204 204 expander['N'] = lambda: str(total)
205 205 if seqno is not None:
206 206 expander['n'] = lambda: str(seqno)
207 207 if total is not None and seqno is not None:
208 208 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
209 209 if pathname is not None:
210 210 expander['s'] = lambda: os.path.basename(pathname)
211 211 expander['d'] = lambda: os.path.dirname(pathname) or '.'
212 212 expander['p'] = lambda: pathname
213 213
214 214 newname = []
215 215 patlen = len(pat)
216 216 i = 0
217 217 while i < patlen:
218 218 c = pat[i]
219 219 if c == '%':
220 220 i += 1
221 221 c = pat[i]
222 222 c = expander[c]()
223 223 newname.append(c)
224 224 i += 1
225 225 return ''.join(newname)
226 226 except KeyError, inst:
227 227 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
228 228 inst.args[0])
229 229
230 230 def make_file(repo, pat, node=None,
231 231 total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
232 232
233 233 writable = mode not in ('r', 'rb')
234 234
235 235 if not pat or pat == '-':
236 236 fp = writable and sys.stdout or sys.stdin
237 237 return os.fdopen(os.dup(fp.fileno()), mode)
238 238 if hasattr(pat, 'write') and writable:
239 239 return pat
240 240 if hasattr(pat, 'read') and 'r' in mode:
241 241 return pat
242 242 return open(make_filename(repo, pat, node, total, seqno, revwidth,
243 243 pathname),
244 244 mode)
245 245
246 246 def expandpats(pats):
247 247 if not util.expandglobs:
248 248 return list(pats)
249 249 ret = []
250 250 for p in pats:
251 251 kind, name = matchmod._patsplit(p, None)
252 252 if kind is None:
253 253 try:
254 254 globbed = glob.glob(name)
255 255 except re.error:
256 256 globbed = [name]
257 257 if globbed:
258 258 ret.extend(globbed)
259 259 continue
260 260 ret.append(p)
261 261 return ret
262 262
def match(repo, pats=[], opts={}, globbed=False, default='relpath'):
    """Build a matcher for the given patterns and command options.

    Expands shell globs (unless globbed is set or default is not
    'relpath'), honours the include/exclude options, and wires up a
    bad-file callback that reports problems through repo.ui.
    """
    if pats == ("",):
        pats = []
    if default == 'relpath' and not globbed:
        pats = expandpats(pats or [])
    matcher = matchmod.match(repo.root, repo.getcwd(), pats,
                             opts.get('include'), opts.get('exclude'),
                             default, auditor=repo.auditor)

    def badfn(f, msg):
        # report unmatchable files instead of aborting
        repo.ui.warn("%s: %s\n" % (matcher.rel(f), msg))

    matcher.bad = badfn
    return matcher
275 275
def matchall(repo):
    '''Return a matcher that matches every file in the repository.'''
    return matchmod.always(repo.root, repo.getcwd())
278 278
def matchfiles(repo, files):
    '''Return a matcher that matches exactly the given list of files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files)
281 281
def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
    '''Schedule missing files for removal and unknown files for addition.

    Walks the working directory with a matcher built from pats/opts:
    unknown files are added, tracked files that vanished from disk are
    removed, and when similarity > 0 removed/added pairs are recorded
    as renames via similar.findrenames.  dry_run and similarity fall
    back to the corresponding entries in opts when not given.
    '''
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)
    # we'd use status here, except handling of symlinks and ignore is tricky
    added, unknown, deleted, removed = [], [], [], []
    audit_path = scmutil.pathauditor(repo.root)
    m = match(repo, pats, opts)
    for abs in repo.walk(m):
        target = repo.wjoin(abs)
        good = True
        try:
            audit_path(abs)
        except (OSError, util.Abort):
            # path fails the audit (e.g. escapes the repo); treat as bad
            good = False
        rel = m.rel(abs)
        exact = m.exact(abs)
        if good and abs not in repo.dirstate:
            unknown.append(abs)
            if repo.ui.verbose or not exact:
                repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
        elif repo.dirstate[abs] != 'r' and (not good or not os.path.lexists(target)
            or (os.path.isdir(target) and not os.path.islink(target))):
            # tracked but gone from disk (or replaced by a real directory)
            deleted.append(abs)
            if repo.ui.verbose or not exact:
                repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
        # for finding renames
        elif repo.dirstate[abs] == 'r':
            removed.append(abs)
        elif repo.dirstate[abs] == 'a':
            added.append(abs)
    copies = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo,
                added + unknown, removed + deleted, similarity):
            if repo.ui.verbose or not m.exact(old) or not m.exact(new):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (m.rel(old), m.rel(new), score * 100))
            copies[new] = old

    if not dry_run:
        # apply the collected changes under the working-dir lock
        wctx = repo[None]
        wlock = repo.wlock()
        try:
            wctx.remove(deleted)
            wctx.add(unknown)
            for new, old in copies.iteritems():
                wctx.copy(old, new)
        finally:
            wlock.release()
334 334
def updatedir(ui, repo, patches, similarity=0):
    '''Update dirstate after patch application according to metadata.

    patches maps filenames to git-patch metadata objects (or None).
    Records copies/renames, removes deleted files, restores link/exec
    flags, runs addremove over the touched files, and returns the
    sorted list of affected filenames.

    Fix: the pre-rename call util.set_flags() left over from the
    rename to util.setflags() is dropped; only the current API is
    called (and only once per file).
    '''
    if not patches:
        return
    copies = []
    removes = set()
    cfiles = patches.keys()
    cwd = repo.getcwd()
    if cwd:
        cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
    for f in patches:
        gp = patches[f]
        if not gp:
            continue
        if gp.op == 'RENAME':
            copies.append((gp.oldpath, gp.path))
            removes.add(gp.oldpath)
        elif gp.op == 'COPY':
            copies.append((gp.oldpath, gp.path))
        elif gp.op == 'DELETE':
            removes.add(gp.path)

    wctx = repo[None]
    for src, dst in copies:
        dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
    # when rename detection is on, addremove below handles removals
    if (not similarity) and removes:
        wctx.remove(sorted(removes), True)

    for f in patches:
        gp = patches[f]
        if gp and gp.mode:
            islink, isexec = gp.mode
            dst = repo.wjoin(gp.path)
            # patch won't create empty files
            if gp.op == 'ADD' and not os.path.lexists(dst):
                flags = (isexec and 'x' or '') + (islink and 'l' or '')
                repo.wwrite(gp.path, '', flags)
            util.setflags(dst, islink, isexec)
    addremove(repo, cfiles, similarity=similarity)
    files = patches.keys()
    files.extend([r for r in removes if r not in files])
    return sorted(files)
377 377
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst.

    Depending on dirstate status the result may be a plain add, a
    recorded copy, or (when copying a file back onto its copy source)
    just a normallookup - so dst is not necessarily marked as copied
    from src afterwards.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
        return

    if repo.dirstate[origsrc] == 'a' and origsrc == src:
        # source was only added in the working copy; nothing to record
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
    if dryrun:
        return
    if repo.dirstate[dst] in '?r':
        wctx.add([dst])
    else:
        wctx.copy(origsrc, dst)
396 396
def copy(ui, repo, pats, opts, rename=False):
    '''Copy (or, with rename=True, move) files named by pats to the last
    pattern, which is the destination.  Warns about each file that could
    not be copied and returns True if any copy failed.'''
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # collect (abs, rel, exact) source tuples matching pat,
        # skipping unmanaged and to-be-removed files
        srcs = []
        badstates = after and '?' or '?r'
        m = match(repo, [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # perform one copy; returns True on failure (for error counting)
        abstarget = scmutil.canonpath(repo.root, cwd, otarget)
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        if not after and exists or after and state in 'mn':
            if not opts['force']:
                ui.warn(_('%s: not overwriting - file exists\n') %
                        reltarget)
                return

        if after:
            # --after: only record the copy, the file must already exist
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            # actually copy the bytes on disk
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                util.copyfile(src, target)
            except IOError, inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working copy\n') % relsrc)
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        dirstatecopy(ui, repo, wctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            wctx.remove([abssrc], not after)

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = scmutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = scmutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many stripped sources already exist in dest
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res


    pats = expandpats(pats)
    if not pats:
        raise util.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise util.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise util.Abort(_('with multiple sources, destination must be an '
                               'existing directory'))
        if util.endswithsep(dest):
            raise util.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise util.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
597 597
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
            runargs=None, appendpid=False):
    '''Run a command as a service.

    With --daemon (and no --daemon-pipefds), re-launch the current
    command detached via util.rundetached and wait for the child to
    signal readiness by removing a temporary lock file; the parent
    then returns (through parentfn if given).  Otherwise: run initfn,
    write the pid file if requested, detach from the terminal when
    --daemon-pipefds is set (new session, stdio redirected to
    logfile or the null device), and finally invoke runfn.
    '''

    if opts['daemon'] and not opts['daemon_pipefds']:
        # Signal child process startup with file removal
        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
        os.close(lockfd)
        try:
            if not runargs:
                runargs = util.hgcmd() + sys.argv[1:]
            runargs.append('--daemon-pipefds=%s' % lockpath)
            # Don't pass --cwd to the child process, because we've already
            # changed directory.
            for i in xrange(1, len(runargs)):
                if runargs[i].startswith('--cwd='):
                    del runargs[i]
                    break
                elif runargs[i].startswith('--cwd'):
                    # separate-argument form: drop the flag and its value
                    del runargs[i:i + 2]
                    break
            def condfn():
                # child removes lockpath once it is up
                return not os.path.exists(lockpath)
            pid = util.rundetached(runargs, condfn)
            if pid < 0:
                raise util.Abort(_('child process failed to start'))
        finally:
            try:
                os.unlink(lockpath)
            except OSError, e:
                if e.errno != errno.ENOENT:
                    raise
        if parentfn:
            return parentfn(pid)
        else:
            return

    if initfn:
        initfn()

    if opts['pid_file']:
        mode = appendpid and 'a' or 'w'
        fp = open(opts['pid_file'], mode)
        fp.write(str(os.getpid()) + '\n')
        fp.close()

    if opts['daemon_pipefds']:
        # we are the detached child: finish daemonizing
        lockpath = opts['daemon_pipefds']
        try:
            os.setsid()
        except AttributeError:
            # platform without setsid (e.g. Windows)
            pass
        os.unlink(lockpath)
        util.hidewindow()
        sys.stdout.flush()
        sys.stderr.flush()

        # redirect stdio: stdin from null, stdout/stderr to logfile or null
        nullfd = os.open(util.nulldev, os.O_RDWR)
        logfilefd = nullfd
        if logfile:
            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        os.dup2(nullfd, 0)
        os.dup2(logfilefd, 1)
        os.dup2(logfilefd, 2)
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfilefd not in (0, 1, 2):
            os.close(logfilefd)

    if runfn:
        return runfn()
669 669
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None):
    '''export changesets as hg patches.

    Writes one patch per revision in revs, either to fp (if given) or
    to a file named by expanding template via make_file.  Each patch
    carries the "# HG changeset patch" header block followed by the
    diff against the (possibly switched) first parent.
    '''

    total = len(revs)
    revwidth = max([len(str(rev)) for rev in revs])

    def single(rev, seqno, fp):
        # emit one revision's patch to fp (opening it if needed)
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            # diff against the second parent instead of the first
            parents.reverse()
        prev = (parents and parents[0]) or nullid

        shouldclose = False
        if not fp:
            fp = make_file(repo, template, node, total=total, seqno=seqno,
                           revwidth=revwidth, mode='ab')
            if fp != template:
                shouldclose = True
        if fp != sys.stdout and hasattr(fp, 'name'):
            repo.ui.note("%s\n" % fp.name)

        fp.write("# HG changeset patch\n")
        fp.write("# User %s\n" % ctx.user())
        fp.write("# Date %d %d\n" % ctx.date())
        if branch and branch != 'default':
            fp.write("# Branch %s\n" % branch)
        fp.write("# Node ID %s\n" % hex(node))
        fp.write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            fp.write("# Parent %s\n" % hex(parents[1]))
        fp.write(ctx.description().rstrip())
        fp.write("\n\n")

        for chunk in patch.diff(repo, prev, node, opts=opts):
            fp.write(chunk)

        if shouldclose:
            fp.close()

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
715 715
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   listsubrepos=False):
    '''show diff or diffstat.

    Writes the diff (or, with stat=True, a diffstat summary) between
    node1 and node2 to the ui or to fp, optionally recursing into
    subrepositories when listsubrepos is set.
    '''
    if fp is None:
        write = ui.write
    else:
        # labels are meaningless when writing to a raw file object
        def write(s, **kw):
            fp.write(s)

    if stat:
        # diffstat ignores context lines; use the terminal width for layout
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
            if node2 is not None:
                node2 = ctx2.substate[subpath][1]
            submatch = matchmod.narrowmatcher(subpath, match)
            sub.diff(diffopts, node2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
751 751
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, patch, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.patch = patch          # matcher used for patch/diffstat output
        self.diffopts = diffopts
        self.header = {}            # rev -> buffered header text
        self.hunk = {}              # rev -> buffered changeset text
        self.lastheader = None      # last header written (avoid repeats)
        self.footer = None

    def flush(self, rev):
        '''Write any buffered output for rev; return 1 if something
        was flushed, 0 otherwise.'''
        if rev in self.header:
            h = self.header[rev]
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        # emit the footer accumulated during display, if any
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        '''Display ctx, buffering the output per-rev when buffered.'''
        if self.buffered:
            self.ui.pushbuffer()
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()

        if self.ui.quiet:
            # quiet mode: just "rev:shortnode"
            self.ui.write("%d:%s\n" % (rev, short(changenode)),
                          label='log.node')
            return

        log = self.repo.changelog
        date = util.datestr(ctx.date())

        # full hashes in debug mode, short ones otherwise
        hexfunc = self.ui.debugflag and hex or short

        parents = [(p, hexfunc(log.node(p)))
                   for p in self._meaningful_parentrevs(log, rev)]

        self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
                      label='log.changeset')

        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            self.ui.write(_("branch: %s\n") % branch,
                          label='log.branch')
        for bookmark in self.repo.nodebookmarks(changenode):
            self.ui.write(_("bookmark: %s\n") % bookmark,
                          label='log.bookmark')
        for tag in self.repo.nodetags(changenode):
            self.ui.write(_("tag: %s\n") % tag,
                          label='log.tag')
        for parent in parents:
            self.ui.write(_("parent: %d:%s\n") % parent,
                          label='log.parent')

        if self.ui.debugflag:
            mnode = ctx.manifestnode()
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        self.ui.write(_("user: %s\n") % ctx.user(),
                      label='log.user')
        self.ui.write(_("date: %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            # debug: full modified/added/removed breakdown
            files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
            for key, value in zip([_("files:"), _("files+:"), _("files-:")],
                                  files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            self.ui.write(_("copies: %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # non-verbose: first line of the description only
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(changenode, matchfn)

    def showpatch(self, node, matchfn):
        '''Show the diff and/or diffstat for node according to diffopts.'''
        if not matchfn:
            matchfn = self.patch
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffopts(self.ui, self.diffopts)
            prev = self.repo.changelog.parents(node)[0]
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")

    def _meaningful_parentrevs(self, log, rev):
        """Return list of meaningful (or all if debug) parentrevs for rev.

        For merges (two non-nullrev revisions) both parents are meaningful.
        Otherwise the first parent revision is considered meaningful if it
        is not the preceding revision.
        """
        parents = log.parentrevs(rev)
        if not self.ui.debugflag and parents[1] == nullrev:
            if parents[0] >= rev - 1:
                parents = []
            else:
                parents = [parents[0]]
        return parents
907 907
908 908
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
        # full node in debug mode, 12-char short form otherwise
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        self.t = templater.templater(mapfile, {'formatnode': formatnode},
                                     cache=defaulttempl)
        self.cache = {}             # shared cache exposed to templates

    def use_template(self, t):
        '''set template string to use'''
        self.t.cache['changeset'] = t

    def _meaningful_parentrevs(self, ctx):
        """Return list of meaningful (or all if debug) parentrevs for rev.
        """
        parents = ctx.parents()
        if len(parents) > 1:
            return parents
        if self.ui.debugflag:
            return [parents[0], self.repo['null']]
        if parents[0].rev() >= ctx.rev() - 1:
            return []
        return parents

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''

        showlist = templatekw.showlist

        # showparents() behaviour depends on ui trace level which
        # causes unexpected behaviours at templating level and makes
        # it harder to extract it in a standalone function. Its
        # behaviour cannot be changed so leave it here for now.
        def showparents(**args):
            ctx = args['ctx']
            parents = [[('rev', p.rev()), ('node', p.hex())]
                       for p in self._meaningful_parentrevs(ctx)]
            return showlist('parent', parents, **args)

        props = props.copy()
        props.update(templatekw.keywords)
        props['parents'] = showparents
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # find correct templates for current mode

        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
        ]

        # later (more specific) modes override earlier ones
        types = {'header': '', 'footer':'', 'changeset': 'changeset'}
        for mode, postfix in tmplmodes:
            for type in types:
                cur = postfix and ('%s_%s' % (type, postfix)) or type
                if mode and cur in self.t:
                    types[type] = cur

        try:

            # write header
            if types['header']:
                h = templater.stringify(self.t(types['header'], **props))
                if self.buffered:
                    self.header[ctx.rev()] = h
                else:
                    if self.lastheader != h:
                        self.lastheader = h
                        self.ui.write(h)

            # write changeset metadata, then patch if requested
            key = types['changeset']
            self.ui.write(templater.stringify(self.t(key, **props)))
            self.showpatch(ctx.node(), matchfn)

            if types['footer']:
                if not self.footer:
                    self.footer = templater.stringify(self.t(types['footer'],
                                                             **props))

        except KeyError, inst:
            msg = _("%s: no key named '%s'")
            raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
        except SyntaxError, inst:
            raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1010 1010
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # options
    patch = False
    if opts.get('patch') or opts.get('stat'):
        patch = matchall(repo)

    # an explicit template takes precedence over a style
    tmpl = opts.get('template')
    style = None
    if tmpl:
        tmpl = templater.parsestring(tmpl, quoted=False)
    else:
        style = opts.get('style')

    # ui settings
    if not (tmpl or style):
        tmpl = ui.config('ui', 'logtemplate')
        if tmpl:
            tmpl = templater.parsestring(tmpl)
        else:
            style = util.expandpath(ui.config('ui', 'style', ''))

    if not (tmpl or style):
        return changeset_printer(ui, repo, patch, opts, buffered)

    mapfile = None
    if style and not tmpl:
        mapfile = style
        # a bare style name refers to a shipped map-cmdline.<name> file
        if not os.path.split(mapfile)[0]:
            mapname = (templater.templatepath('map-cmdline.' + mapfile)
                       or templater.templatepath(mapfile))
            if mapname:
                mapfile = mapname

    try:
        t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
    except SyntaxError, inst:
        raise util.Abort(inst.args[0])
    if tmpl:
        t.use_template(tmpl)
    return t
1061 1061
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    datefn = util.matchdate(date)
    m = matchall(repo)
    matched = {}

    def prep(ctx, fns):
        # record the date of every revision that satisfies the spec
        when = ctx.date()
        if datefn(when[0]):
            matched[ctx.rev()] = when

    # walkchangerevs yields newest-first, so the first hit is tipmost
    for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
        rev = ctx.rev()
        if rev in matched:
            ui.status(_("Found revision %s from %s\n") %
                      (rev, util.datestr(matched[rev])))
            return str(rev)

    raise util.Abort(_("revision matching date not found"))
1082 1082
1083 1083 def walkchangerevs(repo, match, opts, prepare):
1084 1084 '''Iterate over files and the revs in which they changed.
1085 1085
1086 1086 Callers most commonly need to iterate backwards over the history
1087 1087 in which they are interested. Doing so has awful (quadratic-looking)
1088 1088 performance, so we use iterators in a "windowed" way.
1089 1089
1090 1090 We walk a window of revisions in the desired order. Within the
1091 1091 window, we first walk forwards to gather data, then in the desired
1092 1092 order (usually backwards) to display it.
1093 1093
1094 1094 This function returns an iterator yielding contexts. Before
1095 1095 yielding each context, the iterator will first call the prepare
1096 1096 function on each context in the window in forward order.'''
1097 1097
1098 1098 def increasing_windows(start, end, windowsize=8, sizelimit=512):
1099 1099 if start < end:
1100 1100 while start < end:
1101 1101 yield start, min(windowsize, end - start)
1102 1102 start += windowsize
1103 1103 if windowsize < sizelimit:
1104 1104 windowsize *= 2
1105 1105 else:
1106 1106 while start > end:
1107 1107 yield start, min(windowsize, start - end - 1)
1108 1108 start -= windowsize
1109 1109 if windowsize < sizelimit:
1110 1110 windowsize *= 2
1111 1111
1112 1112 follow = opts.get('follow') or opts.get('follow_first')
1113 1113
1114 1114 if not len(repo):
1115 1115 return []
1116 1116
1117 1117 if follow:
1118 1118 defrange = '%s:0' % repo['.'].rev()
1119 1119 else:
1120 1120 defrange = '-1:0'
1121 1121 revs = revrange(repo, opts['rev'] or [defrange])
1122 1122 if not revs:
1123 1123 return []
1124 1124 wanted = set()
1125 1125 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1126 1126 fncache = {}
1127 1127 change = util.cachefunc(repo.changectx)
1128 1128
1129 1129 # First step is to fill wanted, the set of revisions that we want to yield.
1130 1130 # When it does not induce extra cost, we also fill fncache for revisions in
1131 1131 # wanted: a cache of filenames that were changed (ctx.files()) and that
1132 1132 # match the file filtering conditions.
1133 1133
1134 1134 if not slowpath and not match.files():
1135 1135 # No files, no patterns. Display all revs.
1136 1136 wanted = set(revs)
1137 1137 copies = []
1138 1138
1139 1139 if not slowpath:
1140 1140 # We only have to read through the filelog to find wanted revisions
1141 1141
1142 1142 minrev, maxrev = min(revs), max(revs)
1143 1143 def filerevgen(filelog, last):
1144 1144 """
1145 1145 Only files, no patterns. Check the history of each file.
1146 1146
1147 1147 Examines filelog entries within minrev, maxrev linkrev range
1148 1148 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1149 1149 tuples in backwards order
1150 1150 """
1151 1151 cl_count = len(repo)
1152 1152 revs = []
1153 1153 for j in xrange(0, last + 1):
1154 1154 linkrev = filelog.linkrev(j)
1155 1155 if linkrev < minrev:
1156 1156 continue
1157 1157 # only yield rev for which we have the changelog, it can
1158 1158 # happen while doing "hg log" during a pull or commit
1159 1159 if linkrev >= cl_count:
1160 1160 break
1161 1161
1162 1162 parentlinkrevs = []
1163 1163 for p in filelog.parentrevs(j):
1164 1164 if p != nullrev:
1165 1165 parentlinkrevs.append(filelog.linkrev(p))
1166 1166 n = filelog.node(j)
1167 1167 revs.append((linkrev, parentlinkrevs,
1168 1168 follow and filelog.renamed(n)))
1169 1169
1170 1170 return reversed(revs)
1171 1171 def iterfiles():
1172 1172 for filename in match.files():
1173 1173 yield filename, None
1174 1174 for filename_node in copies:
1175 1175 yield filename_node
1176 1176 for file_, node in iterfiles():
1177 1177 filelog = repo.file(file_)
1178 1178 if not len(filelog):
1179 1179 if node is None:
1180 1180 # A zero count may be a directory or deleted file, so
1181 1181 # try to find matching entries on the slow path.
1182 1182 if follow:
1183 1183 raise util.Abort(
1184 1184 _('cannot follow nonexistent file: "%s"') % file_)
1185 1185 slowpath = True
1186 1186 break
1187 1187 else:
1188 1188 continue
1189 1189
1190 1190 if node is None:
1191 1191 last = len(filelog) - 1
1192 1192 else:
1193 1193 last = filelog.rev(node)
1194 1194
1195 1195
1196 1196 # keep track of all ancestors of the file
1197 1197 ancestors = set([filelog.linkrev(last)])
1198 1198
1199 1199 # iterate from latest to oldest revision
1200 1200 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1201 1201 if not follow:
1202 1202 if rev > maxrev:
1203 1203 continue
1204 1204 else:
1205 1205 # Note that last might not be the first interesting
1206 1206 # rev to us:
1207 1207 # if the file has been changed after maxrev, we'll
1208 1208 # have linkrev(last) > maxrev, and we still need
1209 1209 # to explore the file graph
1210 1210 if rev not in ancestors:
1211 1211 continue
1212 1212 # XXX insert 1327 fix here
1213 1213 if flparentlinkrevs:
1214 1214 ancestors.update(flparentlinkrevs)
1215 1215
1216 1216 fncache.setdefault(rev, []).append(file_)
1217 1217 wanted.add(rev)
1218 1218 if copied:
1219 1219 copies.append(copied)
1220 1220 if slowpath:
1221 1221 # We have to read the changelog to match filenames against
1222 1222 # changed files
1223 1223
1224 1224 if follow:
1225 1225 raise util.Abort(_('can only follow copies/renames for explicit '
1226 1226 'filenames'))
1227 1227
1228 1228 # The slow path checks files modified in every changeset.
1229 1229 for i in sorted(revs):
1230 1230 ctx = change(i)
1231 1231 matches = filter(match, ctx.files())
1232 1232 if matches:
1233 1233 fncache[i] = matches
1234 1234 wanted.add(i)
1235 1235
    class followfilter(object):
        """Stateful predicate deciding whether a rev is on the --follow line.

        Closure-dependent: reads ``repo`` from the enclosing function.
        The first rev passed to match() becomes the anchor (startrev);
        later calls track descendants (forward) or ancestors (backward).
        """
        def __init__(self, onlyfirst=False):
            # startrev is lazily bound on the first match() call
            self.startrev = nullrev
            # revs known to be on the followed line
            self.roots = set()
            # follow only the first parent of merges when True
            self.onlyfirst = onlyfirst

        def match(self, rev):
            def realparents(rev):
                # parents to follow; drops nullrev, and the second
                # parent too when onlyfirst is set
                if self.onlyfirst:
                    return repo.changelog.parentrevs(rev)[0:1]
                else:
                    return filter(lambda x: x != nullrev,
                                  repo.changelog.parentrevs(rev))

            if self.startrev == nullrev:
                # first call: anchor the walk here
                self.startrev = rev
                return True

            if rev > self.startrev:
                # forward: all descendants
                if not self.roots:
                    self.roots.add(self.startrev)
                for parent in realparents(rev):
                    if parent in self.roots:
                        self.roots.add(rev)
                        return True
            else:
                # backwards: all parents
                if not self.roots:
                    self.roots.update(realparents(self.startrev))
                if rev in self.roots:
                    # shrink the frontier: replace rev by its parents
                    self.roots.remove(rev)
                    self.roots.update(realparents(rev))
                    return True

            return False
1272 1272
1273 1273 # it might be worthwhile to do this in the iterator if the rev range
1274 1274 # is descending and the prune args are all within that range
1275 1275 for rev in opts.get('prune', ()):
1276 1276 rev = repo.changelog.rev(repo.lookup(rev))
1277 1277 ff = followfilter()
1278 1278 stop = min(revs[0], revs[-1])
1279 1279 for x in xrange(rev, stop - 1, -1):
1280 1280 if ff.match(x):
1281 1281 wanted.discard(x)
1282 1282
1283 1283 # Now that wanted is correctly initialized, we can iterate over the
1284 1284 # revision range, yielding only revisions in wanted.
1285 1285 def iterate():
1286 1286 if follow and not match.files():
1287 1287 ff = followfilter(onlyfirst=opts.get('follow_first'))
1288 1288 def want(rev):
1289 1289 return ff.match(rev) and rev in wanted
1290 1290 else:
1291 1291 def want(rev):
1292 1292 return rev in wanted
1293 1293
1294 1294 for i, window in increasing_windows(0, len(revs)):
1295 1295 nrevs = [rev for rev in revs[i:i + window] if want(rev)]
1296 1296 for rev in sorted(nrevs):
1297 1297 fns = fncache.get(rev)
1298 1298 ctx = change(rev)
1299 1299 if not fns:
1300 1300 def fns_generator():
1301 1301 for f in ctx.files():
1302 1302 if match(f):
1303 1303 yield f
1304 1304 fns = fns_generator()
1305 1305 prepare(ctx, fns)
1306 1306 for rev in nrevs:
1307 1307 yield change(rev)
1308 1308 return iterate()
1309 1309
def add(ui, repo, match, dryrun, listsubrepos, prefix):
    """Add files matched by ``match`` to the dirstate (hg add backend).

    Returns the list of "bad" filenames: names reported through
    match.bad plus names rejected by wctx.add. ``prefix`` is the
    path prefix used when recursing into subrepositories.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # intercept match.bad so bad names are collected AND still reported
    # through the original callback; order of the monkeypatch matters
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        # case-collision auditor (e.g. "README" vs "readme" on Windows)
        cca = scmutil.casecollisionauditor(ui, abort, wctx)
    for f in repo.walk(match):
        exact = match.exact(f)
        # exactly-named files are re-added even if already tracked
        if exact or f not in repo.dirstate:
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(join(f)))

    if listsubrepos:
        for subpath in wctx.substate:
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, match)
                bad.extend(sub.add(ui, submatch, dryrun, prefix))
            except error.LookupError:
                # subrepo listed in .hgsub but missing on disk: skip
                ui.status(_("skipping missing subrepository: %s\n")
                          % join(subpath))

    if not dryrun:
        rejected = wctx.add(names, prefix)
        # only surface rejections the user explicitly asked for
        bad.extend(f for f in rejected if f in match.files())
    return bad
1344 1344
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    # normalize the user-supplied date string into a parsed date tuple
    if opts.get('date'):
        opts['date'] = util.parsedate(opts['date'])

    message = logmessage(opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove'):
        addremove(repo, pats, opts)

    matcher = match(repo, pats, opts)
    return commitfunc(ui, repo, message, matcher, opts)
1358 1358
def commiteditor(repo, ctx, subs):
    """Return ctx's description, launching an editor only when it is empty."""
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(repo, ctx, subs)
1363 1363
def commitforceeditor(repo, ctx, subs):
    """Open the user's editor to obtain a commit message for ctx.

    Builds the "HG:" template (user, branch, merge, subrepo and file
    lists), runs the editor from the repository root, strips HG: lines
    from the result, and returns the message.

    Raises util.Abort if the resulting message is empty.
    """
    edittext = []
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    if ctx.description():
        edittext.append(ctx.description())
    edittext.append("")
    edittext.append("") # Empty line between message and comments.
    edittext.append(_("HG: Enter commit message."
                      " Lines beginning with 'HG:' are removed."))
    edittext.append(_("HG: Leave message empty to abort commit."))
    edittext.append("HG: --")
    edittext.append(_("HG: user: %s") % ctx.user())
    if ctx.p2():
        edittext.append(_("HG: branch merge"))
    if ctx.branch():
        edittext.append(_("HG: branch '%s'") % ctx.branch())
    edittext.extend([_("HG: subrepo %s") % s for s in subs])
    edittext.extend([_("HG: added %s") % f for f in added])
    edittext.extend([_("HG: changed %s") % f for f in modified])
    edittext.extend([_("HG: removed %s") % f for f in removed])
    if not added and not modified and not removed:
        edittext.append(_("HG: no files changed"))
    edittext.append("")
    # run editor in the repository root; restore the old cwd even if
    # the editor invocation raises (previously a failing edit left the
    # process chdir'd into the repo root)
    olddir = os.getcwd()
    os.chdir(repo.root)
    try:
        text = repo.ui.edit("\n".join(edittext), ctx.user())
    finally:
        os.chdir(olddir)
    # drop every HG: template line from the edited result
    text = re.sub("(?m)^HG:.*(\n|$)", "", text)

    if not text.strip():
        raise util.Abort(_("empty commit message"))

    return text
@@ -1,1957 +1,1957 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20
21 21 class localrepository(repo.repository):
22 22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
23 23 'known', 'getbundle'))
24 24 supportedformats = set(('revlogv1',))
25 25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 26 'dotencode'))
27 27
    def __init__(self, baseui, path=None, create=0):
        """Open (or, if create is true, initialize) the repository at path.

        Reads .hg/hgrc, validates the requires file, resolves a shared
        store via .hg/sharedpath if present, and wires up the store
        openers. Raises error.RepoError when the repo is missing (or,
        with create, already exists) and error.RequirementError for
        unsupported requirements.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            # per-repo config; a missing hgrc is fine
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    # fncache/dotencode only make sense on top of store
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.opener.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener.read("requires").splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RequirementError(
                    _("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            # shared repos point their store at another repo's .hg
            s = os.path.realpath(self.opener.read("sharedpath"))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        # lazily-populated caches; None/empty means "not loaded yet"
        self._branchcache = None
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None
114 114
115 115 def _applyrequirements(self, requirements):
116 116 self.requirements = requirements
117 117 self.sopener.options = {}
118 118
119 119 def _writerequirements(self):
120 120 reqfile = self.opener("requires", "w")
121 121 for r in self.requirements:
122 122 reqfile.write("%s\n" % r)
123 123 reqfile.close()
124 124
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        # must live under our root at all
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        # walk prefixes longest-first, looking for a declared subrepo
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    # path is inside a subrepo: let it decide recursively
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
161 161
    @util.propertycache
    def _bookmarks(self):
        # Lazily-loaded mapping of bookmark name -> node, cached per instance.
        return bookmarks.read(self)
165 165
    @util.propertycache
    def _bookmarkcurrent(self):
        # Lazily-loaded name of the active bookmark (or None), cached.
        return bookmarks.readcurrent(self)
169 169
    @propertycache
    def changelog(self):
        """The repository changelog, including pending data when run
        from a pretxn hook (HG_PENDING set by the transaction)."""
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            # only trust pending data advertised for this repository
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        # store layer needs to write revlogs with the changelog's version
        self.sopener.options['defversion'] = c.version
        return c
179 179
    @propertycache
    def manifest(self):
        # Lazily-constructed manifest revlog, cached per instance.
        return manifest.manifest(self.sopener)
183 183
    @propertycache
    def dirstate(self):
        """The working-directory dirstate, with parent validation.

        Unknown working parents (e.g. after a strip) are replaced by
        nullid, warning only once per repo instance.
        """
        warned = [0]  # mutable cell so the closure can flip it
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
199 199
200 200 def __getitem__(self, changeid):
201 201 if changeid is None:
202 202 return context.workingctx(self)
203 203 return context.changectx(self, changeid)
204 204
205 205 def __contains__(self, changeid):
206 206 try:
207 207 return bool(self.lookup(changeid))
208 208 except error.RepoLookupError:
209 209 return False
210 210
    def __nonzero__(self):
        # A repository object is always truthy, even when empty.
        return True
213 213
    def __len__(self):
        # Number of revisions in the repository.
        return len(self.changelog)
216 216
217 217 def __iter__(self):
218 218 for i in xrange(len(self)):
219 219 yield i
220 220
    def url(self):
        # URL of this repository; local repos use the file: scheme.
        return 'file:' + self.root
223 223
    def hook(self, name, throw=False, **args):
        # Run the named hook; with throw=True a failing hook raises.
        return hook.hook(self.ui, self, name, throw, **args)
226 226
227 227 tag_disallowed = ':\r\n'
228 228
    def _tag(self, names, node, message, local, user, date, extra={}):
        """Low-level tagging backend for tag().

        Writes local tags into .hg/localtags, or global tags into
        .hgtags followed by a commit of that file. Runs the pretag and
        tag hooks; returns the tagging changeset node (None for local
        tags).

        NOTE(review): mutable default ``extra={}`` -- harmless here
        because extra is never mutated, only forwarded to commit().
        """
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            # pretag hook may veto the tag (throw=True)
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append tag records at EOF, fixing a missing trailing newline
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    # re-record the old binding first so the tag history
                    # in the file stays linear
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        # commit only the .hgtags file
        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
296 296
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to commit a tag while .hgtags has uncommitted edits;
            # status()[:5] covers modified/added/removed/deleted/unknown
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
326 326
327 327 def tags(self):
328 328 '''return a mapping of tag to node'''
329 329 if self._tags is None:
330 330 (self._tags, self._tagtypes) = self._findtags()
331 331
332 332 return self._tags
333 333
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}                    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        # 'tip' is always present and always points at the newest rev
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
371 371
372 372 def tagtype(self, tagname):
373 373 '''
374 374 return the type of the given tag. result can be:
375 375
376 376 'local' : a local tag
377 377 'global' : a global tag
378 378 None : tag does not exist
379 379 '''
380 380
381 381 self.tags()
382 382
383 383 return self._tagtypes.get(tagname)
384 384
385 385 def tagslist(self):
386 386 '''return a list of tags ordered by revision'''
387 387 l = []
388 388 for t, n in self.tags().iteritems():
389 389 r = self.changelog.rev(n)
390 390 l.append((r, t, n))
391 391 return [(t, n) for r, t, n in sorted(l)]
392 392
    def nodetags(self, node):
        '''return the tags associated with a node'''
        # NOTE(review): truthiness check means an *empty* cache would be
        # rebuilt on every call; in practice tags() always contains at
        # least 'tip', so the cache is never empty once built.
        if not self.nodetagscache:
            self.nodetagscache = {}
            # invert the tag->node mapping into node->[tags]
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
            for tags in self.nodetagscache.itervalues():
                tags.sort()
        return self.nodetagscache.get(node, [])
402 402
403 403 def nodebookmarks(self, node):
404 404 marks = []
405 405 for bookmark, n in self._bookmarks.iteritems():
406 406 if n == node:
407 407 marks.append(bookmark)
408 408 return sorted(marks)
409 409
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        # Bring the partial branch-head cache up to date with the tip,
        # persisting the refreshed cache to disk. Returns partial.
        tiprev = len(self) - 1
        if lrev != tiprev:
            # fold in every changeset newer than the cached revision
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
419 419
    def updatebranchcache(self):
        """Ensure self._branchcache reflects the current changelog tip."""
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            # cache already current
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable in-memory cache (e.g. after a strip): reload from disk
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial
436 436
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache
441 441
    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            # prefer the newest head that is not closed; changelog
            # extra field index 5 carries the 'close' marker
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt
454 454
    def _readbranchcache(self):
        """Load the on-disk branch-head cache.

        Returns (partial, last, lrev): a branch->heads dict plus the
        tip node/rev the cache was valid for. Any problem (missing
        file, stale tip, corrupt line) degrades to an empty cache.
        """
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line records the tip the cache was computed at
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # a broken cache is not fatal -- fall back to empty
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
483 483
    def _writebranchcache(self, branches, tip, tiprev):
        """Best-effort write of the branch-head cache; failures are
        deliberately ignored (the cache can always be rebuilt)."""
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            # header: tip node and rev the cache is valid for
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.rename()
        except (IOError, OSError):
            pass
494 494
    def _updatebranchcache(self, partial, ctxgen):
        """Fold the changesets from ctxgen into the branch->heads map
        ``partial``, pruning entries that are no longer heads."""
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    # anything reachable from a newer head is not a head
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
520 520
    def lookup(self, key):
        """Resolve key (rev number, '.', 'null', 'tip', node prefix,
        bookmark, tag, or branch name) to a changeset node.

        Lookup order matters: exact node match, then bookmarks, tags,
        branches, and finally a partial node match. Raises
        error.RepoLookupError for unknown keys.
        """
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)
553 553
554 554 def lookupbranch(self, key, remote=None):
555 555 repo = remote or self
556 556 if key in repo.branchmap():
557 557 return key
558 558
559 559 repo = (remote and remote.local()) and remote or self
560 560 return repo[key].branch()
561 561
562 562 def known(self, nodes):
563 563 nm = self.changelog.nodemap
564 564 return [(n in nm) for n in nodes]
565 565
    def local(self):
        # This is a local (on-disk) repository.
        return True
568 568
    def join(self, f):
        # Path of f inside the .hg directory.
        return os.path.join(self.path, f)
571 571
    def wjoin(self, f):
        # Path of f inside the working directory.
        return os.path.join(self.root, f)
574 574
    def file(self, f):
        # Return the filelog for tracked path f (a leading '/' is stripped).
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)
579 579
    def changectx(self, changeid):
        # Alias for repo[changeid].
        return self[changeid]
582 582
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()
586 586
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
591 591
    def getcwd(self):
        # Current working directory, as tracked by the dirstate.
        return self.dirstate.getcwd()
594 594
    def pathto(self, f, cwd=None):
        # Return f rendered relative to cwd (delegates to dirstate).
        return self.dirstate.pathto(f, cwd)
597 597
    def wfile(self, f, mode='r'):
        # Open working-directory file f with the given mode.
        return self.wopener(f, mode)
600 600
    def _link(self, f):
        # True if working-directory file f is a symlink.
        return os.path.islink(self.wjoin(f))
603 603
    def _loadfilter(self, filter):
        """Build (and memoize) the list of (matcher, fn, params) for the
        given filter config section ('encode' or 'decode')."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables the pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # registered data filters take precedence over shell commands
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to running cmd as an external filter
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
627 627
628 628 def _filter(self, filterpats, filename, data):
629 629 for mf, fn, cmd in filterpats:
630 630 if mf(filename):
631 631 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
632 632 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
633 633 break
634 634
635 635 return data
636 636
    @propertycache
    def _encodefilterpats(self):
        # Cached [encode] filter patterns (applied on wread).
        return self._loadfilter('encode')
640 640
    @propertycache
    def _decodefilterpats(self):
        # Cached [decode] filter patterns (applied on wwrite).
        return self._loadfilter('decode')
644 644
    def adddatafilter(self, name, filter):
        # Register a named in-process data filter for encode/decode.
        self._datafilters[name] = filter
647 647
648 648 def wread(self, filename):
649 649 if self._link(filename):
650 650 data = os.readlink(self.wjoin(filename))
651 651 else:
652 652 data = self.wopener.read(filename)
653 653 return self._filter(self._encodefilterpats, filename, data)
654 654
655 655 def wwrite(self, filename, data, flags):
656 656 data = self._filter(self._decodefilterpats, filename, data)
657 657 if 'l' in flags:
658 658 self.wopener.symlink(data, filename)
659 659 else:
660 660 self.wopener.write(filename, data)
661 661 if 'x' in flags:
662 util.set_flags(self.wjoin(filename), False, True)
662 util.setflags(self.wjoin(filename), False, True)
663 663
    def wwritedata(self, filename, data):
        # Run data through the decode filters without touching the disk.
        return self._filter(self._decodefilterpats, filename, data)
666 666
    def transaction(self, desc):
        """Open (or nest into) a store transaction described by desc.

        Saves dirstate/branch/desc journal files first so a later
        rollback can restore them; the journal->undo renames happen
        when the transaction closes.
        """
        # reuse a still-running transaction if one exists
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        # on success the journal files become the undo files
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # weakref so an abandoned transaction can be garbage-collected
        self._transref = weakref.ref(tr)
        return tr
698 698
    def recover(self):
        """Roll back an interrupted transaction from the journal.

        Returns True if a journal was found and rolled back, False
        otherwise.
        """
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                # caches may now reference stripped revisions
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
713 713
    def rollback(self, dryrun=False):
        """Undo the last transaction using the saved undo.* files.

        Restores the journalled dirstate/branch/bookmarks, invalidates
        caches and reports the resulting parents.  Returns 1 (with a
        warning) when no rollback information exists; with dryrun=True
        only reports what would happen.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    # undo.desc format: "<old len(repo)>\n<source>[\n<extra>]"
                    # NOTE(review): if undo.desc has fewer than 2 lines,
                    # `desc` stays unbound below — upstream assumes >= 2.
                    args = self.opener.read("undo.desc").splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s)\n") % (
                                 int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                if os.path.exists(self.join('undo.bookmarks')):
                    util.rename(self.join('undo.bookmarks'),
                                self.join('bookmarks'))
                try:
                    branch = self.opener.read("undo.branch")
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("named branch could not be reset, "
                                   "current branch is still: %s\n")
                                 % self.dirstate.branch())
                # drop every cache derived from the now-stripped history
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
                parents = tuple([p.rev() for p in self.parents()])
                if len(parents) > 1:
                    self.ui.status(_("working directory now based on "
                                     "revisions %d and %d\n") % parents)
                else:
                    self.ui.status(_("working directory now based on "
                                     "revision %d\n") % parents)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
763 763
764 764 def invalidatecaches(self):
765 765 self._tags = None
766 766 self._tagtypes = None
767 767 self.nodetagscache = None
768 768 self._branchcache = None # in UTF-8
769 769 self._branchcachetip = None
770 770
771 771 def invalidate(self):
772 772 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
773 773 if a in self.__dict__:
774 774 delattr(self, a)
775 775 self.invalidatecaches()
776 776
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file *lockname*.

        First tries a non-blocking acquire; when the lock is held and
        wait is true, retries with the ui.timeout (default 600s) after
        warning the user, otherwise re-raises LockHeld.  Calls acquirefn
        once the lock is held and returns the lock object.
        """
        try:
            # non-blocking attempt first (timeout 0)
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
791 791
792 792 def lock(self, wait=True):
793 793 '''Lock the repository store (.hg/store) and return a weak reference
794 794 to the lock. Use this before modifying the store (e.g. committing or
795 795 stripping). If you are opening a transaction, get a lock as well.)'''
796 796 l = self._lockref and self._lockref()
797 797 if l is not None and l.held:
798 798 l.lock()
799 799 return l
800 800
801 801 l = self._lock(self.sjoin("lock"), wait, self.store.write,
802 802 self.invalidate, _('repository %s') % self.origroot)
803 803 self._lockref = weakref.ref(l)
804 804 return l
805 805
806 806 def wlock(self, wait=True):
807 807 '''Lock the non-store parts of the repository (everything under
808 808 .hg except .hg/store) and return a weak reference to the lock.
809 809 Use this before modifying files in .hg.'''
810 810 l = self._wlockref and self._wlockref()
811 811 if l is not None and l.held:
812 812 l.lock()
813 813 return l
814 814
815 815 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
816 816 self.dirstate.invalidate, _('working directory of %s') %
817 817 self.origroot)
818 818 self._wlockref = weakref.ref(l)
819 819 return l
820 820
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Writes fctx's data to its filelog (recording copy/rename metadata
        when present), appends the filename to changelist when content or
        flags changed, and returns the filenode to store in the manifest
        (the parent filenode when the file is unchanged).
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3     rev1 changes file foo
            #   \       /       rev2 renames foo to bar and changes it
            #    \- 2 -/        rev3 should have bar with all changes and
            #                   should record that bar descends from
            #                   bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3     rev4 reverts the content change from rev2
            #   \       /       merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4     as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
900 900
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the node of the new changeset, or None when there is
        nothing to commit (and force is not set).
        """
        # NOTE(review): the mutable default extra={} is shared across calls;
        # it is only read here, but callers should not rely on mutating it.

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories so explicit dir patterns can be
            # validated against the matched files below
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0])

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing changed and no branch/close marker: no-op commit
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                # point the user at the saved message before re-raising
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
1035 1035
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        Returns the new changelog node.  With error=True, IOErrors while
        reading a file abort the commit instead of treating the file as
        removed.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            # missing file: treat as removed
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                # no file changes: reuse p1's manifest unchanged
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1109 1109
1110 1110 def destroyed(self):
1111 1111 '''Inform the repository that nodes have been destroyed.
1112 1112 Intended for use by strip and rollback, so there's a common
1113 1113 place for anything that has to be done after destroying history.'''
1114 1114 # XXX it might be nice if we could take the list of destroyed
1115 1115 # nodes, but I don't see an easy way for rollback() to do that
1116 1116
1117 1117 # Ensure the persistent tag cache is updated. Doing it now
1118 1118 # means that the tag cache only has to worry about destroyed
1119 1119 # heads immediately after a strip/rollback. That in turn
1120 1120 # guarantees that "cachetip == currenttip" (comparing both rev
1121 1121 # and node) always means no nodes have been added or destroyed.
1122 1122
1123 1123 # XXX this is suboptimal when qrefresh'ing: we strip the current
1124 1124 # head, refresh the tag cache, then immediately add a new head.
1125 1125 # But I think doing it this way is necessary for the "instant
1126 1126 # tag cache retrieval" case to work.
1127 1127 self.invalidatecaches()
1128 1128
1129 1129 def walk(self, match, node=None):
1130 1130 '''
1131 1131 walk recursively through the directory tree or a given
1132 1132 changeset, finding all files matched by the match
1133 1133 function
1134 1134 '''
1135 1135 return self[node].walk(match)
1136 1136
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean);
        the unknown/ignored/clean lists are only populated when the
        corresponding flag argument is true.
        """

        def mfmatches(ctx):
            # ctx's manifest restricted to the files selected by match
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # only complain about files that do not exist in ctx1 either
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    # mf2[fn] of None (working dir pseudo-entry) forces the
                    # content compare via ctx1[fn].cmp(ctx2[fn])
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    # fold subrepo results into ours, path-prefixed
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
1267 1267
1268 1268 def heads(self, start=None):
1269 1269 heads = self.changelog.heads(start)
1270 1270 # sort the output in rev descending order
1271 1271 return sorted(heads, key=self.changelog.rev, reverse=True)
1272 1272
1273 1273 def branchheads(self, branch=None, start=None, closed=False):
1274 1274 '''return a (possibly filtered) list of heads for the given branch
1275 1275
1276 1276 Heads are returned in topological order, from newest to oldest.
1277 1277 If branch is None, use the dirstate branch.
1278 1278 If start is not None, return only heads reachable from start.
1279 1279 If closed is True, return heads that are marked as closed as well.
1280 1280 '''
1281 1281 if branch is None:
1282 1282 branch = self[None].branch()
1283 1283 branches = self.branchmap()
1284 1284 if branch not in branches:
1285 1285 return []
1286 1286 # the cache returns heads ordered lowest to highest
1287 1287 bheads = list(reversed(branches[branch]))
1288 1288 if start is not None:
1289 1289 # filter out the heads that cannot be reached from startrev
1290 1290 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1291 1291 bheads = [h for h in bheads if h in fbheads]
1292 1292 if not closed:
1293 1293 bheads = [h for h in bheads if
1294 1294 ('close' not in self.changelog.read(h)[5])]
1295 1295 return bheads
1296 1296
1297 1297 def branches(self, nodes):
1298 1298 if not nodes:
1299 1299 nodes = [self.changelog.tip()]
1300 1300 b = []
1301 1301 for n in nodes:
1302 1302 t = n
1303 1303 while 1:
1304 1304 p = self.changelog.parents(n)
1305 1305 if p[1] != nullid or p[0] == nullid:
1306 1306 b.append((t, n, p[0], p[1]))
1307 1307 break
1308 1308 n = p[0]
1309 1309 return b
1310 1310
1311 1311 def between(self, pairs):
1312 1312 r = []
1313 1313
1314 1314 for top, bottom in pairs:
1315 1315 n, l, i = top, [], 0
1316 1316 f = 1
1317 1317
1318 1318 while n != bottom and n != nullid:
1319 1319 p = self.changelog.parents(n)[0]
1320 1320 if i == f:
1321 1321 l.append(n)
1322 1322 f = f * 2
1323 1323 n = p
1324 1324 i += 1
1325 1325
1326 1326 r.append(l)
1327 1327
1328 1328 return r
1329 1329
    def pull(self, remote, heads=None, force=False):
        """Pull changesets from remote into this repository.

        heads optionally restricts the pull to the given remote heads;
        force pulls even from unrelated repositories.  Returns the
        integer result of addchangegroup(), or 0 when nothing was found.
        """
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                # pick the richest transfer method the remote supports
                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result
1363 1363
1364 1364 def checkpush(self, force, revs):
1365 1365 """Extensions can override this function if additional checks have
1366 1366 to be performed before pushing, or call it if they override push
1367 1367 command.
1368 1368 """
1369 1369 pass
1370 1370
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
        - 0 means HTTP error *or* nothing to push
        - 1 means we pushed and remote head count is unchanged *or*
          we have outgoing changesets but refused to push
        - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        # push local bookmarks to the remote when they fast-forward there
        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
1432 1432
1433 1433 def changegroupinfo(self, nodes, source):
1434 1434 if self.ui.verbose or source == 'bundle':
1435 1435 self.ui.status(_("%d changesets found\n") % len(nodes))
1436 1436 if self.ui.debugflag:
1437 1437 self.ui.debug("list of changesets:\n")
1438 1438 for node in nodes:
1439 1439 self.ui.debug("%s\n" % hex(node))
1440 1440
1441 1441 def changegroupsubset(self, bases, heads, source):
1442 1442 """Compute a changegroup consisting of all the nodes that are
1443 1443 descendents of any of the bases and ancestors of any of the heads.
1444 1444 Return a chunkbuffer object whose read() method will return
1445 1445 successive changegroup chunks.
1446 1446
1447 1447 It is fairly complex as determining which filenodes and which
1448 1448 manifest nodes need to be included for the changeset to be complete
1449 1449 is non-trivial.
1450 1450
1451 1451 Another wrinkle is doing the reverse, figuring out which changeset in
1452 1452 the changegroup a particular filenode or manifestnode belongs to.
1453 1453 """
1454 1454 cl = self.changelog
1455 1455 if not bases:
1456 1456 bases = [nullid]
1457 1457 csets, bases, heads = cl.nodesbetween(bases, heads)
1458 1458 # We assume that all ancestors of bases are known
1459 1459 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1460 1460 return self._changegroupsubset(common, csets, heads, source)
1461 1461
1462 1462 def getbundle(self, source, heads=None, common=None):
1463 1463 """Like changegroupsubset, but returns the set difference between the
1464 1464 ancestors of heads and the ancestors common.
1465 1465
1466 1466 If heads is None, use the local heads. If common is None, use [nullid].
1467 1467
1468 1468 The nodes in common might not all be known locally due to the way the
1469 1469 current discovery protocol works.
1470 1470 """
1471 1471 cl = self.changelog
1472 1472 if common:
1473 1473 nm = cl.nodemap
1474 1474 common = [n for n in common if n in nm]
1475 1475 else:
1476 1476 common = [nullid]
1477 1477 if not heads:
1478 1478 heads = cl.heads()
1479 1479 common, missing = cl.findcommonmissing(common, heads)
1480 1480 if not missing:
1481 1481 return None
1482 1482 return self._changegroupsubset(common, missing, heads, source)
1483 1483
    def _changegroupsubset(self, commonrevs, csets, heads, source):
        """Bundle the changesets in csets (whose ancestors with linkrevs
        in commonrevs the recipient already has) and return an
        unbundle10 reader over the generated chunks."""

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        # shared mutable state for the lookup callback:
        # fstate = [current filename, {filenode -> owning clnode}]
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            for n in missing:
                if revlog.linkrev(revlog.rev(n)) not in commonrevs:
                    yield n

        def lookup(revlog, x):
            # map node x back to its owning changeset, collecting the
            # manifests and filenodes we will need on the way
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})
                first = True

                for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
                                              bundler):
                    if first:
                        # emit the file header lazily: skip files whose
                        # group contains nothing but its closing chunk
                        if chunk == bundler.close():
                            break
                        count[0] += 1
                        yield bundler.fileheader(fname)
                        first = False
                    yield chunk
            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1578 1578
1579 1579 def changegroup(self, basenodes, source):
1580 1580 # to avoid a race we use changegroupsubset() (issue1320)
1581 1581 return self.changegroupsubset(basenodes, self.heads(), source)
1582 1582
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        # single-element lists act as mutable cells shared with the
        # lookup/gengroup closures below (py2 has no 'nonlocal')
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            # yield the nodes of *log* whose linkrev falls in the outgoing set
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookup(revlog, x):
            # bundler callback: map a node of *revlog* to the changelog node
            # it should be linked to; also drives progress reporting and, for
            # changelog entries, collects the manifests/files to bundle
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            # changelog group first, then manifests, then one group per file
            for chunk in cl.group(nodes, bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                first = True
                for chunk in filerevlog.group(gennodelst(filerevlog), bundler):
                    if first:
                        # a leading close() chunk means the group is empty:
                        # skip the file header entirely in that case
                        if chunk == bundler.close():
                            break
                        count[0] += 1
                        yield bundler.fileheader(fname)
                        first = False
                    yield chunk
                yield bundler.close()
                self.ui.progress(_('bundling'), None)

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1666 1666
    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # map an incoming changeset node to the revision number it
            # will occupy once appended (used as the link mapper below)
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            # map a changelog node to its (already stored) revision number
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                # progress callback invoked by the unbundler per chunk
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            # efiles ends up as the number of distinct files touched by the
            # incoming changesets; used as the progress total further down
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = 'files'
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while 1:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                # tick off the file nodes we were told to expect
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            # anything still listed in needfiles must already exist locally,
            # otherwise the incoming data is incomplete
            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            # compute the head-count delta; heads closed via the 'close'
            # extra do not count towards the total
            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
1839 1839
    def stream_in(self, remote, requirements):
        """Populate this repo by copying raw store files streamed from remote.

        Wire format (line oriented): a numeric status line (0 = ok,
        1 = operation forbidden, 2 = remote locking failed), then a
        "<total_files> <total_bytes>" line, then per file a header of
        the file name, a NUL byte and the size, followed by that many
        bytes of file data.

        requirements is updated in place with this repo's non-format
        requirements, then applied and written back. Returns
        len(self.heads()) + 1.
        """
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                # avoid a division by zero in the rate computation below
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            # drop cached state; the store was rewritten underneath us
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
1898 1898
1899 1899 def clone(self, remote, heads=[], stream=False):
1900 1900 '''clone remote repository.
1901 1901
1902 1902 keyword arguments:
1903 1903 heads: list of revs to clone (forces use of pull)
1904 1904 stream: use streaming clone if possible'''
1905 1905
1906 1906 # now, all clients that can request uncompressed clones can
1907 1907 # read repo formats supported by all servers that can serve
1908 1908 # them.
1909 1909
1910 1910 # if revlog format changes, client will have to check version
1911 1911 # and format flags on "stream" capability, and use
1912 1912 # uncompressed only if compatible.
1913 1913
1914 1914 if stream and not heads:
1915 1915 # 'stream' means remote revlog format is revlogv1 only
1916 1916 if remote.capable('stream'):
1917 1917 return self.stream_in(remote, set(('revlogv1',)))
1918 1918 # otherwise, 'streamreqs' contains the remote revlog format
1919 1919 streamreqs = remote.capable('streamreqs')
1920 1920 if streamreqs:
1921 1921 streamreqs = set(streamreqs.split(','))
1922 1922 # if we support it, stream in and adjust our requirements
1923 1923 if not streamreqs - self.supportedformats:
1924 1924 return self.stream_in(remote, streamreqs)
1925 1925 return self.pull(remote, heads)
1926 1926
1927 1927 def pushkey(self, namespace, key, old, new):
1928 1928 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1929 1929 old=old, new=new)
1930 1930 ret = pushkey.push(self, namespace, key, old, new)
1931 1931 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1932 1932 ret=ret)
1933 1933 return ret
1934 1934
1935 1935 def listkeys(self, namespace):
1936 1936 self.hook('prelistkeys', throw=True, namespace=namespace)
1937 1937 values = pushkey.list(self, namespace)
1938 1938 self.hook('listkeys', namespace=namespace, values=values)
1939 1939 return values
1940 1940
1941 1941 def debugwireargs(self, one, two, three=None, four=None, five=None):
1942 1942 '''used to test argument passing over the wire'''
1943 1943 return "%s %s %s %s %s" % (one, two, three, four, five)
1944 1944
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callable performing the queued (src, dest) renames.

    *files* is an iterable of (src, dest) pairs; they are snapshotted
    as tuples immediately so later mutation of *files* has no effect.
    """
    pending = [tuple(entry) for entry in files]
    def renameall():
        for src, dest in pending:
            util.rename(src, dest)
    return renameall
1952 1952
def instance(ui, path, create):
    """Open (or create) the local repository at *path* (a local path/URL)."""
    return localrepository(ui, util.localpath(path), create)
1955 1955
def islocal(path):
    """Return True: repositories handled by this module are always local."""
    return True
@@ -1,561 +1,561 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, hex, bin
9 9 from i18n import _
10 10 import scmutil, util, filemerge, copies, subrepo
11 11 import errno, os, shutil
12 12
class mergestate(object):
    '''track 3-way merge state of individual files

    State is persisted in .hg/merge/state: the first line is the hex
    node of the local parent, each further line a NUL-separated record
    for one file. A pristine copy of each local file is stashed under
    .hg/merge/<sha1 of its path> so it can be re-merged later.
    '''
    def __init__(self, repo):
        self._repo = repo
        self._dirty = False
        self._read()
    def reset(self, node=None):
        """Drop all state (and the on-disk stash); optionally set the
        local parent node."""
        self._state = {}
        if node:
            self._local = node
        shutil.rmtree(self._repo.join("merge"), True)
        self._dirty = False
    def _read(self):
        """Load state from .hg/merge/state; a missing file simply means
        no merge is in progress."""
        self._state = {}
        try:
            f = self._repo.opener("merge/state")
            for i, l in enumerate(f):
                if i == 0:
                    # first line: hex node of the local parent
                    self._local = bin(l[:-1])
                else:
                    # "<file>\0<field>\0..." records, one per file
                    bits = l[:-1].split("\0")
                    self._state[bits[0]] = bits[1:]
            f.close()
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
        self._dirty = False
    def commit(self):
        """Write state to disk, but only if something changed."""
        if self._dirty:
            f = self._repo.opener("merge/state", "w")
            f.write(hex(self._local) + "\n")
            for d, v in self._state.iteritems():
                f.write("\0".join([d] + v) + "\n")
            f.close()
            self._dirty = False
    def add(self, fcl, fco, fca, fd, flags):
        """Register *fd* as unresolved; stash the local file's data under
        a hash of its path so resolve() can re-run the merge later."""
        hash = util.sha1(fcl.path()).hexdigest()
        self._repo.opener.write("merge/" + hash, fcl.data())
        self._state[fd] = ['u', hash, fcl.path(), fca.path(),
                           hex(fca.filenode()), fco.path(), flags]
        self._dirty = True
    def __contains__(self, dfile):
        return dfile in self._state
    def __getitem__(self, dfile):
        # returns the one-letter state: 'u' unresolved, 'r' resolved
        return self._state[dfile][0]
    def __iter__(self):
        l = self._state.keys()
        l.sort()
        for f in l:
            yield f
    def mark(self, dfile, state):
        """Set the one-letter resolution state for *dfile*."""
        self._state[dfile][0] = state
        self._dirty = True
    def resolve(self, dfile, wctx, octx):
        """Re-run the file merge for *dfile* from the stashed local copy.

        Returns the filemerge result: None means there was no real
        conflict, a false value marks the file resolved, anything else
        leaves it unresolved.
        """
        if self[dfile] == 'r':
            return 0
        state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
        # restore the pristine local version before merging
        f = self._repo.opener("merge/" + hash)
        self._repo.wwrite(dfile, f.read(), flags)
        f.close()
        fcd = wctx[dfile]
        fco = octx[ofile]
        fca = self._repo.filectx(afile, fileid=anode)
        r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
        if r is None:
            # no real conflict
            del self._state[dfile]
        elif not r:
            self.mark(dfile, 'r')
        return r
83 83
84 84 def _checkunknown(wctx, mctx):
85 85 "check for collisions between unknown files and files in mctx"
86 86 for f in wctx.unknown():
87 87 if f in mctx and mctx[f].cmp(wctx[f]):
88 88 raise util.Abort(_("untracked file in working directory differs"
89 89 " from file in requested revision: '%s'") % f)
90 90
91 91 def _checkcollision(mctx):
92 92 "check for case folding collisions in the destination context"
93 93 folded = {}
94 94 for fn in mctx:
95 95 fold = fn.lower()
96 96 if fold in folded:
97 97 raise util.Abort(_("case-folding collision between %s and %s")
98 98 % (fn, folded[fold]))
99 99 folded[fold] = fn
100 100
101 101 def _forgetremoved(wctx, mctx, branchmerge):
102 102 """
103 103 Forget removed files
104 104
105 105 If we're jumping between revisions (as opposed to merging), and if
106 106 neither the working directory nor the target rev has the file,
107 107 then we need to remove it from the dirstate, to prevent the
108 108 dirstate from listing the file when it is no longer in the
109 109 manifest.
110 110
111 111 If we're merging, and the other revision has removed a file
112 112 that is not present in the working directory, we need to mark it
113 113 as removed.
114 114 """
115 115
116 116 action = []
117 117 state = branchmerge and 'r' or 'f'
118 118 for f in wctx.deleted():
119 119 if f not in mctx:
120 120 action.append((f, state))
121 121
122 122 if not branchmerge:
123 123 for f in wctx.removed():
124 124 if f not in mctx:
125 125 action.append((f, "f"))
126 126
127 127 return action
128 128
def manifestmerge(repo, p1, p2, pa, overwrite, partial):
    """
    Merge p1 and p2 with ancestor pa and generate merge action list

    overwrite = whether we clobber working files
    partial = function to filter file lists

    Each action is a tuple starting with (filename, code); the codes
    emitted here are 'e' (update permissions), 'g' (get), 'm' (merge),
    'd' (directory rename), 'dr' (divergent renames), 'r' (remove),
    'a' (re-add) and 'f' (forget); extra elements depend on the code.
    """

    def fmerge(f, f2, fa):
        """merge flags"""
        a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2)
        if m == n: # flags agree
            return m # unchanged
        if m and n and not a: # flags set, don't agree, differ from parent
            r = repo.ui.promptchoice(
                _(" conflicting flags for %s\n"
                  "(n)one, e(x)ec or sym(l)ink?") % f,
                (_("&None"), _("E&xec"), _("Sym&link")), 0)
            if r == 1:
                return "x" # Exec
            if r == 2:
                return "l" # Symlink
            return ""
        if m and m != a: # changed from a to m
            return m
        if n and n != a: # changed from a to n
            return n
        return '' # flag was cleared

    def act(msg, m, f, *args):
        # log and record one merge action
        repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
        action.append((f, m) + args)

    action, copy = [], {}

    if overwrite:
        pa = p1
    elif pa == p2: # backwards
        pa = p1.p1()
    elif pa and repo.ui.configbool("merge", "followcopies", True):
        dirs = repo.ui.configbool("merge", "followdirs", True)
        copy, diverge = copies.copies(repo, p1, p2, pa, dirs)
        for of, fl in diverge.iteritems():
            act("divergent renames", "dr", of, fl)

    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(" overwrite %s partial %s\n" % (overwrite, bool(partial)))
    repo.ui.debug(" ancestor %s local %s remote %s\n" % (pa, p1, p2))

    m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
    copied = set(copy.values())

    if '.hgsubstate' in m1:
        # check whether sub state is modified
        for s in p1.substate:
            if p1.sub(s).dirty():
                m1['.hgsubstate'] += "+"
                break

    # Compare manifests
    for f, n in m1.iteritems():
        if partial and not partial(f):
            continue
        if f in m2:
            rflags = fmerge(f, f, f)
            a = ma.get(f, nullid)
            if n == m2[f] or m2[f] == a: # same or local newer
                # is file locally modified or flags need changing?
                # dirstate flags may need to be made current
                # NOTE: n[20:] is the marker suffix appended to the 20-byte
                # node in working-directory manifests
                if m1.flags(f) != rflags or n[20:]:
                    act("update permissions", "e", f, rflags)
            elif n == a: # remote newer
                act("remote is newer", "g", f, rflags)
            else: # both changed
                act("versions differ", "m", f, f, f, rflags, False)
        elif f in copied: # files we'll deal with on m2 side
            pass
        elif f in copy:
            f2 = copy[f]
            if f2 not in m2: # directory rename
                act("remote renamed directory to " + f2, "d",
                    f, None, f2, m1.flags(f))
            else: # case 2 A,B/B/B or case 4,21 A/B/B
                act("local copied/moved to " + f2, "m",
                    f, f2, f, fmerge(f, f2, f2), False)
        elif f in ma: # clean, a different, no remote
            if n != ma[f]:
                if repo.ui.promptchoice(
                    _(" local changed %s which remote deleted\n"
                      "use (c)hanged version or (d)elete?") % f,
                    (_("&Changed"), _("&Delete")), 0):
                    act("prompt delete", "r", f)
                else:
                    act("prompt keep", "a", f)
            elif n[20:] == "a": # added, no remote
                act("remote deleted", "f", f)
            elif n[20:] != "u":
                act("other deleted", "r", f)

    for f, n in m2.iteritems():
        if partial and not partial(f):
            continue
        if f in m1 or f in copied: # files already visited
            continue
        if f in copy:
            f2 = copy[f]
            if f2 not in m1: # directory rename
                act("local renamed directory to " + f2, "d",
                    None, f, f2, m2.flags(f))
            elif f2 in m2: # rename case 1, A/A,B/A
                act("remote copied to " + f, "m",
                    f2, f, f, fmerge(f2, f, f2), False)
            else: # case 3,20 A/B/A
                act("remote moved to " + f, "m",
                    f2, f, f, fmerge(f2, f, f2), True)
        elif f not in ma:
            act("remote created", "g", f, m2.flags(f))
        elif n != ma[f]:
            if repo.ui.promptchoice(
                _("remote changed %s which local deleted\n"
                  "use (c)hanged version or leave (d)eleted?") % f,
                (_("&Changed"), _("&Deleted")), 0) == 0:
                act("prompt recreating", "g", f, m2.flags(f))

    return action
254 254
def actionkey(a):
    """Sort key ordering remove ('r') actions before all other actions."""
    if a[1] == 'r':
        return -1, a
    return 0, a
257 257
def applyupdates(repo, action, wctx, mctx, actx, overwrite):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy
    actx is the context of the common ancestor

    Each action is a (filename, code, ...) tuple as produced by
    manifestmerge()/_forgetremoved(); removes are applied first (see
    actionkey).

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    updated, merged, removed, unresolved = 0, 0, 0, 0
    ms = mergestate(repo)
    ms.reset(wctx.p1().node())
    moves = []
    action.sort(key=actionkey)

    # prescan for merges
    u = repo.ui
    for a in action:
        f, m = a[:2]
        if m == 'm': # merge
            f2, fd, flags, move = a[2:]
            if f == '.hgsubstate': # merged internally
                continue
            # stash the local version via mergestate before touching it
            repo.ui.debug("preserving %s for resolve of %s\n" % (f, fd))
            fcl = wctx[f]
            fco = mctx[f2]
            if mctx == actx: # backwards, use working dir parent as ancestor
                if fcl.parents():
                    fca = fcl.p1()
                else:
                    fca = repo.filectx(f, fileid=nullrev)
            else:
                fca = fcl.ancestor(fco, actx)
            if not fca:
                fca = repo.filectx(f, fileid=nullrev)
            ms.add(fcl, fco, fca, fd, flags)
            if f != fd and move:
                moves.append(f)

    # remove renamed files after safely stored
    for f in moves:
        if os.path.lexists(repo.wjoin(f)):
            repo.ui.debug("removing %s\n" % f)
            os.unlink(repo.wjoin(f))

    audit_path = scmutil.pathauditor(repo.root)

    numupdates = len(action)
    for i, a in enumerate(action):
        f, m = a[:2]
        u.progress(_('updating'), i + 1, item=f, total=numupdates,
                   unit=_('files'))
        if f and f[0] == "/":
            continue
        if m == "r": # remove
            repo.ui.note(_("removing %s\n") % f)
            audit_path(f)
            if f == '.hgsubstate': # subrepo states need updating
                subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
            try:
                util.unlinkpath(repo.wjoin(f))
            except OSError, inst:
                # ENOENT just means the file is already gone
                if inst.errno != errno.ENOENT:
                    repo.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
            removed += 1
        elif m == "m": # merge
            if f == '.hgsubstate': # subrepo states need updating
                subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx), overwrite)
                continue
            f2, fd, flags, move = a[2:]
            r = ms.resolve(fd, wctx, mctx)
            if r is not None and r > 0:
                unresolved += 1
            else:
                # None means no real conflict, anything falsy a clean merge
                if r is None:
                    updated += 1
                else:
                    merged += 1
            util.setflags(repo.wjoin(fd), 'l' in flags, 'x' in flags)
            if (move and repo.dirstate.normalize(fd) != f
                and os.path.lexists(repo.wjoin(f))):
                repo.ui.debug("removing %s\n" % f)
                os.unlink(repo.wjoin(f))
        elif m == "g": # get
            flags = a[2]
            repo.ui.note(_("getting %s\n") % f)
            t = mctx.filectx(f).data()
            repo.wwrite(f, t, flags)
            t = None
            updated += 1
            if f == '.hgsubstate': # subrepo states need updating
                subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
        elif m == "d": # directory rename
            f2, fd, flags = a[2:]
            if f:
                repo.ui.note(_("moving %s to %s\n") % (f, fd))
                t = wctx.filectx(f).data()
                repo.wwrite(fd, t, flags)
                util.unlinkpath(repo.wjoin(f))
            if f2:
                repo.ui.note(_("getting %s to %s\n") % (f2, fd))
                t = mctx.filectx(f2).data()
                repo.wwrite(fd, t, flags)
            updated += 1
        elif m == "dr": # divergent renames
            fl = a[2]
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)
        elif m == "e": # exec
            flags = a[2]
            util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
    ms.commit()
    u.progress(_('updating'), None, total=numupdates, unit=_('files'))

    return updated, merged, removed, unresolved
378 378
def recordupdates(repo, action, branchmerge):
    """record merge actions to the dirstate

    action is the (filename, code, ...) list already applied by
    applyupdates(); branchmerge selects merge semantics (two parents)
    over plain update semantics.
    """

    for a in action:
        f, m = a[:2]
        if m == "r": # remove
            if branchmerge:
                repo.dirstate.remove(f)
            else:
                repo.dirstate.forget(f)
        elif m == "a": # re-add
            if not branchmerge:
                repo.dirstate.add(f)
        elif m == "f": # forget
            repo.dirstate.forget(f)
        elif m == "e": # exec change
            repo.dirstate.normallookup(f)
        elif m == "g": # get
            if branchmerge:
                repo.dirstate.otherparent(f)
            else:
                repo.dirstate.normal(f)
        elif m == "m": # merge
            f2, fd, flag, move = a[2:]
            if branchmerge:
                # We've done a branch merge, mark this file as merged
                # so that we properly record the merger later
                repo.dirstate.merge(fd)
                if f != f2: # copy/rename
                    if move:
                        repo.dirstate.remove(f)
                    if f != fd:
                        repo.dirstate.copy(f, fd)
                    else:
                        repo.dirstate.copy(f2, fd)
            else:
                # We've update-merged a locally modified file, so
                # we set the dirstate to emulate a normal checkout
                # of that file some time in the past. Thus our
                # merge will appear as a normal local file
                # modification.
                if f2 == fd: # file not locally copied/moved
                    repo.dirstate.normallookup(fd)
                if move:
                    repo.dirstate.forget(f)
        elif m == "d": # directory rename
            f2, fd, flag = a[2:]
            if not f2 and f not in repo.dirstate:
                # untracked file moved
                continue
            if branchmerge:
                repo.dirstate.add(fd)
                if f:
                    repo.dirstate.remove(f)
                    repo.dirstate.copy(f, fd)
                if f2:
                    repo.dirstate.copy(f2, fd)
            else:
                repo.dirstate.normal(fd)
                if f:
                    repo.dirstate.forget(f)
440 440
def update(repo, node, branchmerge, force, partial, ancestor=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to, or None if unspecified
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    partial = a function to filter file lists (dirstate not updated)

    The table below shows all the behaviors of the update command
    given the -c and -C or no options, whether the working directory
    is dirty, whether a revision is specified, and the relationship of
    the parent rev to the target rev (linear, on the same named
    branch, or on another named branch).

    This logic is tested by test-update-branches.t.

    -c  -C  dirty  rev  |  linear  same  cross
     n   n    n     n   |    ok    (1)     x
     n   n    n     y   |    ok    ok     ok
     n   n    y     *   |   merge  (2)    (2)
     n   y    *     *   |    ---  discard ---
     y   n    y     *   |    ---   (3)    ---
     y   n    n     *   |    ---   ok     ---
     y   y    *     *   |    ---   (4)    ---

    x = can't happen
    * = don't-care
    1 = abort: crosses branches (use 'hg merge' or 'hg update -c')
    2 = abort: crosses branches (use 'hg merge' to merge or
        use 'hg update -C' to discard changes)
    3 = abort: uncommitted local changes
    4 = incompatible options (checked in commands.py)

    Return the same tuple as applyupdates().
    """

    onode = node
    wlock = repo.wlock()
    try:
        wc = repo[None]
        if node is None:
            # tip of current branch
            try:
                node = repo.branchtags()[wc.branch()]
            except KeyError:
                if wc.branch() == "default": # no default branch!
                    node = repo.lookup("tip") # update to tip
                else:
                    raise util.Abort(_("branch %s not found") % wc.branch())
        overwrite = force and not branchmerge
        pl = wc.parents()
        p1, p2 = pl[0], repo[node]
        if ancestor:
            pa = repo[ancestor]
        else:
            pa = p1.ancestor(p2)

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite and len(pl) > 1:
            raise util.Abort(_("outstanding uncommitted merges"))
        if branchmerge:
            if pa == p2:
                raise util.Abort(_("merging with a working directory ancestor"
                                   " has no effect"))
            elif pa == p1:
                if p1.branch() == p2.branch():
                    raise util.Abort(_("nothing to merge (use 'hg update'"
                                       " or check 'hg heads')"))
            if not force and (wc.files() or wc.deleted()):
                raise util.Abort(_("outstanding uncommitted changes "
                                   "(use 'hg status' to list changes)"))
            for s in wc.substate:
                if wc.sub(s).dirty():
                    raise util.Abort(_("outstanding uncommitted changes in "
                                       "subrepository '%s'") % s)

        elif not overwrite:
            if pa == p1 or pa == p2: # linear
                pass # all good
            elif wc.files() or wc.deleted():
                raise util.Abort(_("crosses branches (merge branches or use"
                                   " --clean to discard changes)"))
            elif onode is None:
                raise util.Abort(_("crosses branches (merge branches or use"
                                   " --check to force update)"))
            else:
                # Allow jumping branches if clean and specific rev given
                overwrite = True

        ### calculate phase
        action = []
        wc.status(unknown=True) # prime cache
        if not force:
            _checkunknown(wc, p2)
        if not util.checkcase(repo.path):
            # case-insensitive filesystem: ensure no folding collisions
            _checkcollision(p2)
        action += _forgetremoved(wc, p2, branchmerge)
        action += manifestmerge(repo, wc, p2, pa, overwrite, partial)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)

        stats = applyupdates(repo, action, wc, p2, pa, overwrite)

        if not partial:
            repo.dirstate.setparents(fp1, fp2)
            recordupdates(repo, action, branchmerge)
            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())
    finally:
        wlock.release()

    if not partial:
        # stats[3] is the unresolved-files count from applyupdates()
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats
@@ -1,331 +1,331 b''
1 1 # posix.py - Posix utility function implementations for Mercurial
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import os, sys, errno, stat, getpass, pwd, grp, tempfile
10 10
11 11 posixfile = open
12 12 nulldev = '/dev/null'
13 13 normpath = os.path.normpath
14 14 samestat = os.path.samestat
15 15 os_link = os.link
16 16 unlink = os.unlink
17 17 rename = os.rename
18 18 expandglobs = False
19 19
20 20 umask = os.umask(0)
21 21 os.umask(umask)
22 22
23 23 def openhardlinks():
24 24 '''return true if it is safe to hold open file handles to hardlinks'''
25 25 return True
26 26
27 27 def nlinks(name):
28 28 '''return number of hardlinks for the given file'''
29 29 return os.lstat(name).st_nlink
30 30
31 31 def parsepatchoutput(output_line):
32 32 """parses the output produced by patch and returns the filename"""
33 33 pf = output_line[14:]
34 34 if os.sys.platform == 'OpenVMS':
35 35 if pf[0] == '`':
36 36 pf = pf[1:-1] # Remove the quotes
37 37 else:
38 38 if pf.startswith("'") and pf.endswith("'") and " " in pf:
39 39 pf = pf[1:-1] # Remove the quotes
40 40 return pf
41 41
42 42 def sshargs(sshcmd, host, user, port):
43 43 '''Build argument list for ssh'''
44 44 args = user and ("%s@%s" % (user, host)) or host
45 45 return port and ("%s -p %s" % (args, port)) or args
46 46
47 47 def is_exec(f):
48 48 """check whether a file is executable"""
49 49 return (os.lstat(f).st_mode & 0100 != 0)
50 50
51 def set_flags(f, l, x):
51 def setflags(f, l, x):
52 52 s = os.lstat(f).st_mode
53 53 if l:
54 54 if not stat.S_ISLNK(s):
55 55 # switch file to link
56 56 fp = open(f)
57 57 data = fp.read()
58 58 fp.close()
59 59 os.unlink(f)
60 60 try:
61 61 os.symlink(data, f)
62 62 except OSError:
63 63 # failed to make a link, rewrite file
64 64 fp = open(f, "w")
65 65 fp.write(data)
66 66 fp.close()
67 67 # no chmod needed at this point
68 68 return
69 69 if stat.S_ISLNK(s):
70 70 # switch link to file
71 71 data = os.readlink(f)
72 72 os.unlink(f)
73 73 fp = open(f, "w")
74 74 fp.write(data)
75 75 fp.close()
76 76 s = 0666 & ~umask # avoid restatting for chmod
77 77
78 78 sx = s & 0100
79 79 if x and not sx:
80 80 # Turn on +x for every +r bit when making a file executable
81 81 # and obey umask.
82 82 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
83 83 elif not x and sx:
84 84 # Turn off all +x bits
85 85 os.chmod(f, s & 0666)
86 86
87 87 def checkexec(path):
88 88 """
89 89 Check whether the given path is on a filesystem with UNIX-like exec flags
90 90
91 91 Requires a directory (like /foo/.hg)
92 92 """
93 93
94 94 # VFAT on some Linux versions can flip mode but it doesn't persist
95 95 # a FS remount. Frequently we can detect it if files are created
96 96 # with exec bit on.
97 97
98 98 try:
99 99 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
100 100 fh, fn = tempfile.mkstemp(dir=path, prefix='hg-checkexec-')
101 101 try:
102 102 os.close(fh)
103 103 m = os.stat(fn).st_mode & 0777
104 104 new_file_has_exec = m & EXECFLAGS
105 105 os.chmod(fn, m ^ EXECFLAGS)
106 106 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
107 107 finally:
108 108 os.unlink(fn)
109 109 except (IOError, OSError):
110 110 # we don't care, the user probably won't be able to commit anyway
111 111 return False
112 112 return not (new_file_has_exec or exec_flags_cannot_flip)
113 113
114 114 def checklink(path):
115 115 """check whether the given path is on a symlink-capable filesystem"""
116 116 # mktemp is not racy because symlink creation will fail if the
117 117 # file already exists
118 118 name = tempfile.mktemp(dir=path, prefix='hg-checklink-')
119 119 try:
120 120 os.symlink(".", name)
121 121 os.unlink(name)
122 122 return True
123 123 except (OSError, AttributeError):
124 124 return False
125 125
126 126 def checkosfilename(path):
127 127 '''Check that the base-relative path is a valid filename on this platform.
128 128 Returns None if the path is ok, or a UI string describing the problem.'''
129 129 pass # on posix platforms, every path is ok
130 130
131 131 def set_binary(fd):
132 132 pass
133 133
134 134 def pconvert(path):
135 135 return path
136 136
137 137 def localpath(path):
138 138 return path
139 139
140 140 def samefile(fpath1, fpath2):
141 141 """Returns whether path1 and path2 refer to the same file. This is only
142 142 guaranteed to work for files, not directories."""
143 143 return os.path.samefile(fpath1, fpath2)
144 144
145 145 def samedevice(fpath1, fpath2):
146 146 """Returns whether fpath1 and fpath2 are on the same device. This is only
147 147 guaranteed to work for files, not directories."""
148 148 st1 = os.lstat(fpath1)
149 149 st2 = os.lstat(fpath2)
150 150 return st1.st_dev == st2.st_dev
151 151
152 152 if sys.platform == 'darwin':
153 153 import fcntl # only needed on darwin, missing on jython
154 154 def realpath(path):
155 155 '''
156 156 Returns the true, canonical file system path equivalent to the given
157 157 path.
158 158
159 159 Equivalent means, in this case, resulting in the same, unique
160 160 file system link to the path. Every file system entry, whether a file,
161 161 directory, hard link or symbolic link or special, will have a single
162 162 path preferred by the system, but may allow multiple, differing path
163 163 lookups to point to it.
164 164
165 165 Most regular UNIX file systems only allow a file system entry to be
166 166 looked up by its distinct path. Obviously, this does not apply to case
167 167 insensitive file systems, whether case preserving or not. The most
168 168 complex issue to deal with is file systems transparently reencoding the
169 169 path, such as the non-standard Unicode normalisation required for HFS+
170 170 and HFSX.
171 171 '''
172 172 # Constants copied from /usr/include/sys/fcntl.h
173 173 F_GETPATH = 50
174 174 O_SYMLINK = 0x200000
175 175
176 176 try:
177 177 fd = os.open(path, O_SYMLINK)
178 178 except OSError, err:
179 179 if err.errno == errno.ENOENT:
180 180 return path
181 181 raise
182 182
183 183 try:
184 184 return fcntl.fcntl(fd, F_GETPATH, '\0' * 1024).rstrip('\0')
185 185 finally:
186 186 os.close(fd)
187 187 else:
188 188 # Fallback to the likely inadequate Python builtin function.
189 189 realpath = os.path.realpath
190 190
191 191 def shellquote(s):
192 192 if os.sys.platform == 'OpenVMS':
193 193 return '"%s"' % s
194 194 else:
195 195 return "'%s'" % s.replace("'", "'\\''")
196 196
197 197 def quotecommand(cmd):
198 198 return cmd
199 199
200 200 def popen(command, mode='r'):
201 201 return os.popen(command, mode)
202 202
203 203 def testpid(pid):
204 204 '''return False if pid dead, True if running or not sure'''
205 205 if os.sys.platform == 'OpenVMS':
206 206 return True
207 207 try:
208 208 os.kill(pid, 0)
209 209 return True
210 210 except OSError, inst:
211 211 return inst.errno != errno.ESRCH
212 212
213 213 def explain_exit(code):
214 214 """return a 2-tuple (desc, code) describing a subprocess status
215 215 (codes from kill are negative - not os.system/wait encoding)"""
216 216 if code >= 0:
217 217 return _("exited with status %d") % code, code
218 218 return _("killed by signal %d") % -code, -code
219 219
220 220 def isowner(st):
221 221 """Return True if the stat object st is from the current user."""
222 222 return st.st_uid == os.getuid()
223 223
224 224 def find_exe(command):
225 225 '''Find executable for command searching like which does.
226 226 If command is a basename then PATH is searched for command.
227 227 PATH isn't searched if command is an absolute or relative path.
228 228 If command isn't found None is returned.'''
229 229 if sys.platform == 'OpenVMS':
230 230 return command
231 231
232 232 def findexisting(executable):
233 233 'Will return executable if existing file'
234 234 if os.path.exists(executable):
235 235 return executable
236 236 return None
237 237
238 238 if os.sep in command:
239 239 return findexisting(command)
240 240
241 241 for path in os.environ.get('PATH', '').split(os.pathsep):
242 242 executable = findexisting(os.path.join(path, command))
243 243 if executable is not None:
244 244 return executable
245 245 return None
246 246
247 247 def set_signal_handler():
248 248 pass
249 249
250 250 def statfiles(files):
251 251 'Stat each file in files and yield stat or None if file does not exist.'
252 252 lstat = os.lstat
253 253 for nf in files:
254 254 try:
255 255 st = lstat(nf)
256 256 except OSError, err:
257 257 if err.errno not in (errno.ENOENT, errno.ENOTDIR):
258 258 raise
259 259 st = None
260 260 yield st
261 261
262 262 def getuser():
263 263 '''return name of current user'''
264 264 return getpass.getuser()
265 265
266 266 def expand_glob(pats):
267 267 '''On Windows, expand the implicit globs in a list of patterns'''
268 268 return list(pats)
269 269
270 270 def username(uid=None):
271 271 """Return the name of the user with the given uid.
272 272
273 273 If uid is None, return the name of the current user."""
274 274
275 275 if uid is None:
276 276 uid = os.getuid()
277 277 try:
278 278 return pwd.getpwuid(uid)[0]
279 279 except KeyError:
280 280 return str(uid)
281 281
282 282 def groupname(gid=None):
283 283 """Return the name of the group with the given gid.
284 284
285 285 If gid is None, return the name of the current group."""
286 286
287 287 if gid is None:
288 288 gid = os.getgid()
289 289 try:
290 290 return grp.getgrgid(gid)[0]
291 291 except KeyError:
292 292 return str(gid)
293 293
294 294 def groupmembers(name):
295 295 """Return the list of members of the group with the given
296 296 name, KeyError if the group does not exist.
297 297 """
298 298 return list(grp.getgrnam(name).gr_mem)
299 299
300 300 def spawndetached(args):
301 301 return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
302 302 args[0], args)
303 303
304 304 def gethgcmd():
305 305 return sys.argv[:1]
306 306
307 307 def termwidth():
308 308 try:
309 309 import termios, array, fcntl
310 310 for dev in (sys.stderr, sys.stdout, sys.stdin):
311 311 try:
312 312 try:
313 313 fd = dev.fileno()
314 314 except AttributeError:
315 315 continue
316 316 if not os.isatty(fd):
317 317 continue
318 318 arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
319 319 width = array.array('h', arri)[1]
320 320 if width > 0:
321 321 return width
322 322 except ValueError:
323 323 pass
324 324 except IOError, e:
325 325 if e[0] == errno.EINVAL:
326 326 pass
327 327 else:
328 328 raise
329 329 except ImportError:
330 330 pass
331 331 return 80
@@ -1,286 +1,286 b''
1 1 # windows.py - Windows utility function implementations for Mercurial
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import osutil
10 10 import errno, msvcrt, os, re, sys
11 11
12 12 nulldev = 'NUL:'
13 13 umask = 002
14 14
15 15 # wrap osutil.posixfile to provide friendlier exceptions
16 16 def posixfile(name, mode='r', buffering=-1):
17 17 try:
18 18 return osutil.posixfile(name, mode, buffering)
19 19 except WindowsError, err:
20 20 raise IOError(err.errno, '%s: %s' % (name, err.strerror))
21 21 posixfile.__doc__ = osutil.posixfile.__doc__
22 22
23 23 class winstdout(object):
24 24 '''stdout on windows misbehaves if sent through a pipe'''
25 25
26 26 def __init__(self, fp):
27 27 self.fp = fp
28 28
29 29 def __getattr__(self, key):
30 30 return getattr(self.fp, key)
31 31
32 32 def close(self):
33 33 try:
34 34 self.fp.close()
35 35 except IOError:
36 36 pass
37 37
38 38 def write(self, s):
39 39 try:
40 40 # This is workaround for "Not enough space" error on
41 41 # writing large size of data to console.
42 42 limit = 16000
43 43 l = len(s)
44 44 start = 0
45 45 self.softspace = 0
46 46 while start < l:
47 47 end = start + limit
48 48 self.fp.write(s[start:end])
49 49 start = end
50 50 except IOError, inst:
51 51 if inst.errno != 0:
52 52 raise
53 53 self.close()
54 54 raise IOError(errno.EPIPE, 'Broken pipe')
55 55
56 56 def flush(self):
57 57 try:
58 58 return self.fp.flush()
59 59 except IOError, inst:
60 60 if inst.errno != errno.EINVAL:
61 61 raise
62 62 self.close()
63 63 raise IOError(errno.EPIPE, 'Broken pipe')
64 64
65 65 sys.stdout = winstdout(sys.stdout)
66 66
67 67 def _is_win_9x():
68 68 '''return true if run on windows 95, 98 or me.'''
69 69 try:
70 70 return sys.getwindowsversion()[3] == 1
71 71 except AttributeError:
72 72 return 'command' in os.environ.get('comspec', '')
73 73
74 74 def openhardlinks():
75 75 return not _is_win_9x()
76 76
77 77 def parsepatchoutput(output_line):
78 78 """parses the output produced by patch and returns the filename"""
79 79 pf = output_line[14:]
80 80 if pf[0] == '`':
81 81 pf = pf[1:-1] # Remove the quotes
82 82 return pf
83 83
84 84 def sshargs(sshcmd, host, user, port):
85 85 '''Build argument list for ssh or Plink'''
86 86 pflag = 'plink' in sshcmd.lower() and '-P' or '-p'
87 87 args = user and ("%s@%s" % (user, host)) or host
88 88 return port and ("%s %s %s" % (args, pflag, port)) or args
89 89
90 def set_flags(f, l, x):
90 def setflags(f, l, x):
91 91 pass
92 92
93 93 def checkexec(path):
94 94 return False
95 95
96 96 def checklink(path):
97 97 return False
98 98
99 99 def set_binary(fd):
100 100 # When run without console, pipes may expose invalid
101 101 # fileno(), usually set to -1.
102 102 if hasattr(fd, 'fileno') and fd.fileno() >= 0:
103 103 msvcrt.setmode(fd.fileno(), os.O_BINARY)
104 104
105 105 def pconvert(path):
106 106 return '/'.join(path.split(os.sep))
107 107
108 108 def localpath(path):
109 109 return path.replace('/', '\\')
110 110
111 111 def normpath(path):
112 112 return pconvert(os.path.normpath(path))
113 113
114 114 def realpath(path):
115 115 '''
116 116 Returns the true, canonical file system path equivalent to the given
117 117 path.
118 118 '''
119 119 # TODO: There may be a more clever way to do this that also handles other,
120 120 # less common file systems.
121 121 return os.path.normpath(os.path.normcase(os.path.realpath(path)))
122 122
123 123 def samestat(s1, s2):
124 124 return False
125 125
126 126 # A sequence of backslashes is special iff it precedes a double quote:
127 127 # - if there's an even number of backslashes, the double quote is not
128 128 # quoted (i.e. it ends the quoted region)
129 129 # - if there's an odd number of backslashes, the double quote is quoted
130 130 # - in both cases, every pair of backslashes is unquoted into a single
131 131 # backslash
132 132 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
133 133 # So, to quote a string, we must surround it in double quotes, double
134 134 # the number of backslashes that preceed double quotes and add another
135 135 # backslash before every double quote (being careful with the double
136 136 # quote we've appended to the end)
137 137 _quotere = None
138 138 def shellquote(s):
139 139 global _quotere
140 140 if _quotere is None:
141 141 _quotere = re.compile(r'(\\*)("|\\$)')
142 142 return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
143 143
144 144 def quotecommand(cmd):
145 145 """Build a command string suitable for os.popen* calls."""
146 146 if sys.version_info < (2, 7, 1):
147 147 # Python versions since 2.7.1 do this extra quoting themselves
148 148 return '"' + cmd + '"'
149 149 return cmd
150 150
151 151 def popen(command, mode='r'):
152 152 # Work around "popen spawned process may not write to stdout
153 153 # under windows"
154 154 # http://bugs.python.org/issue1366
155 155 command += " 2> %s" % nulldev
156 156 return os.popen(quotecommand(command), mode)
157 157
158 158 def explain_exit(code):
159 159 return _("exited with status %d") % code, code
160 160
161 161 # if you change this stub into a real check, please try to implement the
162 162 # username and groupname functions above, too.
163 163 def isowner(st):
164 164 return True
165 165
166 166 def find_exe(command):
167 167 '''Find executable for command searching like cmd.exe does.
168 168 If command is a basename then PATH is searched for command.
169 169 PATH isn't searched if command is an absolute or relative path.
170 170 An extension from PATHEXT is found and added if not present.
171 171 If command isn't found None is returned.'''
172 172 pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
173 173 pathexts = [ext for ext in pathext.lower().split(os.pathsep)]
174 174 if os.path.splitext(command)[1].lower() in pathexts:
175 175 pathexts = ['']
176 176
177 177 def findexisting(pathcommand):
178 178 'Will append extension (if needed) and return existing file'
179 179 for ext in pathexts:
180 180 executable = pathcommand + ext
181 181 if os.path.exists(executable):
182 182 return executable
183 183 return None
184 184
185 185 if os.sep in command:
186 186 return findexisting(command)
187 187
188 188 for path in os.environ.get('PATH', '').split(os.pathsep):
189 189 executable = findexisting(os.path.join(path, command))
190 190 if executable is not None:
191 191 return executable
192 192 return findexisting(os.path.expanduser(os.path.expandvars(command)))
193 193
194 194 def statfiles(files):
195 195 '''Stat each file in files and yield stat or None if file does not exist.
196 196 Cluster and cache stat per directory to minimize number of OS stat calls.'''
197 197 ncase = os.path.normcase
198 198 dircache = {} # dirname -> filename -> status | None if file does not exist
199 199 for nf in files:
200 200 nf = ncase(nf)
201 201 dir, base = os.path.split(nf)
202 202 if not dir:
203 203 dir = '.'
204 204 cache = dircache.get(dir, None)
205 205 if cache is None:
206 206 try:
207 207 dmap = dict([(ncase(n), s)
208 208 for n, k, s in osutil.listdir(dir, True)])
209 209 except OSError, err:
210 210 # handle directory not found in Python version prior to 2.5
211 211 # Python <= 2.4 returns native Windows code 3 in errno
212 212 # Python >= 2.5 returns ENOENT and adds winerror field
213 213 # EINVAL is raised if dir is not a directory.
214 214 if err.errno not in (3, errno.ENOENT, errno.EINVAL,
215 215 errno.ENOTDIR):
216 216 raise
217 217 dmap = {}
218 218 cache = dircache.setdefault(dir, dmap)
219 219 yield cache.get(base, None)
220 220
221 221 def username(uid=None):
222 222 """Return the name of the user with the given uid.
223 223
224 224 If uid is None, return the name of the current user."""
225 225 return None
226 226
227 227 def groupname(gid=None):
228 228 """Return the name of the group with the given gid.
229 229
230 230 If gid is None, return the name of the current group."""
231 231 return None
232 232
233 233 def _removedirs(name):
234 234 """special version of os.removedirs that does not remove symlinked
235 235 directories or junction points if they actually contain files"""
236 236 if osutil.listdir(name):
237 237 return
238 238 os.rmdir(name)
239 239 head, tail = os.path.split(name)
240 240 if not tail:
241 241 head, tail = os.path.split(head)
242 242 while head and tail:
243 243 try:
244 244 if osutil.listdir(head):
245 245 return
246 246 os.rmdir(head)
247 247 except (ValueError, OSError):
248 248 break
249 249 head, tail = os.path.split(head)
250 250
251 251 def unlinkpath(f):
252 252 """unlink and remove the directory if it is empty"""
253 253 unlink(f)
254 254 # try removing directories that might now be empty
255 255 try:
256 256 _removedirs(os.path.dirname(f))
257 257 except OSError:
258 258 pass
259 259
260 260 def rename(src, dst):
261 261 '''atomically rename file src to dst, replacing dst if it exists'''
262 262 try:
263 263 os.rename(src, dst)
264 264 except OSError, e:
265 265 if e.errno != errno.EEXIST:
266 266 raise
267 267 unlink(dst)
268 268 os.rename(src, dst)
269 269
270 270 def gethgcmd():
271 271 return [sys.executable] + sys.argv[:1]
272 272
273 273 def termwidth():
274 274 # cmd.exe does not handle CR like a unix console, the CR is
275 275 # counted in the line length. On 80 columns consoles, if 80
276 276 # characters are written, the following CR won't apply on the
277 277 # current line but on the new one. Keep room for it.
278 278 return 79
279 279
280 280 def groupmembers(name):
281 281 # Don't support groups on Windows for now
282 282 raise KeyError()
283 283
284 284 from win32 import *
285 285
286 286 expandglobs = True
General Comments 0
You need to be logged in to leave comments. Login now