##// END OF EJS Templates
merge: handle directory renames...
Matt Mackall -
r3733:9e67fecb default
parent child Browse files
Show More
@@ -0,0 +1,32 b''
1 #!/bin/sh
2
3 mkdir t
4 cd t
5 hg init
6
7 mkdir a
8 echo foo > a/a
9 echo bar > a/b
10
11 hg add a
12 hg ci -m "0" -d "0 0"
13
14 hg co -C 0
15 hg mv a b
16 hg ci -m "1 mv a/ b/" -d "0 0"
17
18 hg co -C 0
19 echo baz > a/c
20 hg add a/c
21 hg ci -m "2 add a/c" -d "0 0"
22
23 hg merge --debug 1
24 ls a/ b/
25 hg st -C
26 hg ci -m "3 merge 2+1" -d "0 0"
27
28 hg co -C 1
29 hg merge --debug 2
30 ls a/ b/
31 hg st -C
32 hg ci -m "4 merge 1+2" -d "0 0"
@@ -1,1897 +1,1899 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ('lookup', 'changegroupsubset')
19 19
    def __del__(self):
        # Drop the reference to any pending transaction so its own
        # destructor (and journal cleanup) can run when the repo dies.
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or with create=1, initialize) the repository at path.

        If path is None, search upward from the current directory for
        a .hg directory.  Raises repo.RepoError if no repository is
        found, or if create conflicts with an existing repository.
        """
        repo.repository.__init__(self)
        if not path:
            # walk up the directory tree looking for a .hg directory
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding one
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)

        self.root = os.path.realpath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        # opener/sopener serve .hg, wopener serves the working directory
        self.opener = util.opener(self.path)
        self.sopener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        # per-repository configuration is optional
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass

        # determine the revlog format version and flags from the config
        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.sopener, v)
        self.changelog = changelog.changelog(self.sopener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches; None means "not loaded yet"
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89 89
    def url(self):
        """Return the URL of this (local) repository."""
        return 'file:' + self.root
92 92
    def hook(self, name, throw=False, **args):
        """Run all configured hooks matching name.

        Python hooks ("python:mod.func") are imported and called;
        other hooks are run as shell commands with HG_* environment
        variables built from args.  If throw is true, a failing hook
        raises util.Abort; otherwise failures only produce warnings.
        Returns the combined (or-ed) failure status of all hooks run.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            # walk the dotted path to the hook callable
            try:
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # shell hook: args are exported as HG_* environment variables
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # hooks are selected by the part of the key before the first "."
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
173 173
174 174 tag_disallowed = ':\r\n'
175 175
    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # reject tag names that would corrupt the tags file format
        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags are appended to .hg/localtags, never committed
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # refuse to clobber uncommitted user edits to .hgtags
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
        if self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)
216 216
217 217 def tags(self):
218 218 '''return a mapping of tag to node'''
219 219 if not self.tagscache:
220 220 self.tagscache = {}
221 221
222 222 def parsetag(line, context):
223 223 if not line:
224 224 return
225 225 s = l.split(" ", 1)
226 226 if len(s) != 2:
227 227 self.ui.warn(_("%s: cannot parse entry\n") % context)
228 228 return
229 229 node, key = s
230 230 key = key.strip()
231 231 try:
232 232 bin_n = bin(node)
233 233 except TypeError:
234 234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
235 235 (context, node))
236 236 return
237 237 if bin_n not in self.changelog.nodemap:
238 238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
239 239 (context, key))
240 240 return
241 241 self.tagscache[key] = bin_n
242 242
243 243 # read the tags file from each head, ending with the tip,
244 244 # and add each tag found to the map, with "newer" ones
245 245 # taking precedence
246 246 f = None
247 247 for rev, node, fnode in self._hgtagsnodes():
248 248 f = (f and f.filectx(fnode) or
249 249 self.filectx('.hgtags', fileid=fnode))
250 250 count = 0
251 251 for l in f.data().splitlines():
252 252 count += 1
253 253 parsetag(l, _("%s, line %d") % (str(f), count))
254 254
255 255 try:
256 256 f = self.opener("localtags")
257 257 count = 0
258 258 for l in f:
259 259 count += 1
260 260 parsetag(l, _("localtags, line %d") % count)
261 261 except IOError:
262 262 pass
263 263
264 264 self.tagscache['tip'] = self.changelog.tip()
265 265
266 266 return self.tagscache
267 267
    def _hgtagsnodes(self):
        """Return (rev, node, fnode) for each head's .hgtags filenode.

        Heads are visited in ascending revision order; when several
        heads share the same .hgtags filenode, only the last (newest)
        occurrence is kept.
        """
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except repo.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # duplicate filenode seen earlier: drop the older entry
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
285 285
286 286 def tagslist(self):
287 287 '''return a list of tags ordered by revision'''
288 288 l = []
289 289 for t, n in self.tags().items():
290 290 try:
291 291 r = self.changelog.rev(n)
292 292 except:
293 293 r = -2 # sort to the beginning of the list if unknown
294 294 l.append((r, t, n))
295 295 l.sort()
296 296 return [(t, n) for r, t, n in l]
297 297
298 298 def nodetags(self, node):
299 299 '''return the tags associated with a node'''
300 300 if not self.nodetagscache:
301 301 self.nodetagscache = {}
302 302 for t, n in self.tags().items():
303 303 self.nodetagscache.setdefault(n, []).append(t)
304 304 return self.nodetagscache.get(node, [])
305 305
    def branchtags(self):
        """Return a mapping of branch name to tip-most node, cached.

        The on-disk cache is read first; if it is stale, the missing
        revision range is scanned and the cache rewritten.
        """
        if self.branchcache != None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx

        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            # cache is behind the current tip: scan the missing revs
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        self.branchcache = partial
        return self.branchcache
321 321
    def _readbranchcache(self):
        """Read .hg/branches.cache.

        Returns (partial, last, lrev): the branch map read so far plus
        the node/rev it is valid up to.  A missing or inconsistent
        cache yields an empty map with (nullid, nullrev).
        """
        partial = {}
        try:
            f = self.opener("branches.cache")
            lines = f.read().split('\n')
            f.close()
            # first line is "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if (lrev < self.changelog.count() and
                self.changelog.node(lrev) == last): # sanity check
                # remaining lines are "<node hex> <branch label>"
                for l in lines:
                    if not l: continue
                    node, label = l.rstrip().split(" ", 1)
                    partial[label] = bin(node)
            else: # invalidate the cache
                last, lrev = nullid, nullrev
        except IOError:
            last, lrev = nullid, nullrev
        return partial, last, lrev
341 341
342 342 def _writebranchcache(self, branches, tip, tiprev):
343 343 try:
344 344 f = self.opener("branches.cache", "w")
345 345 f.write("%s %s\n" % (hex(tip), tiprev))
346 346 for label, node in branches.iteritems():
347 347 f.write("%s %s\n" % (hex(node), label))
348 348 except IOError:
349 349 pass
350 350
351 351 def _updatebranchcache(self, partial, start, end):
352 352 for r in xrange(start, end):
353 353 c = self.changectx(r)
354 354 b = c.branch()
355 355 if b:
356 356 partial[b] = c.node()
357 357
    def lookup(self, key):
        """Resolve a revision key to a changelog node.

        Tries, in order: '.' (working dir parent), exact changelog
        match, tag name, branch name, then unambiguous prefix match.
        Raises repo.RepoError if nothing matches.
        """
        if key == '.':
            key = self.dirstate.parents()[0]
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)
374 374
    def dev(self):
        """Return the device number of the .hg directory."""
        return os.lstat(self.path).st_dev
377 377
    def local(self):
        """True: this repository class is always local."""
        return True
380 380
    def join(self, f):
        """Join f with the .hg directory path."""
        return os.path.join(self.path, f)
383 383
    def sjoin(self, f):
        """Join f with the store path (same as .hg here)."""
        return os.path.join(self.path, f)
386 386
    def wjoin(self, f):
        """Join f with the working directory root."""
        return os.path.join(self.root, f)
389 389
    def file(self, f):
        """Return the filelog for file f (leading '/' is stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f, self.revlogversion)
394 394
    def changectx(self, changeid=None):
        """Return a changectx for the given changeset id."""
        return context.changectx(self, changeid)
397 397
    def workingctx(self):
        """Return a context for the working directory."""
        return context.workingctx(self)
400 400
    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        # a null second parent means a single-parent changeset
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]
413 413
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
418 418
    def getcwd(self):
        """Return the current directory relative to the repo root."""
        return self.dirstate.getcwd()
421 421
    def wfile(self, f, mode='r'):
        """Open file f in the working directory with the given mode."""
        return self.wopener(f, mode)
424 424
    def wread(self, filename):
        """Read a working-directory file, applying [encode] filters.

        The compiled (matcher, command) list is built lazily on first
        use and cached in self.encodepats; only the first matching
        filter is applied.
        """
        if self.encodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data
442 442
    def wwrite(self, filename, data, fd=None):
        """Write data to a working-directory file, applying [decode]
        filters (first match wins).

        If fd is given, write to that open file object instead of
        opening filename.
        """
        if self.decodepats == None:
            # lazily build and cache the (matcher, command) list
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)
460 460
    def transaction(self):
        """Return a new transaction, or a nested handle on the one
        already running.

        The current dirstate is saved to journal.dirstate first so
        rollback can restore it.
        """
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
478 478
    def recover(self):
        """Roll back an interrupted transaction, if any.

        Returns True if a journal was found and rolled back.
        """
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            # reload in-memory state invalidated by the rollback
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
489 489
    def rollback(self, wlock=None):
        """Undo the last committed transaction, restoring the
        pre-transaction dirstate as well."""
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # reload both store and working-dir state
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
502 502
    def wreload(self):
        """Re-read the dirstate from disk."""
        self.dirstate.read()
505 505
    def reload(self):
        """Re-read the changelog and manifest and drop tag caches."""
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
511 511
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file at lockname.

        With wait false, a held lock raises lock.LockHeld immediately;
        otherwise we warn and retry with a timeout (ui.timeout config,
        default 600s).  acquirefn, if given, runs after acquisition.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
527 527
    def lock(self, wait=1):
        """Acquire the store lock; reloads store state on acquisition."""
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
531 531
    def wlock(self, wait=1):
        """Acquire the working-directory lock; the dirstate is written
        on release and re-read on acquisition."""
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
536 536
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the new filenode (or the existing one if the file is
        unmodified).  Appends fn to changelist when a new revision is
        actually created.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # the file is a copy/rename target: record copy metadata
            # and pick the source revision appropriately for the four
            # possible cases
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            else: # directory rename
                # neither parent has the file: it was moved here by a
                # directory rename during merge; source is in manifest1
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
576 578
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit with explicit parents, bypassing dirstate bookkeeping.

        Defaults the parents to the current dirstate parents.
        """
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock)
582 584
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Commit the given files (or all dirstate changes) as a new
        changeset.

        When p1 is None the dirstate drives the commit (normal path);
        otherwise this is a raw commit with explicit parents.  Returns
        the new changeset node, or None if nothing changed or the
        commit message was empty.
        """

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                # classify each named file by its dirstate state
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
        else:
            branchname = ""

        if use_dirstate:
            # a branch-name change alone is enough to commit
            oldname = c1[5].get("branch", "")
            if not commit and not remove and not force and p2 == nullid and \
                   branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit: a missing file becomes a removal
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()

        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build the editor template from the pending changes
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # normalize the message; an empty message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
718 720
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            # walk the manifest of the given changeset
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # whatever is left in fdict was not found in the manifest
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
752 754
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        """

        def fcmp(fn, mf):
            # true if the working copy of fn differs from mf's version
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of node restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                # best-effort lock so we may repair the dirstate below
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                # mark as up to date while we hold the lock
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 exists only in node1 -> removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
849 851
    def add(self, list, wlock=None):
        """Schedule the given files for addition at next commit.

        Files that are missing, not regular files, or already tracked
        produce a warning and are skipped.
        """
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files supported currently\n")
                             % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")
864 866
    def forget(self, list, wlock=None):
        """Undo a pending add for the given files (working copy is
        left untouched)."""
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in 'ai':
                self.ui.warn(_("%s not added!\n") % f)
            else:
                self.dirstate.forget([f])
873 875
    def remove(self, list, unlink=False, wlock=None):
        """Schedule the given files for removal at next commit.

        With unlink=True the working copies are deleted first; files
        that still exist afterwards are warned about and skipped.
        """
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # already-missing files are fine
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # removing a file that was only added: just forget it
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
894 896
    def undelete(self, list, wlock=None):
        """Restore files scheduled for removal from the first parent,
        recreating their working copies and exec bits."""
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t)
                util.set_exec(self.wjoin(f), m.execf(f))
                self.dirstate.update([f], "n")
909 911
    def copy(self, source, dest, wlock=None):
        """Record in the dirstate that dest is a copy of source.

        dest must already exist as a regular file in the working
        directory; untracked destinations are also marked as added.
        """
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)
922 924
    def heads(self, start=None):
        """Return the repository heads (optionally only descendants of
        start), sorted by revision in descending order."""
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]
929 931
    # branchlookup returns a dict giving a list of branches for
    # each head.  A branch is defined as the tag of a node or
    # the branch of the node's parents.  If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13.  Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents.  It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        """Return a dict mapping each head to the list of branch tags
        visible from it (see the comment block above for the full rules)."""
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}       # node -> dict of tagged nodes visible from it
        merges = []         # pending (second-parent, found-so-far) walks
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head.  The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a walk queued at a merge's second parent; 'found'
                # carries the tags collected on the way down
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    # stop descending once the requested branch is reached
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # memoized set of tagged nodes reachable from 'node'
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited.  This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
1035 1037
1036 1038 def branches(self, nodes):
1037 1039 if not nodes:
1038 1040 nodes = [self.changelog.tip()]
1039 1041 b = []
1040 1042 for n in nodes:
1041 1043 t = n
1042 1044 while 1:
1043 1045 p = self.changelog.parents(n)
1044 1046 if p[1] != nullid or p[0] == nullid:
1045 1047 b.append((t, n, p[0], p[1]))
1046 1048 break
1047 1049 n = p[0]
1048 1050 return b
1049 1051
1050 1052 def between(self, pairs):
1051 1053 r = []
1052 1054
1053 1055 for top, bottom in pairs:
1054 1056 n, l, i = top, [], 0
1055 1057 f = 1
1056 1058
1057 1059 while n != bottom:
1058 1060 p = self.changelog.parents(n)[0]
1059 1061 if i == f:
1060 1062 l.append(n)
1061 1063 f = f * 2
1062 1064 n = p
1063 1065 i += 1
1064 1066
1065 1067 r.append(l)
1066 1068
1067 1069 return r
1068 1070
1069 1071 def findincoming(self, remote, base=None, heads=None, force=False):
1070 1072 """Return list of roots of the subsets of missing nodes from remote
1071 1073
1072 1074 If base dict is specified, assume that these nodes and their parents
1073 1075 exist on the remote side and that no child of a node of base exists
1074 1076 in both remote and self.
1075 1077 Furthermore base will be updated to include the nodes that exists
1076 1078 in self and remote but no children exists in self and remote.
1077 1079 If a list of heads is specified, return only nodes which are heads
1078 1080 or ancestors of these heads.
1079 1081
1080 1082 All the ancestors of base are in self and in remote.
1081 1083 All the descendants of the list returned are missing in self.
1082 1084 (and so we know that the rest of the nodes are missing in remote, see
1083 1085 outgoing)
1084 1086 """
1085 1087 m = self.changelog.nodemap
1086 1088 search = []
1087 1089 fetch = {}
1088 1090 seen = {}
1089 1091 seenbranch = {}
1090 1092 if base == None:
1091 1093 base = {}
1092 1094
1093 1095 if not heads:
1094 1096 heads = remote.heads()
1095 1097
1096 1098 if self.changelog.tip() == nullid:
1097 1099 base[nullid] = 1
1098 1100 if heads != [nullid]:
1099 1101 return [nullid]
1100 1102 return []
1101 1103
1102 1104 # assume we're closer to the tip than the root
1103 1105 # and start by examining the heads
1104 1106 self.ui.status(_("searching for changes\n"))
1105 1107
1106 1108 unknown = []
1107 1109 for h in heads:
1108 1110 if h not in m:
1109 1111 unknown.append(h)
1110 1112 else:
1111 1113 base[h] = 1
1112 1114
1113 1115 if not unknown:
1114 1116 return []
1115 1117
1116 1118 req = dict.fromkeys(unknown)
1117 1119 reqcnt = 0
1118 1120
1119 1121 # search through remote branches
1120 1122 # a 'branch' here is a linear segment of history, with four parts:
1121 1123 # head, root, first parent, second parent
1122 1124 # (a branch always has two parents (or none) by definition)
1123 1125 unknown = remote.branches(unknown)
1124 1126 while unknown:
1125 1127 r = []
1126 1128 while unknown:
1127 1129 n = unknown.pop(0)
1128 1130 if n[0] in seen:
1129 1131 continue
1130 1132
1131 1133 self.ui.debug(_("examining %s:%s\n")
1132 1134 % (short(n[0]), short(n[1])))
1133 1135 if n[0] == nullid: # found the end of the branch
1134 1136 pass
1135 1137 elif n in seenbranch:
1136 1138 self.ui.debug(_("branch already found\n"))
1137 1139 continue
1138 1140 elif n[1] and n[1] in m: # do we know the base?
1139 1141 self.ui.debug(_("found incomplete branch %s:%s\n")
1140 1142 % (short(n[0]), short(n[1])))
1141 1143 search.append(n) # schedule branch range for scanning
1142 1144 seenbranch[n] = 1
1143 1145 else:
1144 1146 if n[1] not in seen and n[1] not in fetch:
1145 1147 if n[2] in m and n[3] in m:
1146 1148 self.ui.debug(_("found new changeset %s\n") %
1147 1149 short(n[1]))
1148 1150 fetch[n[1]] = 1 # earliest unknown
1149 1151 for p in n[2:4]:
1150 1152 if p in m:
1151 1153 base[p] = 1 # latest known
1152 1154
1153 1155 for p in n[2:4]:
1154 1156 if p not in req and p not in m:
1155 1157 r.append(p)
1156 1158 req[p] = 1
1157 1159 seen[n[0]] = 1
1158 1160
1159 1161 if r:
1160 1162 reqcnt += 1
1161 1163 self.ui.debug(_("request %d: %s\n") %
1162 1164 (reqcnt, " ".join(map(short, r))))
1163 1165 for p in xrange(0, len(r), 10):
1164 1166 for b in remote.branches(r[p:p+10]):
1165 1167 self.ui.debug(_("received %s:%s\n") %
1166 1168 (short(b[0]), short(b[1])))
1167 1169 unknown.append(b)
1168 1170
1169 1171 # do binary search on the branches we found
1170 1172 while search:
1171 1173 n = search.pop(0)
1172 1174 reqcnt += 1
1173 1175 l = remote.between([(n[0], n[1])])[0]
1174 1176 l.append(n[1])
1175 1177 p = n[0]
1176 1178 f = 1
1177 1179 for i in l:
1178 1180 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1179 1181 if i in m:
1180 1182 if f <= 2:
1181 1183 self.ui.debug(_("found new branch changeset %s\n") %
1182 1184 short(p))
1183 1185 fetch[p] = 1
1184 1186 base[i] = 1
1185 1187 else:
1186 1188 self.ui.debug(_("narrowed branch search to %s:%s\n")
1187 1189 % (short(p), short(i)))
1188 1190 search.append((p, i))
1189 1191 break
1190 1192 p, f = i, f * 2
1191 1193
1192 1194 # sanity check our fetch list
1193 1195 for f in fetch.keys():
1194 1196 if f in m:
1195 1197 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1196 1198
1197 1199 if base.keys() == [nullid]:
1198 1200 if force:
1199 1201 self.ui.warn(_("warning: repository is unrelated\n"))
1200 1202 else:
1201 1203 raise util.Abort(_("repository is unrelated"))
1202 1204
1203 1205 self.ui.debug(_("found new changesets starting at ") +
1204 1206 " ".join([short(f) for f in fetch]) + "\n")
1205 1207
1206 1208 self.ui.debug(_("%d total queries\n") % reqcnt)
1207 1209
1208 1210 return fetch.keys()
1209 1211
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            # no common-node info supplied: compute it (findincoming
            # fills 'base' in place as a side effect)
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        # start from every node we have, then prune what the remote has
        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            # both parents pruned -> n is a root of an outgoing subset
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1257 1259
    def pull(self, remote, heads=None, force=False, lock=None):
        """Pull changes from 'remote' into this repository.

        Takes the repo lock unless the caller passed one in; a lock we
        acquired ourselves is released in the finally block.  Returns 0
        when no changes were found, otherwise the result of
        addchangegroup (number of heads modified or added + 1).
        """
        mylock = False
        if not lock:
            lock = self.lock()
            mylock = True

        try:
            fetch = self.findincoming(remote, force=force)
            # [nullid] means the remote is unrelated/complete: full clone
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                # pulling only selected heads needs server-side support
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            # only release a lock we took ourselves
            if mylock:
                lock.release()
1283 1285
1284 1286 def push(self, remote, force=False, revs=None):
1285 1287 # there are two ways to push to remote repo:
1286 1288 #
1287 1289 # addchangegroup assumes local user can lock remote
1288 1290 # repo (local filesystem, old ssh servers).
1289 1291 #
1290 1292 # unbundle assumes local user cannot lock remote repo (new ssh
1291 1293 # servers, http servers).
1292 1294
1293 1295 if remote.capable('unbundle'):
1294 1296 return self.push_unbundle(remote, force, revs)
1295 1297 return self.push_addchangegroup(remote, force, revs)
1296 1298
    def prepush(self, remote, force, revs):
        """Analyse local vs. remote and build the changegroup to push.

        Returns (changegroup, remote_heads) when there is something to
        push, or (None, status) when there is nothing to push or the push
        would create new remote heads without 'force'.
        """
        base = {}
        remote_heads = remote.heads()
        # 'inc' is non-empty when the remote has changes we don't
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # pushing into an empty repo can never add heads
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # remote head not superseded by any outgoing head
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1352 1354
1353 1355 def push_addchangegroup(self, remote, force, revs):
1354 1356 lock = remote.lock()
1355 1357
1356 1358 ret = self.prepush(remote, force, revs)
1357 1359 if ret[0] is not None:
1358 1360 cg, remote_heads = ret
1359 1361 return remote.addchangegroup(cg, 'push', self.url())
1360 1362 return ret[1]
1361 1363
1362 1364 def push_unbundle(self, remote, force, revs):
1363 1365 # local repo finds heads on server, finds out what revs it
1364 1366 # must push. once revs transferred, if server finds it has
1365 1367 # different heads (someone else won commit/push race), server
1366 1368 # aborts.
1367 1369
1368 1370 ret = self.prepush(remote, force, revs)
1369 1371 if ret[0] is not None:
1370 1372 cg, remote_heads = ret
1371 1373 if force: remote_heads = ['force']
1372 1374 return remote.unbundle(cg, remote_heads, 'push')
1373 1375 return ret[1]
1374 1376
1375 1377 def changegroupinfo(self, nodes):
1376 1378 self.ui.note(_("%d changesets found\n") % len(nodes))
1377 1379 if self.ui.debugflag:
1378 1380 self.ui.debug(_("List of changesets:\n"))
1379 1381 for node in nodes:
1380 1382 self.ui.debug("%s\n" % hex(node))
1381 1383
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The stream is emitted in wire order: changelog chunks, then
        manifest chunks, then per-file chunks, terminated by a close
        chunk (see the gengroup generator below).
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1653 1655
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        Stream order matches changegroupsubset: changelog, manifests,
        per-file groups, close chunk.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        # set of outgoing revision numbers, used to filter manifest and
        # file revlogs by linkrev below
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            # a changeset 'owns' itself, so its lookup is the identity
            return x

        def gennodelst(revlog):
            # yield the nodes of 'revlog' whose linked changeset is outgoing
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # callback: record every file touched by an outgoing changeset
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # map a revlog node back to its owning changelog node
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1720 1722
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.
        returns number of heads modified or added + 1.

        'source' is a chunked changegroup stream, 'srctype' tags the
        origin ('push'/'pull') for hooks, and 'url' identifies the peer.
        """

        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: highest changelog rev before and after the add
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            cl.writedata()
        finally:
            # always clean up the appendfile, even on abort
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may raise to veto the transaction
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        return newheads - oldheads + 1
1818 1820
1819 1821
1820 1822 def stream_in(self, remote):
1821 1823 fp = remote.stream_out()
1822 1824 l = fp.readline()
1823 1825 try:
1824 1826 resp = int(l)
1825 1827 except ValueError:
1826 1828 raise util.UnexpectedOutput(
1827 1829 _('Unexpected response from remote server:'), l)
1828 1830 if resp == 1:
1829 1831 raise util.Abort(_('operation forbidden by server'))
1830 1832 elif resp == 2:
1831 1833 raise util.Abort(_('locking the remote repository failed'))
1832 1834 elif resp != 0:
1833 1835 raise util.Abort(_('the server sent an unknown error code'))
1834 1836 self.ui.status(_('streaming all changes\n'))
1835 1837 l = fp.readline()
1836 1838 try:
1837 1839 total_files, total_bytes = map(int, l.split(' ', 1))
1838 1840 except ValueError, TypeError:
1839 1841 raise util.UnexpectedOutput(
1840 1842 _('Unexpected response from remote server:'), l)
1841 1843 self.ui.status(_('%d files to transfer, %s of data\n') %
1842 1844 (total_files, util.bytecount(total_bytes)))
1843 1845 start = time.time()
1844 1846 for i in xrange(total_files):
1845 1847 # XXX doesn't support '\n' or '\r' in filenames
1846 1848 l = fp.readline()
1847 1849 try:
1848 1850 name, size = l.split('\0', 1)
1849 1851 size = int(size)
1850 1852 except ValueError, TypeError:
1851 1853 raise util.UnexpectedOutput(
1852 1854 _('Unexpected response from remote server:'), l)
1853 1855 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1854 1856 ofp = self.sopener(name, 'w')
1855 1857 for chunk in util.filechunkiter(fp, limit=size):
1856 1858 ofp.write(chunk)
1857 1859 ofp.close()
1858 1860 elapsed = time.time() - start
1859 1861 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1860 1862 (util.bytecount(total_bytes), elapsed,
1861 1863 util.bytecount(total_bytes / elapsed)))
1862 1864 self.reload()
1863 1865 return len(self.heads()) + 1
1864 1866
1865 1867 def clone(self, remote, heads=[], stream=False):
1866 1868 '''clone remote repository.
1867 1869
1868 1870 keyword arguments:
1869 1871 heads: list of revs to clone (forces use of pull)
1870 1872 stream: use streaming clone if possible'''
1871 1873
1872 1874 # now, all clients that can request uncompressed clones can
1873 1875 # read repo formats supported by all servers that can serve
1874 1876 # them.
1875 1877
1876 1878 # if revlog format changes, client will have to check version
1877 1879 # and format flags on "stream" capability, and use
1878 1880 # uncompressed only if compatible.
1879 1881
1880 1882 if stream and not heads and remote.capable('stream'):
1881 1883 return self.stream_in(remote)
1882 1884 return self.pull(remote, heads)
1883 1885
1884 1886 # used to avoid circular references so destructors work
1885 1887 def aftertrans(base):
1886 1888 p = base
1887 1889 def a():
1888 1890 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1889 1891 util.rename(os.path.join(p, "journal.dirstate"),
1890 1892 os.path.join(p, "undo.dirstate"))
1891 1893 return a
1892 1894
1893 1895 def instance(ui, path, create):
1894 1896 return localrepository(ui, util.drop_scheme('file', path), create)
1895 1897
1896 1898 def islocal(path):
1897 1899 return True
@@ -1,408 +1,483 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 demandload(globals(), "errno util os tempfile")
12 12
13 13 def filemerge(repo, fw, fo, wctx, mctx):
14 14 """perform a 3-way merge in the working directory
15 15
16 16 fw = filename in the working directory
17 17 fo = filename in other parent
18 18 wctx, mctx = working and merge changecontexts
19 19 """
20 20
21 21 def temp(prefix, ctx):
22 22 pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
23 23 (fd, name) = tempfile.mkstemp(prefix=pre)
24 24 f = os.fdopen(fd, "wb")
25 25 repo.wwrite(ctx.path(), ctx.data(), f)
26 26 f.close()
27 27 return name
28 28
29 29 fcm = wctx.filectx(fw)
30 30 fco = mctx.filectx(fo)
31 31
32 32 if not fco.cmp(fcm.data()): # files identical?
33 33 return None
34 34
35 35 fca = fcm.ancestor(fco)
36 36 if not fca:
37 37 fca = repo.filectx(fw, fileid=nullrev)
38 38 a = repo.wjoin(fw)
39 39 b = temp("base", fca)
40 40 c = temp("other", fco)
41 41
42 42 if fw != fo:
43 43 repo.ui.status(_("merging %s and %s\n") % (fw, fo))
44 44 else:
45 45 repo.ui.status(_("merging %s\n") % fw)
46 46
47 47 repo.ui.debug(_("my %s other %s ancestor %s\n") % (fcm, fco, fca))
48 48
49 49 cmd = (os.environ.get("HGMERGE") or repo.ui.config("ui", "merge")
50 50 or "hgmerge")
51 51 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=repo.root,
52 52 environ={'HG_FILE': fw,
53 53 'HG_MY_NODE': str(wctx.parents()[0]),
54 54 'HG_OTHER_NODE': str(mctx)})
55 55 if r:
56 56 repo.ui.warn(_("merging %s failed!\n") % fw)
57 57
58 58 os.unlink(b)
59 59 os.unlink(c)
60 60 return r
61 61
62 62 def checkunknown(wctx, mctx):
63 63 "check for collisions between unknown files and files in mctx"
64 64 man = mctx.manifest()
65 65 for f in wctx.unknown():
66 66 if f in man:
67 67 if mctx.filectx(f).cmp(wctx.filectx(f).data()):
68 68 raise util.Abort(_("untracked local file '%s' differs"\
69 69 " from remote version") % f)
70 70
71 71 def forgetremoved(wctx, mctx):
72 72 """
73 73 Forget removed files
74 74
75 75 If we're jumping between revisions (as opposed to merging), and if
76 76 neither the working directory nor the target rev has the file,
77 77 then we need to remove it from the dirstate, to prevent the
78 78 dirstate from listing the file when it is no longer in the
79 79 manifest.
80 80 """
81 81
82 82 action = []
83 83 man = mctx.manifest()
84 84 for f in wctx.deleted() + wctx.removed():
85 85 if f not in man:
86 86 action.append((f, "f"))
87 87
88 88 return action
89 89
90 90 def findcopies(repo, m1, m2, ma, limit):
91 91 """
92 92 Find moves and copies between m1 and m2 back to limit linkrev
93 93 """
94 94
95 95 def findold(fctx):
96 96 "find files that path was copied from, back to linkrev limit"
97 97 old = {}
98 98 orig = fctx.path()
99 99 visit = [fctx]
100 100 while visit:
101 101 fc = visit.pop()
102 102 if fc.rev() < limit:
103 103 continue
104 104 if fc.path() != orig and fc.path() not in old:
105 105 old[fc.path()] = 1
106 106 visit += fc.parents()
107 107
108 108 old = old.keys()
109 109 old.sort()
110 110 return old
111 111
112 112 def nonoverlap(d1, d2, d3):
113 113 "Return list of elements in d1 not in d2 or d3"
114 114 l = [d for d in d1 if d not in d3 and d not in d2]
115 115 l.sort()
116 116 return l
117 117
118 118 def checkcopies(c, man):
119 119 '''check possible copies for filectx c'''
120 120 for of in findold(c):
121 121 if of not in man:
122 122 return
123 123 c2 = ctx(of, man[of])
124 124 ca = c.ancestor(c2)
125 if not ca or c == ca or c2 == ca:
125 if not ca: # unrelated
126 126 return
127 127 if ca.path() == c.path() or ca.path() == c2.path():
128 fullcopy[c.path()] = of
129 if c == ca or c2 == ca: # no merge needed, ignore copy
130 return
128 131 copy[c.path()] = of
129 132
133 def dirs(files):
134 d = {}
135 for f in files:
136 d[os.path.dirname(f)] = True
137 return d
138
130 139 if not repo.ui.configbool("merge", "followcopies", True):
131 140 return {}
132 141
133 142 # avoid silly behavior for update from empty dir
134 143 if not m1 or not m2 or not ma:
135 144 return {}
136 145
137 146 dcopies = repo.dirstate.copies()
138 147 copy = {}
148 fullcopy = {}
139 149 u1 = nonoverlap(m1, m2, ma)
140 150 u2 = nonoverlap(m2, m1, ma)
141 151 ctx = util.cachefunc(lambda f, n: repo.filectx(f, fileid=n[:20]))
142 152
143 153 for f in u1:
144 154 checkcopies(ctx(dcopies.get(f, f), m1[f]), m2)
145 155
146 156 for f in u2:
147 157 checkcopies(ctx(f, m2[f]), m1)
148 158
159 if not fullcopy or not repo.ui.configbool("merge", "followdirs", True):
160 return copy
161
162 # generate a directory move map
163 d1, d2 = dirs(m1), dirs(m2)
164 invalid = {}
165 dirmove = {}
166
167 for dst, src in fullcopy.items():
168 dsrc, ddst = os.path.dirname(src), os.path.dirname(dst)
169 if dsrc in invalid:
170 continue
171 elif (dsrc in d1 and ddst in d1) or (dsrc in d2 and ddst in d2):
172 invalid[dsrc] = True
173 elif dsrc in dirmove and dirmove[dsrc] != ddst:
174 invalid[dsrc] = True
175 del dirmove[dsrc]
176 else:
177 dirmove[dsrc] = ddst
178
179 del d1, d2, invalid
180
181 if not dirmove:
182 return copy
183
184 # check unaccounted nonoverlapping files
185 for f in u1 + u2:
186 if f not in fullcopy:
187 d = os.path.dirname(f)
188 if d in dirmove:
189 copy[f] = dirmove[d] + "/" + os.path.basename(f)
190
149 191 return copy
150 192
151 193 def manifestmerge(repo, p1, p2, pa, overwrite, partial):
152 194 """
153 195 Merge p1 and p2 with ancestor ma and generate merge action list
154 196
155 197 overwrite = whether we clobber working files
156 198 partial = function to filter file lists
157 199 """
158 200
159 201 repo.ui.note(_("resolving manifests\n"))
160 202 repo.ui.debug(_(" overwrite %s partial %s\n") % (overwrite, bool(partial)))
161 203 repo.ui.debug(_(" ancestor %s local %s remote %s\n") % (pa, p1, p2))
162 204
163 205 m1 = p1.manifest()
164 206 m2 = p2.manifest()
165 207 ma = pa.manifest()
166 208 backwards = (pa == p2)
167 209 action = []
168 210 copy = {}
169 211
170 212 def fmerge(f, f2=None, fa=None):
171 213 """merge executable flags"""
172 214 if not f2:
173 215 f2 = f
174 216 fa = f
175 217 a, b, c = ma.execf(fa), m1.execf(f), m2.execf(f2)
176 218 return ((a^b) | (a^c)) ^ a
177 219
178 220 def act(msg, m, f, *args):
179 221 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
180 222 action.append((f, m) + args)
181 223
182 224 if not (backwards or overwrite):
183 225 copy = findcopies(repo, m1, m2, ma, pa.rev())
184 226 copied = dict.fromkeys(copy.values())
185 227
186 228 # Compare manifests
187 229 for f, n in m1.iteritems():
188 230 if partial and not partial(f):
189 231 continue
190 232 if f in m2:
191 233 # are files different?
192 234 if n != m2[f]:
193 235 a = ma.get(f, nullid)
194 236 # are both different from the ancestor?
195 237 if not overwrite and n != a and m2[f] != a:
196 238 act("versions differ", "m", f, f, f, fmerge(f), False)
197 239 # are we clobbering?
198 240 # is remote's version newer?
199 241 # or are we going back in time and clean?
200 242 elif overwrite or m2[f] != a or (backwards and not n[20:]):
201 243 act("remote is newer", "g", f, m2.execf(f))
202 244 # local is newer, not overwrite, check mode bits
203 245 elif fmerge(f) != m1.execf(f):
204 246 act("update permissions", "e", f, m2.execf(f))
205 247 # contents same, check mode bits
206 248 elif m1.execf(f) != m2.execf(f):
207 249 if overwrite or fmerge(f) != m1.execf(f):
208 250 act("update permissions", "e", f, m2.execf(f))
209 251 elif f in copied:
210 252 continue
211 253 elif f in copy:
212 254 f2 = copy[f]
213 if f2 in m1: # case 2 A,B/B/B
255 if f2 not in m2: # directory rename
256 act("remote renamed directory to " + f2, "d",
257 f, None, f2, m1.execf(f))
258 elif f2 in m1: # case 2 A,B/B/B
214 259 act("local copied to " + f2, "m",
215 260 f, f2, f, fmerge(f, f2, f2), False)
216 261 else: # case 4,21 A/B/B
217 262 act("local moved to " + f2, "m",
218 263 f, f2, f, fmerge(f, f2, f2), False)
219 264 elif f in ma:
220 265 if n != ma[f] and not overwrite:
221 266 if repo.ui.prompt(
222 267 (_(" local changed %s which remote deleted\n") % f) +
223 268 _("(k)eep or (d)elete?"), _("[kd]"), _("k")) == _("d"):
224 269 act("prompt delete", "r", f)
225 270 else:
226 271 act("other deleted", "r", f)
227 272 else:
228 273 # file is created on branch or in working directory
229 274 if (overwrite and n[20:] != "u") or (backwards and not n[20:]):
230 275 act("remote deleted", "r", f)
231 276
232 277 for f, n in m2.iteritems():
233 278 if partial and not partial(f):
234 279 continue
235 280 if f in m1:
236 281 continue
237 282 if f in copied:
238 283 continue
239 284 if f in copy:
240 285 f2 = copy[f]
241 if f2 in m2: # rename case 1, A/A,B/A
286 if f2 not in m1: # directory rename
287 act("local renamed directory to " + f2, "d",
288 None, f, f2, m2.execf(f))
289 elif f2 in m2: # rename case 1, A/A,B/A
242 290 act("remote copied to " + f, "m",
243 291 f2, f, f, fmerge(f2, f, f2), False)
244 292 else: # case 3,20 A/B/A
245 293 act("remote moved to " + f, "m",
246 294 f2, f, f, fmerge(f2, f, f2), True)
247 295 elif f in ma:
248 296 if overwrite or backwards:
249 297 act("recreating", "g", f, m2.execf(f))
250 298 elif n != ma[f]:
251 299 if repo.ui.prompt(
252 300 (_("remote changed %s which local deleted\n") % f) +
253 301 _("(k)eep or (d)elete?"), _("[kd]"), _("k")) == _("k"):
254 302 act("prompt recreating", "g", f, m2.execf(f))
255 303 else:
256 304 act("remote created", "g", f, m2.execf(f))
257 305
258 306 return action
259 307
260 308 def applyupdates(repo, action, wctx, mctx):
261 309 "apply the merge action list to the working directory"
262 310
263 311 updated, merged, removed, unresolved = 0, 0, 0, 0
264 312 action.sort()
265 313 for a in action:
266 314 f, m = a[:2]
267 if f[0] == "/":
315 if f and f[0] == "/":
268 316 continue
269 317 if m == "r": # remove
270 318 repo.ui.note(_("removing %s\n") % f)
271 319 util.audit_path(f)
272 320 try:
273 321 util.unlink(repo.wjoin(f))
274 322 except OSError, inst:
275 323 if inst.errno != errno.ENOENT:
276 324 repo.ui.warn(_("update failed to remove %s: %s!\n") %
277 325 (f, inst.strerror))
278 326 removed += 1
279 327 elif m == "m": # merge
280 328 f2, fd, flag, move = a[2:]
281 329 r = filemerge(repo, f, f2, wctx, mctx)
282 330 if r > 0:
283 331 unresolved += 1
284 332 else:
285 333 if r is None:
286 334 updated += 1
287 335 else:
288 336 merged += 1
289 337 if f != fd:
290 338 repo.ui.debug(_("copying %s to %s\n") % (f, fd))
291 339 repo.wwrite(fd, repo.wread(f))
292 340 if move:
293 341 repo.ui.debug(_("removing %s\n") % f)
294 342 os.unlink(repo.wjoin(f))
295 343 util.set_exec(repo.wjoin(fd), flag)
296 344 elif m == "g": # get
297 345 flag = a[2]
298 346 repo.ui.note(_("getting %s\n") % f)
299 347 t = mctx.filectx(f).data()
300 348 repo.wwrite(f, t)
301 349 util.set_exec(repo.wjoin(f), flag)
302 350 updated += 1
351 elif m == "d": # directory rename
352 f2, fd, flag = a[2:]
353 if f:
354 repo.ui.note(_("moving %s to %s\n") % (f, fd))
355 t = wctx.filectx(f).data()
356 repo.wwrite(fd, t)
357 util.set_exec(repo.wjoin(fd), flag)
358 util.unlink(repo.wjoin(f))
359 if f2:
360 repo.ui.note(_("getting %s to %s\n") % (f2, fd))
361 t = mctx.filectx(f2).data()
362 repo.wwrite(fd, t)
363 util.set_exec(repo.wjoin(fd), flag)
364 updated += 1
303 365 elif m == "e": # exec
304 366 flag = a[2]
305 367 util.set_exec(repo.wjoin(f), flag)
306 368
307 369 return updated, merged, removed, unresolved
308 370
309 371 def recordupdates(repo, action, branchmerge):
310 372 "record merge actions to the dirstate"
311 373
312 374 for a in action:
313 375 f, m = a[:2]
314 376 if m == "r": # remove
315 377 if branchmerge:
316 378 repo.dirstate.update([f], 'r')
317 379 else:
318 380 repo.dirstate.forget([f])
319 381 elif m == "f": # forget
320 382 repo.dirstate.forget([f])
321 383 elif m == "g": # get
322 384 if branchmerge:
323 385 repo.dirstate.update([f], 'n', st_mtime=-1)
324 386 else:
325 387 repo.dirstate.update([f], 'n')
326 388 elif m == "m": # merge
327 389 f2, fd, flag, move = a[2:]
328 390 if branchmerge:
329 391 # We've done a branch merge, mark this file as merged
330 392 # so that we properly record the merger later
331 393 repo.dirstate.update([fd], 'm')
332 394 if f != f2: # copy/rename
333 395 if move:
334 396 repo.dirstate.update([f], 'r')
335 397 if f != fd:
336 398 repo.dirstate.copy(f, fd)
337 399 else:
338 400 repo.dirstate.copy(f2, fd)
339 401 else:
340 402 # We've update-merged a locally modified file, so
341 403 # we set the dirstate to emulate a normal checkout
342 404 # of that file some time in the past. Thus our
343 405 # merge will appear as a normal local file
344 406 # modification.
345 407 repo.dirstate.update([fd], 'n', st_size=-1, st_mtime=-1)
346 408 if move:
347 409 repo.dirstate.forget([f])
410 elif m == "d": # directory rename
411 f2, fd, flag = a[2:]
412 if branchmerge:
413 repo.dirstate.update([fd], 'a')
414 if f:
415 repo.dirstate.update([f], 'r')
416 repo.dirstate.copy(f, fd)
417 if f2:
418 repo.dirstate.copy(f2, fd)
419 else:
420 repo.dirstate.update([fd], 'n')
421 if f:
422 repo.dirstate.forget([f])
348 423
349 424 def update(repo, node, branchmerge, force, partial, wlock):
350 425 """
351 426 Perform a merge between the working directory and the given node
352 427
353 428 branchmerge = whether to merge between branches
354 429 force = whether to force branch merging or file overwriting
355 430 partial = a function to filter file lists (dirstate not updated)
356 431 wlock = working dir lock, if already held
357 432 """
358 433
359 434 if not wlock:
360 435 wlock = repo.wlock()
361 436
362 437 overwrite = force and not branchmerge
363 438 forcemerge = force and branchmerge
364 439 wc = repo.workingctx()
365 440 pl = wc.parents()
366 441 p1, p2 = pl[0], repo.changectx(node)
367 442 pa = p1.ancestor(p2)
368 443 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
369 444
370 445 ### check phase
371 446 if not overwrite and len(pl) > 1:
372 447 raise util.Abort(_("outstanding uncommitted merges"))
373 448 if pa == p1 or pa == p2: # is there a linear path from p1 to p2?
374 449 if branchmerge:
375 450 raise util.Abort(_("there is nothing to merge, just use "
376 451 "'hg update' or look at 'hg heads'"))
377 452 elif not (overwrite or branchmerge):
378 453 raise util.Abort(_("update spans branches, use 'hg merge' "
379 454 "or 'hg update -C' to lose changes"))
380 455 if branchmerge and not forcemerge:
381 456 if wc.files():
382 457 raise util.Abort(_("outstanding uncommitted changes"))
383 458
384 459 ### calculate phase
385 460 action = []
386 461 if not force:
387 462 checkunknown(wc, p2)
388 463 if not branchmerge:
389 464 action += forgetremoved(wc, p2)
390 465 action += manifestmerge(repo, wc, p2, pa, overwrite, partial)
391 466
392 467 ### apply phase
393 468 if not branchmerge: # just jump to the new rev
394 469 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
395 470 if not partial:
396 471 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
397 472
398 473 stats = applyupdates(repo, action, wc, p2)
399 474
400 475 if not partial:
401 476 recordupdates(repo, action, branchmerge)
402 477 repo.dirstate.setparents(fp1, fp2)
403 478 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
404 479 if not branchmerge:
405 480 repo.opener("branch", "w").write(p2.branch() + "\n")
406 481
407 482 return stats
408 483
General Comments 0
You need to be logged in to leave comments. Login now