##// END OF EJS Templates
make commit fail when committing unresolved files
Stefano Tortarolo -
r6888:7c36a4fb default
parent child Browse files
Show More
@@ -0,0 +1,40 b''
1 #!/bin/sh
2
3 echo "[extensions]" >> $HGRCPATH
4 echo "graphlog=" >> $HGRCPATH
5
6 function addcommit {
7 echo $1 > $1
8 hg add $1
9 hg commit -d "${2} 0" -u test -m $1
10 }
11 function commit {
12 hg commit -d "${2} 0" -u test -m $1
13 }
14
15 hg init a
16 cd a
17 addcommit "A" 0
18 addcommit "B" 1
19 echo "C" >> A
20 commit "C" 2
21
22 hg update -C 0
23 echo "D" >> A
24 commit "D" 3
25
26 echo
27 echo "% Merging a conflict arises"
28 hg merge
29
30 echo
31 echo "% Correct the conflict without marking the file as resolved"
32 echo "ABCD" > A
33 hg commit -m "Merged"
34
35 echo
36 echo "% Mark the conflict as resolved and commit"
37 hg resolve -m A
38 hg commit -m "Merged"
39
40 exit 0
@@ -0,0 +1,14 b''
1 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
2 created new head
3
4 % Merging a conflict arises
5 merging A
6 warning: conflicts during merge.
7 merging A failed!
8 1 files updated, 0 files merged, 0 files removed, 1 files unresolved
9 use 'hg resolve' to retry unresolved file merges
10
11 % Correct the conflict without marking the file as resolved
12 abort: unresolved merge conflicts (see hg resolve)
13
14 % Mark the conflict as resolved and commit
@@ -1,2085 +1,2091 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import lock, transaction, stat, errno, ui, store
13 13 import os, revlog, time, util, extensions, hook, inspect
14 14 import match as match_
15 import merge as merge_
15 16
16 17 class localrepository(repo.repository):
17 18 capabilities = util.set(('lookup', 'changegroupsubset'))
18 19 supported = ('revlogv1', 'store')
19 20
20 21 def __init__(self, parentui, path=None, create=0):
21 22 repo.repository.__init__(self)
22 23 self.root = os.path.realpath(path)
23 24 self.path = os.path.join(self.root, ".hg")
24 25 self.origroot = path
25 26 self.opener = util.opener(self.path)
26 27 self.wopener = util.opener(self.root)
27 28
28 29 if not os.path.isdir(self.path):
29 30 if create:
30 31 if not os.path.exists(path):
31 32 os.mkdir(path)
32 33 os.mkdir(self.path)
33 34 requirements = ["revlogv1"]
34 35 if parentui.configbool('format', 'usestore', True):
35 36 os.mkdir(os.path.join(self.path, "store"))
36 37 requirements.append("store")
37 38 # create an invalid changelog
38 39 self.opener("00changelog.i", "a").write(
39 40 '\0\0\0\2' # represents revlogv2
40 41 ' dummy changelog to prevent using the old repo layout'
41 42 )
42 43 reqfile = self.opener("requires", "w")
43 44 for r in requirements:
44 45 reqfile.write("%s\n" % r)
45 46 reqfile.close()
46 47 else:
47 48 raise repo.RepoError(_("repository %s not found") % path)
48 49 elif create:
49 50 raise repo.RepoError(_("repository %s already exists") % path)
50 51 else:
51 52 # find requirements
52 53 try:
53 54 requirements = self.opener("requires").read().splitlines()
54 55 except IOError, inst:
55 56 if inst.errno != errno.ENOENT:
56 57 raise
57 58 requirements = []
58 59 # check them
59 60 for r in requirements:
60 61 if r not in self.supported:
61 62 raise repo.RepoError(_("requirement '%s' not supported") % r)
62 63
63 64 self.store = store.store(requirements, self.path)
64 65
65 66 self.spath = self.store.path
66 67 self.sopener = self.store.opener
67 68 self.sjoin = self.store.join
68 69 self._createmode = self.store.createmode
69 70 self.opener.createmode = self.store.createmode
70 71
71 72 self.ui = ui.ui(parentui=parentui)
72 73 try:
73 74 self.ui.readconfig(self.join("hgrc"), self.root)
74 75 extensions.loadall(self.ui)
75 76 except IOError:
76 77 pass
77 78
78 79 self.tagscache = None
79 80 self._tagstypecache = None
80 81 self.branchcache = None
81 82 self._ubranchcache = None # UTF-8 version of branchcache
82 83 self._branchcachetip = None
83 84 self.nodetagscache = None
84 85 self.filterpats = {}
85 86 self._datafilters = {}
86 87 self._transref = self._lockref = self._wlockref = None
87 88
88 89 def __getattr__(self, name):
89 90 if name == 'changelog':
90 91 self.changelog = changelog.changelog(self.sopener)
91 92 self.sopener.defversion = self.changelog.version
92 93 return self.changelog
93 94 if name == 'manifest':
94 95 self.changelog
95 96 self.manifest = manifest.manifest(self.sopener)
96 97 return self.manifest
97 98 if name == 'dirstate':
98 99 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
99 100 return self.dirstate
100 101 else:
101 102 raise AttributeError, name
102 103
103 104 def __getitem__(self, changeid):
104 105 if changeid == None:
105 106 return context.workingctx(self)
106 107 return context.changectx(self, changeid)
107 108
108 109 def __nonzero__(self):
109 110 return True
110 111
111 112 def __len__(self):
112 113 return len(self.changelog)
113 114
114 115 def __iter__(self):
115 116 for i in xrange(len(self)):
116 117 yield i
117 118
118 119 def url(self):
119 120 return 'file:' + self.root
120 121
121 122 def hook(self, name, throw=False, **args):
122 123 return hook.hook(self.ui, self, name, throw, **args)
123 124
124 125 tag_disallowed = ':\r\n'
125 126
126 127 def _tag(self, names, node, message, local, user, date, parent=None,
127 128 extra={}):
128 129 use_dirstate = parent is None
129 130
130 131 if isinstance(names, str):
131 132 allchars = names
132 133 names = (names,)
133 134 else:
134 135 allchars = ''.join(names)
135 136 for c in self.tag_disallowed:
136 137 if c in allchars:
137 138 raise util.Abort(_('%r cannot be used in a tag name') % c)
138 139
139 140 for name in names:
140 141 self.hook('pretag', throw=True, node=hex(node), tag=name,
141 142 local=local)
142 143
143 144 def writetags(fp, names, munge, prevtags):
144 145 fp.seek(0, 2)
145 146 if prevtags and prevtags[-1] != '\n':
146 147 fp.write('\n')
147 148 for name in names:
148 149 m = munge and munge(name) or name
149 150 if self._tagstypecache and name in self._tagstypecache:
150 151 old = self.tagscache.get(name, nullid)
151 152 fp.write('%s %s\n' % (hex(old), m))
152 153 fp.write('%s %s\n' % (hex(node), m))
153 154 fp.close()
154 155
155 156 prevtags = ''
156 157 if local:
157 158 try:
158 159 fp = self.opener('localtags', 'r+')
159 160 except IOError, err:
160 161 fp = self.opener('localtags', 'a')
161 162 else:
162 163 prevtags = fp.read()
163 164
164 165 # local tags are stored in the current charset
165 166 writetags(fp, names, None, prevtags)
166 167 for name in names:
167 168 self.hook('tag', node=hex(node), tag=name, local=local)
168 169 return
169 170
170 171 if use_dirstate:
171 172 try:
172 173 fp = self.wfile('.hgtags', 'rb+')
173 174 except IOError, err:
174 175 fp = self.wfile('.hgtags', 'ab')
175 176 else:
176 177 prevtags = fp.read()
177 178 else:
178 179 try:
179 180 prevtags = self.filectx('.hgtags', parent).data()
180 181 except revlog.LookupError:
181 182 pass
182 183 fp = self.wfile('.hgtags', 'wb')
183 184 if prevtags:
184 185 fp.write(prevtags)
185 186
186 187 # committed tags are stored in UTF-8
187 188 writetags(fp, names, util.fromlocal, prevtags)
188 189
189 190 if use_dirstate and '.hgtags' not in self.dirstate:
190 191 self.add(['.hgtags'])
191 192
192 193 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
193 194 extra=extra)
194 195
195 196 for name in names:
196 197 self.hook('tag', node=hex(node), tag=name, local=local)
197 198
198 199 return tagnode
199 200
200 201 def tag(self, names, node, message, local, user, date):
201 202 '''tag a revision with one or more symbolic names.
202 203
203 204 names is a list of strings or, when adding a single tag, names may be a
204 205 string.
205 206
206 207 if local is True, the tags are stored in a per-repository file.
207 208 otherwise, they are stored in the .hgtags file, and a new
208 209 changeset is committed with the change.
209 210
210 211 keyword arguments:
211 212
212 213 local: whether to store tags in non-version-controlled file
213 214 (default False)
214 215
215 216 message: commit message to use if committing
216 217
217 218 user: name of user to use if committing
218 219
219 220 date: date tuple to use if committing'''
220 221
221 222 for x in self.status()[:5]:
222 223 if '.hgtags' in x:
223 224 raise util.Abort(_('working copy of .hgtags is changed '
224 225 '(please commit .hgtags manually)'))
225 226
226 227 self._tag(names, node, message, local, user, date)
227 228
228 229 def tags(self):
229 230 '''return a mapping of tag to node'''
230 231 if self.tagscache:
231 232 return self.tagscache
232 233
233 234 globaltags = {}
234 235 tagtypes = {}
235 236
236 237 def readtags(lines, fn, tagtype):
237 238 filetags = {}
238 239 count = 0
239 240
240 241 def warn(msg):
241 242 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
242 243
243 244 for l in lines:
244 245 count += 1
245 246 if not l:
246 247 continue
247 248 s = l.split(" ", 1)
248 249 if len(s) != 2:
249 250 warn(_("cannot parse entry"))
250 251 continue
251 252 node, key = s
252 253 key = util.tolocal(key.strip()) # stored in UTF-8
253 254 try:
254 255 bin_n = bin(node)
255 256 except TypeError:
256 257 warn(_("node '%s' is not well formed") % node)
257 258 continue
258 259 if bin_n not in self.changelog.nodemap:
259 260 warn(_("tag '%s' refers to unknown node") % key)
260 261 continue
261 262
262 263 h = []
263 264 if key in filetags:
264 265 n, h = filetags[key]
265 266 h.append(n)
266 267 filetags[key] = (bin_n, h)
267 268
268 269 for k, nh in filetags.items():
269 270 if k not in globaltags:
270 271 globaltags[k] = nh
271 272 tagtypes[k] = tagtype
272 273 continue
273 274
274 275 # we prefer the global tag if:
275 276 # it supercedes us OR
276 277 # mutual supercedes and it has a higher rank
277 278 # otherwise we win because we're tip-most
278 279 an, ah = nh
279 280 bn, bh = globaltags[k]
280 281 if (bn != an and an in bh and
281 282 (bn not in ah or len(bh) > len(ah))):
282 283 an = bn
283 284 ah.extend([n for n in bh if n not in ah])
284 285 globaltags[k] = an, ah
285 286 tagtypes[k] = tagtype
286 287
287 288 # read the tags file from each head, ending with the tip
288 289 f = None
289 290 for rev, node, fnode in self._hgtagsnodes():
290 291 f = (f and f.filectx(fnode) or
291 292 self.filectx('.hgtags', fileid=fnode))
292 293 readtags(f.data().splitlines(), f, "global")
293 294
294 295 try:
295 296 data = util.fromlocal(self.opener("localtags").read())
296 297 # localtags are stored in the local character set
297 298 # while the internal tag table is stored in UTF-8
298 299 readtags(data.splitlines(), "localtags", "local")
299 300 except IOError:
300 301 pass
301 302
302 303 self.tagscache = {}
303 304 self._tagstypecache = {}
304 305 for k,nh in globaltags.items():
305 306 n = nh[0]
306 307 if n != nullid:
307 308 self.tagscache[k] = n
308 309 self._tagstypecache[k] = tagtypes[k]
309 310 self.tagscache['tip'] = self.changelog.tip()
310 311 return self.tagscache
311 312
312 313 def tagtype(self, tagname):
313 314 '''
314 315 return the type of the given tag. result can be:
315 316
316 317 'local' : a local tag
317 318 'global' : a global tag
318 319 None : tag does not exist
319 320 '''
320 321
321 322 self.tags()
322 323
323 324 return self._tagstypecache.get(tagname)
324 325
325 326 def _hgtagsnodes(self):
326 327 heads = self.heads()
327 328 heads.reverse()
328 329 last = {}
329 330 ret = []
330 331 for node in heads:
331 332 c = self[node]
332 333 rev = c.rev()
333 334 try:
334 335 fnode = c.filenode('.hgtags')
335 336 except revlog.LookupError:
336 337 continue
337 338 ret.append((rev, node, fnode))
338 339 if fnode in last:
339 340 ret[last[fnode]] = None
340 341 last[fnode] = len(ret) - 1
341 342 return [item for item in ret if item]
342 343
343 344 def tagslist(self):
344 345 '''return a list of tags ordered by revision'''
345 346 l = []
346 347 for t, n in self.tags().items():
347 348 try:
348 349 r = self.changelog.rev(n)
349 350 except:
350 351 r = -2 # sort to the beginning of the list if unknown
351 352 l.append((r, t, n))
352 353 return [(t, n) for r, t, n in util.sort(l)]
353 354
354 355 def nodetags(self, node):
355 356 '''return the tags associated with a node'''
356 357 if not self.nodetagscache:
357 358 self.nodetagscache = {}
358 359 for t, n in self.tags().items():
359 360 self.nodetagscache.setdefault(n, []).append(t)
360 361 return self.nodetagscache.get(node, [])
361 362
362 363 def _branchtags(self, partial, lrev):
363 364 tiprev = len(self) - 1
364 365 if lrev != tiprev:
365 366 self._updatebranchcache(partial, lrev+1, tiprev+1)
366 367 self._writebranchcache(partial, self.changelog.tip(), tiprev)
367 368
368 369 return partial
369 370
370 371 def branchtags(self):
371 372 tip = self.changelog.tip()
372 373 if self.branchcache is not None and self._branchcachetip == tip:
373 374 return self.branchcache
374 375
375 376 oldtip = self._branchcachetip
376 377 self._branchcachetip = tip
377 378 if self.branchcache is None:
378 379 self.branchcache = {} # avoid recursion in changectx
379 380 else:
380 381 self.branchcache.clear() # keep using the same dict
381 382 if oldtip is None or oldtip not in self.changelog.nodemap:
382 383 partial, last, lrev = self._readbranchcache()
383 384 else:
384 385 lrev = self.changelog.rev(oldtip)
385 386 partial = self._ubranchcache
386 387
387 388 self._branchtags(partial, lrev)
388 389
389 390 # the branch cache is stored on disk as UTF-8, but in the local
390 391 # charset internally
391 392 for k, v in partial.items():
392 393 self.branchcache[util.tolocal(k)] = v
393 394 self._ubranchcache = partial
394 395 return self.branchcache
395 396
396 397 def _readbranchcache(self):
397 398 partial = {}
398 399 try:
399 400 f = self.opener("branch.cache")
400 401 lines = f.read().split('\n')
401 402 f.close()
402 403 except (IOError, OSError):
403 404 return {}, nullid, nullrev
404 405
405 406 try:
406 407 last, lrev = lines.pop(0).split(" ", 1)
407 408 last, lrev = bin(last), int(lrev)
408 409 if lrev >= len(self) or self[lrev].node() != last:
409 410 # invalidate the cache
410 411 raise ValueError('invalidating branch cache (tip differs)')
411 412 for l in lines:
412 413 if not l: continue
413 414 node, label = l.split(" ", 1)
414 415 partial[label.strip()] = bin(node)
415 416 except (KeyboardInterrupt, util.SignalInterrupt):
416 417 raise
417 418 except Exception, inst:
418 419 if self.ui.debugflag:
419 420 self.ui.warn(str(inst), '\n')
420 421 partial, last, lrev = {}, nullid, nullrev
421 422 return partial, last, lrev
422 423
423 424 def _writebranchcache(self, branches, tip, tiprev):
424 425 try:
425 426 f = self.opener("branch.cache", "w", atomictemp=True)
426 427 f.write("%s %s\n" % (hex(tip), tiprev))
427 428 for label, node in branches.iteritems():
428 429 f.write("%s %s\n" % (hex(node), label))
429 430 f.rename()
430 431 except (IOError, OSError):
431 432 pass
432 433
433 434 def _updatebranchcache(self, partial, start, end):
434 435 for r in xrange(start, end):
435 436 c = self[r]
436 437 b = c.branch()
437 438 partial[b] = c.node()
438 439
439 440 def lookup(self, key):
440 441 if key == '.':
441 442 return self.dirstate.parents()[0]
442 443 elif key == 'null':
443 444 return nullid
444 445 n = self.changelog._match(key)
445 446 if n:
446 447 return n
447 448 if key in self.tags():
448 449 return self.tags()[key]
449 450 if key in self.branchtags():
450 451 return self.branchtags()[key]
451 452 n = self.changelog._partialmatch(key)
452 453 if n:
453 454 return n
454 455 try:
455 456 if len(key) == 20:
456 457 key = hex(key)
457 458 except:
458 459 pass
459 460 raise repo.RepoError(_("unknown revision '%s'") % key)
460 461
461 462 def local(self):
462 463 return True
463 464
464 465 def join(self, f):
465 466 return os.path.join(self.path, f)
466 467
467 468 def wjoin(self, f):
468 469 return os.path.join(self.root, f)
469 470
470 471 def rjoin(self, f):
471 472 return os.path.join(self.root, util.pconvert(f))
472 473
473 474 def file(self, f):
474 475 if f[0] == '/':
475 476 f = f[1:]
476 477 return filelog.filelog(self.sopener, f)
477 478
478 479 def changectx(self, changeid):
479 480 return self[changeid]
480 481
481 482 def parents(self, changeid=None):
482 483 '''get list of changectxs for parents of changeid'''
483 484 return self[changeid].parents()
484 485
485 486 def filectx(self, path, changeid=None, fileid=None):
486 487 """changeid can be a changeset revision, node, or tag.
487 488 fileid can be a file revision or node."""
488 489 return context.filectx(self, path, changeid, fileid)
489 490
490 491 def getcwd(self):
491 492 return self.dirstate.getcwd()
492 493
493 494 def pathto(self, f, cwd=None):
494 495 return self.dirstate.pathto(f, cwd)
495 496
496 497 def wfile(self, f, mode='r'):
497 498 return self.wopener(f, mode)
498 499
499 500 def _link(self, f):
500 501 return os.path.islink(self.wjoin(f))
501 502
502 503 def _filter(self, filter, filename, data):
503 504 if filter not in self.filterpats:
504 505 l = []
505 506 for pat, cmd in self.ui.configitems(filter):
506 507 mf = util.matcher(self.root, "", [pat], [], [])[1]
507 508 fn = None
508 509 params = cmd
509 510 for name, filterfn in self._datafilters.iteritems():
510 511 if cmd.startswith(name):
511 512 fn = filterfn
512 513 params = cmd[len(name):].lstrip()
513 514 break
514 515 if not fn:
515 516 fn = lambda s, c, **kwargs: util.filter(s, c)
516 517 # Wrap old filters not supporting keyword arguments
517 518 if not inspect.getargspec(fn)[2]:
518 519 oldfn = fn
519 520 fn = lambda s, c, **kwargs: oldfn(s, c)
520 521 l.append((mf, fn, params))
521 522 self.filterpats[filter] = l
522 523
523 524 for mf, fn, cmd in self.filterpats[filter]:
524 525 if mf(filename):
525 526 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
526 527 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
527 528 break
528 529
529 530 return data
530 531
531 532 def adddatafilter(self, name, filter):
532 533 self._datafilters[name] = filter
533 534
534 535 def wread(self, filename):
535 536 if self._link(filename):
536 537 data = os.readlink(self.wjoin(filename))
537 538 else:
538 539 data = self.wopener(filename, 'r').read()
539 540 return self._filter("encode", filename, data)
540 541
541 542 def wwrite(self, filename, data, flags):
542 543 data = self._filter("decode", filename, data)
543 544 try:
544 545 os.unlink(self.wjoin(filename))
545 546 except OSError:
546 547 pass
547 548 if 'l' in flags:
548 549 self.wopener.symlink(data, filename)
549 550 else:
550 551 self.wopener(filename, 'w').write(data)
551 552 if 'x' in flags:
552 553 util.set_flags(self.wjoin(filename), False, True)
553 554
554 555 def wwritedata(self, filename, data):
555 556 return self._filter("decode", filename, data)
556 557
557 558 def transaction(self):
558 559 if self._transref and self._transref():
559 560 return self._transref().nest()
560 561
561 562 # abort here if the journal already exists
562 563 if os.path.exists(self.sjoin("journal")):
563 564 raise repo.RepoError(_("journal already exists - run hg recover"))
564 565
565 566 # save dirstate for rollback
566 567 try:
567 568 ds = self.opener("dirstate").read()
568 569 except IOError:
569 570 ds = ""
570 571 self.opener("journal.dirstate", "w").write(ds)
571 572 self.opener("journal.branch", "w").write(self.dirstate.branch())
572 573
573 574 renames = [(self.sjoin("journal"), self.sjoin("undo")),
574 575 (self.join("journal.dirstate"), self.join("undo.dirstate")),
575 576 (self.join("journal.branch"), self.join("undo.branch"))]
576 577 tr = transaction.transaction(self.ui.warn, self.sopener,
577 578 self.sjoin("journal"),
578 579 aftertrans(renames),
579 580 self._createmode)
580 581 self._transref = weakref.ref(tr)
581 582 return tr
582 583
583 584 def recover(self):
584 585 l = self.lock()
585 586 try:
586 587 if os.path.exists(self.sjoin("journal")):
587 588 self.ui.status(_("rolling back interrupted transaction\n"))
588 589 transaction.rollback(self.sopener, self.sjoin("journal"))
589 590 self.invalidate()
590 591 return True
591 592 else:
592 593 self.ui.warn(_("no interrupted transaction available\n"))
593 594 return False
594 595 finally:
595 596 del l
596 597
597 598 def rollback(self):
598 599 wlock = lock = None
599 600 try:
600 601 wlock = self.wlock()
601 602 lock = self.lock()
602 603 if os.path.exists(self.sjoin("undo")):
603 604 self.ui.status(_("rolling back last transaction\n"))
604 605 transaction.rollback(self.sopener, self.sjoin("undo"))
605 606 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
606 607 try:
607 608 branch = self.opener("undo.branch").read()
608 609 self.dirstate.setbranch(branch)
609 610 except IOError:
610 611 self.ui.warn(_("Named branch could not be reset, "
611 612 "current branch still is: %s\n")
612 613 % util.tolocal(self.dirstate.branch()))
613 614 self.invalidate()
614 615 self.dirstate.invalidate()
615 616 else:
616 617 self.ui.warn(_("no rollback information available\n"))
617 618 finally:
618 619 del lock, wlock
619 620
620 621 def invalidate(self):
621 622 for a in "changelog manifest".split():
622 623 if a in self.__dict__:
623 624 delattr(self, a)
624 625 self.tagscache = None
625 626 self._tagstypecache = None
626 627 self.nodetagscache = None
627 628 self.branchcache = None
628 629 self._ubranchcache = None
629 630 self._branchcachetip = None
630 631
631 632 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
632 633 try:
633 634 l = lock.lock(lockname, 0, releasefn, desc=desc)
634 635 except lock.LockHeld, inst:
635 636 if not wait:
636 637 raise
637 638 self.ui.warn(_("waiting for lock on %s held by %r\n") %
638 639 (desc, inst.locker))
639 640 # default to 600 seconds timeout
640 641 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
641 642 releasefn, desc=desc)
642 643 if acquirefn:
643 644 acquirefn()
644 645 return l
645 646
646 647 def lock(self, wait=True):
647 648 if self._lockref and self._lockref():
648 649 return self._lockref()
649 650
650 651 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
651 652 _('repository %s') % self.origroot)
652 653 self._lockref = weakref.ref(l)
653 654 return l
654 655
655 656 def wlock(self, wait=True):
656 657 if self._wlockref and self._wlockref():
657 658 return self._wlockref()
658 659
659 660 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
660 661 self.dirstate.invalidate, _('working directory of %s') %
661 662 self.origroot)
662 663 self._wlockref = weakref.ref(l)
663 664 return l
664 665
665 666 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
666 667 """
667 668 commit an individual file as part of a larger transaction
668 669 """
669 670
670 671 fn = fctx.path()
671 672 t = fctx.data()
672 673 fl = self.file(fn)
673 674 fp1 = manifest1.get(fn, nullid)
674 675 fp2 = manifest2.get(fn, nullid)
675 676
676 677 meta = {}
677 678 cp = fctx.renamed()
678 679 if cp and cp[0] != fn:
679 680 # Mark the new revision of this file as a copy of another
680 681 # file. This copy data will effectively act as a parent
681 682 # of this new revision. If this is a merge, the first
682 683 # parent will be the nullid (meaning "look up the copy data")
683 684 # and the second one will be the other parent. For example:
684 685 #
685 686 # 0 --- 1 --- 3 rev1 changes file foo
686 687 # \ / rev2 renames foo to bar and changes it
687 688 # \- 2 -/ rev3 should have bar with all changes and
688 689 # should record that bar descends from
689 690 # bar in rev2 and foo in rev1
690 691 #
691 692 # this allows this merge to succeed:
692 693 #
693 694 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
694 695 # \ / merging rev3 and rev4 should use bar@rev2
695 696 # \- 2 --- 4 as the merge base
696 697 #
697 698
698 699 cf = cp[0]
699 700 cr = manifest1.get(cf)
700 701 nfp = fp2
701 702
702 703 if manifest2: # branch merge
703 704 if fp2 == nullid: # copied on remote side
704 705 if fp1 != nullid or cf in manifest2:
705 706 cr = manifest2[cf]
706 707 nfp = fp1
707 708
708 709 # find source in nearest ancestor if we've lost track
709 710 if not cr:
710 711 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
711 712 (fn, cf))
712 713 for a in self['.'].ancestors():
713 714 if cf in a:
714 715 cr = a[cf].filenode()
715 716 break
716 717
717 718 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
718 719 meta["copy"] = cf
719 720 meta["copyrev"] = hex(cr)
720 721 fp1, fp2 = nullid, nfp
721 722 elif fp2 != nullid:
722 723 # is one parent an ancestor of the other?
723 724 fpa = fl.ancestor(fp1, fp2)
724 725 if fpa == fp1:
725 726 fp1, fp2 = fp2, nullid
726 727 elif fpa == fp2:
727 728 fp2 = nullid
728 729
729 730 # is the file unmodified from the parent? report existing entry
730 731 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
731 732 return fp1
732 733
733 734 changelist.append(fn)
734 735 return fl.add(t, meta, tr, linkrev, fp1, fp2)
735 736
736 737 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
737 738 if p1 is None:
738 739 p1, p2 = self.dirstate.parents()
739 740 return self.commit(files=files, text=text, user=user, date=date,
740 741 p1=p1, p2=p2, extra=extra, empty_ok=True)
741 742
742 743 def commit(self, files=None, text="", user=None, date=None,
743 744 match=None, force=False, force_editor=False,
744 745 p1=None, p2=None, extra={}, empty_ok=False):
745 746 wlock = lock = None
746 747 if files:
747 748 files = util.unique(files)
748 749 try:
749 750 wlock = self.wlock()
750 751 lock = self.lock()
751 752 use_dirstate = (p1 is None) # not rawcommit
752 753
753 754 if use_dirstate:
754 755 p1, p2 = self.dirstate.parents()
755 756 update_dirstate = True
756 757
757 758 if (not force and p2 != nullid and
758 759 (match and (match.files() or match.anypats()))):
759 760 raise util.Abort(_('cannot partially commit a merge '
760 761 '(do not specify files or patterns)'))
761 762
762 763 if files:
763 764 modified, removed = [], []
764 765 for f in files:
765 766 s = self.dirstate[f]
766 767 if s in 'nma':
767 768 modified.append(f)
768 769 elif s == 'r':
769 770 removed.append(f)
770 771 else:
771 772 self.ui.warn(_("%s not tracked!\n") % f)
772 773 changes = [modified, [], removed, [], []]
773 774 else:
774 775 changes = self.status(match=match)
775 776 else:
776 777 p1, p2 = p1, p2 or nullid
777 778 update_dirstate = (self.dirstate.parents()[0] == p1)
778 779 changes = [files, [], [], [], []]
779 780
781 ms = merge_.mergestate(self)
782 for f in changes[0]:
783 if f in ms and ms[f] == 'u':
784 raise util.Abort(_("unresolved merge conflicts "
785 "(see hg resolve)"))
780 786 wctx = context.workingctx(self, (p1, p2), text, user, date,
781 787 extra, changes)
782 788 return self._commitctx(wctx, force, force_editor, empty_ok,
783 789 use_dirstate, update_dirstate)
784 790 finally:
785 791 del lock, wlock
786 792
787 793 def commitctx(self, ctx):
788 794 wlock = lock = None
789 795 try:
790 796 wlock = self.wlock()
791 797 lock = self.lock()
792 798 return self._commitctx(ctx, force=True, force_editor=False,
793 799 empty_ok=True, use_dirstate=False,
794 800 update_dirstate=False)
795 801 finally:
796 802 del lock, wlock
797 803
798 804 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
799 805 use_dirstate=True, update_dirstate=True):
800 806 tr = None
801 807 valid = 0 # don't save the dirstate if this isn't set
802 808 try:
803 809 commit = util.sort(wctx.modified() + wctx.added())
804 810 remove = wctx.removed()
805 811 extra = wctx.extra().copy()
806 812 branchname = extra['branch']
807 813 user = wctx.user()
808 814 text = wctx.description()
809 815
810 816 p1, p2 = [p.node() for p in wctx.parents()]
811 817 c1 = self.changelog.read(p1)
812 818 c2 = self.changelog.read(p2)
813 819 m1 = self.manifest.read(c1[0]).copy()
814 820 m2 = self.manifest.read(c2[0])
815 821
816 822 if use_dirstate:
817 823 oldname = c1[5].get("branch") # stored in UTF-8
818 824 if (not commit and not remove and not force and p2 == nullid
819 825 and branchname == oldname):
820 826 self.ui.status(_("nothing changed\n"))
821 827 return None
822 828
823 829 xp1 = hex(p1)
824 830 if p2 == nullid: xp2 = ''
825 831 else: xp2 = hex(p2)
826 832
827 833 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
828 834
829 835 tr = self.transaction()
830 836 trp = weakref.proxy(tr)
831 837
832 838 # check in files
833 839 new = {}
834 840 changed = []
835 841 linkrev = len(self)
836 842 for f in commit:
837 843 self.ui.note(f + "\n")
838 844 try:
839 845 fctx = wctx.filectx(f)
840 846 newflags = fctx.flags()
841 847 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
842 848 if ((not changed or changed[-1] != f) and
843 849 m2.get(f) != new[f]):
844 850 # mention the file in the changelog if some
845 851 # flag changed, even if there was no content
846 852 # change.
847 853 if m1.flags(f) != newflags:
848 854 changed.append(f)
849 855 m1.set(f, newflags)
850 856 if use_dirstate:
851 857 self.dirstate.normal(f)
852 858
853 859 except (OSError, IOError):
854 860 if use_dirstate:
855 861 self.ui.warn(_("trouble committing %s!\n") % f)
856 862 raise
857 863 else:
858 864 remove.append(f)
859 865
860 866 # update manifest
861 867 m1.update(new)
862 868 removed = []
863 869
864 870 for f in util.sort(remove):
865 871 if f in m1:
866 872 del m1[f]
867 873 removed.append(f)
868 874 elif f in m2:
869 875 removed.append(f)
870 876 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
871 877 (new, removed))
872 878
873 879 # add changeset
874 880 if (not empty_ok and not text) or force_editor:
875 881 edittext = []
876 882 if text:
877 883 edittext.append(text)
878 884 edittext.append("")
879 885 edittext.append(_("HG: Enter commit message."
880 886 " Lines beginning with 'HG:' are removed."))
881 887 edittext.append("HG: --")
882 888 edittext.append("HG: user: %s" % user)
883 889 if p2 != nullid:
884 890 edittext.append("HG: branch merge")
885 891 if branchname:
886 892 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
887 893 edittext.extend(["HG: changed %s" % f for f in changed])
888 894 edittext.extend(["HG: removed %s" % f for f in removed])
889 895 if not changed and not remove:
890 896 edittext.append("HG: no files changed")
891 897 edittext.append("")
892 898 # run editor in the repository root
893 899 olddir = os.getcwd()
894 900 os.chdir(self.root)
895 901 text = self.ui.edit("\n".join(edittext), user)
896 902 os.chdir(olddir)
897 903
898 904 lines = [line.rstrip() for line in text.rstrip().splitlines()]
899 905 while lines and not lines[0]:
900 906 del lines[0]
901 907 if not lines and use_dirstate:
902 908 raise util.Abort(_("empty commit message"))
903 909 text = '\n'.join(lines)
904 910
905 911 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
906 912 user, wctx.date(), extra)
907 913 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
908 914 parent2=xp2)
909 915 tr.close()
910 916
911 917 if self.branchcache:
912 918 self.branchtags()
913 919
914 920 if use_dirstate or update_dirstate:
915 921 self.dirstate.setparents(n)
916 922 if use_dirstate:
917 923 for f in removed:
918 924 self.dirstate.forget(f)
919 925 valid = 1 # our dirstate updates are complete
920 926
921 927 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
922 928 return n
923 929 finally:
924 930 if not valid: # don't save our updated dirstate
925 931 self.dirstate.invalidate()
926 932 del tr
927 933
928 934 def walk(self, match, node=None):
929 935 '''
930 936 walk recursively through the directory tree or a given
931 937 changeset, finding all files matched by the match
932 938 function
933 939 '''
934 940 return self[node].walk(match)
935 941
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists of file names:
        (modified, added, removed, deleted, unknown, ignored, clean).
        The ignored/clean/unknown lists are only populated when the
        corresponding flag is set.
        """

        def mfmatches(ctx):
            # restrict a revision's manifest to the files selected by match
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        ctx1 = self[node1]
        ctx2 = self[node2]
        # working: ctx2 is the working directory
        working = ctx2 == self[None]
        # parentworking: comparing working dir against its own first parent,
        # so the dirstate alone is (mostly) authoritative
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            # NOTE: 'cmp' here shadows the builtin; it is the list of files
            # whose state is uncertain and needs a content comparison
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in cmp:
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            # best-effort: if the wlock is unavailable, skip
                            # the dirstate update rather than block or fail
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except lock.LockException:
                            pass
                    finally:
                        del wlock

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # walk mf2, classifying each file against mf1; entries left in
            # mf1 afterwards are the removed files
            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r
1023 1029
1024 1030 def add(self, list):
1025 1031 wlock = self.wlock()
1026 1032 try:
1027 1033 rejected = []
1028 1034 for f in list:
1029 1035 p = self.wjoin(f)
1030 1036 try:
1031 1037 st = os.lstat(p)
1032 1038 except:
1033 1039 self.ui.warn(_("%s does not exist!\n") % f)
1034 1040 rejected.append(f)
1035 1041 continue
1036 1042 if st.st_size > 10000000:
1037 1043 self.ui.warn(_("%s: files over 10MB may cause memory and"
1038 1044 " performance problems\n"
1039 1045 "(use 'hg revert %s' to unadd the file)\n")
1040 1046 % (f, f))
1041 1047 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1042 1048 self.ui.warn(_("%s not added: only files and symlinks "
1043 1049 "supported currently\n") % f)
1044 1050 rejected.append(p)
1045 1051 elif self.dirstate[f] in 'amn':
1046 1052 self.ui.warn(_("%s already tracked!\n") % f)
1047 1053 elif self.dirstate[f] == 'r':
1048 1054 self.dirstate.normallookup(f)
1049 1055 else:
1050 1056 self.dirstate.add(f)
1051 1057 return rejected
1052 1058 finally:
1053 1059 del wlock
1054 1060
1055 1061 def forget(self, list):
1056 1062 wlock = self.wlock()
1057 1063 try:
1058 1064 for f in list:
1059 1065 if self.dirstate[f] != 'a':
1060 1066 self.ui.warn(_("%s not added!\n") % f)
1061 1067 else:
1062 1068 self.dirstate.forget(f)
1063 1069 finally:
1064 1070 del wlock
1065 1071
    def remove(self, list, unlink=False):
        """mark the given files as removed in the dirstate

        If unlink is True, the files are also deleted from the working
        directory first.
        """
        wlock = None
        try:
            if unlink:
                # delete before taking the wlock; a file that is already
                # gone is fine (ENOENT), any other failure is re-raised
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    # the unlink above did not make the file go away
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    # added but never committed: just forget it
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock
1088 1094
1089 1095 def undelete(self, list):
1090 1096 wlock = None
1091 1097 try:
1092 1098 manifests = [self.manifest.read(self.changelog.read(p)[0])
1093 1099 for p in self.dirstate.parents() if p != nullid]
1094 1100 wlock = self.wlock()
1095 1101 for f in list:
1096 1102 if self.dirstate[f] != 'r':
1097 1103 self.ui.warn("%s not removed!\n" % f)
1098 1104 else:
1099 1105 m = f in manifests[0] and manifests[0] or manifests[1]
1100 1106 t = self.file(f).read(m[f])
1101 1107 self.wwrite(f, t, m.flags(f))
1102 1108 self.dirstate.normal(f)
1103 1109 finally:
1104 1110 del wlock
1105 1111
1106 1112 def copy(self, source, dest):
1107 1113 wlock = None
1108 1114 try:
1109 1115 p = self.wjoin(dest)
1110 1116 if not (os.path.exists(p) or os.path.islink(p)):
1111 1117 self.ui.warn(_("%s does not exist!\n") % dest)
1112 1118 elif not (os.path.isfile(p) or os.path.islink(p)):
1113 1119 self.ui.warn(_("copy failed: %s is not a file or a "
1114 1120 "symbolic link\n") % dest)
1115 1121 else:
1116 1122 wlock = self.wlock()
1117 1123 if dest not in self.dirstate:
1118 1124 self.dirstate.add(dest)
1119 1125 self.dirstate.copy(source, dest)
1120 1126 finally:
1121 1127 del wlock
1122 1128
1123 1129 def heads(self, start=None):
1124 1130 heads = self.changelog.heads(start)
1125 1131 # sort the output in rev descending order
1126 1132 heads = [(-self.changelog.rev(h), h) for h in heads]
1127 1133 return [n for (r, n) in util.sort(heads)]
1128 1134
    def branchheads(self, branch=None, start=None):
        '''return the head nodes of the named branch (default: the working
        directory's branch), optionally limited to descendants of start'''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self[rev].branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads
1176 1182
1177 1183 def branches(self, nodes):
1178 1184 if not nodes:
1179 1185 nodes = [self.changelog.tip()]
1180 1186 b = []
1181 1187 for n in nodes:
1182 1188 t = n
1183 1189 while 1:
1184 1190 p = self.changelog.parents(n)
1185 1191 if p[1] != nullid or p[0] == nullid:
1186 1192 b.append((t, n, p[0], p[1]))
1187 1193 break
1188 1194 n = p[0]
1189 1195 return b
1190 1196
1191 1197 def between(self, pairs):
1192 1198 r = []
1193 1199
1194 1200 for top, bottom in pairs:
1195 1201 n, l, i = top, [], 0
1196 1202 f = 1
1197 1203
1198 1204 while n != bottom:
1199 1205 p = self.changelog.parents(n)[0]
1200 1206 if i == f:
1201 1207 l.append(n)
1202 1208 f = f * 2
1203 1209 n = p
1204 1210 i += 1
1205 1211
1206 1212 r.append(l)
1207 1213
1208 1214 return r
1209 1215
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []   # incomplete branches scheduled for binary search
        fetch = {}    # earliest unknown changesets (the result set)
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        # local repo is empty: everything the remote has is missing
        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                # ask the remote about unknown parents, 10 at a time
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1350 1356
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            # no prior knowledge of the remote: run discovery to fill base
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1398 1404
1399 1405 def pull(self, remote, heads=None, force=False):
1400 1406 lock = self.lock()
1401 1407 try:
1402 1408 fetch = self.findincoming(remote, heads=heads, force=force)
1403 1409 if fetch == [nullid]:
1404 1410 self.ui.status(_("requesting all changes\n"))
1405 1411
1406 1412 if not fetch:
1407 1413 self.ui.status(_("no changes found\n"))
1408 1414 return 0
1409 1415
1410 1416 if heads is None:
1411 1417 cg = remote.changegroup(fetch, 'pull')
1412 1418 else:
1413 1419 if 'changegroupsubset' not in remote.capabilities:
1414 1420 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1415 1421 cg = remote.changegroupsubset(fetch, heads, 'pull')
1416 1422 return self.addchangegroup(cg, 'pull', remote.url())
1417 1423 finally:
1418 1424 del lock
1419 1425
1420 1426 def push(self, remote, force=False, revs=None):
1421 1427 # there are two ways to push to remote repo:
1422 1428 #
1423 1429 # addchangegroup assumes local user can lock remote
1424 1430 # repo (local filesystem, old ssh servers).
1425 1431 #
1426 1432 # unbundle assumes local user cannot lock remote repo (new ssh
1427 1433 # servers, http servers).
1428 1434
1429 1435 if remote.capable('unbundle'):
1430 1436 return self.push_unbundle(remote, force, revs)
1431 1437 return self.push_addchangegroup(remote, force, revs)
1432 1438
    def prepush(self, remote, force, revs):
        '''Analyze what would be pushed to remote.

        Returns (changegroup, remote_heads) when there is something to
        push, or (None, status_code) when nothing should be sent: code 1
        means "no changes found", code 0 means the push was refused
        because it would create new remote heads.
        '''
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # empty remote: pushing anything is fine
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # no outgoing head descends from r, so r
                            # stays a head after the push
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1488 1494
1489 1495 def push_addchangegroup(self, remote, force, revs):
1490 1496 lock = remote.lock()
1491 1497 try:
1492 1498 ret = self.prepush(remote, force, revs)
1493 1499 if ret[0] is not None:
1494 1500 cg, remote_heads = ret
1495 1501 return remote.addchangegroup(cg, 'push', self.url())
1496 1502 return ret[1]
1497 1503 finally:
1498 1504 del lock
1499 1505
1500 1506 def push_unbundle(self, remote, force, revs):
1501 1507 # local repo finds heads on server, finds out what revs it
1502 1508 # must push. once revs transferred, if server finds it has
1503 1509 # different heads (someone else won commit/push race), server
1504 1510 # aborts.
1505 1511
1506 1512 ret = self.prepush(remote, force, revs)
1507 1513 if ret[0] is not None:
1508 1514 cg, remote_heads = ret
1509 1515 if force: remote_heads = ['force']
1510 1516 return remote.unbundle(cg, remote_heads, 'push')
1511 1517 return ret[1]
1512 1518
1513 1519 def changegroupinfo(self, nodes, source):
1514 1520 if self.ui.verbose or source == 'bundle':
1515 1521 self.ui.status(_("%d changesets found\n") % len(nodes))
1516 1522 if self.ui.debugflag:
1517 1523 self.ui.debug(_("List of changesets:\n"))
1518 1524 for node in nodes:
1519 1525 self.ui.debug("%s\n" % hex(node))
1520 1526
    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.

        Returns a util.chunkbuffer wrapping the generated group stream.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1815 1821
1816 1822 def changegroup(self, basenodes, source):
1817 1823 """Generate a changegroup of all nodes that we have that a recipient
1818 1824 doesn't.
1819 1825
1820 1826 This is much easier than the previous function as we can assume that
1821 1827 the recipient has any changenode we aren't sending them."""
1822 1828
1823 1829 self.hook('preoutgoing', throw=True, source=source)
1824 1830
1825 1831 cl = self.changelog
1826 1832 nodes = cl.nodesbetween(basenodes, None)[0]
1827 1833 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1828 1834 self.changegroupinfo(nodes, source)
1829 1835
1830 1836 def identity(x):
1831 1837 return x
1832 1838
1833 1839 def gennodelst(log):
1834 1840 for r in log:
1835 1841 n = log.node(r)
1836 1842 if log.linkrev(n) in revset:
1837 1843 yield n
1838 1844
1839 1845 def changed_file_collector(changedfileset):
1840 1846 def collect_changed_files(clnode):
1841 1847 c = cl.read(clnode)
1842 1848 for fname in c[3]:
1843 1849 changedfileset[fname] = 1
1844 1850 return collect_changed_files
1845 1851
1846 1852 def lookuprevlink_func(revlog):
1847 1853 def lookuprevlink(n):
1848 1854 return cl.node(revlog.linkrev(n))
1849 1855 return lookuprevlink
1850 1856
1851 1857 def gengroup():
1852 1858 # construct a list of all changed files
1853 1859 changedfiles = {}
1854 1860
1855 1861 for chnk in cl.group(nodes, identity,
1856 1862 changed_file_collector(changedfiles)):
1857 1863 yield chnk
1858 1864
1859 1865 mnfst = self.manifest
1860 1866 nodeiter = gennodelst(mnfst)
1861 1867 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1862 1868 yield chnk
1863 1869
1864 1870 for fname in util.sort(changedfiles):
1865 1871 filerevlog = self.file(fname)
1866 1872 if not len(filerevlog):
1867 1873 raise util.Abort(_("empty or missing revlog for %s") % fname)
1868 1874 nodeiter = gennodelst(filerevlog)
1869 1875 nodeiter = list(nodeiter)
1870 1876 if nodeiter:
1871 1877 yield changegroup.chunkheader(len(fname))
1872 1878 yield fname
1873 1879 lookup = lookuprevlink_func(filerevlog)
1874 1880 for chnk in filerevlog.group(nodeiter, lookup):
1875 1881 yield chnk
1876 1882
1877 1883 yield changegroup.closechunk()
1878 1884
1879 1885 if nodes:
1880 1886 self.hook('outgoing', node=hex(nodes[0]), source=source)
1881 1887
1882 1888 return util.chunkbuffer(gengroup())
1883 1889
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        source: a file-like object delivering changegroup chunks
        srctype: origin tag ('push', 'pull', ...) passed to hooks
        url: source location passed to hooks
        emptyok: when True, an empty changelog group is not an error

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # Called once per incoming changeset: report it and return the
            # revision number it will receive (the next changelog slot).
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            # Map a changelog node to its (now assigned) revision number.
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = len(cl) - 1  # last revision before the pull
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = len(cl) - 1  # last revision after the pull
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # each per-file group is preceded by a chunk naming the file;
                # an empty chunk marks the end of the stream
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                # pretxnchangegroup may still veto the whole transaction
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1986 1992
1987 1993
1988 1994 def stream_in(self, remote):
1989 1995 fp = remote.stream_out()
1990 1996 l = fp.readline()
1991 1997 try:
1992 1998 resp = int(l)
1993 1999 except ValueError:
1994 2000 raise util.UnexpectedOutput(
1995 2001 _('Unexpected response from remote server:'), l)
1996 2002 if resp == 1:
1997 2003 raise util.Abort(_('operation forbidden by server'))
1998 2004 elif resp == 2:
1999 2005 raise util.Abort(_('locking the remote repository failed'))
2000 2006 elif resp != 0:
2001 2007 raise util.Abort(_('the server sent an unknown error code'))
2002 2008 self.ui.status(_('streaming all changes\n'))
2003 2009 l = fp.readline()
2004 2010 try:
2005 2011 total_files, total_bytes = map(int, l.split(' ', 1))
2006 2012 except (ValueError, TypeError):
2007 2013 raise util.UnexpectedOutput(
2008 2014 _('Unexpected response from remote server:'), l)
2009 2015 self.ui.status(_('%d files to transfer, %s of data\n') %
2010 2016 (total_files, util.bytecount(total_bytes)))
2011 2017 start = time.time()
2012 2018 for i in xrange(total_files):
2013 2019 # XXX doesn't support '\n' or '\r' in filenames
2014 2020 l = fp.readline()
2015 2021 try:
2016 2022 name, size = l.split('\0', 1)
2017 2023 size = int(size)
2018 2024 except ValueError, TypeError:
2019 2025 raise util.UnexpectedOutput(
2020 2026 _('Unexpected response from remote server:'), l)
2021 2027 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2022 2028 ofp = self.sopener(name, 'w')
2023 2029 for chunk in util.filechunkiter(fp, limit=size):
2024 2030 ofp.write(chunk)
2025 2031 ofp.close()
2026 2032 elapsed = time.time() - start
2027 2033 if elapsed <= 0:
2028 2034 elapsed = 0.001
2029 2035 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2030 2036 (util.bytecount(total_bytes), elapsed,
2031 2037 util.bytecount(total_bytes / elapsed)))
2032 2038 self.invalidate()
2033 2039 return len(self.heads()) + 1
2034 2040
2035 2041 def clone(self, remote, heads=[], stream=False):
2036 2042 '''clone remote repository.
2037 2043
2038 2044 keyword arguments:
2039 2045 heads: list of revs to clone (forces use of pull)
2040 2046 stream: use streaming clone if possible'''
2041 2047
2042 2048 # now, all clients that can request uncompressed clones can
2043 2049 # read repo formats supported by all servers that can serve
2044 2050 # them.
2045 2051
2046 2052 # if revlog format changes, client will have to check version
2047 2053 # and format flags on "stream" capability, and use
2048 2054 # uncompressed only if compatible.
2049 2055
2050 2056 if stream and not heads and remote.capable('stream'):
2051 2057 return self.stream_in(remote)
2052 2058 return self.pull(remote, heads)
2053 2059
2054 2060 def storefiles(self):
2055 2061 '''get all *.i and *.d files in the store
2056 2062
2057 2063 Returns (list of (filename, size), total_bytes)'''
2058 2064
2059 2065 lock = None
2060 2066 try:
2061 2067 self.ui.debug('scanning\n')
2062 2068 entries = []
2063 2069 total_bytes = 0
2064 2070 # get consistent snapshot of repo, lock during scan
2065 2071 lock = self.lock()
2066 2072 for name, size in self.store.walk():
2067 2073 entries.append((name, size))
2068 2074 total_bytes += size
2069 2075 return entries, total_bytes
2070 2076 finally:
2071 2077 del lock
2072 2078
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callable that performs the queued (src, dest) renames.

    *files* is copied eagerly into plain tuples so the returned closure
    holds no reference back to the transaction that created it.
    """
    pending = [tuple(pair) for pair in files]

    def run_renames():
        for src, dest in pending:
            util.rename(src, dest)
    return run_renames
2080 2086
def instance(ui, path, create):
    """Module-level factory: open (or create) the local repository at *path*,
    stripping any leading 'file' scheme from the location first."""
    fspath = util.drop_scheme('file', path)
    return localrepository(ui, fspath, create)
2083 2089
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
@@ -1,48 +1,49 b''
1 1 #!/bin/sh
2 2
3 3 hg init a
4 4 cd a
5 5 echo a > a
6 6 hg add -n
7 7 hg st
8 8 hg add
9 9 hg st
10 10
11 11 echo b > b
12 12 hg add -n b
13 13 hg st
14 14 hg add b || echo "failed to add b"
15 15 hg st
16 16 echo % should fail
17 17 hg add b
18 18 hg st
19 19
20 20 hg ci -m 0
21 21 echo % should fail
22 22 hg add a
23 23
24 24 echo aa > a
25 25 hg ci -m 1
26 26 hg up 0
27 27 echo aaa > a
28 28 hg ci -m 2
29 29
30 30 hg merge
31 31 hg st
32 32 echo % should fail
33 33 hg add a
34 34 hg st
35 hg resolve -m a
35 36 hg ci -m merge
36 37
37 38 echo % issue683
38 39 hg rm a
39 40 hg st
40 41 echo a > a
41 42 hg add a
42 43 hg st
43 44
44 45 hg add c && echo "unexpected addition of missing file"
45 46 echo c > c
46 47 hg add d c && echo "unexpected addition of missing file"
47 48 hg st
48 49
@@ -1,135 +1,136 b''
#!/bin/sh
# Test the convert extension's Subversion sink: convert a Mercurial repo
# to svn and verify adds, modifications, renames, copies, removals,
# executable bits, and branchy/merged history survive the round trip.

"$TESTDIR/hghave" svn svn-bindings || exit 80

# normalize Windows path separators in svn output
fixpath()
{
    tr '\\' /
}

# update the given svn working copy and dump its status and (limited) log
svnupanddisplay()
{
    (
        cd $1;
        svn up;
        svn st -v | fixpath
        limit=''
        if [ $2 -gt 0 ]; then
            limit="--limit=$2"
        fi
        svn log --xml -v $limit | fixpath | sed 's,<date>.*,<date/>,'
    )
}

echo "[extensions]" >> $HGRCPATH
echo "convert = " >> $HGRCPATH

hg init a

echo a > a/a
mkdir -p a/d1/d2
echo b > a/d1/d2/b
echo % add
hg --cwd a ci -d '0 0' -A -m 'add a file'

"$TESTDIR/svn-safe-append.py" a a/a
echo % modify
hg --cwd a ci -d '1 0' -m 'modify a file'
hg --cwd a tip -q

hg convert -d svn a
svnupanddisplay a-hg-wc 2
ls a a-hg-wc
cmp a/a a-hg-wc/a && echo same || echo different

hg --cwd a mv a b
echo % rename
hg --cwd a ci -d '2 0' -m 'rename a file'
hg --cwd a tip -q

hg convert -d svn a
svnupanddisplay a-hg-wc 1
ls a a-hg-wc

hg --cwd a cp b c
echo % copy
hg --cwd a ci -d '3 0' -m 'copy a file'
hg --cwd a tip -q

hg convert -d svn a
svnupanddisplay a-hg-wc 1
ls a a-hg-wc

hg --cwd a rm b
echo % remove
hg --cwd a ci -d '4 0' -m 'remove a file'
hg --cwd a tip -q

hg convert -d svn a
svnupanddisplay a-hg-wc 1
ls a a-hg-wc

chmod +x a/c
echo % executable
hg --cwd a ci -d '5 0' -m 'make a file executable'
hg --cwd a tip -q

hg convert -d svn a
svnupanddisplay a-hg-wc 1
test -x a-hg-wc/c && echo executable || echo not executable

echo % executable in new directory

rm -rf a a-hg a-hg-wc
hg init a

mkdir a/d1
echo a > a/d1/a
chmod +x a/d1/a
hg --cwd a ci -d '0 0' -A -m 'add executable file in new directory'

hg convert -d svn a
svnupanddisplay a-hg-wc 1
test -x a-hg-wc/d1/a && echo executable || echo not executable

echo % copy to new directory

mkdir a/d2
hg --cwd a cp d1/a d2/a
hg --cwd a ci -d '1 0' -A -m 'copy file to new directory'

hg convert -d svn a
svnupanddisplay a-hg-wc 1

echo % branchy history

hg init b
echo base > b/b
hg --cwd b ci -d '0 0' -Ambase

"$TESTDIR/svn-safe-append.py" left-1 b/b
echo left-1 > b/left-1
hg --cwd b ci -d '1 0' -Amleft-1

"$TESTDIR/svn-safe-append.py" left-2 b/b
echo left-2 > b/left-2
hg --cwd b ci -d '2 0' -Amleft-2

hg --cwd b up 0

"$TESTDIR/svn-safe-append.py" right-1 b/b
echo right-1 > b/right-1
hg --cwd b ci -d '3 0' -Amright-1

"$TESTDIR/svn-safe-append.py" right-2 b/b
echo right-2 > b/right-2
hg --cwd b ci -d '4 0' -Amright-2

hg --cwd b up -C 2
hg --cwd b merge
hg --cwd b revert -r 2 b
# commit of a merge now requires the conflicted file to be marked resolved
hg resolve -m b
hg --cwd b ci -d '5 0' -m 'merge'

hg convert -d svn b
echo % expect 4 changes
svnupanddisplay b-hg-wc 0
@@ -1,66 +1,67 b''
1 1 #!/bin/sh
2 2
3 3 # initial
4 4 hg init test-a
5 5 cd test-a
6 6 cat >test.txt <<"EOF"
7 7 1
8 8 2
9 9 3
10 10 EOF
11 11 hg add test.txt
12 12 hg commit -m "Initial" -d "1000000 0"
13 13
14 14 # clone
15 15 cd ..
16 16 hg clone test-a test-b
17 17
18 18 # change test-a
19 19 cd test-a
20 20 cat >test.txt <<"EOF"
21 21 one
22 22 two
23 23 three
24 24 EOF
25 25 hg commit -m "Numbers as words" -d "1000000 0"
26 26
27 27 # change test-b
28 28 cd ../test-b
29 29 cat >test.txt <<"EOF"
30 30 1
31 31 2.5
32 32 3
33 33 EOF
34 34 hg commit -m "2 -> 2.5" -d "1000000 0"
35 35
36 36 # now pull and merge from test-a
37 37 hg pull ../test-a
38 38 hg merge
39 39 # resolve conflict
40 40 cat >test.txt <<"EOF"
41 41 one
42 42 two-point-five
43 43 three
44 44 EOF
45 45 rm -f *.orig
46 hg resolve -m test.txt
46 47 hg commit -m "Merge 1" -d "1000000 0"
47 48
48 49 # change test-a again
49 50 cd ../test-a
50 51 cat >test.txt <<"EOF"
51 52 one
52 53 two-point-one
53 54 three
54 55 EOF
55 56 hg commit -m "two -> two-point-one" -d "1000000 0"
56 57
57 58 # pull and merge from test-a again
58 59 cd ../test-b
59 60 hg pull ../test-a
60 61 hg merge --debug
61 62
62 63 cat test.txt
63 64
64 65 hg debugindex .hg/store/data/test.txt.i
65 66
66 67 hg log
General Comments 0
You need to be logged in to leave comments. Login now