##// END OF EJS Templates
document and fix findincoming...
Benoit Boissinot -
r2339:11422943 default
parent child Browse files
Show More
@@ -0,0 +1,49 b''
1 #!/bin/sh
2 #
3 # A B
4 #
5 # 3 4 3
6 # |\/| |\
7 # |/\| | \
8 # 1 2 1 2
9 # \ / \ /
10 # 0 0
11 #
12 # if the result of the merge of 1 and 2
13 # is the same in 3 and 4, no new manifest
14 # will be created and the manifest group
15 # will be empty during the pull
16 #
17 # (plus we test a failure where outgoing
18 # wrongly reported the number of csets)
19 #
20
21 hg init a
22 cd a
23 touch init
24 hg ci -A -m 0 -d "1000000 0"
25 touch x y
26 hg ci -A -m 1 -d "1000000 0"
27 hg update 0
28 touch x y
29 hg ci -A -m 2 -d "1000000 0"
30 hg merge 1
31 hg ci -A -m m1 -d "1000000 0"
32 #hg log
33 #hg debugindex .hg/00manifest.i
34 hg update -C 1
35 hg merge 2
36 hg ci -A -m m2 -d "1000000 0"
37 #hg log
38 #hg debugindex .hg/00manifest.i
39
40 cd ..
41 hg clone -r 3 a b
42 hg clone -r 4 a c
43 hg -R a outgoing b
44 hg -R a outgoing c
45 hg -R b outgoing c
46 hg -R c outgoing b
47
48 hg -R b pull a
49 hg -R c pull a
@@ -0,0 +1,72 b''
1 adding init
2 adding x
3 adding y
4 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
5 adding x
6 adding y
7 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
8 (branch merge, don't forget to commit)
9 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
10 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
11 (branch merge, don't forget to commit)
12 requesting all changes
13 adding changesets
14 adding manifests
15 adding file changes
16 added 4 changesets with 3 changes to 3 files
17 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
18 requesting all changes
19 adding changesets
20 adding manifests
21 adding file changes
22 added 4 changesets with 3 changes to 3 files
23 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 searching for changes
25 changeset: 4:fdb3c546e859
26 tag: tip
27 parent: 1:1f703b3fcbc6
28 parent: 2:de997049e034
29 user: test
30 date: Mon Jan 12 13:46:40 1970 +0000
31 summary: m2
32
33 searching for changes
34 changeset: 3:f40f830c0024
35 parent: 2:de997049e034
36 parent: 1:1f703b3fcbc6
37 user: test
38 date: Mon Jan 12 13:46:40 1970 +0000
39 summary: m1
40
41 searching for changes
42 changeset: 3:f40f830c0024
43 tag: tip
44 parent: 2:de997049e034
45 parent: 1:1f703b3fcbc6
46 user: test
47 date: Mon Jan 12 13:46:40 1970 +0000
48 summary: m1
49
50 searching for changes
51 changeset: 3:fdb3c546e859
52 tag: tip
53 parent: 1:1f703b3fcbc6
54 parent: 2:de997049e034
55 user: test
56 date: Mon Jan 12 13:46:40 1970 +0000
57 summary: m2
58
59 pulling from a
60 searching for changes
61 adding changesets
62 adding manifests
63 adding file changes
64 added 1 changesets with 0 changes to 0 files (+1 heads)
65 (run 'hg heads' to see heads, 'hg merge' to merge)
66 pulling from a
67 searching for changes
68 adding changesets
69 adding manifests
70 adding file changes
71 added 1 changesets with 0 changes to 0 files (+1 heads)
72 (run 'hg heads' to see heads, 'hg merge' to merge)
@@ -1,2108 +1,2119 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "appendfile changegroup")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "revlog")
16 16
17 17 class localrepository(object):
18 18 def __del__(self):
19 19 self.transhandle = None
20 20 def __init__(self, parentui, path=None, create=0):
21 21 if not path:
22 22 p = os.getcwd()
23 23 while not os.path.isdir(os.path.join(p, ".hg")):
24 24 oldp = p
25 25 p = os.path.dirname(p)
26 26 if p == oldp:
27 27 raise repo.RepoError(_("no repo found"))
28 28 path = p
29 29 self.path = os.path.join(path, ".hg")
30 30
31 31 if not create and not os.path.isdir(self.path):
32 32 raise repo.RepoError(_("repository %s not found") % path)
33 33
34 34 self.root = os.path.abspath(path)
35 35 self.origroot = path
36 36 self.ui = ui.ui(parentui=parentui)
37 37 self.opener = util.opener(self.path)
38 38 self.wopener = util.opener(self.root)
39 39
40 40 try:
41 41 self.ui.readconfig(self.join("hgrc"), self.root)
42 42 except IOError:
43 43 pass
44 44
45 45 v = self.ui.revlogopts
46 46 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
47 47 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
48 48 fl = v.get('flags', None)
49 49 flags = 0
50 50 if fl != None:
51 51 for x in fl.split():
52 52 flags |= revlog.flagstr(x)
53 53 elif self.revlogv1:
54 54 flags = revlog.REVLOG_DEFAULT_FLAGS
55 55
56 56 v = self.revlogversion | flags
57 57 self.manifest = manifest.manifest(self.opener, v)
58 58 self.changelog = changelog.changelog(self.opener, v)
59 59
60 60 # the changelog might not have the inline index flag
61 61 # on. If the format of the changelog is the same as found in
62 62 # .hgrc, apply any flags found in the .hgrc as well.
63 63 # Otherwise, just version from the changelog
64 64 v = self.changelog.version
65 65 if v == self.revlogversion:
66 66 v |= flags
67 67 self.revlogversion = v
68 68
69 69 self.tagscache = None
70 70 self.nodetagscache = None
71 71 self.encodepats = None
72 72 self.decodepats = None
73 73 self.transhandle = None
74 74
75 75 if create:
76 76 os.mkdir(self.path)
77 77 os.mkdir(self.join("data"))
78 78
79 79 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
80 80
81 81 def hook(self, name, throw=False, **args):
82 82 def callhook(hname, funcname):
83 83 '''call python hook. hook is callable object, looked up as
84 84 name in python module. if callable returns "true", hook
85 85 fails, else passes. if hook raises exception, treated as
86 86 hook failure. exception propagates if throw is "true".
87 87
88 88 reason for "true" meaning "hook failed" is so that
89 89 unmodified commands (e.g. mercurial.commands.update) can
90 90 be run as hooks without wrappers to convert return values.'''
91 91
92 92 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
93 93 d = funcname.rfind('.')
94 94 if d == -1:
95 95 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
96 96 % (hname, funcname))
97 97 modname = funcname[:d]
98 98 try:
99 99 obj = __import__(modname)
100 100 except ImportError:
101 101 raise util.Abort(_('%s hook is invalid '
102 102 '(import of "%s" failed)') %
103 103 (hname, modname))
104 104 try:
105 105 for p in funcname.split('.')[1:]:
106 106 obj = getattr(obj, p)
107 107 except AttributeError, err:
108 108 raise util.Abort(_('%s hook is invalid '
109 109 '("%s" is not defined)') %
110 110 (hname, funcname))
111 111 if not callable(obj):
112 112 raise util.Abort(_('%s hook is invalid '
113 113 '("%s" is not callable)') %
114 114 (hname, funcname))
115 115 try:
116 116 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
117 117 except (KeyboardInterrupt, util.SignalInterrupt):
118 118 raise
119 119 except Exception, exc:
120 120 if isinstance(exc, util.Abort):
121 121 self.ui.warn(_('error: %s hook failed: %s\n') %
122 122 (hname, exc.args[0] % exc.args[1:]))
123 123 else:
124 124 self.ui.warn(_('error: %s hook raised an exception: '
125 125 '%s\n') % (hname, exc))
126 126 if throw:
127 127 raise
128 128 self.ui.print_exc()
129 129 return True
130 130 if r:
131 131 if throw:
132 132 raise util.Abort(_('%s hook failed') % hname)
133 133 self.ui.warn(_('warning: %s hook failed\n') % hname)
134 134 return r
135 135
136 136 def runhook(name, cmd):
137 137 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
138 138 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
139 139 r = util.system(cmd, environ=env, cwd=self.root)
140 140 if r:
141 141 desc, r = util.explain_exit(r)
142 142 if throw:
143 143 raise util.Abort(_('%s hook %s') % (name, desc))
144 144 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
145 145 return r
146 146
147 147 r = False
148 148 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
149 149 if hname.split(".", 1)[0] == name and cmd]
150 150 hooks.sort()
151 151 for hname, cmd in hooks:
152 152 if cmd.startswith('python:'):
153 153 r = callhook(hname, cmd[7:].strip()) or r
154 154 else:
155 155 r = runhook(hname, cmd) or r
156 156 return r
157 157
158 158 def tags(self):
159 159 '''return a mapping of tag to node'''
160 160 if not self.tagscache:
161 161 self.tagscache = {}
162 162
163 163 def parsetag(line, context):
164 164 if not line:
165 165 return
166 166 s = l.split(" ", 1)
167 167 if len(s) != 2:
168 168 self.ui.warn(_("%s: cannot parse entry\n") % context)
169 169 return
170 170 node, key = s
171 171 key = key.strip()
172 172 try:
173 173 bin_n = bin(node)
174 174 except TypeError:
175 175 self.ui.warn(_("%s: node '%s' is not well formed\n") %
176 176 (context, node))
177 177 return
178 178 if bin_n not in self.changelog.nodemap:
179 179 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
180 180 (context, key))
181 181 return
182 182 self.tagscache[key] = bin_n
183 183
184 184 # read the tags file from each head, ending with the tip,
185 185 # and add each tag found to the map, with "newer" ones
186 186 # taking precedence
187 187 heads = self.heads()
188 188 heads.reverse()
189 189 fl = self.file(".hgtags")
190 190 for node in heads:
191 191 change = self.changelog.read(node)
192 192 rev = self.changelog.rev(node)
193 193 fn, ff = self.manifest.find(change[0], '.hgtags')
194 194 if fn is None: continue
195 195 count = 0
196 196 for l in fl.read(fn).splitlines():
197 197 count += 1
198 198 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
199 199 (rev, short(node), count))
200 200 try:
201 201 f = self.opener("localtags")
202 202 count = 0
203 203 for l in f:
204 204 count += 1
205 205 parsetag(l, _("localtags, line %d") % count)
206 206 except IOError:
207 207 pass
208 208
209 209 self.tagscache['tip'] = self.changelog.tip()
210 210
211 211 return self.tagscache
212 212
213 213 def tagslist(self):
214 214 '''return a list of tags ordered by revision'''
215 215 l = []
216 216 for t, n in self.tags().items():
217 217 try:
218 218 r = self.changelog.rev(n)
219 219 except:
220 220 r = -2 # sort to the beginning of the list if unknown
221 221 l.append((r, t, n))
222 222 l.sort()
223 223 return [(t, n) for r, t, n in l]
224 224
225 225 def nodetags(self, node):
226 226 '''return the tags associated with a node'''
227 227 if not self.nodetagscache:
228 228 self.nodetagscache = {}
229 229 for t, n in self.tags().items():
230 230 self.nodetagscache.setdefault(n, []).append(t)
231 231 return self.nodetagscache.get(node, [])
232 232
    def lookup(self, key):
        """Resolve a tag name or changelog identifier to a binary node.

        Tags take precedence over changelog lookups; anything that is
        not a tag is delegated to changelog.lookup.  Raises
        repo.RepoError if neither resolves.
        """
        try:
            return self.tags()[key]
        except KeyError:
            try:
                return self.changelog.lookup(key)
            except:
                # deliberately broad: any lookup failure becomes RepoError
                raise repo.RepoError(_("unknown revision '%s'") % key)
241 241
    def dev(self):
        # device number of the filesystem holding the .hg directory
        return os.stat(self.path).st_dev
244 244
    def local(self):
        # this repository class is always local (vs. http/ssh repos)
        return True
247 247
    def join(self, f):
        """Return f joined under the repository's .hg directory."""
        return os.path.join(self.path, f)
250 250
    def wjoin(self, f):
        """Return f joined under the working directory root."""
        return os.path.join(self.root, f)
253 253
254 254 def file(self, f):
255 255 if f[0] == '/':
256 256 f = f[1:]
257 257 return filelog.filelog(self.opener, f, self.revlogversion)
258 258
259 259 def getcwd(self):
260 260 return self.dirstate.getcwd()
261 261
262 262 def wfile(self, f, mode='r'):
263 263 return self.wopener(f, mode)
264 264
265 265 def wread(self, filename):
266 266 if self.encodepats == None:
267 267 l = []
268 268 for pat, cmd in self.ui.configitems("encode"):
269 269 mf = util.matcher(self.root, "", [pat], [], [])[1]
270 270 l.append((mf, cmd))
271 271 self.encodepats = l
272 272
273 273 data = self.wopener(filename, 'r').read()
274 274
275 275 for mf, cmd in self.encodepats:
276 276 if mf(filename):
277 277 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
278 278 data = util.filter(data, cmd)
279 279 break
280 280
281 281 return data
282 282
283 283 def wwrite(self, filename, data, fd=None):
284 284 if self.decodepats == None:
285 285 l = []
286 286 for pat, cmd in self.ui.configitems("decode"):
287 287 mf = util.matcher(self.root, "", [pat], [], [])[1]
288 288 l.append((mf, cmd))
289 289 self.decodepats = l
290 290
291 291 for mf, cmd in self.decodepats:
292 292 if mf(filename):
293 293 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
294 294 data = util.filter(data, cmd)
295 295 break
296 296
297 297 if fd:
298 298 return fd.write(data)
299 299 return self.wopener(filename, 'w').write(data)
300 300
301 301 def transaction(self):
302 302 tr = self.transhandle
303 303 if tr != None and tr.running():
304 304 return tr.nest()
305 305
306 306 # save dirstate for undo
307 307 try:
308 308 ds = self.opener("dirstate").read()
309 309 except IOError:
310 310 ds = ""
311 311 self.opener("journal.dirstate", "w").write(ds)
312 312
313 313 tr = transaction.transaction(self.ui.warn, self.opener,
314 314 self.join("journal"),
315 315 aftertrans(self.path))
316 316 self.transhandle = tr
317 317 return tr
318 318
319 319 def recover(self):
320 320 l = self.lock()
321 321 if os.path.exists(self.join("journal")):
322 322 self.ui.status(_("rolling back interrupted transaction\n"))
323 323 transaction.rollback(self.opener, self.join("journal"))
324 324 self.reload()
325 325 return True
326 326 else:
327 327 self.ui.warn(_("no interrupted transaction available\n"))
328 328 return False
329 329
330 330 def undo(self, wlock=None):
331 331 if not wlock:
332 332 wlock = self.wlock()
333 333 l = self.lock()
334 334 if os.path.exists(self.join("undo")):
335 335 self.ui.status(_("rolling back last transaction\n"))
336 336 transaction.rollback(self.opener, self.join("undo"))
337 337 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
338 338 self.reload()
339 339 self.wreload()
340 340 else:
341 341 self.ui.warn(_("no undo information available\n"))
342 342
343 343 def wreload(self):
344 344 self.dirstate.read()
345 345
346 346 def reload(self):
347 347 self.changelog.load()
348 348 self.manifest.load()
349 349 self.tagscache = None
350 350 self.nodetagscache = None
351 351
352 352 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
353 353 desc=None):
354 354 try:
355 355 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
356 356 except lock.LockHeld, inst:
357 357 if not wait:
358 358 raise
359 359 self.ui.warn(_("waiting for lock on %s held by %s\n") %
360 360 (desc, inst.args[0]))
361 361 # default to 600 seconds timeout
362 362 l = lock.lock(self.join(lockname),
363 363 int(self.ui.config("ui", "timeout") or 600),
364 364 releasefn, desc=desc)
365 365 if acquirefn:
366 366 acquirefn()
367 367 return l
368 368
369 369 def lock(self, wait=1):
370 370 return self.do_lock("lock", wait, acquirefn=self.reload,
371 371 desc=_('repository %s') % self.origroot)
372 372
373 373 def wlock(self, wait=1):
374 374 return self.do_lock("wlock", wait, self.dirstate.write,
375 375 self.wreload,
376 376 desc=_('working directory of %s') % self.origroot)
377 377
    def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
        """determine whether a new filenode is needed

        Returns (existing_node, None, None) when the file content is
        unchanged from a single effective parent (no new filenode
        needed), otherwise (None, fp1, fp2) with the filenode parents
        to use for the new revision.
        """
        fp1 = manifest1.get(filename, nullid)
        fp2 = manifest2.get(filename, nullid)

        if fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = filelog.ancestor(fp1, fp2)
            if fpa == fp1:
                # fp1 is an ancestor of fp2: keep only fp2 as parent
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                # fp2 is an ancestor of fp1: drop it
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and text == filelog.read(fp1):
            return (fp1, None, None)

        return (None, fp1, fp2)
396 396
397 397 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
398 398 orig_parent = self.dirstate.parents()[0] or nullid
399 399 p1 = p1 or self.dirstate.parents()[0] or nullid
400 400 p2 = p2 or self.dirstate.parents()[1] or nullid
401 401 c1 = self.changelog.read(p1)
402 402 c2 = self.changelog.read(p2)
403 403 m1 = self.manifest.read(c1[0])
404 404 mf1 = self.manifest.readflags(c1[0])
405 405 m2 = self.manifest.read(c2[0])
406 406 changed = []
407 407
408 408 if orig_parent == p1:
409 409 update_dirstate = 1
410 410 else:
411 411 update_dirstate = 0
412 412
413 413 if not wlock:
414 414 wlock = self.wlock()
415 415 l = self.lock()
416 416 tr = self.transaction()
417 417 mm = m1.copy()
418 418 mfm = mf1.copy()
419 419 linkrev = self.changelog.count()
420 420 for f in files:
421 421 try:
422 422 t = self.wread(f)
423 423 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
424 424 r = self.file(f)
425 425 mfm[f] = tm
426 426
427 427 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
428 428 if entry:
429 429 mm[f] = entry
430 430 continue
431 431
432 432 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
433 433 changed.append(f)
434 434 if update_dirstate:
435 435 self.dirstate.update([f], "n")
436 436 except IOError:
437 437 try:
438 438 del mm[f]
439 439 del mfm[f]
440 440 if update_dirstate:
441 441 self.dirstate.forget([f])
442 442 except:
443 443 # deleted from p2?
444 444 pass
445 445
446 446 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
447 447 user = user or self.ui.username()
448 448 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
449 449 tr.close()
450 450 if update_dirstate:
451 451 self.dirstate.setparents(n, nullid)
452 452
453 453 def commit(self, files=None, text="", user=None, date=None,
454 454 match=util.always, force=False, lock=None, wlock=None,
455 455 force_editor=False):
456 456 commit = []
457 457 remove = []
458 458 changed = []
459 459
460 460 if files:
461 461 for f in files:
462 462 s = self.dirstate.state(f)
463 463 if s in 'nmai':
464 464 commit.append(f)
465 465 elif s == 'r':
466 466 remove.append(f)
467 467 else:
468 468 self.ui.warn(_("%s not tracked!\n") % f)
469 469 else:
470 470 modified, added, removed, deleted, unknown = self.changes(match=match)
471 471 commit = modified + added
472 472 remove = removed
473 473
474 474 p1, p2 = self.dirstate.parents()
475 475 c1 = self.changelog.read(p1)
476 476 c2 = self.changelog.read(p2)
477 477 m1 = self.manifest.read(c1[0])
478 478 mf1 = self.manifest.readflags(c1[0])
479 479 m2 = self.manifest.read(c2[0])
480 480
481 481 if not commit and not remove and not force and p2 == nullid:
482 482 self.ui.status(_("nothing changed\n"))
483 483 return None
484 484
485 485 xp1 = hex(p1)
486 486 if p2 == nullid: xp2 = ''
487 487 else: xp2 = hex(p2)
488 488
489 489 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
490 490
491 491 if not wlock:
492 492 wlock = self.wlock()
493 493 if not lock:
494 494 lock = self.lock()
495 495 tr = self.transaction()
496 496
497 497 # check in files
498 498 new = {}
499 499 linkrev = self.changelog.count()
500 500 commit.sort()
501 501 for f in commit:
502 502 self.ui.note(f + "\n")
503 503 try:
504 504 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
505 505 t = self.wread(f)
506 506 except IOError:
507 507 self.ui.warn(_("trouble committing %s!\n") % f)
508 508 raise
509 509
510 510 r = self.file(f)
511 511
512 512 meta = {}
513 513 cp = self.dirstate.copied(f)
514 514 if cp:
515 515 meta["copy"] = cp
516 516 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
517 517 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
518 518 fp1, fp2 = nullid, nullid
519 519 else:
520 520 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
521 521 if entry:
522 522 new[f] = entry
523 523 continue
524 524
525 525 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
526 526 # remember what we've added so that we can later calculate
527 527 # the files to pull from a set of changesets
528 528 changed.append(f)
529 529
530 530 # update manifest
531 531 m1 = m1.copy()
532 532 m1.update(new)
533 533 for f in remove:
534 534 if f in m1:
535 535 del m1[f]
536 536 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
537 537 (new, remove))
538 538
539 539 # add changeset
540 540 new = new.keys()
541 541 new.sort()
542 542
543 543 user = user or self.ui.username()
544 544 if not text or force_editor:
545 545 edittext = []
546 546 if text:
547 547 edittext.append(text)
548 548 edittext.append("")
549 549 if p2 != nullid:
550 550 edittext.append("HG: branch merge")
551 551 edittext.extend(["HG: changed %s" % f for f in changed])
552 552 edittext.extend(["HG: removed %s" % f for f in remove])
553 553 if not changed and not remove:
554 554 edittext.append("HG: no files changed")
555 555 edittext.append("")
556 556 # run editor in the repository root
557 557 olddir = os.getcwd()
558 558 os.chdir(self.root)
559 559 text = self.ui.edit("\n".join(edittext), user)
560 560 os.chdir(olddir)
561 561
562 562 lines = [line.rstrip() for line in text.rstrip().splitlines()]
563 563 while lines and not lines[0]:
564 564 del lines[0]
565 565 if not lines:
566 566 return None
567 567 text = '\n'.join(lines)
568 568 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
569 569 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
570 570 parent2=xp2)
571 571 tr.close()
572 572
573 573 self.dirstate.setparents(n)
574 574 self.dirstate.update(new, "n")
575 575 self.dirstate.forget(remove)
576 576
577 577 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
578 578 return n
579 579
580 580 def walk(self, node=None, files=[], match=util.always, badmatch=None):
581 581 if node:
582 582 fdict = dict.fromkeys(files)
583 583 for fn in self.manifest.read(self.changelog.read(node)[0]):
584 584 fdict.pop(fn, None)
585 585 if match(fn):
586 586 yield 'm', fn
587 587 for fn in fdict:
588 588 if badmatch and badmatch(fn):
589 589 if match(fn):
590 590 yield 'b', fn
591 591 else:
592 592 self.ui.warn(_('%s: No such file in rev %s\n') % (
593 593 util.pathto(self.getcwd(), fn), short(node)))
594 594 else:
595 595 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
596 596 yield src, fn
597 597
598 598 def changes(self, node1=None, node2=None, files=[], match=util.always,
599 599 wlock=None, show_ignored=None):
600 600 """return changes between two nodes or node and working directory
601 601
602 602 If node1 is None, use the first dirstate parent instead.
603 603 If node2 is None, compare node1 with working directory.
604 604 """
605 605
606 606 def fcmp(fn, mf):
607 607 t1 = self.wread(fn)
608 608 t2 = self.file(fn).read(mf.get(fn, nullid))
609 609 return cmp(t1, t2)
610 610
611 611 def mfmatches(node):
612 612 change = self.changelog.read(node)
613 613 mf = dict(self.manifest.read(change[0]))
614 614 for fn in mf.keys():
615 615 if not match(fn):
616 616 del mf[fn]
617 617 return mf
618 618
619 619 if node1:
620 620 # read the manifest from node1 before the manifest from node2,
621 621 # so that we'll hit the manifest cache if we're going through
622 622 # all the revisions in parent->child order.
623 623 mf1 = mfmatches(node1)
624 624
625 625 # are we comparing the working directory?
626 626 if not node2:
627 627 if not wlock:
628 628 try:
629 629 wlock = self.wlock(wait=0)
630 630 except lock.LockException:
631 631 wlock = None
632 632 lookup, modified, added, removed, deleted, unknown, ignored = (
633 633 self.dirstate.changes(files, match, show_ignored))
634 634
635 635 # are we comparing working dir against its parent?
636 636 if not node1:
637 637 if lookup:
638 638 # do a full compare of any files that might have changed
639 639 mf2 = mfmatches(self.dirstate.parents()[0])
640 640 for f in lookup:
641 641 if fcmp(f, mf2):
642 642 modified.append(f)
643 643 elif wlock is not None:
644 644 self.dirstate.update([f], "n")
645 645 else:
646 646 # we are comparing working dir against non-parent
647 647 # generate a pseudo-manifest for the working dir
648 648 mf2 = mfmatches(self.dirstate.parents()[0])
649 649 for f in lookup + modified + added:
650 650 mf2[f] = ""
651 651 for f in removed:
652 652 if f in mf2:
653 653 del mf2[f]
654 654 else:
655 655 # we are comparing two revisions
656 656 deleted, unknown, ignored = [], [], []
657 657 mf2 = mfmatches(node2)
658 658
659 659 if node1:
660 660 # flush lists from dirstate before comparing manifests
661 661 modified, added = [], []
662 662
663 663 for fn in mf2:
664 664 if mf1.has_key(fn):
665 665 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
666 666 modified.append(fn)
667 667 del mf1[fn]
668 668 else:
669 669 added.append(fn)
670 670
671 671 removed = mf1.keys()
672 672
673 673 # sort and return results:
674 674 for l in modified, added, removed, deleted, unknown, ignored:
675 675 l.sort()
676 676 if show_ignored is None:
677 677 return (modified, added, removed, deleted, unknown)
678 678 else:
679 679 return (modified, added, removed, deleted, unknown, ignored)
680 680
681 681 def add(self, list, wlock=None):
682 682 if not wlock:
683 683 wlock = self.wlock()
684 684 for f in list:
685 685 p = self.wjoin(f)
686 686 if not os.path.exists(p):
687 687 self.ui.warn(_("%s does not exist!\n") % f)
688 688 elif not os.path.isfile(p):
689 689 self.ui.warn(_("%s not added: only files supported currently\n")
690 690 % f)
691 691 elif self.dirstate.state(f) in 'an':
692 692 self.ui.warn(_("%s already tracked!\n") % f)
693 693 else:
694 694 self.dirstate.update([f], "a")
695 695
696 696 def forget(self, list, wlock=None):
697 697 if not wlock:
698 698 wlock = self.wlock()
699 699 for f in list:
700 700 if self.dirstate.state(f) not in 'ai':
701 701 self.ui.warn(_("%s not added!\n") % f)
702 702 else:
703 703 self.dirstate.forget([f])
704 704
705 705 def remove(self, list, unlink=False, wlock=None):
706 706 if unlink:
707 707 for f in list:
708 708 try:
709 709 util.unlink(self.wjoin(f))
710 710 except OSError, inst:
711 711 if inst.errno != errno.ENOENT:
712 712 raise
713 713 if not wlock:
714 714 wlock = self.wlock()
715 715 for f in list:
716 716 p = self.wjoin(f)
717 717 if os.path.exists(p):
718 718 self.ui.warn(_("%s still exists!\n") % f)
719 719 elif self.dirstate.state(f) == 'a':
720 720 self.dirstate.forget([f])
721 721 elif f not in self.dirstate:
722 722 self.ui.warn(_("%s not tracked!\n") % f)
723 723 else:
724 724 self.dirstate.update([f], "r")
725 725
726 726 def undelete(self, list, wlock=None):
727 727 p = self.dirstate.parents()[0]
728 728 mn = self.changelog.read(p)[0]
729 729 mf = self.manifest.readflags(mn)
730 730 m = self.manifest.read(mn)
731 731 if not wlock:
732 732 wlock = self.wlock()
733 733 for f in list:
734 734 if self.dirstate.state(f) not in "r":
735 735 self.ui.warn("%s not removed!\n" % f)
736 736 else:
737 737 t = self.file(f).read(m[f])
738 738 self.wwrite(f, t)
739 739 util.set_exec(self.wjoin(f), mf[f])
740 740 self.dirstate.update([f], "n")
741 741
742 742 def copy(self, source, dest, wlock=None):
743 743 p = self.wjoin(dest)
744 744 if not os.path.exists(p):
745 745 self.ui.warn(_("%s does not exist!\n") % dest)
746 746 elif not os.path.isfile(p):
747 747 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
748 748 else:
749 749 if not wlock:
750 750 wlock = self.wlock()
751 751 if self.dirstate.state(dest) == '?':
752 752 self.dirstate.update([dest], "a")
753 753 self.dirstate.copy(source, dest)
754 754
755 755 def heads(self, start=None):
756 756 heads = self.changelog.heads(start)
757 757 # sort the output in rev descending order
758 758 heads = [(-self.changelog.rev(h), h) for h in heads]
759 759 heads.sort()
760 760 return [n for (r, n) in heads]
761 761
762 762 # branchlookup returns a dict giving a list of branches for
763 763 # each head. A branch is defined as the tag of a node or
764 764 # the branch of the node's parents. If a node has multiple
765 765 # branch tags, tags are eliminated if they are visible from other
766 766 # branch tags.
767 767 #
768 768 # So, for this graph: a->b->c->d->e
769 769 # \ /
770 770 # aa -----/
771 771 # a has tag 2.6.12
772 772 # d has tag 2.6.13
773 773 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
774 774 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
775 775 # from the list.
776 776 #
777 777 # It is possible that more than one head will have the same branch tag.
778 778 # callers need to check the result for multiple heads under the same
779 779 # branch tag if that is a problem for them (ie checkout of a specific
780 780 # branch).
781 781 #
782 782 # passing in a specific branch will limit the depth of the search
783 783 # through the parents. It won't limit the branches returned in the
784 784 # result though.
785 785 def branchlookup(self, heads=None, branch=None):
786 786 if not heads:
787 787 heads = self.heads()
788 788 headt = [ h for h in heads ]
789 789 chlog = self.changelog
790 790 branches = {}
791 791 merges = []
792 792 seenmerge = {}
793 793
794 794 # traverse the tree once for each head, recording in the branches
795 795 # dict which tags are visible from this head. The branches
796 796 # dict also records which tags are visible from each tag
797 797 # while we traverse.
798 798 while headt or merges:
799 799 if merges:
800 800 n, found = merges.pop()
801 801 visit = [n]
802 802 else:
803 803 h = headt.pop()
804 804 visit = [h]
805 805 found = [h]
806 806 seen = {}
807 807 while visit:
808 808 n = visit.pop()
809 809 if n in seen:
810 810 continue
811 811 pp = chlog.parents(n)
812 812 tags = self.nodetags(n)
813 813 if tags:
814 814 for x in tags:
815 815 if x == 'tip':
816 816 continue
817 817 for f in found:
818 818 branches.setdefault(f, {})[n] = 1
819 819 branches.setdefault(n, {})[n] = 1
820 820 break
821 821 if n not in found:
822 822 found.append(n)
823 823 if branch in tags:
824 824 continue
825 825 seen[n] = 1
826 826 if pp[1] != nullid and n not in seenmerge:
827 827 merges.append((pp[1], [x for x in found]))
828 828 seenmerge[n] = 1
829 829 if pp[0] != nullid:
830 830 visit.append(pp[0])
831 831 # traverse the branches dict, eliminating branch tags from each
832 832 # head that are visible from another branch tag for that head.
833 833 out = {}
834 834 viscache = {}
835 835 for h in heads:
836 836 def visible(node):
837 837 if node in viscache:
838 838 return viscache[node]
839 839 ret = {}
840 840 visit = [node]
841 841 while visit:
842 842 x = visit.pop()
843 843 if x in viscache:
844 844 ret.update(viscache[x])
845 845 elif x not in ret:
846 846 ret[x] = 1
847 847 if x in branches:
848 848 visit[len(visit):] = branches[x].keys()
849 849 viscache[node] = ret
850 850 return ret
851 851 if h not in branches:
852 852 continue
853 853 # O(n^2), but somewhat limited. This only searches the
854 854 # tags visible from a specific head, not all the tags in the
855 855 # whole repo.
856 856 for b in branches[h]:
857 857 vis = False
858 858 for bb in branches[h].keys():
859 859 if b != bb:
860 860 if b in visible(bb):
861 861 vis = True
862 862 break
863 863 if not vis:
864 864 l = out.setdefault(h, [])
865 865 l[len(l):] = self.nodetags(b)
866 866 return out
867 867
    def branches(self, nodes):
        """For each node, walk first parents back to a branch point.

        Returns a list of (head, root, parent1, parent2) tuples, where
        root is the first ancestor reached that is either a merge
        (second parent not nullid) or a repository root (first parent
        nullid).  Defaults to the changelog tip when nodes is empty.
        """
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while n:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
881 881
    def between(self, pairs):
        """Sample the first-parent chain between each (top, bottom) pair.

        Walks from top towards bottom along first parents, recording the
        nodes at exponentially growing distances (1, 2, 4, 8, ...) from
        top.  Returns one list of sampled nodes per input pair; used by
        the discovery protocol to bisect unknown history.
        """
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    # double the stride after each sample
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
900 900
901 901 def findincoming(self, remote, base=None, heads=None, force=False):
902 """Return list of roots of the subsets of missing nodes from remote
903
904 If base dict is specified, assume that these nodes and their parents
905 exist on the remote side and that no child of a node of base exists
906 in both remote and self.
907 Furthermore base will be updated to include the nodes that exists
908 in self and remote but no children exists in self and remote.
909 If a list of heads is specified, return only nodes which are heads
910 or ancestors of these heads.
911
912 All the ancestors of base are in self and in remote.
913 All the descendants of the list returned are missing in self.
914 (and so we know that the rest of the nodes are missing in remote, see
915 outgoing)
916 """
902 917 m = self.changelog.nodemap
903 918 search = []
904 919 fetch = {}
905 920 seen = {}
906 921 seenbranch = {}
907 922 if base == None:
908 923 base = {}
909 924
910 925 if not heads:
911 926 heads = remote.heads()
912 927
913 928 if self.changelog.tip() == nullid:
929 base[nullid] = 1
914 930 if heads != [nullid]:
915 931 return [nullid]
916 932 return []
917 933
918 934 # assume we're closer to the tip than the root
919 935 # and start by examining the heads
920 936 self.ui.status(_("searching for changes\n"))
921 937
922 938 unknown = []
923 939 for h in heads:
924 940 if h not in m:
925 941 unknown.append(h)
926 942 else:
927 943 base[h] = 1
928 944
929 945 if not unknown:
930 946 return []
931 947
932 rep = {}
948 req = dict.fromkeys(unknown)
933 949 reqcnt = 0
934 950
935 951 # search through remote branches
936 952 # a 'branch' here is a linear segment of history, with four parts:
937 953 # head, root, first parent, second parent
938 954 # (a branch always has two parents (or none) by definition)
939 955 unknown = remote.branches(unknown)
940 956 while unknown:
941 957 r = []
942 958 while unknown:
943 959 n = unknown.pop(0)
944 960 if n[0] in seen:
945 961 continue
946 962
947 963 self.ui.debug(_("examining %s:%s\n")
948 964 % (short(n[0]), short(n[1])))
949 if n[0] == nullid:
950 break
951 if n in seenbranch:
965 if n[0] == nullid: # found the end of the branch
966 pass
967 elif n in seenbranch:
952 968 self.ui.debug(_("branch already found\n"))
953 969 continue
954 if n[1] and n[1] in m: # do we know the base?
970 elif n[1] and n[1] in m: # do we know the base?
955 971 self.ui.debug(_("found incomplete branch %s:%s\n")
956 972 % (short(n[0]), short(n[1])))
957 973 search.append(n) # schedule branch range for scanning
958 974 seenbranch[n] = 1
959 975 else:
960 976 if n[1] not in seen and n[1] not in fetch:
961 977 if n[2] in m and n[3] in m:
962 978 self.ui.debug(_("found new changeset %s\n") %
963 979 short(n[1]))
964 980 fetch[n[1]] = 1 # earliest unknown
965 base[n[2]] = 1 # latest known
966 continue
981 for p in n[2:4]:
982 if p in m:
983 base[p] = 1 # latest known
967 984
968 for a in n[2:4]:
969 if a not in rep:
970 r.append(a)
971 rep[a] = 1
972
985 for p in n[2:4]:
986 if p not in req and p not in m:
987 r.append(p)
988 req[p] = 1
973 989 seen[n[0]] = 1
974 990
975 991 if r:
976 992 reqcnt += 1
977 993 self.ui.debug(_("request %d: %s\n") %
978 994 (reqcnt, " ".join(map(short, r))))
979 995 for p in range(0, len(r), 10):
980 996 for b in remote.branches(r[p:p+10]):
981 997 self.ui.debug(_("received %s:%s\n") %
982 998 (short(b[0]), short(b[1])))
983 if b[0] in m:
984 self.ui.debug(_("found base node %s\n")
985 % short(b[0]))
986 base[b[0]] = 1
987 elif b[0] not in seen:
988 999 unknown.append(b)
989 1000
990 1001 # do binary search on the branches we found
991 1002 while search:
992 1003 n = search.pop(0)
993 1004 reqcnt += 1
994 1005 l = remote.between([(n[0], n[1])])[0]
995 1006 l.append(n[1])
996 1007 p = n[0]
997 1008 f = 1
998 1009 for i in l:
999 1010 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1000 1011 if i in m:
1001 1012 if f <= 2:
1002 1013 self.ui.debug(_("found new branch changeset %s\n") %
1003 1014 short(p))
1004 1015 fetch[p] = 1
1005 1016 base[i] = 1
1006 1017 else:
1007 1018 self.ui.debug(_("narrowed branch search to %s:%s\n")
1008 1019 % (short(p), short(i)))
1009 1020 search.append((p, i))
1010 1021 break
1011 1022 p, f = i, f * 2
1012 1023
1013 1024 # sanity check our fetch list
1014 1025 for f in fetch.keys():
1015 1026 if f in m:
1016 1027 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1017 1028
1018 1029 if base.keys() == [nullid]:
1019 1030 if force:
1020 1031 self.ui.warn(_("warning: repository is unrelated\n"))
1021 1032 else:
1022 1033 raise util.Abort(_("repository is unrelated"))
1023 1034
1024 1035 self.ui.note(_("found new changesets starting at ") +
1025 1036 " ".join([short(f) for f in fetch]) + "\n")
1026 1037
1027 1038 self.ui.debug(_("%d total queries\n") % reqcnt)
1028 1039
1029 1040 return fetch.keys()
1030 1041
1031 1042 def findoutgoing(self, remote, base=None, heads=None, force=False):
1032 1043 """Return list of nodes that are roots of subsets not in remote
1033 1044
1034 1045 If base dict is specified, assume that these nodes and their parents
1035 1046 exist on the remote side.
1036 1047 If a list of heads is specified, return only nodes which are heads
1037 1048 or ancestors of these heads, and return a second element which
1038 1049 contains all remote heads which get new children.
1039 1050 """
1040 1051 if base == None:
1041 1052 base = {}
1042 1053 self.findincoming(remote, base, heads, force=force)
1043 1054
1044 1055 self.ui.debug(_("common changesets up to ")
1045 1056 + " ".join(map(short, base.keys())) + "\n")
1046 1057
1047 1058 remain = dict.fromkeys(self.changelog.nodemap)
1048 1059
1049 1060 # prune everything remote has from the tree
1050 1061 del remain[nullid]
1051 1062 remove = base.keys()
1052 1063 while remove:
1053 1064 n = remove.pop(0)
1054 1065 if n in remain:
1055 1066 del remain[n]
1056 1067 for p in self.changelog.parents(n):
1057 1068 remove.append(p)
1058 1069
1059 1070 # find every node whose parents have been pruned
1060 1071 subset = []
1061 1072 # find every remote head that will get new children
1062 1073 updated_heads = {}
1063 1074 for n in remain:
1064 1075 p1, p2 = self.changelog.parents(n)
1065 1076 if p1 not in remain and p2 not in remain:
1066 1077 subset.append(n)
1067 1078 if heads:
1068 1079 if p1 in heads:
1069 1080 updated_heads[p1] = True
1070 1081 if p2 in heads:
1071 1082 updated_heads[p2] = True
1072 1083
1073 1084 # this is the set of all roots we have to push
1074 1085 if heads:
1075 1086 return subset, updated_heads.keys()
1076 1087 else:
1077 1088 return subset
1078 1089
1079 1090 def pull(self, remote, heads=None, force=False):
1080 1091 l = self.lock()
1081 1092
1082 1093 fetch = self.findincoming(remote, force=force)
1083 1094 if fetch == [nullid]:
1084 1095 self.ui.status(_("requesting all changes\n"))
1085 1096
1086 1097 if not fetch:
1087 1098 self.ui.status(_("no changes found\n"))
1088 1099 return 0
1089 1100
1090 1101 if heads is None:
1091 1102 cg = remote.changegroup(fetch, 'pull')
1092 1103 else:
1093 1104 cg = remote.changegroupsubset(fetch, heads, 'pull')
1094 1105 return self.addchangegroup(cg, 'pull')
1095 1106
1096 1107 def push(self, remote, force=False, revs=None):
1097 1108 lock = remote.lock()
1098 1109
1099 1110 base = {}
1100 1111 remote_heads = remote.heads()
1101 1112 inc = self.findincoming(remote, base, remote_heads, force=force)
1102 1113 if not force and inc:
1103 1114 self.ui.warn(_("abort: unsynced remote changes!\n"))
1104 1115 self.ui.status(_("(did you forget to sync?"
1105 1116 " use push -f to force)\n"))
1106 1117 return 1
1107 1118
1108 1119 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1109 1120 if revs is not None:
1110 1121 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1111 1122 else:
1112 1123 bases, heads = update, self.changelog.heads()
1113 1124
1114 1125 if not bases:
1115 1126 self.ui.status(_("no changes found\n"))
1116 1127 return 1
1117 1128 elif not force:
1118 1129 # FIXME we don't properly detect creation of new heads
1119 1130 # in the push -r case, assume the user knows what he's doing
1120 1131 if not revs and len(remote_heads) < len(heads) \
1121 1132 and remote_heads != [nullid]:
1122 1133 self.ui.warn(_("abort: push creates new remote branches!\n"))
1123 1134 self.ui.status(_("(did you forget to merge?"
1124 1135 " use push -f to force)\n"))
1125 1136 return 1
1126 1137
1127 1138 if revs is None:
1128 1139 cg = self.changegroup(update, 'push')
1129 1140 else:
1130 1141 cg = self.changegroupsubset(update, revs, 'push')
1131 1142 return remote.addchangegroup(cg, 'push')
1132 1143
1133 1144 def changegroupsubset(self, bases, heads, source):
1134 1145 """This function generates a changegroup consisting of all the nodes
1135 1146 that are descendents of any of the bases, and ancestors of any of
1136 1147 the heads.
1137 1148
1138 1149 It is fairly complex as determining which filenodes and which
1139 1150 manifest nodes need to be included for the changeset to be complete
1140 1151 is non-trivial.
1141 1152
1142 1153 Another wrinkle is doing the reverse, figuring out which changeset in
1143 1154 the changegroup a particular filenode or manifestnode belongs to."""
1144 1155
1145 1156 self.hook('preoutgoing', throw=True, source=source)
1146 1157
1147 1158 # Set up some initial variables
1148 1159 # Make it easy to refer to self.changelog
1149 1160 cl = self.changelog
1150 1161 # msng is short for missing - compute the list of changesets in this
1151 1162 # changegroup.
1152 1163 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1153 1164 # Some bases may turn out to be superfluous, and some heads may be
1154 1165 # too. nodesbetween will return the minimal set of bases and heads
1155 1166 # necessary to re-create the changegroup.
1156 1167
1157 1168 # Known heads are the list of heads that it is assumed the recipient
1158 1169 # of this changegroup will know about.
1159 1170 knownheads = {}
1160 1171 # We assume that all parents of bases are known heads.
1161 1172 for n in bases:
1162 1173 for p in cl.parents(n):
1163 1174 if p != nullid:
1164 1175 knownheads[p] = 1
1165 1176 knownheads = knownheads.keys()
1166 1177 if knownheads:
1167 1178 # Now that we know what heads are known, we can compute which
1168 1179 # changesets are known. The recipient must know about all
1169 1180 # changesets required to reach the known heads from the null
1170 1181 # changeset.
1171 1182 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1172 1183 junk = None
1173 1184 # Transform the list into an ersatz set.
1174 1185 has_cl_set = dict.fromkeys(has_cl_set)
1175 1186 else:
1176 1187 # If there were no known heads, the recipient cannot be assumed to
1177 1188 # know about any changesets.
1178 1189 has_cl_set = {}
1179 1190
1180 1191 # Make it easy to refer to self.manifest
1181 1192 mnfst = self.manifest
1182 1193 # We don't know which manifests are missing yet
1183 1194 msng_mnfst_set = {}
1184 1195 # Nor do we know which filenodes are missing.
1185 1196 msng_filenode_set = {}
1186 1197
1187 1198 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1188 1199 junk = None
1189 1200
1190 1201 # A changeset always belongs to itself, so the changenode lookup
1191 1202 # function for a changenode is identity.
1192 1203 def identity(x):
1193 1204 return x
1194 1205
1195 1206 # A function generating function. Sets up an environment for the
1196 1207 # inner function.
1197 1208 def cmp_by_rev_func(revlog):
1198 1209 # Compare two nodes by their revision number in the environment's
1199 1210 # revision history. Since the revision number both represents the
1200 1211 # most efficient order to read the nodes in, and represents a
1201 1212 # topological sorting of the nodes, this function is often useful.
1202 1213 def cmp_by_rev(a, b):
1203 1214 return cmp(revlog.rev(a), revlog.rev(b))
1204 1215 return cmp_by_rev
1205 1216
1206 1217 # If we determine that a particular file or manifest node must be a
1207 1218 # node that the recipient of the changegroup will already have, we can
1208 1219 # also assume the recipient will have all the parents. This function
1209 1220 # prunes them from the set of missing nodes.
1210 1221 def prune_parents(revlog, hasset, msngset):
1211 1222 haslst = hasset.keys()
1212 1223 haslst.sort(cmp_by_rev_func(revlog))
1213 1224 for node in haslst:
1214 1225 parentlst = [p for p in revlog.parents(node) if p != nullid]
1215 1226 while parentlst:
1216 1227 n = parentlst.pop()
1217 1228 if n not in hasset:
1218 1229 hasset[n] = 1
1219 1230 p = [p for p in revlog.parents(n) if p != nullid]
1220 1231 parentlst.extend(p)
1221 1232 for n in hasset:
1222 1233 msngset.pop(n, None)
1223 1234
1224 1235 # This is a function generating function used to set up an environment
1225 1236 # for the inner function to execute in.
1226 1237 def manifest_and_file_collector(changedfileset):
1227 1238 # This is an information gathering function that gathers
1228 1239 # information from each changeset node that goes out as part of
1229 1240 # the changegroup. The information gathered is a list of which
1230 1241 # manifest nodes are potentially required (the recipient may
1231 1242 # already have them) and total list of all files which were
1232 1243 # changed in any changeset in the changegroup.
1233 1244 #
1234 1245 # We also remember the first changenode we saw any manifest
1235 1246 # referenced by so we can later determine which changenode 'owns'
1236 1247 # the manifest.
1237 1248 def collect_manifests_and_files(clnode):
1238 1249 c = cl.read(clnode)
1239 1250 for f in c[3]:
1240 1251 # This is to make sure we only have one instance of each
1241 1252 # filename string for each filename.
1242 1253 changedfileset.setdefault(f, f)
1243 1254 msng_mnfst_set.setdefault(c[0], clnode)
1244 1255 return collect_manifests_and_files
1245 1256
1246 1257 # Figure out which manifest nodes (of the ones we think might be part
1247 1258 # of the changegroup) the recipient must know about and remove them
1248 1259 # from the changegroup.
1249 1260 def prune_manifests():
1250 1261 has_mnfst_set = {}
1251 1262 for n in msng_mnfst_set:
1252 1263 # If a 'missing' manifest thinks it belongs to a changenode
1253 1264 # the recipient is assumed to have, obviously the recipient
1254 1265 # must have that manifest.
1255 1266 linknode = cl.node(mnfst.linkrev(n))
1256 1267 if linknode in has_cl_set:
1257 1268 has_mnfst_set[n] = 1
1258 1269 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1259 1270
1260 1271 # Use the information collected in collect_manifests_and_files to say
1261 1272 # which changenode any manifestnode belongs to.
1262 1273 def lookup_manifest_link(mnfstnode):
1263 1274 return msng_mnfst_set[mnfstnode]
1264 1275
1265 1276 # A function generating function that sets up the initial environment
1266 1277 # the inner function.
1267 1278 def filenode_collector(changedfiles):
1268 1279 next_rev = [0]
1269 1280 # This gathers information from each manifestnode included in the
1270 1281 # changegroup about which filenodes the manifest node references
1271 1282 # so we can include those in the changegroup too.
1272 1283 #
1273 1284 # It also remembers which changenode each filenode belongs to. It
1274 1285 # does this by assuming the a filenode belongs to the changenode
1275 1286 # the first manifest that references it belongs to.
1276 1287 def collect_msng_filenodes(mnfstnode):
1277 1288 r = mnfst.rev(mnfstnode)
1278 1289 if r == next_rev[0]:
1279 1290 # If the last rev we looked at was the one just previous,
1280 1291 # we only need to see a diff.
1281 1292 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1282 1293 # For each line in the delta
1283 1294 for dline in delta.splitlines():
1284 1295 # get the filename and filenode for that line
1285 1296 f, fnode = dline.split('\0')
1286 1297 fnode = bin(fnode[:40])
1287 1298 f = changedfiles.get(f, None)
1288 1299 # And if the file is in the list of files we care
1289 1300 # about.
1290 1301 if f is not None:
1291 1302 # Get the changenode this manifest belongs to
1292 1303 clnode = msng_mnfst_set[mnfstnode]
1293 1304 # Create the set of filenodes for the file if
1294 1305 # there isn't one already.
1295 1306 ndset = msng_filenode_set.setdefault(f, {})
1296 1307 # And set the filenode's changelog node to the
1297 1308 # manifest's if it hasn't been set already.
1298 1309 ndset.setdefault(fnode, clnode)
1299 1310 else:
1300 1311 # Otherwise we need a full manifest.
1301 1312 m = mnfst.read(mnfstnode)
1302 1313 # For every file in we care about.
1303 1314 for f in changedfiles:
1304 1315 fnode = m.get(f, None)
1305 1316 # If it's in the manifest
1306 1317 if fnode is not None:
1307 1318 # See comments above.
1308 1319 clnode = msng_mnfst_set[mnfstnode]
1309 1320 ndset = msng_filenode_set.setdefault(f, {})
1310 1321 ndset.setdefault(fnode, clnode)
1311 1322 # Remember the revision we hope to see next.
1312 1323 next_rev[0] = r + 1
1313 1324 return collect_msng_filenodes
1314 1325
1315 1326 # We have a list of filenodes we think we need for a file, lets remove
1316 1327 # all those we now the recipient must have.
1317 1328 def prune_filenodes(f, filerevlog):
1318 1329 msngset = msng_filenode_set[f]
1319 1330 hasset = {}
1320 1331 # If a 'missing' filenode thinks it belongs to a changenode we
1321 1332 # assume the recipient must have, then the recipient must have
1322 1333 # that filenode.
1323 1334 for n in msngset:
1324 1335 clnode = cl.node(filerevlog.linkrev(n))
1325 1336 if clnode in has_cl_set:
1326 1337 hasset[n] = 1
1327 1338 prune_parents(filerevlog, hasset, msngset)
1328 1339
1329 1340 # A function generator function that sets up the a context for the
1330 1341 # inner function.
1331 1342 def lookup_filenode_link_func(fname):
1332 1343 msngset = msng_filenode_set[fname]
1333 1344 # Lookup the changenode the filenode belongs to.
1334 1345 def lookup_filenode_link(fnode):
1335 1346 return msngset[fnode]
1336 1347 return lookup_filenode_link
1337 1348
1338 1349 # Now that we have all theses utility functions to help out and
1339 1350 # logically divide up the task, generate the group.
1340 1351 def gengroup():
1341 1352 # The set of changed files starts empty.
1342 1353 changedfiles = {}
1343 1354 # Create a changenode group generator that will call our functions
1344 1355 # back to lookup the owning changenode and collect information.
1345 1356 group = cl.group(msng_cl_lst, identity,
1346 1357 manifest_and_file_collector(changedfiles))
1347 1358 for chnk in group:
1348 1359 yield chnk
1349 1360
1350 1361 # The list of manifests has been collected by the generator
1351 1362 # calling our functions back.
1352 1363 prune_manifests()
1353 1364 msng_mnfst_lst = msng_mnfst_set.keys()
1354 1365 # Sort the manifestnodes by revision number.
1355 1366 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1356 1367 # Create a generator for the manifestnodes that calls our lookup
1357 1368 # and data collection functions back.
1358 1369 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1359 1370 filenode_collector(changedfiles))
1360 1371 for chnk in group:
1361 1372 yield chnk
1362 1373
1363 1374 # These are no longer needed, dereference and toss the memory for
1364 1375 # them.
1365 1376 msng_mnfst_lst = None
1366 1377 msng_mnfst_set.clear()
1367 1378
1368 1379 changedfiles = changedfiles.keys()
1369 1380 changedfiles.sort()
1370 1381 # Go through all our files in order sorted by name.
1371 1382 for fname in changedfiles:
1372 1383 filerevlog = self.file(fname)
1373 1384 # Toss out the filenodes that the recipient isn't really
1374 1385 # missing.
1375 1386 if msng_filenode_set.has_key(fname):
1376 1387 prune_filenodes(fname, filerevlog)
1377 1388 msng_filenode_lst = msng_filenode_set[fname].keys()
1378 1389 else:
1379 1390 msng_filenode_lst = []
1380 1391 # If any filenodes are left, generate the group for them,
1381 1392 # otherwise don't bother.
1382 1393 if len(msng_filenode_lst) > 0:
1383 1394 yield changegroup.genchunk(fname)
1384 1395 # Sort the filenodes by their revision #
1385 1396 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1386 1397 # Create a group generator and only pass in a changenode
1387 1398 # lookup function as we need to collect no information
1388 1399 # from filenodes.
1389 1400 group = filerevlog.group(msng_filenode_lst,
1390 1401 lookup_filenode_link_func(fname))
1391 1402 for chnk in group:
1392 1403 yield chnk
1393 1404 if msng_filenode_set.has_key(fname):
1394 1405 # Don't need this anymore, toss it to free memory.
1395 1406 del msng_filenode_set[fname]
1396 1407 # Signal that no more groups are left.
1397 1408 yield changegroup.closechunk()
1398 1409
1399 1410 if msng_cl_lst:
1400 1411 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1401 1412
1402 1413 return util.chunkbuffer(gengroup())
1403 1414
1404 1415 def changegroup(self, basenodes, source):
1405 1416 """Generate a changegroup of all nodes that we have that a recipient
1406 1417 doesn't.
1407 1418
1408 1419 This is much easier than the previous function as we can assume that
1409 1420 the recipient has any changenode we aren't sending them."""
1410 1421
1411 1422 self.hook('preoutgoing', throw=True, source=source)
1412 1423
1413 1424 cl = self.changelog
1414 1425 nodes = cl.nodesbetween(basenodes, None)[0]
1415 1426 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1416 1427
1417 1428 def identity(x):
1418 1429 return x
1419 1430
1420 1431 def gennodelst(revlog):
1421 1432 for r in xrange(0, revlog.count()):
1422 1433 n = revlog.node(r)
1423 1434 if revlog.linkrev(n) in revset:
1424 1435 yield n
1425 1436
1426 1437 def changed_file_collector(changedfileset):
1427 1438 def collect_changed_files(clnode):
1428 1439 c = cl.read(clnode)
1429 1440 for fname in c[3]:
1430 1441 changedfileset[fname] = 1
1431 1442 return collect_changed_files
1432 1443
1433 1444 def lookuprevlink_func(revlog):
1434 1445 def lookuprevlink(n):
1435 1446 return cl.node(revlog.linkrev(n))
1436 1447 return lookuprevlink
1437 1448
1438 1449 def gengroup():
1439 1450 # construct a list of all changed files
1440 1451 changedfiles = {}
1441 1452
1442 1453 for chnk in cl.group(nodes, identity,
1443 1454 changed_file_collector(changedfiles)):
1444 1455 yield chnk
1445 1456 changedfiles = changedfiles.keys()
1446 1457 changedfiles.sort()
1447 1458
1448 1459 mnfst = self.manifest
1449 1460 nodeiter = gennodelst(mnfst)
1450 1461 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1451 1462 yield chnk
1452 1463
1453 1464 for fname in changedfiles:
1454 1465 filerevlog = self.file(fname)
1455 1466 nodeiter = gennodelst(filerevlog)
1456 1467 nodeiter = list(nodeiter)
1457 1468 if nodeiter:
1458 1469 yield changegroup.genchunk(fname)
1459 1470 lookup = lookuprevlink_func(filerevlog)
1460 1471 for chnk in filerevlog.group(nodeiter, lookup):
1461 1472 yield chnk
1462 1473
1463 1474 yield changegroup.closechunk()
1464 1475
1465 1476 if nodes:
1466 1477 self.hook('outgoing', node=hex(nodes[0]), source=source)
1467 1478
1468 1479 return util.chunkbuffer(gengroup())
1469 1480
1470 1481 def addchangegroup(self, source, srctype):
1471 1482 """add changegroup to repo.
1472 1483 returns number of heads modified or added + 1."""
1473 1484
1474 1485 def csmap(x):
1475 1486 self.ui.debug(_("add changeset %s\n") % short(x))
1476 1487 return cl.count()
1477 1488
1478 1489 def revmap(x):
1479 1490 return cl.rev(x)
1480 1491
1481 1492 if not source:
1482 1493 return 0
1483 1494
1484 1495 self.hook('prechangegroup', throw=True, source=srctype)
1485 1496
1486 1497 changesets = files = revisions = 0
1487 1498
1488 1499 tr = self.transaction()
1489 1500
1490 1501 # write changelog and manifest data to temp files so
1491 1502 # concurrent readers will not see inconsistent view
1492 1503 cl = None
1493 1504 try:
1494 1505 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1495 1506
1496 1507 oldheads = len(cl.heads())
1497 1508
1498 1509 # pull off the changeset group
1499 1510 self.ui.status(_("adding changesets\n"))
1500 1511 co = cl.tip()
1501 1512 chunkiter = changegroup.chunkiter(source)
1502 1513 cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
1503 1514 cnr, cor = map(cl.rev, (cn, co))
1504 1515 if cn == nullid:
1505 1516 cnr = cor
1506 1517 changesets = cnr - cor
1507 1518
1508 1519 mf = None
1509 1520 try:
1510 1521 mf = appendfile.appendmanifest(self.opener,
1511 1522 self.manifest.version)
1512 1523
1513 1524 # pull off the manifest group
1514 1525 self.ui.status(_("adding manifests\n"))
1515 1526 mm = mf.tip()
1516 1527 chunkiter = changegroup.chunkiter(source)
1517 1528 mo = mf.addgroup(chunkiter, revmap, tr)
1518 1529
1519 1530 # process the files
1520 1531 self.ui.status(_("adding file changes\n"))
1521 1532 while 1:
1522 1533 f = changegroup.getchunk(source)
1523 1534 if not f:
1524 1535 break
1525 1536 self.ui.debug(_("adding %s revisions\n") % f)
1526 1537 fl = self.file(f)
1527 1538 o = fl.count()
1528 1539 chunkiter = changegroup.chunkiter(source)
1529 1540 n = fl.addgroup(chunkiter, revmap, tr)
1530 1541 revisions += fl.count() - o
1531 1542 files += 1
1532 1543
1533 1544 # write order here is important so concurrent readers will see
1534 1545 # consistent view of repo
1535 1546 mf.writedata()
1536 1547 finally:
1537 1548 if mf:
1538 1549 mf.cleanup()
1539 1550 cl.writedata()
1540 1551 finally:
1541 1552 if cl:
1542 1553 cl.cleanup()
1543 1554
1544 1555 # make changelog and manifest see real files again
1545 1556 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1546 1557 self.manifest = manifest.manifest(self.opener, self.manifest.version)
1547 1558 self.changelog.checkinlinesize(tr)
1548 1559 self.manifest.checkinlinesize(tr)
1549 1560
1550 1561 newheads = len(self.changelog.heads())
1551 1562 heads = ""
1552 1563 if oldheads and newheads > oldheads:
1553 1564 heads = _(" (+%d heads)") % (newheads - oldheads)
1554 1565
1555 1566 self.ui.status(_("added %d changesets"
1556 1567 " with %d changes to %d files%s\n")
1557 1568 % (changesets, revisions, files, heads))
1558 1569
1559 1570 if changesets > 0:
1560 1571 self.hook('pretxnchangegroup', throw=True,
1561 1572 node=hex(self.changelog.node(cor+1)), source=srctype)
1562 1573
1563 1574 tr.close()
1564 1575
1565 1576 if changesets > 0:
1566 1577 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1567 1578 source=srctype)
1568 1579
1569 1580 for i in range(cor + 1, cnr + 1):
1570 1581 self.hook("incoming", node=hex(self.changelog.node(i)),
1571 1582 source=srctype)
1572 1583
1573 1584 return newheads - oldheads + 1
1574 1585
1575 1586 def update(self, node, allow=False, force=False, choose=None,
1576 1587 moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
1577 1588 pl = self.dirstate.parents()
1578 1589 if not force and pl[1] != nullid:
1579 1590 raise util.Abort(_("outstanding uncommitted merges"))
1580 1591
1581 1592 err = False
1582 1593
1583 1594 p1, p2 = pl[0], node
1584 1595 pa = self.changelog.ancestor(p1, p2)
1585 1596 m1n = self.changelog.read(p1)[0]
1586 1597 m2n = self.changelog.read(p2)[0]
1587 1598 man = self.manifest.ancestor(m1n, m2n)
1588 1599 m1 = self.manifest.read(m1n)
1589 1600 mf1 = self.manifest.readflags(m1n)
1590 1601 m2 = self.manifest.read(m2n).copy()
1591 1602 mf2 = self.manifest.readflags(m2n)
1592 1603 ma = self.manifest.read(man)
1593 1604 mfa = self.manifest.readflags(man)
1594 1605
1595 1606 modified, added, removed, deleted, unknown = self.changes()
1596 1607
1597 1608 # is this a jump, or a merge? i.e. is there a linear path
1598 1609 # from p1 to p2?
1599 1610 linear_path = (pa == p1 or pa == p2)
1600 1611
1601 1612 if allow and linear_path:
1602 1613 raise util.Abort(_("there is nothing to merge, "
1603 1614 "just use 'hg update'"))
1604 1615 if allow and not forcemerge:
1605 1616 if modified or added or removed:
1606 1617 raise util.Abort(_("outstanding uncommitted changes"))
1607 1618
1608 1619 if not forcemerge and not force:
1609 1620 for f in unknown:
1610 1621 if f in m2:
1611 1622 t1 = self.wread(f)
1612 1623 t2 = self.file(f).read(m2[f])
1613 1624 if cmp(t1, t2) != 0:
1614 1625 raise util.Abort(_("'%s' already exists in the working"
1615 1626 " dir and differs from remote") % f)
1616 1627
1617 1628 # resolve the manifest to determine which files
1618 1629 # we care about merging
1619 1630 self.ui.note(_("resolving manifests\n"))
1620 1631 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1621 1632 (force, allow, moddirstate, linear_path))
1622 1633 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1623 1634 (short(man), short(m1n), short(m2n)))
1624 1635
1625 1636 merge = {}
1626 1637 get = {}
1627 1638 remove = []
1628 1639
1629 1640 # construct a working dir manifest
1630 1641 mw = m1.copy()
1631 1642 mfw = mf1.copy()
1632 1643 umap = dict.fromkeys(unknown)
1633 1644
1634 1645 for f in added + modified + unknown:
1635 1646 mw[f] = ""
1636 1647 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1637 1648
1638 1649 if moddirstate and not wlock:
1639 1650 wlock = self.wlock()
1640 1651
1641 1652 for f in deleted + removed:
1642 1653 if f in mw:
1643 1654 del mw[f]
1644 1655
1645 1656 # If we're jumping between revisions (as opposed to merging),
1646 1657 # and if neither the working directory nor the target rev has
1647 1658 # the file, then we need to remove it from the dirstate, to
1648 1659 # prevent the dirstate from listing the file when it is no
1649 1660 # longer in the manifest.
1650 1661 if moddirstate and linear_path and f not in m2:
1651 1662 self.dirstate.forget((f,))
1652 1663
1653 1664 # Compare manifests
1654 1665 for f, n in mw.iteritems():
1655 1666 if choose and not choose(f):
1656 1667 continue
1657 1668 if f in m2:
1658 1669 s = 0
1659 1670
1660 1671 # is the wfile new since m1, and match m2?
1661 1672 if f not in m1:
1662 1673 t1 = self.wread(f)
1663 1674 t2 = self.file(f).read(m2[f])
1664 1675 if cmp(t1, t2) == 0:
1665 1676 n = m2[f]
1666 1677 del t1, t2
1667 1678
1668 1679 # are files different?
1669 1680 if n != m2[f]:
1670 1681 a = ma.get(f, nullid)
1671 1682 # are both different from the ancestor?
1672 1683 if n != a and m2[f] != a:
1673 1684 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1674 1685 # merge executable bits
1675 1686 # "if we changed or they changed, change in merge"
1676 1687 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1677 1688 mode = ((a^b) | (a^c)) ^ a
1678 1689 merge[f] = (m1.get(f, nullid), m2[f], mode)
1679 1690 s = 1
1680 1691 # are we clobbering?
1681 1692 # is remote's version newer?
1682 1693 # or are we going back in time?
1683 1694 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1684 1695 self.ui.debug(_(" remote %s is newer, get\n") % f)
1685 1696 get[f] = m2[f]
1686 1697 s = 1
1687 1698 elif f in umap or f in added:
1688 1699 # this unknown file is the same as the checkout
1689 1700 # we need to reset the dirstate if the file was added
1690 1701 get[f] = m2[f]
1691 1702
1692 1703 if not s and mfw[f] != mf2[f]:
1693 1704 if force:
1694 1705 self.ui.debug(_(" updating permissions for %s\n") % f)
1695 1706 util.set_exec(self.wjoin(f), mf2[f])
1696 1707 else:
1697 1708 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1698 1709 mode = ((a^b) | (a^c)) ^ a
1699 1710 if mode != b:
1700 1711 self.ui.debug(_(" updating permissions for %s\n")
1701 1712 % f)
1702 1713 util.set_exec(self.wjoin(f), mode)
1703 1714 del m2[f]
1704 1715 elif f in ma:
1705 1716 if n != ma[f]:
1706 1717 r = _("d")
1707 1718 if not force and (linear_path or allow):
1708 1719 r = self.ui.prompt(
1709 1720 (_(" local changed %s which remote deleted\n") % f) +
1710 1721 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1711 1722 if r == _("d"):
1712 1723 remove.append(f)
1713 1724 else:
1714 1725 self.ui.debug(_("other deleted %s\n") % f)
1715 1726 remove.append(f) # other deleted it
1716 1727 else:
1717 1728 # file is created on branch or in working directory
1718 1729 if force and f not in umap:
1719 1730 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1720 1731 remove.append(f)
1721 1732 elif n == m1.get(f, nullid): # same as parent
1722 1733 if p2 == pa: # going backwards?
1723 1734 self.ui.debug(_("remote deleted %s\n") % f)
1724 1735 remove.append(f)
1725 1736 else:
1726 1737 self.ui.debug(_("local modified %s, keeping\n") % f)
1727 1738 else:
1728 1739 self.ui.debug(_("working dir created %s, keeping\n") % f)
1729 1740
1730 1741 for f, n in m2.iteritems():
1731 1742 if choose and not choose(f):
1732 1743 continue
1733 1744 if f[0] == "/":
1734 1745 continue
1735 1746 if f in ma and n != ma[f]:
1736 1747 r = _("k")
1737 1748 if not force and (linear_path or allow):
1738 1749 r = self.ui.prompt(
1739 1750 (_("remote changed %s which local deleted\n") % f) +
1740 1751 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1741 1752 if r == _("k"):
1742 1753 get[f] = n
1743 1754 elif f not in ma:
1744 1755 self.ui.debug(_("remote created %s\n") % f)
1745 1756 get[f] = n
1746 1757 else:
1747 1758 if force or p2 == pa: # going backwards?
1748 1759 self.ui.debug(_("local deleted %s, recreating\n") % f)
1749 1760 get[f] = n
1750 1761 else:
1751 1762 self.ui.debug(_("local deleted %s\n") % f)
1752 1763
1753 1764 del mw, m1, m2, ma
1754 1765
1755 1766 if force:
1756 1767 for f in merge:
1757 1768 get[f] = merge[f][1]
1758 1769 merge = {}
1759 1770
1760 1771 if linear_path or force:
1761 1772 # we don't need to do any magic, just jump to the new rev
1762 1773 branch_merge = False
1763 1774 p1, p2 = p2, nullid
1764 1775 else:
1765 1776 if not allow:
1766 1777 self.ui.status(_("this update spans a branch"
1767 1778 " affecting the following files:\n"))
1768 1779 fl = merge.keys() + get.keys()
1769 1780 fl.sort()
1770 1781 for f in fl:
1771 1782 cf = ""
1772 1783 if f in merge:
1773 1784 cf = _(" (resolve)")
1774 1785 self.ui.status(" %s%s\n" % (f, cf))
1775 1786 self.ui.warn(_("aborting update spanning branches!\n"))
1776 1787 self.ui.status(_("(use 'hg merge' to merge across branches"
1777 1788 " or 'hg update -C' to lose changes)\n"))
1778 1789 return 1
1779 1790 branch_merge = True
1780 1791
1781 1792 xp1 = hex(p1)
1782 1793 xp2 = hex(p2)
1783 1794 if p2 == nullid: xxp2 = ''
1784 1795 else: xxp2 = xp2
1785 1796
1786 1797 self.hook('preupdate', throw=True, parent1=xp1, parent2=xxp2)
1787 1798
1788 1799 # get the files we don't need to change
1789 1800 files = get.keys()
1790 1801 files.sort()
1791 1802 for f in files:
1792 1803 if f[0] == "/":
1793 1804 continue
1794 1805 self.ui.note(_("getting %s\n") % f)
1795 1806 t = self.file(f).read(get[f])
1796 1807 self.wwrite(f, t)
1797 1808 util.set_exec(self.wjoin(f), mf2[f])
1798 1809 if moddirstate:
1799 1810 if branch_merge:
1800 1811 self.dirstate.update([f], 'n', st_mtime=-1)
1801 1812 else:
1802 1813 self.dirstate.update([f], 'n')
1803 1814
1804 1815 # merge the tricky bits
1805 1816 failedmerge = []
1806 1817 files = merge.keys()
1807 1818 files.sort()
1808 1819 for f in files:
1809 1820 self.ui.status(_("merging %s\n") % f)
1810 1821 my, other, flag = merge[f]
1811 1822 ret = self.merge3(f, my, other, xp1, xp2)
1812 1823 if ret:
1813 1824 err = True
1814 1825 failedmerge.append(f)
1815 1826 util.set_exec(self.wjoin(f), flag)
1816 1827 if moddirstate:
1817 1828 if branch_merge:
1818 1829 # We've done a branch merge, mark this file as merged
1819 1830 # so that we properly record the merger later
1820 1831 self.dirstate.update([f], 'm')
1821 1832 else:
1822 1833 # We've update-merged a locally modified file, so
1823 1834 # we set the dirstate to emulate a normal checkout
1824 1835 # of that file some time in the past. Thus our
1825 1836 # merge will appear as a normal local file
1826 1837 # modification.
1827 1838 f_len = len(self.file(f).read(other))
1828 1839 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1829 1840
1830 1841 remove.sort()
1831 1842 for f in remove:
1832 1843 self.ui.note(_("removing %s\n") % f)
1833 1844 util.audit_path(f)
1834 1845 try:
1835 1846 util.unlink(self.wjoin(f))
1836 1847 except OSError, inst:
1837 1848 if inst.errno != errno.ENOENT:
1838 1849 self.ui.warn(_("update failed to remove %s: %s!\n") %
1839 1850 (f, inst.strerror))
1840 1851 if moddirstate:
1841 1852 if branch_merge:
1842 1853 self.dirstate.update(remove, 'r')
1843 1854 else:
1844 1855 self.dirstate.forget(remove)
1845 1856
1846 1857 if moddirstate:
1847 1858 self.dirstate.setparents(p1, p2)
1848 1859
1849 1860 if show_stats:
1850 1861 stats = ((len(get), _("updated")),
1851 1862 (len(merge) - len(failedmerge), _("merged")),
1852 1863 (len(remove), _("removed")),
1853 1864 (len(failedmerge), _("unresolved")))
1854 1865 note = ", ".join([_("%d files %s") % s for s in stats])
1855 1866 self.ui.status("%s\n" % note)
1856 1867 if moddirstate:
1857 1868 if branch_merge:
1858 1869 if failedmerge:
1859 1870 self.ui.status(_("There are unresolved merges,"
1860 1871 " you can redo the full merge using:\n"
1861 1872 " hg update -C %s\n"
1862 1873 " hg merge %s\n"
1863 1874 % (self.changelog.rev(p1),
1864 1875 self.changelog.rev(p2))))
1865 1876 else:
1866 1877 self.ui.status(_("(branch merge, don't forget to commit)\n"))
1867 1878 elif failedmerge:
1868 1879 self.ui.status(_("There are unresolved merges with"
1869 1880 " locally modified files.\n"))
1870 1881
1871 1882 self.hook('update', parent1=xp1, parent2=xxp2, error=int(err))
1872 1883 return err
1873 1884
1874 1885 def merge3(self, fn, my, other, p1, p2):
1875 1886 """perform a 3-way merge in the working directory"""
1876 1887
1877 1888 def temp(prefix, node):
1878 1889 pre = "%s~%s." % (os.path.basename(fn), prefix)
1879 1890 (fd, name) = tempfile.mkstemp(prefix=pre)
1880 1891 f = os.fdopen(fd, "wb")
1881 1892 self.wwrite(fn, fl.read(node), f)
1882 1893 f.close()
1883 1894 return name
1884 1895
1885 1896 fl = self.file(fn)
1886 1897 base = fl.ancestor(my, other)
1887 1898 a = self.wjoin(fn)
1888 1899 b = temp("base", base)
1889 1900 c = temp("other", other)
1890 1901
1891 1902 self.ui.note(_("resolving %s\n") % fn)
1892 1903 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1893 1904 (fn, short(my), short(other), short(base)))
1894 1905
1895 1906 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1896 1907 or "hgmerge")
1897 1908 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1898 1909 environ={'HG_FILE': fn,
1899 1910 'HG_MY_NODE': p1,
1900 1911 'HG_OTHER_NODE': p2,
1901 1912 'HG_FILE_MY_NODE': hex(my),
1902 1913 'HG_FILE_OTHER_NODE': hex(other),
1903 1914 'HG_FILE_BASE_NODE': hex(base)})
1904 1915 if r:
1905 1916 self.ui.warn(_("merging %s failed!\n") % fn)
1906 1917
1907 1918 os.unlink(b)
1908 1919 os.unlink(c)
1909 1920 return r
1910 1921
1911 1922 def verify(self):
1912 1923 filelinkrevs = {}
1913 1924 filenodes = {}
1914 1925 changesets = revisions = files = 0
1915 1926 errors = [0]
1916 1927 warnings = [0]
1917 1928 neededmanifests = {}
1918 1929
1919 1930 def err(msg):
1920 1931 self.ui.warn(msg + "\n")
1921 1932 errors[0] += 1
1922 1933
1923 1934 def warn(msg):
1924 1935 self.ui.warn(msg + "\n")
1925 1936 warnings[0] += 1
1926 1937
1927 1938 def checksize(obj, name):
1928 1939 d = obj.checksize()
1929 1940 if d[0]:
1930 1941 err(_("%s data length off by %d bytes") % (name, d[0]))
1931 1942 if d[1]:
1932 1943 err(_("%s index contains %d extra bytes") % (name, d[1]))
1933 1944
1934 1945 def checkversion(obj, name):
1935 1946 if obj.version != revlog.REVLOGV0:
1936 1947 if not revlogv1:
1937 1948 warn(_("warning: `%s' uses revlog format 1") % name)
1938 1949 elif revlogv1:
1939 1950 warn(_("warning: `%s' uses revlog format 0") % name)
1940 1951
1941 1952 revlogv1 = self.revlogversion != revlog.REVLOGV0
1942 1953 if self.ui.verbose or revlogv1 != self.revlogv1:
1943 1954 self.ui.status(_("repository uses revlog format %d\n") %
1944 1955 (revlogv1 and 1 or 0))
1945 1956
1946 1957 seen = {}
1947 1958 self.ui.status(_("checking changesets\n"))
1948 1959 checksize(self.changelog, "changelog")
1949 1960
1950 1961 for i in range(self.changelog.count()):
1951 1962 changesets += 1
1952 1963 n = self.changelog.node(i)
1953 1964 l = self.changelog.linkrev(n)
1954 1965 if l != i:
1955 1966 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1956 1967 if n in seen:
1957 1968 err(_("duplicate changeset at revision %d") % i)
1958 1969 seen[n] = 1
1959 1970
1960 1971 for p in self.changelog.parents(n):
1961 1972 if p not in self.changelog.nodemap:
1962 1973 err(_("changeset %s has unknown parent %s") %
1963 1974 (short(n), short(p)))
1964 1975 try:
1965 1976 changes = self.changelog.read(n)
1966 1977 except KeyboardInterrupt:
1967 1978 self.ui.warn(_("interrupted"))
1968 1979 raise
1969 1980 except Exception, inst:
1970 1981 err(_("unpacking changeset %s: %s") % (short(n), inst))
1971 1982 continue
1972 1983
1973 1984 neededmanifests[changes[0]] = n
1974 1985
1975 1986 for f in changes[3]:
1976 1987 filelinkrevs.setdefault(f, []).append(i)
1977 1988
1978 1989 seen = {}
1979 1990 self.ui.status(_("checking manifests\n"))
1980 1991 checkversion(self.manifest, "manifest")
1981 1992 checksize(self.manifest, "manifest")
1982 1993
1983 1994 for i in range(self.manifest.count()):
1984 1995 n = self.manifest.node(i)
1985 1996 l = self.manifest.linkrev(n)
1986 1997
1987 1998 if l < 0 or l >= self.changelog.count():
1988 1999 err(_("bad manifest link (%d) at revision %d") % (l, i))
1989 2000
1990 2001 if n in neededmanifests:
1991 2002 del neededmanifests[n]
1992 2003
1993 2004 if n in seen:
1994 2005 err(_("duplicate manifest at revision %d") % i)
1995 2006
1996 2007 seen[n] = 1
1997 2008
1998 2009 for p in self.manifest.parents(n):
1999 2010 if p not in self.manifest.nodemap:
2000 2011 err(_("manifest %s has unknown parent %s") %
2001 2012 (short(n), short(p)))
2002 2013
2003 2014 try:
2004 2015 delta = mdiff.patchtext(self.manifest.delta(n))
2005 2016 except KeyboardInterrupt:
2006 2017 self.ui.warn(_("interrupted"))
2007 2018 raise
2008 2019 except Exception, inst:
2009 2020 err(_("unpacking manifest %s: %s") % (short(n), inst))
2010 2021 continue
2011 2022
2012 2023 try:
2013 2024 ff = [ l.split('\0') for l in delta.splitlines() ]
2014 2025 for f, fn in ff:
2015 2026 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
2016 2027 except (ValueError, TypeError), inst:
2017 2028 err(_("broken delta in manifest %s: %s") % (short(n), inst))
2018 2029
2019 2030 self.ui.status(_("crosschecking files in changesets and manifests\n"))
2020 2031
2021 2032 for m, c in neededmanifests.items():
2022 2033 err(_("Changeset %s refers to unknown manifest %s") %
2023 2034 (short(m), short(c)))
2024 2035 del neededmanifests
2025 2036
2026 2037 for f in filenodes:
2027 2038 if f not in filelinkrevs:
2028 2039 err(_("file %s in manifest but not in changesets") % f)
2029 2040
2030 2041 for f in filelinkrevs:
2031 2042 if f not in filenodes:
2032 2043 err(_("file %s in changeset but not in manifest") % f)
2033 2044
2034 2045 self.ui.status(_("checking files\n"))
2035 2046 ff = filenodes.keys()
2036 2047 ff.sort()
2037 2048 for f in ff:
2038 2049 if f == "/dev/null":
2039 2050 continue
2040 2051 files += 1
2041 2052 if not f:
2042 2053 err(_("file without name in manifest %s") % short(n))
2043 2054 continue
2044 2055 fl = self.file(f)
2045 2056 checkversion(fl, f)
2046 2057 checksize(fl, f)
2047 2058
2048 2059 nodes = {nullid: 1}
2049 2060 seen = {}
2050 2061 for i in range(fl.count()):
2051 2062 revisions += 1
2052 2063 n = fl.node(i)
2053 2064
2054 2065 if n in seen:
2055 2066 err(_("%s: duplicate revision %d") % (f, i))
2056 2067 if n not in filenodes[f]:
2057 2068 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2058 2069 else:
2059 2070 del filenodes[f][n]
2060 2071
2061 2072 flr = fl.linkrev(n)
2062 2073 if flr not in filelinkrevs.get(f, []):
2063 2074 err(_("%s:%s points to unexpected changeset %d")
2064 2075 % (f, short(n), flr))
2065 2076 else:
2066 2077 filelinkrevs[f].remove(flr)
2067 2078
2068 2079 # verify contents
2069 2080 try:
2070 2081 t = fl.read(n)
2071 2082 except KeyboardInterrupt:
2072 2083 self.ui.warn(_("interrupted"))
2073 2084 raise
2074 2085 except Exception, inst:
2075 2086 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2076 2087
2077 2088 # verify parents
2078 2089 (p1, p2) = fl.parents(n)
2079 2090 if p1 not in nodes:
2080 2091 err(_("file %s:%s unknown parent 1 %s") %
2081 2092 (f, short(n), short(p1)))
2082 2093 if p2 not in nodes:
2083 2094 err(_("file %s:%s unknown parent 2 %s") %
2084 2095 (f, short(n), short(p1)))
2085 2096 nodes[n] = 1
2086 2097
2087 2098 # cross-check
2088 2099 for node in filenodes[f]:
2089 2100 err(_("node %s in manifests not in %s") % (hex(node), f))
2090 2101
2091 2102 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2092 2103 (files, changesets, revisions))
2093 2104
2094 2105 if warnings[0]:
2095 2106 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2096 2107 if errors[0]:
2097 2108 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2098 2109 return 1
2099 2110
# used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that renames the journal files under `base`
    to their undo names once a transaction completes.

    Only the directory path is captured in the closure (not the repo
    object), which keeps destructors free of circular references.
    """
    repodir = base
    def renamer():
        for old, new in (("journal", "undo"),
                         ("journal.dirstate", "undo.dirstate")):
            util.rename(os.path.join(repodir, old),
                        os.path.join(repodir, new))
    return renamer
2108 2119
General Comments 0
You need to be logged in to leave comments. Login now