If we can't write the branch cache, fail quietly.
Matt Mackall - r3452:fcf14d87 default
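This changeset wraps the branch cache write in a try/except so that operations on repositories the user cannot write to (e.g. read-only media or another user's repository) no longer abort when the cache file cannot be created. A minimal standalone sketch of the pattern, with a hypothetical writecache helper that is not part of this file:

    # The branch cache is a pure optimization: if it cannot be written,
    # swallow the IOError and carry on; the cache is simply recomputed
    # from the changelog on the next read.
    def writecache(opener, tiphex, tiprev, branchmap):
        try:
            f = opener("branches.cache", "w")
            f.write("%s %s\n" % (tiphex, tiprev))
            for label, node in branchmap.iteritems():
                f.write("%s %s\n" % (node, label))
        except IOError:
            pass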
@@ -1,1814 +1,1817 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ('lookup', 'changegroupsubset')
19 19
20 20 def __del__(self):
21 21 self.transhandle = None
22 22 def __init__(self, parentui, path=None, create=0):
23 23 repo.repository.__init__(self)
24 24 if not path:
25 25 p = os.getcwd()
26 26 while not os.path.isdir(os.path.join(p, ".hg")):
27 27 oldp = p
28 28 p = os.path.dirname(p)
29 29 if p == oldp:
30 30 raise repo.RepoError(_("There is no Mercurial repository"
31 31 " here (.hg not found)"))
32 32 path = p
33 33 self.path = os.path.join(path, ".hg")
34 34
35 35 if not os.path.isdir(self.path):
36 36 if create:
37 37 if not os.path.exists(path):
38 38 os.mkdir(path)
39 39 os.mkdir(self.path)
40 40 os.mkdir(self.join("data"))
41 41 else:
42 42 raise repo.RepoError(_("repository %s not found") % path)
43 43 elif create:
44 44 raise repo.RepoError(_("repository %s already exists") % path)
45 45
46 46 self.root = os.path.abspath(path)
47 47 self.origroot = path
48 48 self.ui = ui.ui(parentui=parentui)
49 49 self.opener = util.opener(self.path)
50 50 self.wopener = util.opener(self.root)
51 51
52 52 try:
53 53 self.ui.readconfig(self.join("hgrc"), self.root)
54 54 except IOError:
55 55 pass
56 56
57 57 v = self.ui.configrevlog()
58 58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 60 fl = v.get('flags', None)
61 61 flags = 0
62 62 if fl != None:
63 63 for x in fl.split():
64 64 flags |= revlog.flagstr(x)
65 65 elif self.revlogv1:
66 66 flags = revlog.REVLOG_DEFAULT_FLAGS
67 67
68 68 v = self.revlogversion | flags
69 69 self.manifest = manifest.manifest(self.opener, v)
70 70 self.changelog = changelog.changelog(self.opener, v)
71 71
72 72 # the changelog might not have the inline index flag
73 73 # on. If the format of the changelog is the same as found in
74 74 # .hgrc, apply any flags found in the .hgrc as well.
75 75 # Otherwise, just use the version from the changelog.
76 76 v = self.changelog.version
77 77 if v == self.revlogversion:
78 78 v |= flags
79 79 self.revlogversion = v
80 80
81 81 self.tagscache = None
82 82 self.branchcache = None
83 83 self.nodetagscache = None
84 84 self.encodepats = None
85 85 self.decodepats = None
86 86 self.transhandle = None
87 87
88 88 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89 89
90 90 def url(self):
91 91 return 'file:' + self.root
92 92
93 93 def hook(self, name, throw=False, **args):
94 94 def callhook(hname, funcname):
95 95 '''call python hook. hook is callable object, looked up as
96 96 name in python module. if callable returns "true", hook
97 97 fails, else passes. if hook raises exception, treated as
98 98 hook failure. exception propagates if throw is "true".
99 99
100 100 reason for "true" meaning "hook failed" is so that
101 101 unmodified commands (e.g. mercurial.commands.update) can
102 102 be run as hooks without wrappers to convert return values.'''
103 103
104 104 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
105 105 d = funcname.rfind('.')
106 106 if d == -1:
107 107 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
108 108 % (hname, funcname))
109 109 modname = funcname[:d]
110 110 try:
111 111 obj = __import__(modname)
112 112 except ImportError:
113 113 try:
114 114 # extensions are loaded with hgext_ prefix
115 115 obj = __import__("hgext_%s" % modname)
116 116 except ImportError:
117 117 raise util.Abort(_('%s hook is invalid '
118 118 '(import of "%s" failed)') %
119 119 (hname, modname))
120 120 try:
121 121 for p in funcname.split('.')[1:]:
122 122 obj = getattr(obj, p)
123 123 except AttributeError, err:
124 124 raise util.Abort(_('%s hook is invalid '
125 125 '("%s" is not defined)') %
126 126 (hname, funcname))
127 127 if not callable(obj):
128 128 raise util.Abort(_('%s hook is invalid '
129 129 '("%s" is not callable)') %
130 130 (hname, funcname))
131 131 try:
132 132 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
133 133 except (KeyboardInterrupt, util.SignalInterrupt):
134 134 raise
135 135 except Exception, exc:
136 136 if isinstance(exc, util.Abort):
137 137 self.ui.warn(_('error: %s hook failed: %s\n') %
138 138 (hname, exc.args[0]))
139 139 else:
140 140 self.ui.warn(_('error: %s hook raised an exception: '
141 141 '%s\n') % (hname, exc))
142 142 if throw:
143 143 raise
144 144 self.ui.print_exc()
145 145 return True
146 146 if r:
147 147 if throw:
148 148 raise util.Abort(_('%s hook failed') % hname)
149 149 self.ui.warn(_('warning: %s hook failed\n') % hname)
150 150 return r
151 151
152 152 def runhook(name, cmd):
153 153 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
154 154 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
155 155 r = util.system(cmd, environ=env, cwd=self.root)
156 156 if r:
157 157 desc, r = util.explain_exit(r)
158 158 if throw:
159 159 raise util.Abort(_('%s hook %s') % (name, desc))
160 160 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
161 161 return r
162 162
163 163 r = False
164 164 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
165 165 if hname.split(".", 1)[0] == name and cmd]
166 166 hooks.sort()
167 167 for hname, cmd in hooks:
168 168 if cmd.startswith('python:'):
169 169 r = callhook(hname, cmd[7:].strip()) or r
170 170 else:
171 171 r = runhook(hname, cmd) or r
172 172 return r
173 173
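# Illustrative sketch, not part of this changeset: a Python hook using the
# convention documented in callhook() above, where a true return value
# means the hook failed. The module name "myhooks" and the hgrc stanza
# are hypothetical.
#
# [hooks]
# pretxncommit.nonempty = python:myhooks.nonempty

def nonempty(ui, repo, hooktype, **kwargs):
    # reject changesets whose description is blank
    desc = repo.changectx(kwargs['node']).description()
    if not desc.strip():
        ui.warn('commit message is empty\n')
        return True   # true => hook failed; with throw=True this aborts
    return False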
174 174 tag_disallowed = ':\r\n'
175 175
176 176 def tag(self, name, node, message, local, user, date):
177 177 '''tag a revision with a symbolic name.
178 178
179 179 if local is True, the tag is stored in a per-repository file.
180 180 otherwise, it is stored in the .hgtags file, and a new
181 181 changeset is committed with the change.
182 182
183 183 keyword arguments:
184 184
185 185 local: whether to store tag in non-version-controlled file
186 186 (default False)
187 187
188 188 message: commit message to use if committing
189 189
190 190 user: name of user to use if committing
191 191
192 192 date: date tuple to use if committing'''
193 193
194 194 for c in self.tag_disallowed:
195 195 if c in name:
196 196 raise util.Abort(_('%r cannot be used in a tag name') % c)
197 197
198 198 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
199 199
200 200 if local:
201 201 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
202 202 self.hook('tag', node=hex(node), tag=name, local=local)
203 203 return
204 204
205 205 for x in self.status()[:5]:
206 206 if '.hgtags' in x:
207 207 raise util.Abort(_('working copy of .hgtags is changed '
208 208 '(please commit .hgtags manually)'))
209 209
210 210 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
211 211 if self.dirstate.state('.hgtags') == '?':
212 212 self.add(['.hgtags'])
213 213
214 214 self.commit(['.hgtags'], message, user, date)
215 215 self.hook('tag', node=hex(node), tag=name, local=local)
216 216
217 217 def tags(self):
218 218 '''return a mapping of tag to node'''
219 219 if not self.tagscache:
220 220 self.tagscache = {}
221 221
222 222 def parsetag(line, context):
223 223 if not line:
224 224 return
225 225 s = line.split(" ", 1)
226 226 if len(s) != 2:
227 227 self.ui.warn(_("%s: cannot parse entry\n") % context)
228 228 return
229 229 node, key = s
230 230 key = key.strip()
231 231 try:
232 232 bin_n = bin(node)
233 233 except TypeError:
234 234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
235 235 (context, node))
236 236 return
237 237 if bin_n not in self.changelog.nodemap:
238 238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
239 239 (context, key))
240 240 return
241 241 self.tagscache[key] = bin_n
242 242
243 243 # read the tags file from each head, ending with the tip,
244 244 # and add each tag found to the map, with "newer" ones
245 245 # taking precedence
246 246 heads = self.heads()
247 247 heads.reverse()
248 248 fl = self.file(".hgtags")
249 249 for node in heads:
250 250 change = self.changelog.read(node)
251 251 rev = self.changelog.rev(node)
252 252 fn, ff = self.manifest.find(change[0], '.hgtags')
253 253 if fn is None: continue
254 254 count = 0
255 255 for l in fl.read(fn).splitlines():
256 256 count += 1
257 257 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
258 258 (rev, short(node), count))
259 259 try:
260 260 f = self.opener("localtags")
261 261 count = 0
262 262 for l in f:
263 263 count += 1
264 264 parsetag(l, _("localtags, line %d") % count)
265 265 except IOError:
266 266 pass
267 267
268 268 self.tagscache['tip'] = self.changelog.tip()
269 269
270 270 return self.tagscache
271 271
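# Illustrative layout of the files parsed by parsetag() above: .hgtags and
# .hg/localtags both hold one "<hex-node> <tagname>" pair per line, with
# the node in full 40-digit hex (abbreviated here for readability):
#
#   3a28dca8f51d...  v0.1
#   77c8ee84c5ff...  v0.2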
272 272 def tagslist(self):
273 273 '''return a list of tags ordered by revision'''
274 274 l = []
275 275 for t, n in self.tags().items():
276 276 try:
277 277 r = self.changelog.rev(n)
278 278 except:
279 279 r = -2 # sort to the beginning of the list if unknown
280 280 l.append((r, t, n))
281 281 l.sort()
282 282 return [(t, n) for r, t, n in l]
283 283
284 284 def nodetags(self, node):
285 285 '''return the tags associated with a node'''
286 286 if not self.nodetagscache:
287 287 self.nodetagscache = {}
288 288 for t, n in self.tags().items():
289 289 self.nodetagscache.setdefault(n, []).append(t)
290 290 return self.nodetagscache.get(node, [])
291 291
292 292 def branchtags(self):
293 293 if self.branchcache != None:
294 294 return self.branchcache
295 295
296 296 self.branchcache = {} # avoid recursion in changectx
297 297
298 298 try:
299 299 f = self.opener("branches.cache")
300 300 last, lrev = f.readline().rstrip().split(" ", 1)
301 301 last, lrev = bin(last), int(lrev)
302 302 if (lrev < self.changelog.count() and
303 303 self.changelog.node(lrev) == last): # sanity check
304 304 for l in f:
305 305 node, label = l.rstrip().split(" ", 1)
306 306 self.branchcache[label] = bin(node)
307 307 else: # invalidate the cache
308 308 last, lrev = nullid, -1
309 309 f.close()
310 310 except IOError:
311 311 last, lrev = nullid, -1
312 312
313 313 tip = self.changelog.count() - 1
314 314 if lrev != tip:
315 315 for r in xrange(lrev + 1, tip + 1):
316 316 c = self.changectx(r)
317 317 b = c.branch()
318 318 if b:
319 319 self.branchcache[b] = c.node()
320 320 self._writebranchcache()
321 321
322 322 return self.branchcache
323 323
324 324 def _writebranchcache(self):
325 f = self.opener("branches.cache", "w")
326 t = self.changelog.tip()
327 f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
328 for label, node in self.branchcache.iteritems():
329 f.write("%s %s\n" % (hex(node), label))
325 try:
326 f = self.opener("branches.cache", "w")
327 t = self.changelog.tip()
328 f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
329 for label, node in self.branchcache.iteritems():
330 f.write("%s %s\n" % (hex(node), label))
331 except IOError:
332 pass
330 333
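# Illustrative branches.cache layout read by branchtags() and written by
# _writebranchcache() above: a "<tip-node-hex> <tip-rev>" header line used
# for the sanity check, then one "<node-hex> <branch-label>" entry per
# branch (nodes abbreviated here):
#
#   9c2ca88d84a5...  1316
#   1a48cc4c0a23...  default
#   77c8ee84c5ff...  stable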
331 334 def lookup(self, key):
332 335 if key == '.':
333 336 key = self.dirstate.parents()[0]
334 337 if key == nullid:
335 338 raise repo.RepoError(_("no revision checked out"))
336 339 if key in self.tags():
337 340 return self.tags()[key]
338 341 if key in self.branchtags():
339 342 return self.branchtags()[key]
340 343 try:
341 344 return self.changelog.lookup(key)
342 345 except:
343 346 raise repo.RepoError(_("unknown revision '%s'") % key)
344 347
345 348 def dev(self):
346 349 return os.lstat(self.path).st_dev
347 350
348 351 def local(self):
349 352 return True
350 353
351 354 def join(self, f):
352 355 return os.path.join(self.path, f)
353 356
354 357 def wjoin(self, f):
355 358 return os.path.join(self.root, f)
356 359
357 360 def file(self, f):
358 361 if f[0] == '/':
359 362 f = f[1:]
360 363 return filelog.filelog(self.opener, f, self.revlogversion)
361 364
362 365 def changectx(self, changeid=None):
363 366 return context.changectx(self, changeid)
364 367
365 368 def workingctx(self):
366 369 return context.workingctx(self)
367 370
368 371 def parents(self, changeid=None):
369 372 '''
370 373 get list of changectxs for parents of changeid or working directory
371 374 '''
372 375 if changeid is None:
373 376 pl = self.dirstate.parents()
374 377 else:
375 378 n = self.changelog.lookup(changeid)
376 379 pl = self.changelog.parents(n)
377 380 if pl[1] == nullid:
378 381 return [self.changectx(pl[0])]
379 382 return [self.changectx(pl[0]), self.changectx(pl[1])]
380 383
381 384 def filectx(self, path, changeid=None, fileid=None):
382 385 """changeid can be a changeset revision, node, or tag.
383 386 fileid can be a file revision or node."""
384 387 return context.filectx(self, path, changeid, fileid)
385 388
386 389 def getcwd(self):
387 390 return self.dirstate.getcwd()
388 391
389 392 def wfile(self, f, mode='r'):
390 393 return self.wopener(f, mode)
391 394
392 395 def wread(self, filename):
393 396 if self.encodepats == None:
394 397 l = []
395 398 for pat, cmd in self.ui.configitems("encode"):
396 399 mf = util.matcher(self.root, "", [pat], [], [])[1]
397 400 l.append((mf, cmd))
398 401 self.encodepats = l
399 402
400 403 data = self.wopener(filename, 'r').read()
401 404
402 405 for mf, cmd in self.encodepats:
403 406 if mf(filename):
404 407 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
405 408 data = util.filter(data, cmd)
406 409 break
407 410
408 411 return data
409 412
410 413 def wwrite(self, filename, data, fd=None):
411 414 if self.decodepats == None:
412 415 l = []
413 416 for pat, cmd in self.ui.configitems("decode"):
414 417 mf = util.matcher(self.root, "", [pat], [], [])[1]
415 418 l.append((mf, cmd))
416 419 self.decodepats = l
417 420
418 421 for mf, cmd in self.decodepats:
419 422 if mf(filename):
420 423 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
421 424 data = util.filter(data, cmd)
422 425 break
423 426
424 427 if fd:
425 428 return fd.write(data)
426 429 return self.wopener(filename, 'w').write(data)
427 430
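# Illustrative hgrc configuration consumed by wread() and wwrite() above:
# each [encode]/[decode] entry maps a file pattern to a shell command the
# data is piped through via util.filter(). The commands below are examples,
# not defaults:
#
# [encode]
# *.txt = dos2unix
#
# [decode]
# *.txt = unix2dos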
428 431 def transaction(self):
429 432 tr = self.transhandle
430 433 if tr != None and tr.running():
431 434 return tr.nest()
432 435
433 436 # save dirstate for rollback
434 437 try:
435 438 ds = self.opener("dirstate").read()
436 439 except IOError:
437 440 ds = ""
438 441 self.opener("journal.dirstate", "w").write(ds)
439 442
440 443 tr = transaction.transaction(self.ui.warn, self.opener,
441 444 self.join("journal"),
442 445 aftertrans(self.path))
443 446 self.transhandle = tr
444 447 return tr
445 448
446 449 def recover(self):
447 450 l = self.lock()
448 451 if os.path.exists(self.join("journal")):
449 452 self.ui.status(_("rolling back interrupted transaction\n"))
450 453 transaction.rollback(self.opener, self.join("journal"))
451 454 self.reload()
452 455 return True
453 456 else:
454 457 self.ui.warn(_("no interrupted transaction available\n"))
455 458 return False
456 459
457 460 def rollback(self, wlock=None):
458 461 if not wlock:
459 462 wlock = self.wlock()
460 463 l = self.lock()
461 464 if os.path.exists(self.join("undo")):
462 465 self.ui.status(_("rolling back last transaction\n"))
463 466 transaction.rollback(self.opener, self.join("undo"))
464 467 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
465 468 self.reload()
466 469 self.wreload()
467 470 else:
468 471 self.ui.warn(_("no rollback information available\n"))
469 472
470 473 def wreload(self):
471 474 self.dirstate.read()
472 475
473 476 def reload(self):
474 477 self.changelog.load()
475 478 self.manifest.load()
476 479 self.tagscache = None
477 480 self.nodetagscache = None
478 481
479 482 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
480 483 desc=None):
481 484 try:
482 485 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
483 486 except lock.LockHeld, inst:
484 487 if not wait:
485 488 raise
486 489 self.ui.warn(_("waiting for lock on %s held by %s\n") %
487 490 (desc, inst.args[0]))
488 491 # default to 600 seconds timeout
489 492 l = lock.lock(self.join(lockname),
490 493 int(self.ui.config("ui", "timeout") or 600),
491 494 releasefn, desc=desc)
492 495 if acquirefn:
493 496 acquirefn()
494 497 return l
495 498
496 499 def lock(self, wait=1):
497 500 return self.do_lock("lock", wait, acquirefn=self.reload,
498 501 desc=_('repository %s') % self.origroot)
499 502
500 503 def wlock(self, wait=1):
501 504 return self.do_lock("wlock", wait, self.dirstate.write,
502 505 self.wreload,
503 506 desc=_('working directory of %s') % self.origroot)
504 507
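# The lock wait used by do_lock() above is configurable; an illustrative
# hgrc setting matching the hard-coded 600 second fallback:
#
# [ui]
# timeout = 600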
505 508 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
506 509 """
507 510 commit an individual file as part of a larger transaction
508 511 """
509 512
510 513 t = self.wread(fn)
511 514 fl = self.file(fn)
512 515 fp1 = manifest1.get(fn, nullid)
513 516 fp2 = manifest2.get(fn, nullid)
514 517
515 518 meta = {}
516 519 cp = self.dirstate.copied(fn)
517 520 if cp:
518 521 meta["copy"] = cp
519 522 if not manifest2: # not a branch merge
520 523 meta["copyrev"] = hex(manifest1.get(cp, nullid))
521 524 fp2 = nullid
522 525 elif fp2 != nullid: # copied on remote side
523 526 meta["copyrev"] = hex(manifest1.get(cp, nullid))
524 527 else: # copied on local side, reversed
525 528 meta["copyrev"] = hex(manifest2.get(cp))
526 529 fp2 = nullid
527 530 self.ui.debug(_(" %s: copy %s:%s\n") %
528 531 (fn, cp, meta["copyrev"]))
529 532 fp1 = nullid
530 533 elif fp2 != nullid:
531 534 # is one parent an ancestor of the other?
532 535 fpa = fl.ancestor(fp1, fp2)
533 536 if fpa == fp1:
534 537 fp1, fp2 = fp2, nullid
535 538 elif fpa == fp2:
536 539 fp2 = nullid
537 540
538 541 # is the file unmodified from the parent? report existing entry
539 542 if fp2 == nullid and not fl.cmp(fp1, t):
540 543 return fp1
541 544
542 545 changelist.append(fn)
543 546 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
544 547
545 548 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
546 549 orig_parent = self.dirstate.parents()[0] or nullid
547 550 p1 = p1 or self.dirstate.parents()[0] or nullid
548 551 p2 = p2 or self.dirstate.parents()[1] or nullid
549 552 c1 = self.changelog.read(p1)
550 553 c2 = self.changelog.read(p2)
551 554 m1 = self.manifest.read(c1[0]).copy()
552 555 m2 = self.manifest.read(c2[0])
553 556 changed = []
554 557 removed = []
555 558
556 559 if orig_parent == p1:
557 560 update_dirstate = 1
558 561 else:
559 562 update_dirstate = 0
560 563
561 564 if not wlock:
562 565 wlock = self.wlock()
563 566 l = self.lock()
564 567 tr = self.transaction()
565 568 linkrev = self.changelog.count()
566 569 for f in files:
567 570 try:
568 571 m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
569 572 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
570 573 except IOError:
571 574 try:
572 575 del m1[f]
573 576 if update_dirstate:
574 577 self.dirstate.forget([f])
575 578 removed.append(f)
576 579 except:
577 580 # deleted from p2?
578 581 pass
579 582
580 583 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
581 584 user = user or self.ui.username()
582 585 n = self.changelog.add(mnode, changed + removed, text,
583 586 tr, p1, p2, user, date)
584 587 tr.close()
585 588 if update_dirstate:
586 589 self.dirstate.setparents(n, nullid)
587 590
588 591 def commit(self, files=None, text="", user=None, date=None,
589 592 match=util.always, force=False, lock=None, wlock=None,
590 593 force_editor=False):
591 594 commit = []
592 595 remove = []
593 596 changed = []
594 597
595 598 if files:
596 599 for f in files:
597 600 s = self.dirstate.state(f)
598 601 if s in 'nmai':
599 602 commit.append(f)
600 603 elif s == 'r':
601 604 remove.append(f)
602 605 else:
603 606 self.ui.warn(_("%s not tracked!\n") % f)
604 607 else:
605 608 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
606 609 commit = modified + added
607 610 remove = removed
608 611
609 612 p1, p2 = self.dirstate.parents()
610 613 c1 = self.changelog.read(p1)
611 614 c2 = self.changelog.read(p2)
612 615 m1 = self.manifest.read(c1[0]).copy()
613 616 m2 = self.manifest.read(c2[0])
614 617
615 618 branchname = self.workingctx().branch()
616 619 oldname = c1[5].get("branch", "")
617 620
618 621 if not commit and not remove and not force and p2 == nullid and \
619 622 branchname == oldname:
620 623 self.ui.status(_("nothing changed\n"))
621 624 return None
622 625
623 626 xp1 = hex(p1)
624 627 if p2 == nullid: xp2 = ''
625 628 else: xp2 = hex(p2)
626 629
627 630 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
628 631
629 632 if not wlock:
630 633 wlock = self.wlock()
631 634 if not lock:
632 635 lock = self.lock()
633 636 tr = self.transaction()
634 637
635 638 # check in files
636 639 new = {}
637 640 linkrev = self.changelog.count()
638 641 commit.sort()
639 642 for f in commit:
640 643 self.ui.note(f + "\n")
641 644 try:
642 645 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
643 646 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
644 647 except IOError:
645 648 self.ui.warn(_("trouble committing %s!\n") % f)
646 649 raise
647 650
648 651 # update manifest
649 652 m1.update(new)
650 653 for f in remove:
651 654 if f in m1:
652 655 del m1[f]
653 656 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
654 657
655 658 # add changeset
656 659 new = new.keys()
657 660 new.sort()
658 661
659 662 user = user or self.ui.username()
660 663 if not text or force_editor:
661 664 edittext = []
662 665 if text:
663 666 edittext.append(text)
664 667 edittext.append("")
665 668 if p2 != nullid:
666 669 edittext.append("HG: branch merge")
667 670 edittext.extend(["HG: changed %s" % f for f in changed])
668 671 edittext.extend(["HG: removed %s" % f for f in remove])
669 672 if not changed and not remove:
670 673 edittext.append("HG: no files changed")
671 674 edittext.append("")
672 675 # run editor in the repository root
673 676 olddir = os.getcwd()
674 677 os.chdir(self.root)
675 678 text = self.ui.edit("\n".join(edittext), user)
676 679 os.chdir(olddir)
677 680
678 681 lines = [line.rstrip() for line in text.rstrip().splitlines()]
679 682 while lines and not lines[0]:
680 683 del lines[0]
681 684 if not lines:
682 685 return None
683 686 text = '\n'.join(lines)
684 687 extra = {}
685 688 if branchname:
686 689 extra["branch"] = branchname
687 690 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
688 691 user, date, extra)
689 692 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
690 693 parent2=xp2)
691 694 tr.close()
692 695
693 696 self.dirstate.setparents(n)
694 697 self.dirstate.update(new, "n")
695 698 self.dirstate.forget(remove)
696 699
697 700 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
698 701 return n
699 702
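# Illustrative editor buffer assembled above when committing a merge with
# no message given (file names hypothetical; the "HG:" helper lines do not
# become part of the final message):
#
#   HG: branch merge
#   HG: changed foo.c
#   HG: removed bar.h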
700 703 def walk(self, node=None, files=[], match=util.always, badmatch=None):
701 704 if node:
702 705 fdict = dict.fromkeys(files)
703 706 for fn in self.manifest.read(self.changelog.read(node)[0]):
704 707 for ffn in fdict:
705 708 # match if the file is the exact name or a directory
706 709 if ffn == fn or fn.startswith("%s/" % ffn):
707 710 del fdict[ffn]
708 711 break
709 712 if match(fn):
710 713 yield 'm', fn
711 714 for fn in fdict:
712 715 if badmatch and badmatch(fn):
713 716 if match(fn):
714 717 yield 'b', fn
715 718 else:
716 719 self.ui.warn(_('%s: No such file in rev %s\n') % (
717 720 util.pathto(self.getcwd(), fn), short(node)))
718 721 else:
719 722 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
720 723 yield src, fn
721 724
722 725 def status(self, node1=None, node2=None, files=[], match=util.always,
723 726 wlock=None, list_ignored=False, list_clean=False):
724 727 """return status of files between two nodes or node and working directory
725 728
726 729 If node1 is None, use the first dirstate parent instead.
727 730 If node2 is None, compare node1 with working directory.
728 731 """
729 732
730 733 def fcmp(fn, mf):
731 734 t1 = self.wread(fn)
732 735 return self.file(fn).cmp(mf.get(fn, nullid), t1)
733 736
734 737 def mfmatches(node):
735 738 change = self.changelog.read(node)
736 739 mf = self.manifest.read(change[0]).copy()
737 740 for fn in mf.keys():
738 741 if not match(fn):
739 742 del mf[fn]
740 743 return mf
741 744
742 745 modified, added, removed, deleted, unknown = [], [], [], [], []
743 746 ignored, clean = [], []
744 747
745 748 compareworking = False
746 749 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
747 750 compareworking = True
748 751
749 752 if not compareworking:
750 753 # read the manifest from node1 before the manifest from node2,
751 754 # so that we'll hit the manifest cache if we're going through
752 755 # all the revisions in parent->child order.
753 756 mf1 = mfmatches(node1)
754 757
755 758 # are we comparing the working directory?
756 759 if not node2:
757 760 if not wlock:
758 761 try:
759 762 wlock = self.wlock(wait=0)
760 763 except lock.LockException:
761 764 wlock = None
762 765 (lookup, modified, added, removed, deleted, unknown,
763 766 ignored, clean) = self.dirstate.status(files, match,
764 767 list_ignored, list_clean)
765 768
766 769 # are we comparing working dir against its parent?
767 770 if compareworking:
768 771 if lookup:
769 772 # do a full compare of any files that might have changed
770 773 mf2 = mfmatches(self.dirstate.parents()[0])
771 774 for f in lookup:
772 775 if fcmp(f, mf2):
773 776 modified.append(f)
774 777 else:
775 778 clean.append(f)
776 779 if wlock is not None:
777 780 self.dirstate.update([f], "n")
778 781 else:
779 782 # we are comparing working dir against non-parent
780 783 # generate a pseudo-manifest for the working dir
781 784 # XXX: create it in dirstate.py ?
782 785 mf2 = mfmatches(self.dirstate.parents()[0])
783 786 for f in lookup + modified + added:
784 787 mf2[f] = ""
785 788 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
786 789 for f in removed:
787 790 if f in mf2:
788 791 del mf2[f]
789 792 else:
790 793 # we are comparing two revisions
791 794 mf2 = mfmatches(node2)
792 795
793 796 if not compareworking:
794 797 # flush lists from dirstate before comparing manifests
795 798 modified, added, clean = [], [], []
796 799
797 800 # make sure to sort the files so we talk to the disk in a
798 801 # reasonable order
799 802 mf2keys = mf2.keys()
800 803 mf2keys.sort()
801 804 for fn in mf2keys:
802 805 if mf1.has_key(fn):
803 806 if mf1.flags(fn) != mf2.flags(fn) or \
804 807 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
805 808 modified.append(fn)
806 809 elif list_clean:
807 810 clean.append(fn)
808 811 del mf1[fn]
809 812 else:
810 813 added.append(fn)
811 814
812 815 removed = mf1.keys()
813 816
814 817 # sort and return results:
815 818 for l in modified, added, removed, deleted, unknown, ignored, clean:
816 819 l.sort()
817 820 return (modified, added, removed, deleted, unknown, ignored, clean)
818 821
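# Illustrative use of the seven-list return contract of status() above
# (hypothetical caller code):
#
#   modified, added, removed, deleted, unknown, ignored, clean = \
#       repo.status(list_ignored=True, list_clean=True)
#
# Note that 'ignored' and 'clean' stay empty unless explicitly requested.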
819 822 def add(self, list, wlock=None):
820 823 if not wlock:
821 824 wlock = self.wlock()
822 825 for f in list:
823 826 p = self.wjoin(f)
824 827 if not os.path.exists(p):
825 828 self.ui.warn(_("%s does not exist!\n") % f)
826 829 elif not os.path.isfile(p):
827 830 self.ui.warn(_("%s not added: only files supported currently\n")
828 831 % f)
829 832 elif self.dirstate.state(f) in 'an':
830 833 self.ui.warn(_("%s already tracked!\n") % f)
831 834 else:
832 835 self.dirstate.update([f], "a")
833 836
834 837 def forget(self, list, wlock=None):
835 838 if not wlock:
836 839 wlock = self.wlock()
837 840 for f in list:
838 841 if self.dirstate.state(f) not in 'ai':
839 842 self.ui.warn(_("%s not added!\n") % f)
840 843 else:
841 844 self.dirstate.forget([f])
842 845
843 846 def remove(self, list, unlink=False, wlock=None):
844 847 if unlink:
845 848 for f in list:
846 849 try:
847 850 util.unlink(self.wjoin(f))
848 851 except OSError, inst:
849 852 if inst.errno != errno.ENOENT:
850 853 raise
851 854 if not wlock:
852 855 wlock = self.wlock()
853 856 for f in list:
854 857 p = self.wjoin(f)
855 858 if os.path.exists(p):
856 859 self.ui.warn(_("%s still exists!\n") % f)
857 860 elif self.dirstate.state(f) == 'a':
858 861 self.dirstate.forget([f])
859 862 elif f not in self.dirstate:
860 863 self.ui.warn(_("%s not tracked!\n") % f)
861 864 else:
862 865 self.dirstate.update([f], "r")
863 866
864 867 def undelete(self, list, wlock=None):
865 868 p = self.dirstate.parents()[0]
866 869 mn = self.changelog.read(p)[0]
867 870 m = self.manifest.read(mn)
868 871 if not wlock:
869 872 wlock = self.wlock()
870 873 for f in list:
871 874 if self.dirstate.state(f) not in "r":
872 875 self.ui.warn("%s not removed!\n" % f)
873 876 else:
874 877 t = self.file(f).read(m[f])
875 878 self.wwrite(f, t)
876 879 util.set_exec(self.wjoin(f), m.execf(f))
877 880 self.dirstate.update([f], "n")
878 881
879 882 def copy(self, source, dest, wlock=None):
880 883 p = self.wjoin(dest)
881 884 if not os.path.exists(p):
882 885 self.ui.warn(_("%s does not exist!\n") % dest)
883 886 elif not os.path.isfile(p):
884 887 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
885 888 else:
886 889 if not wlock:
887 890 wlock = self.wlock()
888 891 if self.dirstate.state(dest) == '?':
889 892 self.dirstate.update([dest], "a")
890 893 self.dirstate.copy(source, dest)
891 894
892 895 def heads(self, start=None):
893 896 heads = self.changelog.heads(start)
894 897 # sort the output in rev descending order
895 898 heads = [(-self.changelog.rev(h), h) for h in heads]
896 899 heads.sort()
897 900 return [n for (r, n) in heads]
898 901
899 902 # branchlookup returns a dict giving a list of branches for
900 903 # each head. A branch is defined as the tag of a node or
901 904 # the branch of the node's parents. If a node has multiple
902 905 # branch tags, tags are eliminated if they are visible from other
903 906 # branch tags.
904 907 #
905 908 # So, for this graph: a->b->c->d->e
906 909 # \ /
907 910 # aa -----/
908 911 # a has tag 2.6.12
909 912 # d has tag 2.6.13
910 913 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
911 914 # for 2.6.12 can be reached from the node for 2.6.13, the 2.6.12 tag
912 915 # is eliminated from the list.
913 916 #
914 917 # It is possible that more than one head will have the same branch tag.
915 918 # callers need to check the result for multiple heads under the same
916 919 # branch tag if that is a problem for them (ie checkout of a specific
917 920 # branch).
918 921 #
919 922 # passing in a specific branch will limit the depth of the search
920 923 # through the parents. It won't limit the branches returned in the
921 924 # result though.
922 925 def branchlookup(self, heads=None, branch=None):
923 926 if not heads:
924 927 heads = self.heads()
925 928 headt = [ h for h in heads ]
926 929 chlog = self.changelog
927 930 branches = {}
928 931 merges = []
929 932 seenmerge = {}
930 933
931 934 # traverse the tree once for each head, recording in the branches
932 935 # dict which tags are visible from this head. The branches
933 936 # dict also records which tags are visible from each tag
934 937 # while we traverse.
935 938 while headt or merges:
936 939 if merges:
937 940 n, found = merges.pop()
938 941 visit = [n]
939 942 else:
940 943 h = headt.pop()
941 944 visit = [h]
942 945 found = [h]
943 946 seen = {}
944 947 while visit:
945 948 n = visit.pop()
946 949 if n in seen:
947 950 continue
948 951 pp = chlog.parents(n)
949 952 tags = self.nodetags(n)
950 953 if tags:
951 954 for x in tags:
952 955 if x == 'tip':
953 956 continue
954 957 for f in found:
955 958 branches.setdefault(f, {})[n] = 1
956 959 branches.setdefault(n, {})[n] = 1
957 960 break
958 961 if n not in found:
959 962 found.append(n)
960 963 if branch in tags:
961 964 continue
962 965 seen[n] = 1
963 966 if pp[1] != nullid and n not in seenmerge:
964 967 merges.append((pp[1], [x for x in found]))
965 968 seenmerge[n] = 1
966 969 if pp[0] != nullid:
967 970 visit.append(pp[0])
968 971 # traverse the branches dict, eliminating branch tags from each
969 972 # head that are visible from another branch tag for that head.
970 973 out = {}
971 974 viscache = {}
972 975 for h in heads:
973 976 def visible(node):
974 977 if node in viscache:
975 978 return viscache[node]
976 979 ret = {}
977 980 visit = [node]
978 981 while visit:
979 982 x = visit.pop()
980 983 if x in viscache:
981 984 ret.update(viscache[x])
982 985 elif x not in ret:
983 986 ret[x] = 1
984 987 if x in branches:
985 988 visit[len(visit):] = branches[x].keys()
986 989 viscache[node] = ret
987 990 return ret
988 991 if h not in branches:
989 992 continue
990 993 # O(n^2), but somewhat limited. This only searches the
991 994 # tags visible from a specific head, not all the tags in the
992 995 # whole repo.
993 996 for b in branches[h]:
994 997 vis = False
995 998 for bb in branches[h].keys():
996 999 if b != bb:
997 1000 if b in visible(bb):
998 1001 vis = True
999 1002 break
1000 1003 if not vis:
1001 1004 l = out.setdefault(h, [])
1002 1005 l[len(l):] = self.nodetags(b)
1003 1006 return out
1004 1007
1005 1008 def branches(self, nodes):
1006 1009 if not nodes:
1007 1010 nodes = [self.changelog.tip()]
1008 1011 b = []
1009 1012 for n in nodes:
1010 1013 t = n
1011 1014 while 1:
1012 1015 p = self.changelog.parents(n)
1013 1016 if p[1] != nullid or p[0] == nullid:
1014 1017 b.append((t, n, p[0], p[1]))
1015 1018 break
1016 1019 n = p[0]
1017 1020 return b
1018 1021
1019 1022 def between(self, pairs):
1020 1023 r = []
1021 1024
1022 1025 for top, bottom in pairs:
1023 1026 n, l, i = top, [], 0
1024 1027 f = 1
1025 1028
1026 1029 while n != bottom:
1027 1030 p = self.changelog.parents(n)[0]
1028 1031 if i == f:
1029 1032 l.append(n)
1030 1033 f = f * 2
1031 1034 n = p
1032 1035 i += 1
1033 1036
1034 1037 r.append(l)
1035 1038
1036 1039 return r
1037 1040
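# A minimal sketch of the sampling pattern implemented by between() above,
# using plain integers in place of nodes (hypothetical helper, not part of
# this file): ancestors of top are kept at exponentially growing distances
# 1, 2, 4, 8, ... from top, which findincoming() below uses to narrow down
# a branch segment by binary search.

def sample(top, bottom):
    l, i, f, n = [], 0, 1, top
    while n != bottom:
        if i == f:      # keep nodes whose distance from top is a power of 2
            l.append(n)
            f *= 2
        n -= 1          # stand-in for "follow the first parent"
        i += 1
    return l

# sample(10, 0) == [9, 8, 6, 2]   (distances 1, 2, 4 and 8 below the top)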
1038 1041 def findincoming(self, remote, base=None, heads=None, force=False):
1039 1042 """Return list of roots of the subsets of missing nodes from remote
1040 1043
1041 1044 If base dict is specified, assume that these nodes and their parents
1042 1045 exist on the remote side and that no child of a node of base exists
1043 1046 in both remote and self.
1044 1047 Furthermore, base will be updated to include the nodes that exist
1045 1048 in self and remote but whose children do not exist in self and remote.
1046 1049 If a list of heads is specified, return only nodes which are heads
1047 1050 or ancestors of these heads.
1048 1051
1049 1052 All the ancestors of base are in self and in remote.
1050 1053 All the descendants of the list returned are missing in self.
1051 1054 (and so we know that the rest of the nodes are missing in remote, see
1052 1055 outgoing)
1053 1056 """
1054 1057 m = self.changelog.nodemap
1055 1058 search = []
1056 1059 fetch = {}
1057 1060 seen = {}
1058 1061 seenbranch = {}
1059 1062 if base == None:
1060 1063 base = {}
1061 1064
1062 1065 if not heads:
1063 1066 heads = remote.heads()
1064 1067
1065 1068 if self.changelog.tip() == nullid:
1066 1069 base[nullid] = 1
1067 1070 if heads != [nullid]:
1068 1071 return [nullid]
1069 1072 return []
1070 1073
1071 1074 # assume we're closer to the tip than the root
1072 1075 # and start by examining the heads
1073 1076 self.ui.status(_("searching for changes\n"))
1074 1077
1075 1078 unknown = []
1076 1079 for h in heads:
1077 1080 if h not in m:
1078 1081 unknown.append(h)
1079 1082 else:
1080 1083 base[h] = 1
1081 1084
1082 1085 if not unknown:
1083 1086 return []
1084 1087
1085 1088 req = dict.fromkeys(unknown)
1086 1089 reqcnt = 0
1087 1090
1088 1091 # search through remote branches
1089 1092 # a 'branch' here is a linear segment of history, with four parts:
1090 1093 # head, root, first parent, second parent
1091 1094 # (a branch always has two parents (or none) by definition)
1092 1095 unknown = remote.branches(unknown)
1093 1096 while unknown:
1094 1097 r = []
1095 1098 while unknown:
1096 1099 n = unknown.pop(0)
1097 1100 if n[0] in seen:
1098 1101 continue
1099 1102
1100 1103 self.ui.debug(_("examining %s:%s\n")
1101 1104 % (short(n[0]), short(n[1])))
1102 1105 if n[0] == nullid: # found the end of the branch
1103 1106 pass
1104 1107 elif n in seenbranch:
1105 1108 self.ui.debug(_("branch already found\n"))
1106 1109 continue
1107 1110 elif n[1] and n[1] in m: # do we know the base?
1108 1111 self.ui.debug(_("found incomplete branch %s:%s\n")
1109 1112 % (short(n[0]), short(n[1])))
1110 1113 search.append(n) # schedule branch range for scanning
1111 1114 seenbranch[n] = 1
1112 1115 else:
1113 1116 if n[1] not in seen and n[1] not in fetch:
1114 1117 if n[2] in m and n[3] in m:
1115 1118 self.ui.debug(_("found new changeset %s\n") %
1116 1119 short(n[1]))
1117 1120 fetch[n[1]] = 1 # earliest unknown
1118 1121 for p in n[2:4]:
1119 1122 if p in m:
1120 1123 base[p] = 1 # latest known
1121 1124
1122 1125 for p in n[2:4]:
1123 1126 if p not in req and p not in m:
1124 1127 r.append(p)
1125 1128 req[p] = 1
1126 1129 seen[n[0]] = 1
1127 1130
1128 1131 if r:
1129 1132 reqcnt += 1
1130 1133 self.ui.debug(_("request %d: %s\n") %
1131 1134 (reqcnt, " ".join(map(short, r))))
1132 1135 for p in range(0, len(r), 10):
1133 1136 for b in remote.branches(r[p:p+10]):
1134 1137 self.ui.debug(_("received %s:%s\n") %
1135 1138 (short(b[0]), short(b[1])))
1136 1139 unknown.append(b)
1137 1140
1138 1141 # do binary search on the branches we found
1139 1142 while search:
1140 1143 n = search.pop(0)
1141 1144 reqcnt += 1
1142 1145 l = remote.between([(n[0], n[1])])[0]
1143 1146 l.append(n[1])
1144 1147 p = n[0]
1145 1148 f = 1
1146 1149 for i in l:
1147 1150 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1148 1151 if i in m:
1149 1152 if f <= 2:
1150 1153 self.ui.debug(_("found new branch changeset %s\n") %
1151 1154 short(p))
1152 1155 fetch[p] = 1
1153 1156 base[i] = 1
1154 1157 else:
1155 1158 self.ui.debug(_("narrowed branch search to %s:%s\n")
1156 1159 % (short(p), short(i)))
1157 1160 search.append((p, i))
1158 1161 break
1159 1162 p, f = i, f * 2
1160 1163
1161 1164 # sanity check our fetch list
1162 1165 for f in fetch.keys():
1163 1166 if f in m:
1164 1167 raise repo.RepoError(_("already have changeset ") + short(f))
1165 1168
1166 1169 if base.keys() == [nullid]:
1167 1170 if force:
1168 1171 self.ui.warn(_("warning: repository is unrelated\n"))
1169 1172 else:
1170 1173 raise util.Abort(_("repository is unrelated"))
1171 1174
1172 1175 self.ui.debug(_("found new changesets starting at ") +
1173 1176 " ".join([short(f) for f in fetch]) + "\n")
1174 1177
1175 1178 self.ui.debug(_("%d total queries\n") % reqcnt)
1176 1179
1177 1180 return fetch.keys()
1178 1181
1179 1182 def findoutgoing(self, remote, base=None, heads=None, force=False):
1180 1183 """Return list of nodes that are roots of subsets not in remote
1181 1184
1182 1185 If base dict is specified, assume that these nodes and their parents
1183 1186 exist on the remote side.
1184 1187 If a list of heads is specified, return only nodes which are heads
1185 1188 or ancestors of these heads, and return a second element which
1186 1189 contains all remote heads which get new children.
1187 1190 """
1188 1191 if base == None:
1189 1192 base = {}
1190 1193 self.findincoming(remote, base, heads, force=force)
1191 1194
1192 1195 self.ui.debug(_("common changesets up to ")
1193 1196 + " ".join(map(short, base.keys())) + "\n")
1194 1197
1195 1198 remain = dict.fromkeys(self.changelog.nodemap)
1196 1199
1197 1200 # prune everything remote has from the tree
1198 1201 del remain[nullid]
1199 1202 remove = base.keys()
1200 1203 while remove:
1201 1204 n = remove.pop(0)
1202 1205 if n in remain:
1203 1206 del remain[n]
1204 1207 for p in self.changelog.parents(n):
1205 1208 remove.append(p)
1206 1209
1207 1210 # find every node whose parents have been pruned
1208 1211 subset = []
1209 1212 # find every remote head that will get new children
1210 1213 updated_heads = {}
1211 1214 for n in remain:
1212 1215 p1, p2 = self.changelog.parents(n)
1213 1216 if p1 not in remain and p2 not in remain:
1214 1217 subset.append(n)
1215 1218 if heads:
1216 1219 if p1 in heads:
1217 1220 updated_heads[p1] = True
1218 1221 if p2 in heads:
1219 1222 updated_heads[p2] = True
1220 1223
1221 1224 # this is the set of all roots we have to push
1222 1225 if heads:
1223 1226 return subset, updated_heads.keys()
1224 1227 else:
1225 1228 return subset
1226 1229
1227 1230 def pull(self, remote, heads=None, force=False, lock=None):
1228 1231 mylock = False
1229 1232 if not lock:
1230 1233 lock = self.lock()
1231 1234 mylock = True
1232 1235
1233 1236 try:
1234 1237 fetch = self.findincoming(remote, force=force)
1235 1238 if fetch == [nullid]:
1236 1239 self.ui.status(_("requesting all changes\n"))
1237 1240
1238 1241 if not fetch:
1239 1242 self.ui.status(_("no changes found\n"))
1240 1243 return 0
1241 1244
1242 1245 if heads is None:
1243 1246 cg = remote.changegroup(fetch, 'pull')
1244 1247 else:
1245 1248 if 'changegroupsubset' not in remote.capabilities:
1246 1249 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1247 1250 cg = remote.changegroupsubset(fetch, heads, 'pull')
1248 1251 return self.addchangegroup(cg, 'pull', remote.url())
1249 1252 finally:
1250 1253 if mylock:
1251 1254 lock.release()
1252 1255
1253 1256 def push(self, remote, force=False, revs=None):
1254 1257 # there are two ways to push to remote repo:
1255 1258 #
1256 1259 # addchangegroup assumes local user can lock remote
1257 1260 # repo (local filesystem, old ssh servers).
1258 1261 #
1259 1262 # unbundle assumes local user cannot lock remote repo (new ssh
1260 1263 # servers, http servers).
1261 1264
1262 1265 if remote.capable('unbundle'):
1263 1266 return self.push_unbundle(remote, force, revs)
1264 1267 return self.push_addchangegroup(remote, force, revs)
1265 1268
1266 1269 def prepush(self, remote, force, revs):
1267 1270 base = {}
1268 1271 remote_heads = remote.heads()
1269 1272 inc = self.findincoming(remote, base, remote_heads, force=force)
1270 1273 if not force and inc:
1271 1274 self.ui.warn(_("abort: unsynced remote changes!\n"))
1272 1275 self.ui.status(_("(did you forget to sync?"
1273 1276 " use push -f to force)\n"))
1274 1277 return None, 1
1275 1278
1276 1279 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1277 1280 if revs is not None:
1278 1281 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1279 1282 else:
1280 1283 bases, heads = update, self.changelog.heads()
1281 1284
1282 1285 if not bases:
1283 1286 self.ui.status(_("no changes found\n"))
1284 1287 return None, 1
1285 1288 elif not force:
1286 1289 # FIXME we don't properly detect creation of new heads
1287 1290 # in the push -r case, assume the user knows what he's doing
1288 1291 if not revs and len(remote_heads) < len(heads) \
1289 1292 and remote_heads != [nullid]:
1290 1293 self.ui.warn(_("abort: push creates new remote branches!\n"))
1291 1294 self.ui.status(_("(did you forget to merge?"
1292 1295 " use push -f to force)\n"))
1293 1296 return None, 1
1294 1297
1295 1298 if revs is None:
1296 1299 cg = self.changegroup(update, 'push')
1297 1300 else:
1298 1301 cg = self.changegroupsubset(update, revs, 'push')
1299 1302 return cg, remote_heads
1300 1303
1301 1304 def push_addchangegroup(self, remote, force, revs):
1302 1305 lock = remote.lock()
1303 1306
1304 1307 ret = self.prepush(remote, force, revs)
1305 1308 if ret[0] is not None:
1306 1309 cg, remote_heads = ret
1307 1310 return remote.addchangegroup(cg, 'push', self.url())
1308 1311 return ret[1]
1309 1312
1310 1313 def push_unbundle(self, remote, force, revs):
1311 1314 # local repo finds heads on server, finds out what revs it
1312 1315 # must push. once revs transferred, if server finds it has
1313 1316 # different heads (someone else won commit/push race), server
1314 1317 # aborts.
1315 1318
1316 1319 ret = self.prepush(remote, force, revs)
1317 1320 if ret[0] is not None:
1318 1321 cg, remote_heads = ret
1319 1322 if force: remote_heads = ['force']
1320 1323 return remote.unbundle(cg, remote_heads, 'push')
1321 1324 return ret[1]
1322 1325
1323 1326 def changegroupsubset(self, bases, heads, source):
1324 1327 """This function generates a changegroup consisting of all the nodes
1325 1328 that are descendants of any of the bases, and ancestors of any of
1326 1329 the heads.
1327 1330
1328 1331 It is fairly complex as determining which filenodes and which
1329 1332 manifest nodes need to be included for the changeset to be complete
1330 1333 is non-trivial.
1331 1334
1332 1335 Another wrinkle is doing the reverse, figuring out which changeset in
1333 1336 the changegroup a particular filenode or manifestnode belongs to."""
1334 1337
1335 1338 self.hook('preoutgoing', throw=True, source=source)
1336 1339
1337 1340 # Set up some initial variables
1338 1341 # Make it easy to refer to self.changelog
1339 1342 cl = self.changelog
1340 1343 # msng is short for missing - compute the list of changesets in this
1341 1344 # changegroup.
1342 1345 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1343 1346 # Some bases may turn out to be superfluous, and some heads may be
1344 1347 # too. nodesbetween will return the minimal set of bases and heads
1345 1348 # necessary to re-create the changegroup.
1346 1349
1347 1350 # Known heads are the list of heads that it is assumed the recipient
1348 1351 # of this changegroup will know about.
1349 1352 knownheads = {}
1350 1353 # We assume that all parents of bases are known heads.
1351 1354 for n in bases:
1352 1355 for p in cl.parents(n):
1353 1356 if p != nullid:
1354 1357 knownheads[p] = 1
1355 1358 knownheads = knownheads.keys()
1356 1359 if knownheads:
1357 1360 # Now that we know what heads are known, we can compute which
1358 1361 # changesets are known. The recipient must know about all
1359 1362 # changesets required to reach the known heads from the null
1360 1363 # changeset.
1361 1364 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1362 1365 junk = None
1363 1366 # Transform the list into an ersatz set.
1364 1367 has_cl_set = dict.fromkeys(has_cl_set)
1365 1368 else:
1366 1369 # If there were no known heads, the recipient cannot be assumed to
1367 1370 # know about any changesets.
1368 1371 has_cl_set = {}
1369 1372
1370 1373 # Make it easy to refer to self.manifest
1371 1374 mnfst = self.manifest
1372 1375 # We don't know which manifests are missing yet
1373 1376 msng_mnfst_set = {}
1374 1377 # Nor do we know which filenodes are missing.
1375 1378 msng_filenode_set = {}
1376 1379
1377 1380 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1378 1381 junk = None
1379 1382
1380 1383 # A changeset always belongs to itself, so the changenode lookup
1381 1384 # function for a changenode is identity.
1382 1385 def identity(x):
1383 1386 return x
1384 1387
1385 1388 # A function generating function. Sets up an environment for the
1386 1389 # inner function.
1387 1390 def cmp_by_rev_func(revlog):
1388 1391 # Compare two nodes by their revision number in the environment's
1389 1392 # revision history. Since the revision number both represents the
1390 1393 # most efficient order to read the nodes in, and represents a
1391 1394 # topological sorting of the nodes, this function is often useful.
1392 1395 def cmp_by_rev(a, b):
1393 1396 return cmp(revlog.rev(a), revlog.rev(b))
1394 1397 return cmp_by_rev
1395 1398
1396 1399 # If we determine that a particular file or manifest node must be a
1397 1400 # node that the recipient of the changegroup will already have, we can
1398 1401 # also assume the recipient will have all the parents. This function
1399 1402 # prunes them from the set of missing nodes.
1400 1403 def prune_parents(revlog, hasset, msngset):
1401 1404 haslst = hasset.keys()
1402 1405 haslst.sort(cmp_by_rev_func(revlog))
1403 1406 for node in haslst:
1404 1407 parentlst = [p for p in revlog.parents(node) if p != nullid]
1405 1408 while parentlst:
1406 1409 n = parentlst.pop()
1407 1410 if n not in hasset:
1408 1411 hasset[n] = 1
1409 1412 p = [p for p in revlog.parents(n) if p != nullid]
1410 1413 parentlst.extend(p)
1411 1414 for n in hasset:
1412 1415 msngset.pop(n, None)
1413 1416
1414 1417 # This is a function generating function used to set up an environment
1415 1418 # for the inner function to execute in.
1416 1419 def manifest_and_file_collector(changedfileset):
1417 1420 # This is an information gathering function that gathers
1418 1421 # information from each changeset node that goes out as part of
1419 1422 # the changegroup. The information gathered is a list of which
1420 1423 # manifest nodes are potentially required (the recipient may
1421 1424 # already have them) and total list of all files which were
1422 1425 # changed in any changeset in the changegroup.
1423 1426 #
1424 1427 # We also remember the first changenode we saw any manifest
1425 1428 # referenced by so we can later determine which changenode 'owns'
1426 1429 # the manifest.
1427 1430 def collect_manifests_and_files(clnode):
1428 1431 c = cl.read(clnode)
1429 1432 for f in c[3]:
1430 1433 # This is to make sure we only have one instance of each
1431 1434 # filename string for each filename.
1432 1435 changedfileset.setdefault(f, f)
1433 1436 msng_mnfst_set.setdefault(c[0], clnode)
1434 1437 return collect_manifests_and_files
1435 1438
1436 1439 # Figure out which manifest nodes (of the ones we think might be part
1437 1440 # of the changegroup) the recipient must know about and remove them
1438 1441 # from the changegroup.
1439 1442 def prune_manifests():
1440 1443 has_mnfst_set = {}
1441 1444 for n in msng_mnfst_set:
1442 1445 # If a 'missing' manifest thinks it belongs to a changenode
1443 1446 # the recipient is assumed to have, obviously the recipient
1444 1447 # must have that manifest.
1445 1448 linknode = cl.node(mnfst.linkrev(n))
1446 1449 if linknode in has_cl_set:
1447 1450 has_mnfst_set[n] = 1
1448 1451 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1449 1452
1450 1453 # Use the information collected in collect_manifests_and_files to say
1451 1454 # which changenode any manifestnode belongs to.
1452 1455 def lookup_manifest_link(mnfstnode):
1453 1456 return msng_mnfst_set[mnfstnode]
1454 1457
1455 1458 # A function generating function that sets up the initial environment
1456 1459 # for the inner function.
1457 1460 def filenode_collector(changedfiles):
1458 1461 next_rev = [0]
1459 1462 # This gathers information from each manifestnode included in the
1460 1463 # changegroup about which filenodes the manifest node references
1461 1464 # so we can include those in the changegroup too.
1462 1465 #
1463 1466 # It also remembers which changenode each filenode belongs to. It
1464 1467 # does this by assuming that a filenode belongs to the changenode
1465 1468 # that the first manifest referencing it belongs to.
1466 1469 def collect_msng_filenodes(mnfstnode):
1467 1470 r = mnfst.rev(mnfstnode)
1468 1471 if r == next_rev[0]:
1469 1472 # If the last rev we looked at was the one just previous,
1470 1473 # we only need to see a diff.
1471 1474 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1472 1475 # For each line in the delta
1473 1476 for dline in delta.splitlines():
1474 1477 # get the filename and filenode for that line
1475 1478 f, fnode = dline.split('\0')
1476 1479 fnode = bin(fnode[:40])
1477 1480 f = changedfiles.get(f, None)
1478 1481 # And if the file is in the list of files we care
1479 1482 # about.
1480 1483 if f is not None:
1481 1484 # Get the changenode this manifest belongs to
1482 1485 clnode = msng_mnfst_set[mnfstnode]
1483 1486 # Create the set of filenodes for the file if
1484 1487 # there isn't one already.
1485 1488 ndset = msng_filenode_set.setdefault(f, {})
1486 1489 # And set the filenode's changelog node to the
1487 1490 # manifest's if it hasn't been set already.
1488 1491 ndset.setdefault(fnode, clnode)
1489 1492 else:
1490 1493 # Otherwise we need a full manifest.
1491 1494 m = mnfst.read(mnfstnode)
1492 1495 # For every file we care about.
1493 1496 for f in changedfiles:
1494 1497 fnode = m.get(f, None)
1495 1498 # If it's in the manifest
1496 1499 if fnode is not None:
1497 1500 # See comments above.
1498 1501 clnode = msng_mnfst_set[mnfstnode]
1499 1502 ndset = msng_filenode_set.setdefault(f, {})
1500 1503 ndset.setdefault(fnode, clnode)
1501 1504 # Remember the revision we hope to see next.
1502 1505 next_rev[0] = r + 1
1503 1506 return collect_msng_filenodes
1504 1507
1505 1508 # We have a list of filenodes we think we need for a file, let's remove
1506 1509 # all those we know the recipient must have.
1507 1510 def prune_filenodes(f, filerevlog):
1508 1511 msngset = msng_filenode_set[f]
1509 1512 hasset = {}
1510 1513 # If a 'missing' filenode thinks it belongs to a changenode we
1511 1514 # assume the recipient must have, then the recipient must have
1512 1515 # that filenode.
1513 1516 for n in msngset:
1514 1517 clnode = cl.node(filerevlog.linkrev(n))
1515 1518 if clnode in has_cl_set:
1516 1519 hasset[n] = 1
1517 1520 prune_parents(filerevlog, hasset, msngset)
1518 1521
1519 1522 # A function generating function that sets up a context for the
1520 1523 # inner function.
1521 1524 def lookup_filenode_link_func(fname):
1522 1525 msngset = msng_filenode_set[fname]
1523 1526 # Lookup the changenode the filenode belongs to.
1524 1527 def lookup_filenode_link(fnode):
1525 1528 return msngset[fnode]
1526 1529 return lookup_filenode_link
1527 1530
1528 1531 # Now that we have all these utility functions to help out and
1529 1532 # logically divide up the task, generate the group.
1530 1533 def gengroup():
1531 1534 # The set of changed files starts empty.
1532 1535 changedfiles = {}
1533 1536 # Create a changenode group generator that will call our functions
1534 1537 # back to lookup the owning changenode and collect information.
1535 1538 group = cl.group(msng_cl_lst, identity,
1536 1539 manifest_and_file_collector(changedfiles))
1537 1540 for chnk in group:
1538 1541 yield chnk
1539 1542
1540 1543 # The list of manifests has been collected by the generator
1541 1544 # calling our functions back.
1542 1545 prune_manifests()
1543 1546 msng_mnfst_lst = msng_mnfst_set.keys()
1544 1547 # Sort the manifestnodes by revision number.
1545 1548 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1546 1549 # Create a generator for the manifestnodes that calls our lookup
1547 1550 # and data collection functions back.
1548 1551 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1549 1552 filenode_collector(changedfiles))
1550 1553 for chnk in group:
1551 1554 yield chnk
1552 1555
1553 1556 # These are no longer needed, dereference and toss the memory for
1554 1557 # them.
1555 1558 msng_mnfst_lst = None
1556 1559 msng_mnfst_set.clear()
1557 1560
1558 1561 changedfiles = changedfiles.keys()
1559 1562 changedfiles.sort()
1560 1563 # Go through all our files in order sorted by name.
1561 1564 for fname in changedfiles:
1562 1565 filerevlog = self.file(fname)
1563 1566 # Toss out the filenodes that the recipient isn't really
1564 1567 # missing.
1565 1568 if msng_filenode_set.has_key(fname):
1566 1569 prune_filenodes(fname, filerevlog)
1567 1570 msng_filenode_lst = msng_filenode_set[fname].keys()
1568 1571 else:
1569 1572 msng_filenode_lst = []
1570 1573 # If any filenodes are left, generate the group for them,
1571 1574 # otherwise don't bother.
1572 1575 if len(msng_filenode_lst) > 0:
1573 1576 yield changegroup.genchunk(fname)
1574 1577 # Sort the filenodes by their revision #
1575 1578 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1576 1579 # Create a group generator and only pass in a changenode
1577 1580 # lookup function as we need to collect no information
1578 1581 # from filenodes.
1579 1582 group = filerevlog.group(msng_filenode_lst,
1580 1583 lookup_filenode_link_func(fname))
1581 1584 for chnk in group:
1582 1585 yield chnk
1583 1586 if msng_filenode_set.has_key(fname):
1584 1587 # Don't need this anymore, toss it to free memory.
1585 1588 del msng_filenode_set[fname]
1586 1589 # Signal that no more groups are left.
1587 1590 yield changegroup.closechunk()
1588 1591
1589 1592 if msng_cl_lst:
1590 1593 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1591 1594
1592 1595 return util.chunkbuffer(gengroup())
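
        # Illustrative note (not part of the original module): the chunk
        # stream that gengroup() yields follows the changegroup wire layout
        # implied by the code above and by addchangegroup() below -- each
        # delta group ends in an empty "close" chunk:
        #
        #   changelog delta chunks ... empty chunk
        #   manifest delta chunks  ... empty chunk
        #   for each changed file:
        #       chunk holding the file name
        #       file delta chunks  ... empty chunk
        #   empty chunk (no more files)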

    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
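
        # Illustrative usage sketch (not part of the original module): a
        # push pairs this generator with addchangegroup() on the receiving
        # side.  'repo' and 'remote' are assumed repository objects, and
        # the basenodes here are assumed to be the nodes the remote side
        # already has:
        #
        #   cg = repo.changegroup(remote.heads(), 'push')
        #   remote.addchangegroup(cg, 'push', repo.url())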

    def addchangegroup(self, source, srctype, url):
        """Add a changegroup to the repo.

        Return the number of heads modified or added, plus one."""

        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.opener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1 # rev of the last changeset already present
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1 # rev of the last changeset received
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.opener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        return newheads - oldheads + 1
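
        # Illustrative note (not part of the original module): since the
        # return value is newheads - oldheads + 1, callers can presumably
        # interpret it as 0 = nothing added, 1 = changesets added without
        # changing the head count, >1 = new heads appeared.  A hedged
        # sketch, with 'repo', 'cg' and 'remote' assumed to exist:
        #
        #   modheads = repo.addchangegroup(cg, 'pull', remote.url())
        #   if modheads > 1:
        #       repo.ui.status("(run 'hg heads' to see new heads)\n")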


    def stream_in(self, remote):
        fp = remote.stream_out()
        resp = int(fp.readline())
        if resp != 0:
            raise util.Abort(_('operation forbidden by server'))
        self.ui.status(_('streaming all changes\n'))
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            name, size = fp.readline().split('\0', 1)
            size = int(size)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.opener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.reload()
        return len(self.heads()) + 1
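
        # Illustrative note (not part of the original module): the wire
        # format consumed above is, as the parsing code implies:
        #
        #   <response code>\n            -- 0 means streaming is allowed
        #   <total files> <total bytes>\n
        #   then, for each file:
        #   <store path>\0<size>\n       -- followed by <size> raw bytes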

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)
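
    # Illustrative usage sketch (not part of the original module); 'ui' is
    # assumed to be a ui object and 'remote' a repository object obtained
    # elsewhere:
    #
    #   dest = localrepository(ui, '/tmp/copy', create=1)
    #   dest.clone(remote, stream=True)  # falls back to pull() when the
    #                                    # server lacks the 'stream' cap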

# used to avoid circular references so destructors work
def aftertrans(base):
    p = base
    def a():
        util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
        util.rename(os.path.join(p, "journal.dirstate"),
                    os.path.join(p, "undo.dirstate"))
    return a
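
# Illustrative note (not part of the original module): aftertrans(base)
# returns a callback that renames the journal files to undo files, so the
# pre-transaction state can later be restored.  A hedged sketch of how it
# might be wired up as a transaction's post-close callback; 'journalpath'
# and 'repopath' are assumed names:
#
#   tr = transaction.transaction(ui.warn, opener, journalpath,
#                                aftertrans(repopath))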

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True