transfer branchmap branch names over the wire in utf-8
Henrik Stuart
r9671:9471d9a9 default
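
This changeset makes the branchmap wire command carry branch names as UTF-8 regardless of the local encoding configured on either peer. On the local side, localrepo now keeps the UTF-8 keyed map (branchmap(), which is also the on-disk cache format) and derives a local-encoding view (lbranchmap()) from it, converting keys with encoding.tolocal() at the boundary; the wire-protocol half of the change is not shown in this excerpt. A minimal sketch of that conversion, assuming latin1 as the peer's local encoding (tolocal and fromlocal below stand in for mercurial.encoding.tolocal/fromlocal):

    # Minimal sketch of the encoding boundary introduced here; 'latin1'
    # is an illustrative local encoding, as in the test below.

    def tolocal(utf8_key, local_encoding='latin1'):
        # wire/cache representation (UTF-8) -> local charset
        return utf8_key.decode('utf-8').encode(local_encoding)

    def fromlocal(local_key, local_encoding='latin1'):
        # local charset -> wire/cache representation (UTF-8)
        return local_key.decode(local_encoding).encode('utf-8')

    ubranchmap = {b'\xc3\xa6': [b'<head node>']}   # UTF-8 key: 'æ'
    lbranchmap = dict((tolocal(k), v) for k, v in ubranchmap.items())
    assert list(lbranchmap) == [b'\xe6']           # latin1 for the same name
    assert fromlocal(b'\xe6') == b'\xc3\xa6'       # round-trips losslessly
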
@@ -0,0 +1,23 @@
1 #!/bin/sh
2
3 hgserve()
4 {
5 hg serve -a localhost -p $HGPORT1 -d --pid-file=hg.pid -E errors.log -v $@ \
6 | sed -e 's/:[0-9][0-9]*//g' -e 's/http:\/\/[^/]*\//http:\/\/localhost\//'
7 cat hg.pid >> "$DAEMON_PIDS"
8 }
9
10 hg init a
11 hg --encoding utf-8 -R a branch æ
12 echo foo > a/foo
13 hg -R a ci -Am foo
14
15 hgserve -R a --config web.push_ssl=False --config web.allow_push=* --encoding latin1
16 hg clone http://localhost:$HGPORT1 b
17 hg --encoding utf-8 -R b log
18 echo bar >> b/foo
19 hg -R b ci -m bar
20 hg --encoding utf-8 -R b push | sed "s/$HGPORT1/PORT/"
21 hg -R a --encoding utf-8 log
22
23 kill `cat hg.pid`
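
The script above deliberately mismatches encodings (a UTF-8 client against a server run with --encoding latin1) so that clone, push, and log would expose any lossy branch-name conversion. For illustration only, here is the failure mode that a fixed UTF-8 wire encoding rules out; the values are hypothetical and not part of the changeset:

    # A name sent in the server's charset cannot be decoded reliably
    # by a UTF-8 peer.
    name = u'\xe6'                        # branch 'æ'
    old_wire = name.encode('latin1')      # server's charset: b'\xe6'
    try:
        old_wire.decode('utf-8')          # UTF-8 client: undecodable
    except UnicodeDecodeError:
        pass                              # name is lost or mangled
    new_wire = name.encode('utf-8')       # fixed wire encoding: b'\xc3\xa6'
    assert new_wire.decode('utf-8') == name
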
@@ -0,0 +1,36 @@
1 marked working directory as branch æ
2 adding foo
3 listening at http://localhost/ (bound to 127.0.0.1)
4 requesting all changes
5 adding changesets
6 adding manifests
7 adding file changes
8 added 1 changesets with 1 changes to 1 files
9 updating working directory
10 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
11 changeset: 0:867c11ce77b8
12 branch: æ
13 tag: tip
14 user: test
15 date: Thu Jan 01 00:00:00 1970 +0000
16 summary: foo
17
18 pushing to http://localhost:PORT
19 searching for changes
20 adding changesets
21 adding manifests
22 adding file changes
23 added 1 changesets with 1 changes to 1 files
24 changeset: 1:58e7c90d67cb
25 branch: æ
26 tag: tip
27 user: test
28 date: Thu Jan 01 00:00:00 1970 +0000
29 summary: bar
30
31 changeset: 0:867c11ce77b8
32 branch: æ
33 user: test
34 date: Thu Jan 01 00:00:00 1970 +0000
35 summary: foo
36
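
In the localrepo.py hunk that follows, the old branchmap() body is split in two: branchmap() now returns the UTF-8 keyed head cache (_ubranchcache, the form written to branchheads.cache and sent over the wire), while the new lbranchmap() derives the local-encoding view consumed by branchtags() and branchheads(); prepush() keeps using branchmap() so that local and remote heads compare in the same encoding. A condensed, self-contained model of that two-level cache, with illustrative names (BranchCacheModel and its arguments are not Mercurial APIs):

    def tolocal(utf8_key, local_encoding='latin1'):
        return utf8_key.decode('utf-8').encode(local_encoding)

    class BranchCacheModel(object):
        def __init__(self, compute_utf8_heads, tip):
            self._compute = compute_utf8_heads  # plays _updatebranchcache
            self._tip = tip                     # plays changelog.tip()
            self._ubranchcache = None           # UTF-8 keys -> head nodes
            self._branchcachetip = None

        def branchmap(self):
            # wire- and disk-facing map, rebuilt only when the tip moved
            if (self._ubranchcache is None
                    or self._branchcachetip != self._tip):
                self._branchcachetip = self._tip
                self._ubranchcache = self._compute()
            return self._ubranchcache

        def lbranchmap(self):
            # local-facing view, keys re-encoded from the UTF-8 map
            return dict((tolocal(k), v)
                        for k, v in self.branchmap().items())

    cache = BranchCacheModel(lambda: {b'\xc3\xa6': [b'<node>']}, b'tip')
    assert b'\xe6' in cache.lbranchmap()
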
@@ -1,2171 +1,2179 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as match_
15 15 import merge as merge_
16 16 import tags as tags_
17 17 from lock import release
18 18 import weakref, stat, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20
21 21 class localrepository(repo.repository):
22 22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
23 23 supported = set('revlogv1 store fncache shared'.split())
24 24
25 25 def __init__(self, baseui, path=None, create=0):
26 26 repo.repository.__init__(self)
27 27 self.root = os.path.realpath(path)
28 28 self.path = os.path.join(self.root, ".hg")
29 29 self.origroot = path
30 30 self.opener = util.opener(self.path)
31 31 self.wopener = util.opener(self.root)
32 32 self.baseui = baseui
33 33 self.ui = baseui.copy()
34 34
35 35 try:
36 36 self.ui.readconfig(self.join("hgrc"), self.root)
37 37 extensions.loadall(self.ui)
38 38 except IOError:
39 39 pass
40 40
41 41 if not os.path.isdir(self.path):
42 42 if create:
43 43 if not os.path.exists(path):
44 44 os.mkdir(path)
45 45 os.mkdir(self.path)
46 46 requirements = ["revlogv1"]
47 47 if self.ui.configbool('format', 'usestore', True):
48 48 os.mkdir(os.path.join(self.path, "store"))
49 49 requirements.append("store")
50 50 if self.ui.configbool('format', 'usefncache', True):
51 51 requirements.append("fncache")
52 52 # create an invalid changelog
53 53 self.opener("00changelog.i", "a").write(
54 54 '\0\0\0\2' # represents revlogv2
55 55 ' dummy changelog to prevent using the old repo layout'
56 56 )
57 57 reqfile = self.opener("requires", "w")
58 58 for r in requirements:
59 59 reqfile.write("%s\n" % r)
60 60 reqfile.close()
61 61 else:
62 62 raise error.RepoError(_("repository %s not found") % path)
63 63 elif create:
64 64 raise error.RepoError(_("repository %s already exists") % path)
65 65 else:
66 66 # find requirements
67 67 requirements = set()
68 68 try:
69 69 requirements = set(self.opener("requires").read().splitlines())
70 70 except IOError, inst:
71 71 if inst.errno != errno.ENOENT:
72 72 raise
73 73 for r in requirements - self.supported:
74 74 raise error.RepoError(_("requirement '%s' not supported") % r)
75 75
76 76 self.sharedpath = self.path
77 77 try:
78 78 s = os.path.realpath(self.opener("sharedpath").read())
79 79 if not os.path.exists(s):
80 80 raise error.RepoError(
81 81 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 82 self.sharedpath = s
83 83 except IOError, inst:
84 84 if inst.errno != errno.ENOENT:
85 85 raise
86 86
87 87 self.store = store.store(requirements, self.sharedpath, util.opener)
88 88 self.spath = self.store.path
89 89 self.sopener = self.store.opener
90 90 self.sjoin = self.store.join
91 91 self.opener.createmode = self.store.createmode
92 92
93 93 # These two define the set of tags for this repository. _tags
94 94 # maps tag name to node; _tagtypes maps tag name to 'global' or
95 95 # 'local'. (Global tags are defined by .hgtags across all
96 96 # heads, and local tags are defined in .hg/localtags.) They
97 97 # constitute the in-memory cache of tags.
98 98 self._tags = None
99 99 self._tagtypes = None
100 100
101 101 self.branchcache = None
102 102 self._ubranchcache = None # UTF-8 version of branchcache
103 103 self._branchcachetip = None
104 104 self.nodetagscache = None
105 105 self.filterpats = {}
106 106 self._datafilters = {}
107 107 self._transref = self._lockref = self._wlockref = None
108 108
109 109 @propertycache
110 110 def changelog(self):
111 111 c = changelog.changelog(self.sopener)
112 112 if 'HG_PENDING' in os.environ:
113 113 p = os.environ['HG_PENDING']
114 114 if p.startswith(self.root):
115 115 c.readpending('00changelog.i.a')
116 116 self.sopener.defversion = c.version
117 117 return c
118 118
119 119 @propertycache
120 120 def manifest(self):
121 121 return manifest.manifest(self.sopener)
122 122
123 123 @propertycache
124 124 def dirstate(self):
125 125 return dirstate.dirstate(self.opener, self.ui, self.root)
126 126
127 127 def __getitem__(self, changeid):
128 128 if changeid is None:
129 129 return context.workingctx(self)
130 130 return context.changectx(self, changeid)
131 131
132 132 def __nonzero__(self):
133 133 return True
134 134
135 135 def __len__(self):
136 136 return len(self.changelog)
137 137
138 138 def __iter__(self):
139 139 for i in xrange(len(self)):
140 140 yield i
141 141
142 142 def url(self):
143 143 return 'file:' + self.root
144 144
145 145 def hook(self, name, throw=False, **args):
146 146 return hook.hook(self.ui, self, name, throw, **args)
147 147
148 148 tag_disallowed = ':\r\n'
149 149
150 150 def _tag(self, names, node, message, local, user, date, extra={}):
151 151 if isinstance(names, str):
152 152 allchars = names
153 153 names = (names,)
154 154 else:
155 155 allchars = ''.join(names)
156 156 for c in self.tag_disallowed:
157 157 if c in allchars:
158 158 raise util.Abort(_('%r cannot be used in a tag name') % c)
159 159
160 160 for name in names:
161 161 self.hook('pretag', throw=True, node=hex(node), tag=name,
162 162 local=local)
163 163
164 164 def writetags(fp, names, munge, prevtags):
165 165 fp.seek(0, 2)
166 166 if prevtags and prevtags[-1] != '\n':
167 167 fp.write('\n')
168 168 for name in names:
169 169 m = munge and munge(name) or name
170 170 if self._tagtypes and name in self._tagtypes:
171 171 old = self._tags.get(name, nullid)
172 172 fp.write('%s %s\n' % (hex(old), m))
173 173 fp.write('%s %s\n' % (hex(node), m))
174 174 fp.close()
175 175
176 176 prevtags = ''
177 177 if local:
178 178 try:
179 179 fp = self.opener('localtags', 'r+')
180 180 except IOError:
181 181 fp = self.opener('localtags', 'a')
182 182 else:
183 183 prevtags = fp.read()
184 184
185 185 # local tags are stored in the current charset
186 186 writetags(fp, names, None, prevtags)
187 187 for name in names:
188 188 self.hook('tag', node=hex(node), tag=name, local=local)
189 189 return
190 190
191 191 try:
192 192 fp = self.wfile('.hgtags', 'rb+')
193 193 except IOError:
194 194 fp = self.wfile('.hgtags', 'ab')
195 195 else:
196 196 prevtags = fp.read()
197 197
198 198 # committed tags are stored in UTF-8
199 199 writetags(fp, names, encoding.fromlocal, prevtags)
200 200
201 201 if '.hgtags' not in self.dirstate:
202 202 self.add(['.hgtags'])
203 203
204 204 m = match_.exact(self.root, '', ['.hgtags'])
205 205 tagnode = self.commit(message, user, date, extra=extra, match=m)
206 206
207 207 for name in names:
208 208 self.hook('tag', node=hex(node), tag=name, local=local)
209 209
210 210 return tagnode
211 211
212 212 def tag(self, names, node, message, local, user, date):
213 213 '''tag a revision with one or more symbolic names.
214 214
215 215 names is a list of strings or, when adding a single tag, names may be a
216 216 string.
217 217
218 218 if local is True, the tags are stored in a per-repository file.
219 219 otherwise, they are stored in the .hgtags file, and a new
220 220 changeset is committed with the change.
221 221
222 222 keyword arguments:
223 223
224 224 local: whether to store tags in non-version-controlled file
225 225 (default False)
226 226
227 227 message: commit message to use if committing
228 228
229 229 user: name of user to use if committing
230 230
231 231 date: date tuple to use if committing'''
232 232
233 233 for x in self.status()[:5]:
234 234 if '.hgtags' in x:
235 235 raise util.Abort(_('working copy of .hgtags is changed '
236 236 '(please commit .hgtags manually)'))
237 237
238 238 self.tags() # instantiate the cache
239 239 self._tag(names, node, message, local, user, date)
240 240
241 241 def tags(self):
242 242 '''return a mapping of tag to node'''
243 243 if self._tags is None:
244 244 (self._tags, self._tagtypes) = self._findtags()
245 245
246 246 return self._tags
247 247
248 248 def _findtags(self):
249 249 '''Do the hard work of finding tags. Return a pair of dicts
250 250 (tags, tagtypes) where tags maps tag name to node, and tagtypes
251 251 maps tag name to a string like \'global\' or \'local\'.
252 252 Subclasses or extensions are free to add their own tags, but
253 253 should be aware that the returned dicts will be retained for the
254 254 duration of the localrepo object.'''
255 255
256 256 # XXX what tagtype should subclasses/extensions use? Currently
257 257 # mq and bookmarks add tags, but do not set the tagtype at all.
258 258 # Should each extension invent its own tag type? Should there
259 259 # be one tagtype for all such "virtual" tags? Or is the status
260 260 # quo fine?
261 261
262 262 alltags = {} # map tag name to (node, hist)
263 263 tagtypes = {}
264 264
265 265 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
266 266 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
267 267
268 268 # Build the return dicts. Have to re-encode tag names because
269 269 # the tags module always uses UTF-8 (in order not to lose info
270 270 # writing to the cache), but the rest of Mercurial wants them in
271 271 # local encoding.
272 272 tags = {}
273 273 for (name, (node, hist)) in alltags.iteritems():
274 274 if node != nullid:
275 275 tags[encoding.tolocal(name)] = node
276 276 tags['tip'] = self.changelog.tip()
277 277 tagtypes = dict([(encoding.tolocal(name), value)
278 278 for (name, value) in tagtypes.iteritems()])
279 279 return (tags, tagtypes)
280 280
281 281 def tagtype(self, tagname):
282 282 '''
283 283 return the type of the given tag. result can be:
284 284
285 285 'local' : a local tag
286 286 'global' : a global tag
287 287 None : tag does not exist
288 288 '''
289 289
290 290 self.tags()
291 291
292 292 return self._tagtypes.get(tagname)
293 293
294 294 def tagslist(self):
295 295 '''return a list of tags ordered by revision'''
296 296 l = []
297 297 for t, n in self.tags().iteritems():
298 298 try:
299 299 r = self.changelog.rev(n)
300 300 except:
301 301 r = -2 # sort to the beginning of the list if unknown
302 302 l.append((r, t, n))
303 303 return [(t, n) for r, t, n in sorted(l)]
304 304
305 305 def nodetags(self, node):
306 306 '''return the tags associated with a node'''
307 307 if not self.nodetagscache:
308 308 self.nodetagscache = {}
309 309 for t, n in self.tags().iteritems():
310 310 self.nodetagscache.setdefault(n, []).append(t)
311 311 return self.nodetagscache.get(node, [])
312 312
313 313 def _branchtags(self, partial, lrev):
314 314 # TODO: rename this function?
315 315 tiprev = len(self) - 1
316 316 if lrev != tiprev:
317 317 self._updatebranchcache(partial, lrev+1, tiprev+1)
318 318 self._writebranchcache(partial, self.changelog.tip(), tiprev)
319 319
320 320 return partial
321 321
322 def branchmap(self):
322 def lbranchmap(self):
323 323 tip = self.changelog.tip()
324 324 if self.branchcache is not None and self._branchcachetip == tip:
325 325 return self.branchcache
326 326
327 partial = self.branchmap()
328
329 # the branch cache is stored on disk as UTF-8, but in the local
330 # charset internally
331 for k, v in partial.iteritems():
332 self.branchcache[encoding.tolocal(k)] = v
333 return self.branchcache
334
335 def branchmap(self):
336 tip = self.changelog.tip()
337 if self._ubranchcache is not None and self._branchcachetip == tip:
338 return self._ubranchcache
339
327 340 oldtip = self._branchcachetip
328 341 self._branchcachetip = tip
329 342 if self.branchcache is None:
330 343 self.branchcache = {} # avoid recursion in changectx
331 344 else:
332 345 self.branchcache.clear() # keep using the same dict
333 346 if oldtip is None or oldtip not in self.changelog.nodemap:
334 347 partial, last, lrev = self._readbranchcache()
335 348 else:
336 349 lrev = self.changelog.rev(oldtip)
337 350 partial = self._ubranchcache
338 351
339 352 self._branchtags(partial, lrev)
340 353 # this private cache holds all heads (not just tips)
341 354 self._ubranchcache = partial
342 355
343 # the branch cache is stored on disk as UTF-8, but in the local
344 # charset internally
345 for k, v in partial.iteritems():
346 self.branchcache[encoding.tolocal(k)] = v
347 return self.branchcache
348
356 return self._ubranchcache
349 357
350 358 def branchtags(self):
351 359 '''return a dict where branch names map to the tipmost head of
352 360 the branch, open heads come before closed'''
353 361 bt = {}
354 for bn, heads in self.branchmap().iteritems():
362 for bn, heads in self.lbranchmap().iteritems():
355 363 head = None
356 364 for i in range(len(heads)-1, -1, -1):
357 365 h = heads[i]
358 366 if 'close' not in self.changelog.read(h)[5]:
359 367 head = h
360 368 break
361 369 # no open heads were found
362 370 if head is None:
363 371 head = heads[-1]
364 372 bt[bn] = head
365 373 return bt
366 374
367 375
368 376 def _readbranchcache(self):
369 377 partial = {}
370 378 try:
371 379 f = self.opener("branchheads.cache")
372 380 lines = f.read().split('\n')
373 381 f.close()
374 382 except (IOError, OSError):
375 383 return {}, nullid, nullrev
376 384
377 385 try:
378 386 last, lrev = lines.pop(0).split(" ", 1)
379 387 last, lrev = bin(last), int(lrev)
380 388 if lrev >= len(self) or self[lrev].node() != last:
381 389 # invalidate the cache
382 390 raise ValueError('invalidating branch cache (tip differs)')
383 391 for l in lines:
384 392 if not l: continue
385 393 node, label = l.split(" ", 1)
386 394 partial.setdefault(label.strip(), []).append(bin(node))
387 395 except KeyboardInterrupt:
388 396 raise
389 397 except Exception, inst:
390 398 if self.ui.debugflag:
391 399 self.ui.warn(str(inst), '\n')
392 400 partial, last, lrev = {}, nullid, nullrev
393 401 return partial, last, lrev
394 402
395 403 def _writebranchcache(self, branches, tip, tiprev):
396 404 try:
397 405 f = self.opener("branchheads.cache", "w", atomictemp=True)
398 406 f.write("%s %s\n" % (hex(tip), tiprev))
399 407 for label, nodes in branches.iteritems():
400 408 for node in nodes:
401 409 f.write("%s %s\n" % (hex(node), label))
402 410 f.rename()
403 411 except (IOError, OSError):
404 412 pass
405 413
406 414 def _updatebranchcache(self, partial, start, end):
407 415 # collect new branch entries
408 416 newbranches = {}
409 417 for r in xrange(start, end):
410 418 c = self[r]
411 419 newbranches.setdefault(c.branch(), []).append(c.node())
412 420 # if older branchheads are reachable from new ones, they aren't
413 421 # really branchheads. Note checking parents is insufficient:
414 422 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
415 423 for branch, newnodes in newbranches.iteritems():
416 424 bheads = partial.setdefault(branch, [])
417 425 bheads.extend(newnodes)
418 426 if len(bheads) < 2:
419 427 continue
420 428 newbheads = []
421 429 # starting from tip means fewer passes over reachable
422 430 while newnodes:
423 431 latest = newnodes.pop()
424 432 if latest not in bheads:
425 433 continue
426 434 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
427 435 reachable = self.changelog.reachable(latest, minbhrev)
428 436 bheads = [b for b in bheads if b not in reachable]
429 437 newbheads.insert(0, latest)
430 438 bheads.extend(newbheads)
431 439 partial[branch] = bheads
432 440
433 441 def lookup(self, key):
434 442 if isinstance(key, int):
435 443 return self.changelog.node(key)
436 444 elif key == '.':
437 445 return self.dirstate.parents()[0]
438 446 elif key == 'null':
439 447 return nullid
440 448 elif key == 'tip':
441 449 return self.changelog.tip()
442 450 n = self.changelog._match(key)
443 451 if n:
444 452 return n
445 453 if key in self.tags():
446 454 return self.tags()[key]
447 455 if key in self.branchtags():
448 456 return self.branchtags()[key]
449 457 n = self.changelog._partialmatch(key)
450 458 if n:
451 459 return n
452 460
453 461 # can't find key, check if it might have come from damaged dirstate
454 462 if key in self.dirstate.parents():
455 463 raise error.Abort(_("working directory has unknown parent '%s'!")
456 464 % short(key))
457 465 try:
458 466 if len(key) == 20:
459 467 key = hex(key)
460 468 except:
461 469 pass
462 470 raise error.RepoLookupError(_("unknown revision '%s'") % key)
463 471
464 472 def local(self):
465 473 return True
466 474
467 475 def join(self, f):
468 476 return os.path.join(self.path, f)
469 477
470 478 def wjoin(self, f):
471 479 return os.path.join(self.root, f)
472 480
473 481 def rjoin(self, f):
474 482 return os.path.join(self.root, util.pconvert(f))
475 483
476 484 def file(self, f):
477 485 if f[0] == '/':
478 486 f = f[1:]
479 487 return filelog.filelog(self.sopener, f)
480 488
481 489 def changectx(self, changeid):
482 490 return self[changeid]
483 491
484 492 def parents(self, changeid=None):
485 493 '''get list of changectxs for parents of changeid'''
486 494 return self[changeid].parents()
487 495
488 496 def filectx(self, path, changeid=None, fileid=None):
489 497 """changeid can be a changeset revision, node, or tag.
490 498 fileid can be a file revision or node."""
491 499 return context.filectx(self, path, changeid, fileid)
492 500
493 501 def getcwd(self):
494 502 return self.dirstate.getcwd()
495 503
496 504 def pathto(self, f, cwd=None):
497 505 return self.dirstate.pathto(f, cwd)
498 506
499 507 def wfile(self, f, mode='r'):
500 508 return self.wopener(f, mode)
501 509
502 510 def _link(self, f):
503 511 return os.path.islink(self.wjoin(f))
504 512
505 513 def _filter(self, filter, filename, data):
506 514 if filter not in self.filterpats:
507 515 l = []
508 516 for pat, cmd in self.ui.configitems(filter):
509 517 if cmd == '!':
510 518 continue
511 519 mf = match_.match(self.root, '', [pat])
512 520 fn = None
513 521 params = cmd
514 522 for name, filterfn in self._datafilters.iteritems():
515 523 if cmd.startswith(name):
516 524 fn = filterfn
517 525 params = cmd[len(name):].lstrip()
518 526 break
519 527 if not fn:
520 528 fn = lambda s, c, **kwargs: util.filter(s, c)
521 529 # Wrap old filters not supporting keyword arguments
522 530 if not inspect.getargspec(fn)[2]:
523 531 oldfn = fn
524 532 fn = lambda s, c, **kwargs: oldfn(s, c)
525 533 l.append((mf, fn, params))
526 534 self.filterpats[filter] = l
527 535
528 536 for mf, fn, cmd in self.filterpats[filter]:
529 537 if mf(filename):
530 538 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
531 539 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
532 540 break
533 541
534 542 return data
535 543
536 544 def adddatafilter(self, name, filter):
537 545 self._datafilters[name] = filter
538 546
539 547 def wread(self, filename):
540 548 if self._link(filename):
541 549 data = os.readlink(self.wjoin(filename))
542 550 else:
543 551 data = self.wopener(filename, 'r').read()
544 552 return self._filter("encode", filename, data)
545 553
546 554 def wwrite(self, filename, data, flags):
547 555 data = self._filter("decode", filename, data)
548 556 try:
549 557 os.unlink(self.wjoin(filename))
550 558 except OSError:
551 559 pass
552 560 if 'l' in flags:
553 561 self.wopener.symlink(data, filename)
554 562 else:
555 563 self.wopener(filename, 'w').write(data)
556 564 if 'x' in flags:
557 565 util.set_flags(self.wjoin(filename), False, True)
558 566
559 567 def wwritedata(self, filename, data):
560 568 return self._filter("decode", filename, data)
561 569
562 570 def transaction(self):
563 571 tr = self._transref and self._transref() or None
564 572 if tr and tr.running():
565 573 return tr.nest()
566 574
567 575 # abort here if the journal already exists
568 576 if os.path.exists(self.sjoin("journal")):
569 577 raise error.RepoError(_("journal already exists - run hg recover"))
570 578
571 579 # save dirstate for rollback
572 580 try:
573 581 ds = self.opener("dirstate").read()
574 582 except IOError:
575 583 ds = ""
576 584 self.opener("journal.dirstate", "w").write(ds)
577 585 self.opener("journal.branch", "w").write(self.dirstate.branch())
578 586
579 587 renames = [(self.sjoin("journal"), self.sjoin("undo")),
580 588 (self.join("journal.dirstate"), self.join("undo.dirstate")),
581 589 (self.join("journal.branch"), self.join("undo.branch"))]
582 590 tr = transaction.transaction(self.ui.warn, self.sopener,
583 591 self.sjoin("journal"),
584 592 aftertrans(renames),
585 593 self.store.createmode)
586 594 self._transref = weakref.ref(tr)
587 595 return tr
588 596
589 597 def recover(self):
590 598 lock = self.lock()
591 599 try:
592 600 if os.path.exists(self.sjoin("journal")):
593 601 self.ui.status(_("rolling back interrupted transaction\n"))
594 602 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
595 603 self.invalidate()
596 604 return True
597 605 else:
598 606 self.ui.warn(_("no interrupted transaction available\n"))
599 607 return False
600 608 finally:
601 609 lock.release()
602 610
603 611 def rollback(self):
604 612 wlock = lock = None
605 613 try:
606 614 wlock = self.wlock()
607 615 lock = self.lock()
608 616 if os.path.exists(self.sjoin("undo")):
609 617 self.ui.status(_("rolling back last transaction\n"))
610 618 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
611 619 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
612 620 try:
613 621 branch = self.opener("undo.branch").read()
614 622 self.dirstate.setbranch(branch)
615 623 except IOError:
616 624 self.ui.warn(_("Named branch could not be reset, "
617 625 "current branch still is: %s\n")
618 626 % encoding.tolocal(self.dirstate.branch()))
619 627 self.invalidate()
620 628 self.dirstate.invalidate()
621 629 self.destroyed()
622 630 else:
623 631 self.ui.warn(_("no rollback information available\n"))
624 632 finally:
625 633 release(lock, wlock)
626 634
627 635 def invalidate(self):
628 636 for a in "changelog manifest".split():
629 637 if a in self.__dict__:
630 638 delattr(self, a)
631 639 self._tags = None
632 640 self._tagtypes = None
633 641 self.nodetagscache = None
634 642 self.branchcache = None
635 643 self._ubranchcache = None
636 644 self._branchcachetip = None
637 645
638 646 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
639 647 try:
640 648 l = lock.lock(lockname, 0, releasefn, desc=desc)
641 649 except error.LockHeld, inst:
642 650 if not wait:
643 651 raise
644 652 self.ui.warn(_("waiting for lock on %s held by %r\n") %
645 653 (desc, inst.locker))
646 654 # default to 600 seconds timeout
647 655 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
648 656 releasefn, desc=desc)
649 657 if acquirefn:
650 658 acquirefn()
651 659 return l
652 660
653 661 def lock(self, wait=True):
654 662 '''Lock the repository store (.hg/store) and return a weak reference
655 663 to the lock. Use this before modifying the store (e.g. committing or
656 664 stripping). If you are opening a transaction, get a lock as well.)'''
657 665 l = self._lockref and self._lockref()
658 666 if l is not None and l.held:
659 667 l.lock()
660 668 return l
661 669
662 670 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
663 671 _('repository %s') % self.origroot)
664 672 self._lockref = weakref.ref(l)
665 673 return l
666 674
667 675 def wlock(self, wait=True):
668 676 '''Lock the non-store parts of the repository (everything under
669 677 .hg except .hg/store) and return a weak reference to the lock.
670 678 Use this before modifying files in .hg.'''
671 679 l = self._wlockref and self._wlockref()
672 680 if l is not None and l.held:
673 681 l.lock()
674 682 return l
675 683
676 684 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
677 685 self.dirstate.invalidate, _('working directory of %s') %
678 686 self.origroot)
679 687 self._wlockref = weakref.ref(l)
680 688 return l
681 689
682 690 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
683 691 """
684 692 commit an individual file as part of a larger transaction
685 693 """
686 694
687 695 fname = fctx.path()
688 696 text = fctx.data()
689 697 flog = self.file(fname)
690 698 fparent1 = manifest1.get(fname, nullid)
691 699 fparent2 = fparent2o = manifest2.get(fname, nullid)
692 700
693 701 meta = {}
694 702 copy = fctx.renamed()
695 703 if copy and copy[0] != fname:
696 704 # Mark the new revision of this file as a copy of another
697 705 # file. This copy data will effectively act as a parent
698 706 # of this new revision. If this is a merge, the first
699 707 # parent will be the nullid (meaning "look up the copy data")
700 708 # and the second one will be the other parent. For example:
701 709 #
702 710 # 0 --- 1 --- 3 rev1 changes file foo
703 711 # \ / rev2 renames foo to bar and changes it
704 712 # \- 2 -/ rev3 should have bar with all changes and
705 713 # should record that bar descends from
706 714 # bar in rev2 and foo in rev1
707 715 #
708 716 # this allows this merge to succeed:
709 717 #
710 718 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
711 719 # \ / merging rev3 and rev4 should use bar@rev2
712 720 # \- 2 --- 4 as the merge base
713 721 #
714 722
715 723 cfname = copy[0]
716 724 crev = manifest1.get(cfname)
717 725 newfparent = fparent2
718 726
719 727 if manifest2: # branch merge
720 728 if fparent2 == nullid or crev is None: # copied on remote side
721 729 if cfname in manifest2:
722 730 crev = manifest2[cfname]
723 731 newfparent = fparent1
724 732
725 733 # find source in nearest ancestor if we've lost track
726 734 if not crev:
727 735 self.ui.debug(" %s: searching for copy revision for %s\n" %
728 736 (fname, cfname))
729 737 for ancestor in self['.'].ancestors():
730 738 if cfname in ancestor:
731 739 crev = ancestor[cfname].filenode()
732 740 break
733 741
734 742 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
735 743 meta["copy"] = cfname
736 744 meta["copyrev"] = hex(crev)
737 745 fparent1, fparent2 = nullid, newfparent
738 746 elif fparent2 != nullid:
739 747 # is one parent an ancestor of the other?
740 748 fparentancestor = flog.ancestor(fparent1, fparent2)
741 749 if fparentancestor == fparent1:
742 750 fparent1, fparent2 = fparent2, nullid
743 751 elif fparentancestor == fparent2:
744 752 fparent2 = nullid
745 753
746 754 # is the file changed?
747 755 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
748 756 changelist.append(fname)
749 757 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
750 758
751 759 # are just the flags changed during merge?
752 760 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
753 761 changelist.append(fname)
754 762
755 763 return fparent1
756 764
757 765 def commit(self, text="", user=None, date=None, match=None, force=False,
758 766 editor=False, extra={}):
759 767 """Add a new revision to current repository.
760 768
761 769 Revision information is gathered from the working directory,
762 770 match can be used to filter the committed files. If editor is
763 771 supplied, it is called to get a commit message.
764 772 """
765 773
766 774 def fail(f, msg):
767 775 raise util.Abort('%s: %s' % (f, msg))
768 776
769 777 if not match:
770 778 match = match_.always(self.root, '')
771 779
772 780 if not force:
773 781 vdirs = []
774 782 match.dir = vdirs.append
775 783 match.bad = fail
776 784
777 785 wlock = self.wlock()
778 786 try:
779 787 p1, p2 = self.dirstate.parents()
780 788 wctx = self[None]
781 789
782 790 if (not force and p2 != nullid and match and
783 791 (match.files() or match.anypats())):
784 792 raise util.Abort(_('cannot partially commit a merge '
785 793 '(do not specify files or patterns)'))
786 794
787 795 changes = self.status(match=match, clean=force)
788 796 if force:
789 797 changes[0].extend(changes[6]) # mq may commit unchanged files
790 798
791 799 # check subrepos
792 800 subs = []
793 801 for s in wctx.substate:
794 802 if match(s) and wctx.sub(s).dirty():
795 803 subs.append(s)
796 804 if subs and '.hgsubstate' not in changes[0]:
797 805 changes[0].insert(0, '.hgsubstate')
798 806
799 807 # make sure all explicit patterns are matched
800 808 if not force and match.files():
801 809 matched = set(changes[0] + changes[1] + changes[2])
802 810
803 811 for f in match.files():
804 812 if f == '.' or f in matched or f in wctx.substate:
805 813 continue
806 814 if f in changes[3]: # missing
807 815 fail(f, _('file not found!'))
808 816 if f in vdirs: # visited directory
809 817 d = f + '/'
810 818 for mf in matched:
811 819 if mf.startswith(d):
812 820 break
813 821 else:
814 822 fail(f, _("no match under directory!"))
815 823 elif f not in self.dirstate:
816 824 fail(f, _("file not tracked!"))
817 825
818 826 if (not force and not extra.get("close") and p2 == nullid
819 827 and not (changes[0] or changes[1] or changes[2])
820 828 and self[None].branch() == self['.'].branch()):
821 829 return None
822 830
823 831 ms = merge_.mergestate(self)
824 832 for f in changes[0]:
825 833 if f in ms and ms[f] == 'u':
826 834 raise util.Abort(_("unresolved merge conflicts "
827 835 "(see hg resolve)"))
828 836
829 837 cctx = context.workingctx(self, (p1, p2), text, user, date,
830 838 extra, changes)
831 839 if editor:
832 840 cctx._text = editor(self, cctx, subs)
833 841
834 842 # commit subs
835 843 if subs:
836 844 state = wctx.substate.copy()
837 845 for s in subs:
838 846 self.ui.status(_('committing subrepository %s\n') % s)
839 847 sr = wctx.sub(s).commit(cctx._text, user, date)
840 848 state[s] = (state[s][0], sr)
841 849 subrepo.writestate(self, state)
842 850
843 851 ret = self.commitctx(cctx, True)
844 852
845 853 # update dirstate and mergestate
846 854 for f in changes[0] + changes[1]:
847 855 self.dirstate.normal(f)
848 856 for f in changes[2]:
849 857 self.dirstate.forget(f)
850 858 self.dirstate.setparents(ret)
851 859 ms.reset()
852 860
853 861 return ret
854 862
855 863 finally:
856 864 wlock.release()
857 865
858 866 def commitctx(self, ctx, error=False):
859 867 """Add a new revision to current repository.
860 868
861 869 Revision information is passed via the context argument.
862 870 """
863 871
864 872 tr = lock = None
865 873 removed = ctx.removed()
866 874 p1, p2 = ctx.p1(), ctx.p2()
867 875 m1 = p1.manifest().copy()
868 876 m2 = p2.manifest()
869 877 user = ctx.user()
870 878
871 879 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
872 880 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
873 881
874 882 lock = self.lock()
875 883 try:
876 884 tr = self.transaction()
877 885 trp = weakref.proxy(tr)
878 886
879 887 # check in files
880 888 new = {}
881 889 changed = []
882 890 linkrev = len(self)
883 891 for f in sorted(ctx.modified() + ctx.added()):
884 892 self.ui.note(f + "\n")
885 893 try:
886 894 fctx = ctx[f]
887 895 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
888 896 changed)
889 897 m1.set(f, fctx.flags())
890 898 except (OSError, IOError):
891 899 if error:
892 900 self.ui.warn(_("trouble committing %s!\n") % f)
893 901 raise
894 902 else:
895 903 removed.append(f)
896 904
897 905 # update manifest
898 906 m1.update(new)
899 907 removed = [f for f in sorted(removed) if f in m1 or f in m2]
900 908 drop = [f for f in removed if f in m1]
901 909 for f in drop:
902 910 del m1[f]
903 911 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
904 912 p2.manifestnode(), (new, drop))
905 913
906 914 # update changelog
907 915 self.changelog.delayupdate()
908 916 n = self.changelog.add(mn, changed + removed, ctx.description(),
909 917 trp, p1.node(), p2.node(),
910 918 user, ctx.date(), ctx.extra().copy())
911 919 p = lambda: self.changelog.writepending() and self.root or ""
912 920 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
913 921 parent2=xp2, pending=p)
914 922 self.changelog.finalize(trp)
915 923 tr.close()
916 924
917 925 if self.branchcache:
918 926 self.branchtags()
919 927
920 928 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
921 929 return n
922 930 finally:
923 931 del tr
924 932 lock.release()
925 933
926 934 def destroyed(self):
927 935 '''Inform the repository that nodes have been destroyed.
928 936 Intended for use by strip and rollback, so there's a common
929 937 place for anything that has to be done after destroying history.'''
930 938 # XXX it might be nice if we could take the list of destroyed
931 939 # nodes, but I don't see an easy way for rollback() to do that
932 940
933 941 # Ensure the persistent tag cache is updated. Doing it now
934 942 # means that the tag cache only has to worry about destroyed
935 943 # heads immediately after a strip/rollback. That in turn
936 944 # guarantees that "cachetip == currenttip" (comparing both rev
937 945 # and node) always means no nodes have been added or destroyed.
938 946
939 947 # XXX this is suboptimal when qrefresh'ing: we strip the current
940 948 # head, refresh the tag cache, then immediately add a new head.
941 949 # But I think doing it this way is necessary for the "instant
942 950 # tag cache retrieval" case to work.
943 951 tags_.findglobaltags(self.ui, self, {}, {})
944 952
945 953 def walk(self, match, node=None):
946 954 '''
947 955 walk recursively through the directory tree or a given
948 956 changeset, finding all files matched by the match
949 957 function
950 958 '''
951 959 return self[node].walk(match)
952 960
953 961 def status(self, node1='.', node2=None, match=None,
954 962 ignored=False, clean=False, unknown=False):
955 963 """return status of files between two nodes or node and working directory
956 964
957 965 If node1 is None, use the first dirstate parent instead.
958 966 If node2 is None, compare node1 with working directory.
959 967 """
960 968
961 969 def mfmatches(ctx):
962 970 mf = ctx.manifest().copy()
963 971 for fn in mf.keys():
964 972 if not match(fn):
965 973 del mf[fn]
966 974 return mf
967 975
968 976 if isinstance(node1, context.changectx):
969 977 ctx1 = node1
970 978 else:
971 979 ctx1 = self[node1]
972 980 if isinstance(node2, context.changectx):
973 981 ctx2 = node2
974 982 else:
975 983 ctx2 = self[node2]
976 984
977 985 working = ctx2.rev() is None
978 986 parentworking = working and ctx1 == self['.']
979 987 match = match or match_.always(self.root, self.getcwd())
980 988 listignored, listclean, listunknown = ignored, clean, unknown
981 989
982 990 # load earliest manifest first for caching reasons
983 991 if not working and ctx2.rev() < ctx1.rev():
984 992 ctx2.manifest()
985 993
986 994 if not parentworking:
987 995 def bad(f, msg):
988 996 if f not in ctx1:
989 997 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
990 998 match.bad = bad
991 999
992 1000 if working: # we need to scan the working dir
993 1001 s = self.dirstate.status(match, listignored, listclean, listunknown)
994 1002 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
995 1003
996 1004 # check for any possibly clean files
997 1005 if parentworking and cmp:
998 1006 fixup = []
999 1007 # do a full compare of any files that might have changed
1000 1008 for f in sorted(cmp):
1001 1009 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1002 1010 or ctx1[f].cmp(ctx2[f].data())):
1003 1011 modified.append(f)
1004 1012 else:
1005 1013 fixup.append(f)
1006 1014
1007 1015 if listclean:
1008 1016 clean += fixup
1009 1017
1010 1018 # update dirstate for files that are actually clean
1011 1019 if fixup:
1012 1020 try:
1013 1021 # updating the dirstate is optional
1014 1022 # so we don't wait on the lock
1015 1023 wlock = self.wlock(False)
1016 1024 try:
1017 1025 for f in fixup:
1018 1026 self.dirstate.normal(f)
1019 1027 finally:
1020 1028 wlock.release()
1021 1029 except error.LockError:
1022 1030 pass
1023 1031
1024 1032 if not parentworking:
1025 1033 mf1 = mfmatches(ctx1)
1026 1034 if working:
1027 1035 # we are comparing working dir against non-parent
1028 1036 # generate a pseudo-manifest for the working dir
1029 1037 mf2 = mfmatches(self['.'])
1030 1038 for f in cmp + modified + added:
1031 1039 mf2[f] = None
1032 1040 mf2.set(f, ctx2.flags(f))
1033 1041 for f in removed:
1034 1042 if f in mf2:
1035 1043 del mf2[f]
1036 1044 else:
1037 1045 # we are comparing two revisions
1038 1046 deleted, unknown, ignored = [], [], []
1039 1047 mf2 = mfmatches(ctx2)
1040 1048
1041 1049 modified, added, clean = [], [], []
1042 1050 for fn in mf2:
1043 1051 if fn in mf1:
1044 1052 if (mf1.flags(fn) != mf2.flags(fn) or
1045 1053 (mf1[fn] != mf2[fn] and
1046 1054 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1047 1055 modified.append(fn)
1048 1056 elif listclean:
1049 1057 clean.append(fn)
1050 1058 del mf1[fn]
1051 1059 else:
1052 1060 added.append(fn)
1053 1061 removed = mf1.keys()
1054 1062
1055 1063 r = modified, added, removed, deleted, unknown, ignored, clean
1056 1064 [l.sort() for l in r]
1057 1065 return r
1058 1066
1059 1067 def add(self, list):
1060 1068 wlock = self.wlock()
1061 1069 try:
1062 1070 rejected = []
1063 1071 for f in list:
1064 1072 p = self.wjoin(f)
1065 1073 try:
1066 1074 st = os.lstat(p)
1067 1075 except:
1068 1076 self.ui.warn(_("%s does not exist!\n") % f)
1069 1077 rejected.append(f)
1070 1078 continue
1071 1079 if st.st_size > 10000000:
1072 1080 self.ui.warn(_("%s: files over 10MB may cause memory and"
1073 1081 " performance problems\n"
1074 1082 "(use 'hg revert %s' to unadd the file)\n")
1075 1083 % (f, f))
1076 1084 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1077 1085 self.ui.warn(_("%s not added: only files and symlinks "
1078 1086 "supported currently\n") % f)
1079 1087 rejected.append(p)
1080 1088 elif self.dirstate[f] in 'amn':
1081 1089 self.ui.warn(_("%s already tracked!\n") % f)
1082 1090 elif self.dirstate[f] == 'r':
1083 1091 self.dirstate.normallookup(f)
1084 1092 else:
1085 1093 self.dirstate.add(f)
1086 1094 return rejected
1087 1095 finally:
1088 1096 wlock.release()
1089 1097
1090 1098 def forget(self, list):
1091 1099 wlock = self.wlock()
1092 1100 try:
1093 1101 for f in list:
1094 1102 if self.dirstate[f] != 'a':
1095 1103 self.ui.warn(_("%s not added!\n") % f)
1096 1104 else:
1097 1105 self.dirstate.forget(f)
1098 1106 finally:
1099 1107 wlock.release()
1100 1108
1101 1109 def remove(self, list, unlink=False):
1102 1110 if unlink:
1103 1111 for f in list:
1104 1112 try:
1105 1113 util.unlink(self.wjoin(f))
1106 1114 except OSError, inst:
1107 1115 if inst.errno != errno.ENOENT:
1108 1116 raise
1109 1117 wlock = self.wlock()
1110 1118 try:
1111 1119 for f in list:
1112 1120 if unlink and os.path.exists(self.wjoin(f)):
1113 1121 self.ui.warn(_("%s still exists!\n") % f)
1114 1122 elif self.dirstate[f] == 'a':
1115 1123 self.dirstate.forget(f)
1116 1124 elif f not in self.dirstate:
1117 1125 self.ui.warn(_("%s not tracked!\n") % f)
1118 1126 else:
1119 1127 self.dirstate.remove(f)
1120 1128 finally:
1121 1129 wlock.release()
1122 1130
1123 1131 def undelete(self, list):
1124 1132 manifests = [self.manifest.read(self.changelog.read(p)[0])
1125 1133 for p in self.dirstate.parents() if p != nullid]
1126 1134 wlock = self.wlock()
1127 1135 try:
1128 1136 for f in list:
1129 1137 if self.dirstate[f] != 'r':
1130 1138 self.ui.warn(_("%s not removed!\n") % f)
1131 1139 else:
1132 1140 m = f in manifests[0] and manifests[0] or manifests[1]
1133 1141 t = self.file(f).read(m[f])
1134 1142 self.wwrite(f, t, m.flags(f))
1135 1143 self.dirstate.normal(f)
1136 1144 finally:
1137 1145 wlock.release()
1138 1146
1139 1147 def copy(self, source, dest):
1140 1148 p = self.wjoin(dest)
1141 1149 if not (os.path.exists(p) or os.path.islink(p)):
1142 1150 self.ui.warn(_("%s does not exist!\n") % dest)
1143 1151 elif not (os.path.isfile(p) or os.path.islink(p)):
1144 1152 self.ui.warn(_("copy failed: %s is not a file or a "
1145 1153 "symbolic link\n") % dest)
1146 1154 else:
1147 1155 wlock = self.wlock()
1148 1156 try:
1149 1157 if self.dirstate[dest] in '?r':
1150 1158 self.dirstate.add(dest)
1151 1159 self.dirstate.copy(source, dest)
1152 1160 finally:
1153 1161 wlock.release()
1154 1162
1155 1163 def heads(self, start=None):
1156 1164 heads = self.changelog.heads(start)
1157 1165 # sort the output in rev descending order
1158 1166 heads = [(-self.changelog.rev(h), h) for h in heads]
1159 1167 return [n for (r, n) in sorted(heads)]
1160 1168
1161 1169 def branchheads(self, branch=None, start=None, closed=False):
1162 1170 '''return a (possibly filtered) list of heads for the given branch
1163 1171
1164 1172 Heads are returned in topological order, from newest to oldest.
1165 1173 If branch is None, use the dirstate branch.
1166 1174 If start is not None, return only heads reachable from start.
1167 1175 If closed is True, return heads that are marked as closed as well.
1168 1176 '''
1169 1177 if branch is None:
1170 1178 branch = self[None].branch()
1171 branches = self.branchmap()
1179 branches = self.lbranchmap()
1172 1180 if branch not in branches:
1173 1181 return []
1174 1182 # the cache returns heads ordered lowest to highest
1175 1183 bheads = list(reversed(branches[branch]))
1176 1184 if start is not None:
1177 1185 # filter out the heads that cannot be reached from startrev
1178 1186 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1179 1187 bheads = [h for h in bheads if h in fbheads]
1180 1188 if not closed:
1181 1189 bheads = [h for h in bheads if
1182 1190 ('close' not in self.changelog.read(h)[5])]
1183 1191 return bheads
1184 1192
1185 1193 def branches(self, nodes):
1186 1194 if not nodes:
1187 1195 nodes = [self.changelog.tip()]
1188 1196 b = []
1189 1197 for n in nodes:
1190 1198 t = n
1191 1199 while 1:
1192 1200 p = self.changelog.parents(n)
1193 1201 if p[1] != nullid or p[0] == nullid:
1194 1202 b.append((t, n, p[0], p[1]))
1195 1203 break
1196 1204 n = p[0]
1197 1205 return b
1198 1206
1199 1207 def between(self, pairs):
1200 1208 r = []
1201 1209
1202 1210 for top, bottom in pairs:
1203 1211 n, l, i = top, [], 0
1204 1212 f = 1
1205 1213
1206 1214 while n != bottom and n != nullid:
1207 1215 p = self.changelog.parents(n)[0]
1208 1216 if i == f:
1209 1217 l.append(n)
1210 1218 f = f * 2
1211 1219 n = p
1212 1220 i += 1
1213 1221
1214 1222 r.append(l)
1215 1223
1216 1224 return r
1217 1225
1218 1226 def findincoming(self, remote, base=None, heads=None, force=False):
1219 1227 """Return list of roots of the subsets of missing nodes from remote
1220 1228
1221 1229 If base dict is specified, assume that these nodes and their parents
1222 1230 exist on the remote side and that no child of a node of base exists
1223 1231 in both remote and self.
1224 1232 Furthermore base will be updated to include the nodes that exists
1225 1233 in self and remote but no children exists in self and remote.
1226 1234 If a list of heads is specified, return only nodes which are heads
1227 1235 or ancestors of these heads.
1228 1236
1229 1237 All the ancestors of base are in self and in remote.
1230 1238 All the descendants of the list returned are missing in self.
1231 1239 (and so we know that the rest of the nodes are missing in remote, see
1232 1240 outgoing)
1233 1241 """
1234 1242 return self.findcommonincoming(remote, base, heads, force)[1]
1235 1243
1236 1244 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1237 1245 """Return a tuple (common, missing roots, heads) used to identify
1238 1246 missing nodes from remote.
1239 1247
1240 1248 If base dict is specified, assume that these nodes and their parents
1241 1249 exist on the remote side and that no child of a node of base exists
1242 1250 in both remote and self.
1243 1251 Furthermore base will be updated to include the nodes that exists
1244 1252 in self and remote but no children exists in self and remote.
1245 1253 If a list of heads is specified, return only nodes which are heads
1246 1254 or ancestors of these heads.
1247 1255
1248 1256 All the ancestors of base are in self and in remote.
1249 1257 """
1250 1258 m = self.changelog.nodemap
1251 1259 search = []
1252 1260 fetch = set()
1253 1261 seen = set()
1254 1262 seenbranch = set()
1255 1263 if base is None:
1256 1264 base = {}
1257 1265
1258 1266 if not heads:
1259 1267 heads = remote.heads()
1260 1268
1261 1269 if self.changelog.tip() == nullid:
1262 1270 base[nullid] = 1
1263 1271 if heads != [nullid]:
1264 1272 return [nullid], [nullid], list(heads)
1265 1273 return [nullid], [], []
1266 1274
1267 1275 # assume we're closer to the tip than the root
1268 1276 # and start by examining the heads
1269 1277 self.ui.status(_("searching for changes\n"))
1270 1278
1271 1279 unknown = []
1272 1280 for h in heads:
1273 1281 if h not in m:
1274 1282 unknown.append(h)
1275 1283 else:
1276 1284 base[h] = 1
1277 1285
1278 1286 heads = unknown
1279 1287 if not unknown:
1280 1288 return base.keys(), [], []
1281 1289
1282 1290 req = set(unknown)
1283 1291 reqcnt = 0
1284 1292
1285 1293 # search through remote branches
1286 1294 # a 'branch' here is a linear segment of history, with four parts:
1287 1295 # head, root, first parent, second parent
1288 1296 # (a branch always has two parents (or none) by definition)
1289 1297 unknown = remote.branches(unknown)
1290 1298 while unknown:
1291 1299 r = []
1292 1300 while unknown:
1293 1301 n = unknown.pop(0)
1294 1302 if n[0] in seen:
1295 1303 continue
1296 1304
1297 1305 self.ui.debug("examining %s:%s\n"
1298 1306 % (short(n[0]), short(n[1])))
1299 1307 if n[0] == nullid: # found the end of the branch
1300 1308 pass
1301 1309 elif n in seenbranch:
1302 1310 self.ui.debug("branch already found\n")
1303 1311 continue
1304 1312 elif n[1] and n[1] in m: # do we know the base?
1305 1313 self.ui.debug("found incomplete branch %s:%s\n"
1306 1314 % (short(n[0]), short(n[1])))
1307 1315 search.append(n[0:2]) # schedule branch range for scanning
1308 1316 seenbranch.add(n)
1309 1317 else:
1310 1318 if n[1] not in seen and n[1] not in fetch:
1311 1319 if n[2] in m and n[3] in m:
1312 1320 self.ui.debug("found new changeset %s\n" %
1313 1321 short(n[1]))
1314 1322 fetch.add(n[1]) # earliest unknown
1315 1323 for p in n[2:4]:
1316 1324 if p in m:
1317 1325 base[p] = 1 # latest known
1318 1326
1319 1327 for p in n[2:4]:
1320 1328 if p not in req and p not in m:
1321 1329 r.append(p)
1322 1330 req.add(p)
1323 1331 seen.add(n[0])
1324 1332
1325 1333 if r:
1326 1334 reqcnt += 1
1327 1335 self.ui.debug("request %d: %s\n" %
1328 1336 (reqcnt, " ".join(map(short, r))))
1329 1337 for p in xrange(0, len(r), 10):
1330 1338 for b in remote.branches(r[p:p+10]):
1331 1339 self.ui.debug("received %s:%s\n" %
1332 1340 (short(b[0]), short(b[1])))
1333 1341 unknown.append(b)
1334 1342
1335 1343 # do binary search on the branches we found
1336 1344 while search:
1337 1345 newsearch = []
1338 1346 reqcnt += 1
1339 1347 for n, l in zip(search, remote.between(search)):
1340 1348 l.append(n[1])
1341 1349 p = n[0]
1342 1350 f = 1
1343 1351 for i in l:
1344 1352 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1345 1353 if i in m:
1346 1354 if f <= 2:
1347 1355 self.ui.debug("found new branch changeset %s\n" %
1348 1356 short(p))
1349 1357 fetch.add(p)
1350 1358 base[i] = 1
1351 1359 else:
1352 1360 self.ui.debug("narrowed branch search to %s:%s\n"
1353 1361 % (short(p), short(i)))
1354 1362 newsearch.append((p, i))
1355 1363 break
1356 1364 p, f = i, f * 2
1357 1365 search = newsearch
1358 1366
1359 1367 # sanity check our fetch list
1360 1368 for f in fetch:
1361 1369 if f in m:
1362 1370 raise error.RepoError(_("already have changeset ")
1363 1371 + short(f[:4]))
1364 1372
1365 1373 if base.keys() == [nullid]:
1366 1374 if force:
1367 1375 self.ui.warn(_("warning: repository is unrelated\n"))
1368 1376 else:
1369 1377 raise util.Abort(_("repository is unrelated"))
1370 1378
1371 1379 self.ui.debug("found new changesets starting at " +
1372 1380 " ".join([short(f) for f in fetch]) + "\n")
1373 1381
1374 1382 self.ui.debug("%d total queries\n" % reqcnt)
1375 1383
1376 1384 return base.keys(), list(fetch), heads
1377 1385
1378 1386 def findoutgoing(self, remote, base=None, heads=None, force=False):
1379 1387 """Return list of nodes that are roots of subsets not in remote
1380 1388
1381 1389 If base dict is specified, assume that these nodes and their parents
1382 1390 exist on the remote side.
1383 1391 If a list of heads is specified, return only nodes which are heads
1384 1392 or ancestors of these heads, and return a second element which
1385 1393 contains all remote heads which get new children.
1386 1394 """
1387 1395 if base is None:
1388 1396 base = {}
1389 1397 self.findincoming(remote, base, heads, force=force)
1390 1398
1391 1399 self.ui.debug("common changesets up to "
1392 1400 + " ".join(map(short, base.keys())) + "\n")
1393 1401
1394 1402 remain = set(self.changelog.nodemap)
1395 1403
1396 1404 # prune everything remote has from the tree
1397 1405 remain.remove(nullid)
1398 1406 remove = base.keys()
1399 1407 while remove:
1400 1408 n = remove.pop(0)
1401 1409 if n in remain:
1402 1410 remain.remove(n)
1403 1411 for p in self.changelog.parents(n):
1404 1412 remove.append(p)
1405 1413
1406 1414 # find every node whose parents have been pruned
1407 1415 subset = []
1408 1416 # find every remote head that will get new children
1409 1417 updated_heads = set()
1410 1418 for n in remain:
1411 1419 p1, p2 = self.changelog.parents(n)
1412 1420 if p1 not in remain and p2 not in remain:
1413 1421 subset.append(n)
1414 1422 if heads:
1415 1423 if p1 in heads:
1416 1424 updated_heads.add(p1)
1417 1425 if p2 in heads:
1418 1426 updated_heads.add(p2)
1419 1427
1420 1428 # this is the set of all roots we have to push
1421 1429 if heads:
1422 1430 return subset, list(updated_heads)
1423 1431 else:
1424 1432 return subset
1425 1433
1426 1434 def pull(self, remote, heads=None, force=False):
1427 1435 lock = self.lock()
1428 1436 try:
1429 1437 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1430 1438 force=force)
1431 1439 if fetch == [nullid]:
1432 1440 self.ui.status(_("requesting all changes\n"))
1433 1441
1434 1442 if not fetch:
1435 1443 self.ui.status(_("no changes found\n"))
1436 1444 return 0
1437 1445
1438 1446 if heads is None and remote.capable('changegroupsubset'):
1439 1447 heads = rheads
1440 1448
1441 1449 if heads is None:
1442 1450 cg = remote.changegroup(fetch, 'pull')
1443 1451 else:
1444 1452 if not remote.capable('changegroupsubset'):
1445 1453 raise util.Abort(_("Partial pull cannot be done because "
1446 1454 "other repository doesn't support "
1447 1455 "changegroupsubset."))
1448 1456 cg = remote.changegroupsubset(fetch, heads, 'pull')
1449 1457 return self.addchangegroup(cg, 'pull', remote.url())
1450 1458 finally:
1451 1459 lock.release()
1452 1460
1453 1461 def push(self, remote, force=False, revs=None):
1454 1462 # there are two ways to push to remote repo:
1455 1463 #
1456 1464 # addchangegroup assumes local user can lock remote
1457 1465 # repo (local filesystem, old ssh servers).
1458 1466 #
1459 1467 # unbundle assumes local user cannot lock remote repo (new ssh
1460 1468 # servers, http servers).
1461 1469
1462 1470 if remote.capable('unbundle'):
1463 1471 return self.push_unbundle(remote, force, revs)
1464 1472 return self.push_addchangegroup(remote, force, revs)
1465 1473
1466 1474 def prepush(self, remote, force, revs):
1467 1475 '''Analyze the local and remote repositories and determine which
1468 1476 changesets need to be pushed to the remote. Return a tuple
1469 1477 (changegroup, remoteheads). changegroup is a readable file-like
1470 1478 object whose read() returns successive changegroup chunks ready to
1471 1479 be sent over the wire. remoteheads is the list of remote heads.
1472 1480 '''
1473 1481 common = {}
1474 1482 remote_heads = remote.heads()
1475 1483 inc = self.findincoming(remote, common, remote_heads, force=force)
1476 1484
1477 1485 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1478 1486 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1479 1487
1480 1488 def checkbranch(lheads, rheads, updatelb):
1481 1489 '''
1482 1490 check whether there are more local heads than remote heads on
1483 1491 a specific branch.
1484 1492
1485 1493 lheads: local branch heads
1486 1494 rheads: remote branch heads
1487 1495 updatelb: outgoing local branch bases
1488 1496 '''
1489 1497
1490 1498 warn = 0
1491 1499
1492 1500 if not revs and len(lheads) > len(rheads):
1493 1501 warn = 1
1494 1502 else:
1495 1503 # add local heads involved in the push
1496 1504 updatelheads = [self.changelog.heads(x, lheads)
1497 1505 for x in updatelb]
1498 1506 newheads = set(sum(updatelheads, [])) & set(lheads)
1499 1507
1500 1508 if not newheads:
1501 1509 return True
1502 1510
1503 1511 # add heads we don't have or that are not involved in the push
1504 1512 for r in rheads:
1505 1513 if r in self.changelog.nodemap:
1506 1514 desc = self.changelog.heads(r, heads)
1507 1515 l = [h for h in heads if h in desc]
1508 1516 if not l:
1509 1517 newheads.add(r)
1510 1518 else:
1511 1519 newheads.add(r)
1512 1520 if len(newheads) > len(rheads):
1513 1521 warn = 1
1514 1522
1515 1523 if warn:
1516 1524 if not rheads: # new branch requires --force
1517 1525 self.ui.warn(_("abort: push creates new"
1518 1526 " remote branch '%s'!\n") %
1519 1527 self[updatelb[0]].branch())
1520 1528 else:
1521 1529 self.ui.warn(_("abort: push creates new remote heads!\n"))
1522 1530
1523 1531 self.ui.status(_("(did you forget to merge?"
1524 1532 " use push -f to force)\n"))
1525 1533 return False
1526 1534 return True
1527 1535
1528 1536 if not bases:
1529 1537 self.ui.status(_("no changes found\n"))
1530 1538 return None, 1
1531 1539 elif not force:
1532 1540 # Check for each named branch if we're creating new remote heads.
1533 1541 # To be a remote head after push, node must be either:
1534 1542 # - unknown locally
1535 1543 # - a local outgoing head descended from update
1536 1544 # - a remote head that's known locally and not
1537 1545 # ancestral to an outgoing head
1538 1546 #
1539 1547 # New named branches cannot be created without --force.
1540 1548
1541 1549 if remote_heads != [nullid]:
1542 1550 if remote.capable('branchmap'):
1543 1551 localhds = {}
1544 1552 if not revs:
1545 1553 localhds = self.branchmap()
1546 1554 else:
1547 1555 for n in heads:
1548 1556 branch = self[n].branch()
1549 1557 if branch in localhds:
1550 1558 localhds[branch].append(n)
1551 1559 else:
1552 1560 localhds[branch] = [n]
1553 1561
1554 1562 remotehds = remote.branchmap()
1555 1563
1556 1564 for lh in localhds:
1557 1565 if lh in remotehds:
1558 1566 rheads = remotehds[lh]
1559 1567 else:
1560 1568 rheads = []
1561 1569 lheads = localhds[lh]
1562 1570 updatelb = [upd for upd in update
1563 1571 if self[upd].branch() == lh]
1564 1572 if not updatelb:
1565 1573 continue
1566 1574 if not checkbranch(lheads, rheads, updatelb):
1567 1575 return None, 0
1568 1576 else:
1569 1577 if not checkbranch(heads, remote_heads, update):
1570 1578 return None, 0
1571 1579
1572 1580 if inc:
1573 1581 self.ui.warn(_("note: unsynced remote changes!\n"))
1574 1582
1575 1583
1576 1584 if revs is None:
1577 1585 # use the fast path, no race possible on push
1578 1586 cg = self._changegroup(common.keys(), 'push')
1579 1587 else:
1580 1588 cg = self.changegroupsubset(update, revs, 'push')
1581 1589 return cg, remote_heads
1582 1590
1583 1591 def push_addchangegroup(self, remote, force, revs):
1584 1592 lock = remote.lock()
1585 1593 try:
1586 1594 ret = self.prepush(remote, force, revs)
1587 1595 if ret[0] is not None:
1588 1596 cg, remote_heads = ret
1589 1597 return remote.addchangegroup(cg, 'push', self.url())
1590 1598 return ret[1]
1591 1599 finally:
1592 1600 lock.release()
1593 1601
1594 1602 def push_unbundle(self, remote, force, revs):
1595 1603 # The local repo finds the heads on the server and works out which
1596 1604 # revs it must push. Once the revs are transferred, if the server
1597 1605 # finds it has different heads (someone else won the commit/push
1598 1606 # race), it aborts.
1599 1607
1600 1608 ret = self.prepush(remote, force, revs)
1601 1609 if ret[0] is not None:
1602 1610 cg, remote_heads = ret
1603 1611 if force: remote_heads = ['force']
1604 1612 return remote.unbundle(cg, remote_heads, 'push')
1605 1613 return ret[1]
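# A simplified sketch (assumed server-side logic, not this file's code)
# of the race check described above: the client reports the remote heads
# it based its push on, and the server refuses the bundle if its heads
# have changed in the meantime, unless 'force' was sent.
def _unbundle_race_check_sketch(reported_heads, current_heads):
    if reported_heads != ['force'] and current_heads != reported_heads:
        return False  # someone else pushed first; client must pull/retry
    return True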
1606 1614
1607 1615 def changegroupinfo(self, nodes, source):
1608 1616 if self.ui.verbose or source == 'bundle':
1609 1617 self.ui.status(_("%d changesets found\n") % len(nodes))
1610 1618 if self.ui.debugflag:
1611 1619 self.ui.debug("list of changesets:\n")
1612 1620 for node in nodes:
1613 1621 self.ui.debug("%s\n" % hex(node))
1614 1622
1615 1623 def changegroupsubset(self, bases, heads, source, extranodes=None):
1616 1624 """Compute a changegroup consisting of all the nodes that are
1617 1625 descendants of any of the bases and ancestors of any of the heads.
1618 1626 Return a chunkbuffer object whose read() method will return
1619 1627 successive changegroup chunks.
1620 1628
1621 1629 It is fairly complex as determining which filenodes and which
1622 1630 manifest nodes need to be included for the changeset to be complete
1623 1631 is non-trivial.
1624 1632
1625 1633 Another wrinkle is doing the reverse, figuring out which changeset in
1626 1634 the changegroup a particular filenode or manifestnode belongs to.
1627 1635
1628 1636 The caller can specify some nodes that must be included in the
1629 1637 changegroup using the extranodes argument. It should be a dict
1630 1638 where the keys are the filenames (or 1 for the manifest), and the
1631 1639 values are lists of (node, linknode) tuples, where node is a wanted
1632 1640 node and linknode is the changelog node that should be transmitted as
1633 1641 the linkrev.
1634 1642 """
1635 1643
1636 1644 if extranodes is None:
1637 1645 # can we go through the fast path?
1638 1646 heads.sort()
1639 1647 allheads = self.heads()
1640 1648 allheads.sort()
1641 1649 if heads == allheads:
1642 1650 common = []
1643 1651 # parents of bases are known from both sides
1644 1652 for n in bases:
1645 1653 for p in self.changelog.parents(n):
1646 1654 if p != nullid:
1647 1655 common.append(p)
1648 1656 return self._changegroup(common, source)
1649 1657
1650 1658 self.hook('preoutgoing', throw=True, source=source)
1651 1659
1652 1660 # Set up some initial variables
1653 1661 # Make it easy to refer to self.changelog
1654 1662 cl = self.changelog
1655 1663 # msng is short for missing - compute the list of changesets in this
1656 1664 # changegroup.
1657 1665 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1658 1666 self.changegroupinfo(msng_cl_lst, source)
1659 1667 # Some bases may turn out to be superfluous, and some heads may be
1660 1668 # too. nodesbetween will return the minimal set of bases and heads
1661 1669 # necessary to re-create the changegroup.
1662 1670
1663 1671 # Known heads are the list of heads that it is assumed the recipient
1664 1672 # of this changegroup will know about.
1665 1673 knownheads = set()
1666 1674 # We assume that all parents of bases are known heads.
1667 1675 for n in bases:
1668 1676 knownheads.update(cl.parents(n))
1669 1677 knownheads.discard(nullid)
1670 1678 knownheads = list(knownheads)
1671 1679 if knownheads:
1672 1680 # Now that we know what heads are known, we can compute which
1673 1681 # changesets are known. The recipient must know about all
1674 1682 # changesets required to reach the known heads from the null
1675 1683 # changeset.
1676 1684 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1677 1685 junk = None
1678 1686 # Transform the list into a set.
1679 1687 has_cl_set = set(has_cl_set)
1680 1688 else:
1681 1689 # If there were no known heads, the recipient cannot be assumed to
1682 1690 # know about any changesets.
1683 1691 has_cl_set = set()
1684 1692
1685 1693 # Make it easy to refer to self.manifest
1686 1694 mnfst = self.manifest
1687 1695 # We don't know which manifests are missing yet
1688 1696 msng_mnfst_set = {}
1689 1697 # Nor do we know which filenodes are missing.
1690 1698 msng_filenode_set = {}
1691 1699
1692 1700 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1693 1701 junk = None
1694 1702
1695 1703 # A changeset always belongs to itself, so the changenode lookup
1696 1704 # function for a changenode is identity.
1697 1705 def identity(x):
1698 1706 return x
1699 1707
1700 1708 # If we determine that a particular file or manifest node must be a
1701 1709 # node that the recipient of the changegroup will already have, we can
1702 1710 # also assume the recipient will have all the parents. This function
1703 1711 # prunes them from the set of missing nodes.
1704 1712 def prune_parents(revlog, hasset, msngset):
1705 1713 haslst = list(hasset)
1706 1714 haslst.sort(key=revlog.rev)
1707 1715 for node in haslst:
1708 1716 parentlst = [p for p in revlog.parents(node) if p != nullid]
1709 1717 while parentlst:
1710 1718 n = parentlst.pop()
1711 1719 if n not in hasset:
1712 1720 hasset.add(n)
1713 1721 p = [p for p in revlog.parents(n) if p != nullid]
1714 1722 parentlst.extend(p)
1715 1723 for n in hasset:
1716 1724 msngset.pop(n, None)
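# Worked example (toy ancestry, not real nodes) of the pruning above: if
# the recipient is known to have C, its ancestors A and B are implied
# and must be dropped from the missing set as well.
def _prune_parents_example():
    parents = {'C': ['B'], 'B': ['A'], 'A': []}
    hasset, msngset = set(['C']), {'A': 1, 'B': 1, 'D': 1}
    stack = list(hasset)
    while stack:
        for p in parents.get(stack.pop(), []):
            if p not in hasset:
                hasset.add(p)
                stack.append(p)
    for n in hasset:
        msngset.pop(n, None)
    return msngset  # -> {'D': 1}: only D is still considered missing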
1717 1725
1718 1726 # This is a function-generating function used to set up an environment
1719 1727 # for the inner function to execute in.
1720 1728 def manifest_and_file_collector(changedfileset):
1721 1729 # This is an information gathering function that gathers
1722 1730 # information from each changeset node that goes out as part of
1723 1731 # the changegroup. The information gathered is a list of which
1724 1732 # manifest nodes are potentially required (the recipient may
1725 1733 # already have them) and total list of all files which were
1726 1734 # changed in any changeset in the changegroup.
1727 1735 #
1728 1736 # We also remember the first changenode we saw any manifest
1729 1737 # referenced by so we can later determine which changenode 'owns'
1730 1738 # the manifest.
1731 1739 def collect_manifests_and_files(clnode):
1732 1740 c = cl.read(clnode)
1733 1741 for f in c[3]:
1734 1742 # This makes sure we keep only one string instance
1735 1743 # per filename.
1736 1744 changedfileset.setdefault(f, f)
1737 1745 msng_mnfst_set.setdefault(c[0], clnode)
1738 1746 return collect_manifests_and_files
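# The pattern above in miniature (generic sketch, not the Mercurial
# API): group() calls back with nothing but a node, so any extra state
# the callback needs is closed over by an outer setup function.
def _collector_pattern_sketch():
    seen = {}
    def collect(node):
        seen.setdefault(node, len(seen))  # shared state via the closure
    for n in ['n1', 'n2', 'n1']:
        collect(n)
    return seen  # -> {'n1': 0, 'n2': 1}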
1739 1747
1740 1748 # Figure out which manifest nodes (of the ones we think might be part
1741 1749 # of the changegroup) the recipient must know about and remove them
1742 1750 # from the changegroup.
1743 1751 def prune_manifests():
1744 1752 has_mnfst_set = set()
1745 1753 for n in msng_mnfst_set:
1746 1754 # If a 'missing' manifest thinks it belongs to a changenode
1747 1755 # the recipient is assumed to have, obviously the recipient
1748 1756 # must have that manifest.
1749 1757 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1750 1758 if linknode in has_cl_set:
1751 1759 has_mnfst_set.add(n)
1752 1760 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1753 1761
1754 1762 # Use the information collected in collect_manifests_and_files to say
1755 1763 # which changenode any manifestnode belongs to.
1756 1764 def lookup_manifest_link(mnfstnode):
1757 1765 return msng_mnfst_set[mnfstnode]
1758 1766
1759 1767 # A function-generating function that sets up the initial environment
1760 1768 # for the inner function.
1761 1769 def filenode_collector(changedfiles):
1762 1770 next_rev = [0]
1763 1771 # This gathers information from each manifestnode included in the
1764 1772 # changegroup about which filenodes the manifest node references
1765 1773 # so we can include those in the changegroup too.
1766 1774 #
1767 1775 # It also remembers which changenode each filenode belongs to. It
1768 1776 # does this by assuming that a filenode belongs to the changenode
1769 1777 # that the first manifest referencing it belongs to.
1770 1778 def collect_msng_filenodes(mnfstnode):
1771 1779 r = mnfst.rev(mnfstnode)
1772 1780 if r == next_rev[0]:
1773 1781 # If the last rev we looked at was the one just before this one,
1774 1782 # we only need to read the delta.
1775 1783 deltamf = mnfst.readdelta(mnfstnode)
1776 1784 # For each line in the delta
1777 1785 for f, fnode in deltamf.iteritems():
1778 1786 f = changedfiles.get(f, None)
1779 1787 # And if the file is in the list of files we care
1780 1788 # about.
1781 1789 if f is not None:
1782 1790 # Get the changenode this manifest belongs to
1783 1791 clnode = msng_mnfst_set[mnfstnode]
1784 1792 # Create the set of filenodes for the file if
1785 1793 # there isn't one already.
1786 1794 ndset = msng_filenode_set.setdefault(f, {})
1787 1795 # And set the filenode's changelog node to the
1788 1796 # manifest's if it hasn't been set already.
1789 1797 ndset.setdefault(fnode, clnode)
1790 1798 else:
1791 1799 # Otherwise we need a full manifest.
1792 1800 m = mnfst.read(mnfstnode)
1793 1801 # For every file we care about.
1794 1802 for f in changedfiles:
1795 1803 fnode = m.get(f, None)
1796 1804 # If it's in the manifest
1797 1805 if fnode is not None:
1798 1806 # See comments above.
1799 1807 clnode = msng_mnfst_set[mnfstnode]
1800 1808 ndset = msng_filenode_set.setdefault(f, {})
1801 1809 ndset.setdefault(fnode, clnode)
1802 1810 # Remember the revision we hope to see next.
1803 1811 next_rev[0] = r + 1
1804 1812 return collect_msng_filenodes
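# Aside on next_rev above: this code predates 'nonlocal', so a
# one-element list serves as a mutable cell that the inner function can
# update without rebinding the name. The same idiom in isolation:
def _mutable_cell_sketch():
    cell = [0]
    def bump():
        cell[0] += 1  # mutating the list works; 'cell = ...' would not
    bump()
    return cell[0]  # -> 1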
1805 1813
1806 1814 # We have a list of filenodes we think we need for a file; let's remove
1807 1815 # all those we know the recipient must have.
1808 1816 def prune_filenodes(f, filerevlog):
1809 1817 msngset = msng_filenode_set[f]
1810 1818 hasset = set()
1811 1819 # If a 'missing' filenode thinks it belongs to a changenode we
1812 1820 # assume the recipient must have, then the recipient must have
1813 1821 # that filenode.
1814 1822 for n in msngset:
1815 1823 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1816 1824 if clnode in has_cl_set:
1817 1825 hasset.add(n)
1818 1826 prune_parents(filerevlog, hasset, msngset)
1819 1827
1820 1828 # A function-generating function that sets up a context for the
1821 1829 # inner function.
1822 1830 def lookup_filenode_link_func(fname):
1823 1831 msngset = msng_filenode_set[fname]
1824 1832 # Lookup the changenode the filenode belongs to.
1825 1833 def lookup_filenode_link(fnode):
1826 1834 return msngset[fnode]
1827 1835 return lookup_filenode_link
1828 1836
1829 1837 # Add the nodes that were explicitly requested.
1830 1838 def add_extra_nodes(name, nodes):
1831 1839 if not extranodes or name not in extranodes:
1832 1840 return
1833 1841
1834 1842 for node, linknode in extranodes[name]:
1835 1843 if node not in nodes:
1836 1844 nodes[node] = linknode
1837 1845
1838 1846 # Now that we have all these utility functions to help out and
1839 1847 # logically divide up the task, generate the group.
1840 1848 def gengroup():
1841 1849 # The set of changed files starts empty.
1842 1850 changedfiles = {}
1843 1851 # Create a changenode group generator that will call our functions
1844 1852 # back to lookup the owning changenode and collect information.
1845 1853 group = cl.group(msng_cl_lst, identity,
1846 1854 manifest_and_file_collector(changedfiles))
1847 1855 for chnk in group:
1848 1856 yield chnk
1849 1857
1850 1858 # The list of manifests has been collected by the generator
1851 1859 # calling our functions back.
1852 1860 prune_manifests()
1853 1861 add_extra_nodes(1, msng_mnfst_set)
1854 1862 msng_mnfst_lst = msng_mnfst_set.keys()
1855 1863 # Sort the manifestnodes by revision number.
1856 1864 msng_mnfst_lst.sort(key=mnfst.rev)
1857 1865 # Create a generator for the manifestnodes that calls our lookup
1858 1866 # and data collection functions back.
1859 1867 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1860 1868 filenode_collector(changedfiles))
1861 1869 for chnk in group:
1862 1870 yield chnk
1863 1871
1864 1872 # These are no longer needed; drop the references so the memory
1865 1873 # can be reclaimed.
1866 1874 msng_mnfst_lst = None
1867 1875 msng_mnfst_set.clear()
1868 1876
1869 1877 if extranodes:
1870 1878 for fname in extranodes:
1871 1879 if isinstance(fname, int):
1872 1880 continue
1873 1881 msng_filenode_set.setdefault(fname, {})
1874 1882 changedfiles[fname] = 1
1875 1883 # Go through all our files in order sorted by name.
1876 1884 for fname in sorted(changedfiles):
1877 1885 filerevlog = self.file(fname)
1878 1886 if not len(filerevlog):
1879 1887 raise util.Abort(_("empty or missing revlog for %s") % fname)
1880 1888 # Toss out the filenodes that the recipient isn't really
1881 1889 # missing.
1882 1890 if fname in msng_filenode_set:
1883 1891 prune_filenodes(fname, filerevlog)
1884 1892 add_extra_nodes(fname, msng_filenode_set[fname])
1885 1893 msng_filenode_lst = msng_filenode_set[fname].keys()
1886 1894 else:
1887 1895 msng_filenode_lst = []
1888 1896 # If any filenodes are left, generate the group for them,
1889 1897 # otherwise don't bother.
1890 1898 if len(msng_filenode_lst) > 0:
1891 1899 yield changegroup.chunkheader(len(fname))
1892 1900 yield fname
1893 1901 # Sort the filenodes by their revision #
1894 1902 msng_filenode_lst.sort(key=filerevlog.rev)
1895 1903 # Create a group generator and only pass in a changenode
1896 1904 # lookup function, as we don't need to collect any
1897 1905 # information from filenodes.
1898 1906 group = filerevlog.group(msng_filenode_lst,
1899 1907 lookup_filenode_link_func(fname))
1900 1908 for chnk in group:
1901 1909 yield chnk
1902 1910 if fname in msng_filenode_set:
1903 1911 # Don't need this anymore, toss it to free memory.
1904 1912 del msng_filenode_set[fname]
1905 1913 # Signal that no more groups are left.
1906 1914 yield changegroup.closechunk()
1907 1915
1908 1916 if msng_cl_lst:
1909 1917 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1910 1918
1911 1919 return util.chunkbuffer(gengroup())
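# Rough shape of the stream gengroup yields, reconstructed from the
# yields above (the chunk framing itself lives in the changegroup
# module). A sketch with mock group data:
def _stream_shape_sketch(changedfiles, groups):
    for chnk in groups['changelog']:
        yield chnk
    for chnk in groups['manifest']:
        yield chnk
    for fname in sorted(changedfiles):
        yield changegroup.chunkheader(len(fname))
        yield fname
        for chnk in groups[fname]:
            yield chnk
    yield changegroup.closechunk()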
1912 1920
1913 1921 def changegroup(self, basenodes, source):
1914 1922 # to avoid a race we use changegroupsubset() (issue1320)
1915 1923 return self.changegroupsubset(basenodes, self.heads(), source)
1916 1924
1917 1925 def _changegroup(self, common, source):
1918 1926 """Compute the changegroup of all nodes that we have that a recipient
1919 1927 doesn't. Return a chunkbuffer object whose read() method will return
1920 1928 successive changegroup chunks.
1921 1929
1922 1930 This is much easier than the previous function as we can assume that
1923 1931 the recipient has any changenode we aren't sending them.
1924 1932
1925 1933 common is the set of common nodes between remote and self"""
1926 1934
1927 1935 self.hook('preoutgoing', throw=True, source=source)
1928 1936
1929 1937 cl = self.changelog
1930 1938 nodes = cl.findmissing(common)
1931 1939 revset = set([cl.rev(n) for n in nodes])
1932 1940 self.changegroupinfo(nodes, source)
1933 1941
1934 1942 def identity(x):
1935 1943 return x
1936 1944
1937 1945 def gennodelst(log):
1938 1946 for r in log:
1939 1947 if log.linkrev(r) in revset:
1940 1948 yield log.node(r)
1941 1949
1942 1950 def changed_file_collector(changedfileset):
1943 1951 def collect_changed_files(clnode):
1944 1952 c = cl.read(clnode)
1945 1953 changedfileset.update(c[3])
1946 1954 return collect_changed_files
1947 1955
1948 1956 def lookuprevlink_func(revlog):
1949 1957 def lookuprevlink(n):
1950 1958 return cl.node(revlog.linkrev(revlog.rev(n)))
1951 1959 return lookuprevlink
1952 1960
1953 1961 def gengroup():
1954 1962 '''yield a sequence of changegroup chunks (strings)'''
1955 1963 # construct a list of all changed files
1956 1964 changedfiles = set()
1957 1965
1958 1966 for chnk in cl.group(nodes, identity,
1959 1967 changed_file_collector(changedfiles)):
1960 1968 yield chnk
1961 1969
1962 1970 mnfst = self.manifest
1963 1971 nodeiter = gennodelst(mnfst)
1964 1972 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1965 1973 yield chnk
1966 1974
1967 1975 for fname in sorted(changedfiles):
1968 1976 filerevlog = self.file(fname)
1969 1977 if not len(filerevlog):
1970 1978 raise util.Abort(_("empty or missing revlog for %s") % fname)
1971 1979 nodeiter = gennodelst(filerevlog)
1972 1980 nodeiter = list(nodeiter)
1973 1981 if nodeiter:
1974 1982 yield changegroup.chunkheader(len(fname))
1975 1983 yield fname
1976 1984 lookup = lookuprevlink_func(filerevlog)
1977 1985 for chnk in filerevlog.group(nodeiter, lookup):
1978 1986 yield chnk
1979 1987
1980 1988 yield changegroup.closechunk()
1981 1989
1982 1990 if nodes:
1983 1991 self.hook('outgoing', node=hex(nodes[0]), source=source)
1984 1992
1985 1993 return util.chunkbuffer(gengroup())
1986 1994
1987 1995 def addchangegroup(self, source, srctype, url, emptyok=False):
1988 1996 """add changegroup to repo.
1989 1997
1990 1998 return values:
1991 1999 - nothing changed or no source: 0
1992 2000 - more heads than before: 1+added heads (2..n)
1993 2001 - fewer heads than before: -1-removed heads (-2..-n)
1994 2002 - number of heads stays the same: 1
1995 2003 """
1996 2004 def csmap(x):
1997 2005 self.ui.debug("add changeset %s\n" % short(x))
1998 2006 return len(cl)
1999 2007
2000 2008 def revmap(x):
2001 2009 return cl.rev(x)
2002 2010
2003 2011 if not source:
2004 2012 return 0
2005 2013
2006 2014 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2007 2015
2008 2016 changesets = files = revisions = 0
2009 2017
2010 2018 # write changelog data to temp files so concurrent readers will not
2011 2019 # see an inconsistent view
2012 2020 cl = self.changelog
2013 2021 cl.delayupdate()
2014 2022 oldheads = len(cl.heads())
2015 2023
2016 2024 tr = self.transaction()
2017 2025 try:
2018 2026 trp = weakref.proxy(tr)
2019 2027 # pull off the changeset group
2020 2028 self.ui.status(_("adding changesets\n"))
2021 2029 clstart = len(cl)
2022 2030 chunkiter = changegroup.chunkiter(source)
2023 2031 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2024 2032 raise util.Abort(_("received changelog group is empty"))
2025 2033 clend = len(cl)
2026 2034 changesets = clend - clstart
2027 2035
2028 2036 # pull off the manifest group
2029 2037 self.ui.status(_("adding manifests\n"))
2030 2038 chunkiter = changegroup.chunkiter(source)
2031 2039 # no need to check for empty manifest group here:
2032 2040 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2033 2041 # no new manifest will be created and the manifest group will
2034 2042 # be empty during the pull
2035 2043 self.manifest.addgroup(chunkiter, revmap, trp)
2036 2044
2037 2045 # process the files
2038 2046 self.ui.status(_("adding file changes\n"))
2039 2047 while 1:
2040 2048 f = changegroup.getchunk(source)
2041 2049 if not f:
2042 2050 break
2043 2051 self.ui.debug("adding %s revisions\n" % f)
2044 2052 fl = self.file(f)
2045 2053 o = len(fl)
2046 2054 chunkiter = changegroup.chunkiter(source)
2047 2055 if fl.addgroup(chunkiter, revmap, trp) is None:
2048 2056 raise util.Abort(_("received file revlog group is empty"))
2049 2057 revisions += len(fl) - o
2050 2058 files += 1
2051 2059
2052 2060 newheads = len(cl.heads())
2053 2061 heads = ""
2054 2062 if oldheads and newheads != oldheads:
2055 2063 heads = _(" (%+d heads)") % (newheads - oldheads)
2056 2064
2057 2065 self.ui.status(_("added %d changesets"
2058 2066 " with %d changes to %d files%s\n")
2059 2067 % (changesets, revisions, files, heads))
2060 2068
2061 2069 if changesets > 0:
2062 2070 p = lambda: cl.writepending() and self.root or ""
2063 2071 self.hook('pretxnchangegroup', throw=True,
2064 2072 node=hex(cl.node(clstart)), source=srctype,
2065 2073 url=url, pending=p)
2066 2074
2067 2075 # make changelog see real files again
2068 2076 cl.finalize(trp)
2069 2077
2070 2078 tr.close()
2071 2079 finally:
2072 2080 del tr
2073 2081
2074 2082 if changesets > 0:
2075 2083 # forcefully update the on-disk branch cache
2076 2084 self.ui.debug("updating the branch cache\n")
2077 2085 self.branchtags()
2078 2086 self.hook("changegroup", node=hex(cl.node(clstart)),
2079 2087 source=srctype, url=url)
2080 2088
2081 2089 for i in xrange(clstart, clend):
2082 2090 self.hook("incoming", node=hex(cl.node(i)),
2083 2091 source=srctype, url=url)
2084 2092
2085 2093 # never return 0 here:
2086 2094 if newheads < oldheads:
2087 2095 return newheads - oldheads - 1
2088 2096 else:
2089 2097 return newheads - oldheads + 1
2090 2098
2091 2099
2092 2100 def stream_in(self, remote):
2093 2101 fp = remote.stream_out()
2094 2102 l = fp.readline()
2095 2103 try:
2096 2104 resp = int(l)
2097 2105 except ValueError:
2098 2106 raise error.ResponseError(
2099 2107 _('Unexpected response from remote server:'), l)
2100 2108 if resp == 1:
2101 2109 raise util.Abort(_('operation forbidden by server'))
2102 2110 elif resp == 2:
2103 2111 raise util.Abort(_('locking the remote repository failed'))
2104 2112 elif resp != 0:
2105 2113 raise util.Abort(_('the server sent an unknown error code'))
2106 2114 self.ui.status(_('streaming all changes\n'))
2107 2115 l = fp.readline()
2108 2116 try:
2109 2117 total_files, total_bytes = map(int, l.split(' ', 1))
2110 2118 except (ValueError, TypeError):
2111 2119 raise error.ResponseError(
2112 2120 _('Unexpected response from remote server:'), l)
2113 2121 self.ui.status(_('%d files to transfer, %s of data\n') %
2114 2122 (total_files, util.bytecount(total_bytes)))
2115 2123 start = time.time()
2116 2124 for i in xrange(total_files):
2117 2125 # XXX doesn't support '\n' or '\r' in filenames
2118 2126 l = fp.readline()
2119 2127 try:
2120 2128 name, size = l.split('\0', 1)
2121 2129 size = int(size)
2122 2130 except (ValueError, TypeError):
2123 2131 raise error.ResponseError(
2124 2132 _('Unexpected response from remote server:'), l)
2125 2133 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2126 2134 # for backwards compat, name was partially encoded
2127 2135 ofp = self.sopener(store.decodedir(name), 'w')
2128 2136 for chunk in util.filechunkiter(fp, limit=size):
2129 2137 ofp.write(chunk)
2130 2138 ofp.close()
2131 2139 elapsed = time.time() - start
2132 2140 if elapsed <= 0:
2133 2141 elapsed = 0.001
2134 2142 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2135 2143 (util.bytecount(total_bytes), elapsed,
2136 2144 util.bytecount(total_bytes / elapsed)))
2137 2145 self.invalidate()
2138 2146 return len(self.heads()) + 1
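# Shape of the stream_out payload parsed above, reconstructed from the
# parsing code: a status-code line, a "total_files total_bytes" summary
# line, then for each file a "name\0size" header followed by size raw
# bytes. Minimal sketch of parsing one file header:
def _parse_file_header_sketch(line):
    name, size = line.split('\0', 1)
    return name, int(size)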
2139 2147
2140 2148 def clone(self, remote, heads=[], stream=False):
2141 2149 '''clone remote repository.
2142 2150
2143 2151 keyword arguments:
2144 2152 heads: list of revs to clone (forces use of pull)
2145 2153 stream: use streaming clone if possible'''
2146 2154
2147 2155 # now, all clients that can request uncompressed clones can
2148 2156 # read repo formats supported by all servers that can serve
2149 2157 # them.
2150 2158
2151 2159 # if revlog format changes, client will have to check version
2152 2160 # and format flags on "stream" capability, and use
2153 2161 # uncompressed only if compatible.
2154 2162
2155 2163 if stream and not heads and remote.capable('stream'):
2156 2164 return self.stream_in(remote)
2157 2165 return self.pull(remote, heads)
2158 2166
2159 2167 # used to avoid circular references so destructors work
2160 2168 def aftertrans(files):
2161 2169 renamefiles = [tuple(t) for t in files]
2162 2170 def a():
2163 2171 for src, dest in renamefiles:
2164 2172 util.rename(src, dest)
2165 2173 return a
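# Hypothetical usage sketch: the returned closure is handed to the
# transaction machinery as an after-close callback, so the renames run
# without the transaction holding references back into the repo.
def _aftertrans_usage_sketch():
    after = aftertrans([('journal', 'undo')])  # hypothetical file names
    return after  # calling after() renames journal -> undo via util.rename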
2166 2174
2167 2175 def instance(ui, path, create):
2168 2176 return localrepository(ui, util.drop_scheme('file', path), create)
2169 2177
2170 2178 def islocal(path):
2171 2179 return True