hooks: separate hook code into a separate module
Matt Mackall
r4622:fff50306 default
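This commit moves hook dispatch out of localrepo.hook() into a new mercurial/hook.py module: in-process Python hooks are handled by _pythonhook(), external shell hooks by _exthook(), and the module-level hook() function becomes the single entry point callers use. As a minimal sketch of the call-site pattern after this change (repo stands for any localrepository instance; hexnode, xp1 and xp2 are placeholders for the hex changeset and parent IDs that localrepo.commit passes in the second hunk below):

    from mercurial import hook

    # Fire the pretxncommit hooks configured in [hooks]; with throw=True a
    # failing hook raises util.Abort instead of just printing a warning.
    hook.hook(repo.ui, repo, 'pretxncommit', throw=True,
              node=hexnode, parent1=xp1, parent2=xp2)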
@@ -0,0 +1,96 @@
1 # hook.py - hook support for mercurial
2 #
3 # Copyright 2007 Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
7
8 from i18n import _
9 import util
10
11 def _pythonhook(ui, repo, name, hname, funcname, args, throw):
12 '''call python hook. hook is callable object, looked up as
13 name in python module. if callable returns "true", hook
14 fails, else passes. if hook raises exception, treated as
15 hook failure. exception propagates if throw is "true".
16
17 reason for "true" meaning "hook failed" is so that
18 unmodified commands (e.g. mercurial.commands.update) can
19 be run as hooks without wrappers to convert return values.'''
20
21 ui.note(_("calling hook %s: %s\n") % (hname, funcname))
22 obj = funcname
23 if not callable(obj):
24 d = funcname.rfind('.')
25 if d == -1:
26 raise util.Abort(_('%s hook is invalid ("%s" not in '
27 'a module)') % (hname, funcname))
28 modname = funcname[:d]
29 try:
30 obj = __import__(modname)
31 except ImportError:
32 try:
33 # extensions are loaded with hgext_ prefix
34 obj = __import__("hgext_%s" % modname)
35 except ImportError:
36 raise util.Abort(_('%s hook is invalid '
37 '(import of "%s" failed)') %
38 (hname, modname))
39 try:
40 for p in funcname.split('.')[1:]:
41 obj = getattr(obj, p)
42 except AttributeError, err:
43 raise util.Abort(_('%s hook is invalid '
44 '("%s" is not defined)') %
45 (hname, funcname))
46 if not callable(obj):
47 raise util.Abort(_('%s hook is invalid '
48 '("%s" is not callable)') %
49 (hname, funcname))
50 try:
51 r = obj(ui=ui, repo=repo, hooktype=name, **args)
52 except (KeyboardInterrupt, util.SignalInterrupt):
53 raise
54 except Exception, exc:
55 if isinstance(exc, util.Abort):
56 ui.warn(_('error: %s hook failed: %s\n') %
57 (hname, exc.args[0]))
58 else:
59 ui.warn(_('error: %s hook raised an exception: '
60 '%s\n') % (hname, exc))
61 if throw:
62 raise
63 ui.print_exc()
64 return True
65 if r:
66 if throw:
67 raise util.Abort(_('%s hook failed') % hname)
68 ui.warn(_('warning: %s hook failed\n') % hname)
69 return r
70
71 def _exthook(ui, repo, name, cmd, args, throw):
72 ui.note(_("running hook %s: %s\n") % (name, cmd))
73 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
74 r = util.system(cmd, environ=env, cwd=repo.root)
75 if r:
76 desc, r = util.explain_exit(r)
77 if throw:
78 raise util.Abort(_('%s hook %s') % (name, desc))
79 ui.warn(_('warning: %s hook %s\n') % (name, desc))
80 return r
81
82 def hook(ui, repo, name, throw=False, **args):
83 r = False
84 hooks = [(hname, cmd) for hname, cmd in ui.configitems("hooks")
85 if hname.split(".", 1)[0] == name and cmd]
86 hooks.sort()
87 for hname, cmd in hooks:
88 if callable(cmd):
89 r = _pythonhook(ui, repo, name, hname, cmd, args, throw) or r
90 elif cmd.startswith('python:'):
91 r = _pythonhook(ui, repo, name, hname, cmd[7:].strip(),
92 args, throw) or r
93 else:
94 r = _exthook(ui, repo, hname, cmd, args, throw) or r
95 return r
96
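The dispatch loop in hook() above accepts three spellings for a [hooks] entry: a Python callable installed programmatically, a "python:module.function" string resolved by _pythonhook() (with an hgext_ prefix fallback for extensions), and any other string, which _exthook() runs as a shell command with the keyword arguments exported as HG_* environment variables. A hypothetical configuration exercising both string forms (the names checklib and checkcommit are illustrative only, not part of this change):

    [hooks]
    # shell hook: run by _exthook with HG_NODE, HG_PARENT1, ... in its environment
    commit.notify = echo committed $HG_NODE
    # in-process hook: _pythonhook imports checklib (or hgext_checklib) and calls
    # checklib.checkcommit(ui=ui, repo=repo, hooktype='pretxncommit', **args)
    pretxncommit.check = python:checklib.checkcommit

and a matching hook function, following the convention documented in _pythonhook's docstring that a true return value (or an exception) marks the hook as failed:

    # checklib.py
    def checkcommit(ui, repo, hooktype, node=None, **kwargs):
        ui.note('checking changeset %s\n' % node)
        return False    # falsy return value: the hook passes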
@@ -1,1965 +1,1883 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 import os, revlog, time, util, extensions
13 import os, revlog, time, util, extensions, hook
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = ('lookup', 'changegroupsubset')
17 17 supported = ('revlogv1', 'store')
18 18
19 19 def __del__(self):
20 20 self.transhandle = None
21 21 def __init__(self, parentui, path=None, create=0):
22 22 repo.repository.__init__(self)
23 23 self.path = path
24 24 self.root = os.path.realpath(path)
25 25 self.path = os.path.join(self.root, ".hg")
26 26 self.origroot = path
27 27 self.opener = util.opener(self.path)
28 28 self.wopener = util.opener(self.root)
29 29
30 30 if not os.path.isdir(self.path):
31 31 if create:
32 32 if not os.path.exists(path):
33 33 os.mkdir(path)
34 34 os.mkdir(self.path)
35 35 requirements = ["revlogv1"]
36 36 if parentui.configbool('format', 'usestore', True):
37 37 os.mkdir(os.path.join(self.path, "store"))
38 38 requirements.append("store")
39 39 # create an invalid changelog
40 40 self.opener("00changelog.i", "a").write(
41 41 '\0\0\0\2' # represents revlogv2
42 42 ' dummy changelog to prevent using the old repo layout'
43 43 )
44 44 reqfile = self.opener("requires", "w")
45 45 for r in requirements:
46 46 reqfile.write("%s\n" % r)
47 47 reqfile.close()
48 48 else:
49 49 raise repo.RepoError(_("repository %s not found") % path)
50 50 elif create:
51 51 raise repo.RepoError(_("repository %s already exists") % path)
52 52 else:
53 53 # find requirements
54 54 try:
55 55 requirements = self.opener("requires").read().splitlines()
56 56 except IOError, inst:
57 57 if inst.errno != errno.ENOENT:
58 58 raise
59 59 requirements = []
60 60 # check them
61 61 for r in requirements:
62 62 if r not in self.supported:
63 63 raise repo.RepoError(_("requirement '%s' not supported") % r)
64 64
65 65 # setup store
66 66 if "store" in requirements:
67 67 self.encodefn = util.encodefilename
68 68 self.decodefn = util.decodefilename
69 69 self.spath = os.path.join(self.path, "store")
70 70 else:
71 71 self.encodefn = lambda x: x
72 72 self.decodefn = lambda x: x
73 73 self.spath = self.path
74 74 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
75 75
76 76 self.ui = ui.ui(parentui=parentui)
77 77 try:
78 78 self.ui.readconfig(self.join("hgrc"), self.root)
79 79 extensions.loadall(self.ui)
80 80 except IOError:
81 81 pass
82 82
83 83 self.tagscache = None
84 84 self.branchcache = None
85 85 self.nodetagscache = None
86 86 self.filterpats = {}
87 87 self.transhandle = None
88 88
89 89 def __getattr__(self, name):
90 90 if name == 'changelog':
91 91 self.changelog = changelog.changelog(self.sopener)
92 92 self.sopener.defversion = self.changelog.version
93 93 return self.changelog
94 94 if name == 'manifest':
95 95 self.changelog
96 96 self.manifest = manifest.manifest(self.sopener)
97 97 return self.manifest
98 98 if name == 'dirstate':
99 99 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
100 100 return self.dirstate
101 101 else:
102 102 raise AttributeError, name
103 103
104 104 def url(self):
105 105 return 'file:' + self.root
106 106
107 107 def hook(self, name, throw=False, **args):
108 def callhook(hname, funcname):
109 '''call python hook. hook is callable object, looked up as
110 name in python module. if callable returns "true", hook
111 fails, else passes. if hook raises exception, treated as
112 hook failure. exception propagates if throw is "true".
113
114 reason for "true" meaning "hook failed" is so that
115 unmodified commands (e.g. mercurial.commands.update) can
116 be run as hooks without wrappers to convert return values.'''
117
118 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
119 obj = funcname
120 if not callable(obj):
121 d = funcname.rfind('.')
122 if d == -1:
123 raise util.Abort(_('%s hook is invalid ("%s" not in '
124 'a module)') % (hname, funcname))
125 modname = funcname[:d]
126 try:
127 obj = __import__(modname)
128 except ImportError:
129 try:
130 # extensions are loaded with hgext_ prefix
131 obj = __import__("hgext_%s" % modname)
132 except ImportError:
133 raise util.Abort(_('%s hook is invalid '
134 '(import of "%s" failed)') %
135 (hname, modname))
136 try:
137 for p in funcname.split('.')[1:]:
138 obj = getattr(obj, p)
139 except AttributeError, err:
140 raise util.Abort(_('%s hook is invalid '
141 '("%s" is not defined)') %
142 (hname, funcname))
143 if not callable(obj):
144 raise util.Abort(_('%s hook is invalid '
145 '("%s" is not callable)') %
146 (hname, funcname))
147 try:
148 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
149 except (KeyboardInterrupt, util.SignalInterrupt):
150 raise
151 except Exception, exc:
152 if isinstance(exc, util.Abort):
153 self.ui.warn(_('error: %s hook failed: %s\n') %
154 (hname, exc.args[0]))
155 else:
156 self.ui.warn(_('error: %s hook raised an exception: '
157 '%s\n') % (hname, exc))
158 if throw:
159 raise
160 self.ui.print_exc()
161 return True
162 if r:
163 if throw:
164 raise util.Abort(_('%s hook failed') % hname)
165 self.ui.warn(_('warning: %s hook failed\n') % hname)
166 return r
167
168 def runhook(name, cmd):
169 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
170 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
171 r = util.system(cmd, environ=env, cwd=self.root)
172 if r:
173 desc, r = util.explain_exit(r)
174 if throw:
175 raise util.Abort(_('%s hook %s') % (name, desc))
176 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
177 return r
178
179 r = False
180 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
181 if hname.split(".", 1)[0] == name and cmd]
182 hooks.sort()
183 for hname, cmd in hooks:
184 if callable(cmd):
185 r = callhook(hname, cmd) or r
186 elif cmd.startswith('python:'):
187 r = callhook(hname, cmd[7:].strip()) or r
188 else:
189 r = runhook(hname, cmd) or r
190 return r
108 return hook.hook(self.ui, self, name, throw, **args)
191 109
192 110 tag_disallowed = ':\r\n'
193 111
194 112 def _tag(self, name, node, message, local, user, date, parent=None):
195 113 use_dirstate = parent is None
196 114
197 115 for c in self.tag_disallowed:
198 116 if c in name:
199 117 raise util.Abort(_('%r cannot be used in a tag name') % c)
200 118
201 119 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
202 120
203 121 if local:
204 122 # local tags are stored in the current charset
205 123 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
206 124 self.hook('tag', node=hex(node), tag=name, local=local)
207 125 return
208 126
209 127 # committed tags are stored in UTF-8
210 128 line = '%s %s\n' % (hex(node), util.fromlocal(name))
211 129 if use_dirstate:
212 130 self.wfile('.hgtags', 'ab').write(line)
213 131 else:
214 132 ntags = self.filectx('.hgtags', parent).data()
215 133 self.wfile('.hgtags', 'ab').write(ntags + line)
216 134 if use_dirstate and self.dirstate.state('.hgtags') == '?':
217 135 self.add(['.hgtags'])
218 136
219 137 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
220 138
221 139 self.hook('tag', node=hex(node), tag=name, local=local)
222 140
223 141 return tagnode
224 142
225 143 def tag(self, name, node, message, local, user, date):
226 144 '''tag a revision with a symbolic name.
227 145
228 146 if local is True, the tag is stored in a per-repository file.
229 147 otherwise, it is stored in the .hgtags file, and a new
230 148 changeset is committed with the change.
231 149
232 150 keyword arguments:
233 151
234 152 local: whether to store tag in non-version-controlled file
235 153 (default False)
236 154
237 155 message: commit message to use if committing
238 156
239 157 user: name of user to use if committing
240 158
241 159 date: date tuple to use if committing'''
242 160
243 161 for x in self.status()[:5]:
244 162 if '.hgtags' in x:
245 163 raise util.Abort(_('working copy of .hgtags is changed '
246 164 '(please commit .hgtags manually)'))
247 165
248 166
249 167 self._tag(name, node, message, local, user, date)
250 168
251 169 def tags(self):
252 170 '''return a mapping of tag to node'''
253 171 if self.tagscache:
254 172 return self.tagscache
255 173
256 174 globaltags = {}
257 175
258 176 def readtags(lines, fn):
259 177 filetags = {}
260 178 count = 0
261 179
262 180 def warn(msg):
263 181 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
264 182
265 183 for l in lines:
266 184 count += 1
267 185 if not l:
268 186 continue
269 187 s = l.split(" ", 1)
270 188 if len(s) != 2:
271 189 warn(_("cannot parse entry"))
272 190 continue
273 191 node, key = s
274 192 key = util.tolocal(key.strip()) # stored in UTF-8
275 193 try:
276 194 bin_n = bin(node)
277 195 except TypeError:
278 196 warn(_("node '%s' is not well formed") % node)
279 197 continue
280 198 if bin_n not in self.changelog.nodemap:
281 199 warn(_("tag '%s' refers to unknown node") % key)
282 200 continue
283 201
284 202 h = []
285 203 if key in filetags:
286 204 n, h = filetags[key]
287 205 h.append(n)
288 206 filetags[key] = (bin_n, h)
289 207
290 208 for k,nh in filetags.items():
291 209 if k not in globaltags:
292 210 globaltags[k] = nh
293 211 continue
294 212 # we prefer the global tag if:
295 213 # it supersedes us OR
296 214 # mutual supersedes and it has a higher rank
297 215 # otherwise we win because we're tip-most
298 216 an, ah = nh
299 217 bn, bh = globaltags[k]
300 218 if bn != an and an in bh and \
301 219 (bn not in ah or len(bh) > len(ah)):
302 220 an = bn
303 221 ah.extend([n for n in bh if n not in ah])
304 222 globaltags[k] = an, ah
305 223
306 224 # read the tags file from each head, ending with the tip
307 225 f = None
308 226 for rev, node, fnode in self._hgtagsnodes():
309 227 f = (f and f.filectx(fnode) or
310 228 self.filectx('.hgtags', fileid=fnode))
311 229 readtags(f.data().splitlines(), f)
312 230
313 231 try:
314 232 data = util.fromlocal(self.opener("localtags").read())
315 233 # localtags are stored in the local character set
316 234 # while the internal tag table is stored in UTF-8
317 235 readtags(data.splitlines(), "localtags")
318 236 except IOError:
319 237 pass
320 238
321 239 self.tagscache = {}
322 240 for k,nh in globaltags.items():
323 241 n = nh[0]
324 242 if n != nullid:
325 243 self.tagscache[k] = n
326 244 self.tagscache['tip'] = self.changelog.tip()
327 245
328 246 return self.tagscache
329 247
330 248 def _hgtagsnodes(self):
331 249 heads = self.heads()
332 250 heads.reverse()
333 251 last = {}
334 252 ret = []
335 253 for node in heads:
336 254 c = self.changectx(node)
337 255 rev = c.rev()
338 256 try:
339 257 fnode = c.filenode('.hgtags')
340 258 except revlog.LookupError:
341 259 continue
342 260 ret.append((rev, node, fnode))
343 261 if fnode in last:
344 262 ret[last[fnode]] = None
345 263 last[fnode] = len(ret) - 1
346 264 return [item for item in ret if item]
347 265
348 266 def tagslist(self):
349 267 '''return a list of tags ordered by revision'''
350 268 l = []
351 269 for t, n in self.tags().items():
352 270 try:
353 271 r = self.changelog.rev(n)
354 272 except:
355 273 r = -2 # sort to the beginning of the list if unknown
356 274 l.append((r, t, n))
357 275 l.sort()
358 276 return [(t, n) for r, t, n in l]
359 277
360 278 def nodetags(self, node):
361 279 '''return the tags associated with a node'''
362 280 if not self.nodetagscache:
363 281 self.nodetagscache = {}
364 282 for t, n in self.tags().items():
365 283 self.nodetagscache.setdefault(n, []).append(t)
366 284 return self.nodetagscache.get(node, [])
367 285
368 286 def _branchtags(self):
369 287 partial, last, lrev = self._readbranchcache()
370 288
371 289 tiprev = self.changelog.count() - 1
372 290 if lrev != tiprev:
373 291 self._updatebranchcache(partial, lrev+1, tiprev+1)
374 292 self._writebranchcache(partial, self.changelog.tip(), tiprev)
375 293
376 294 return partial
377 295
378 296 def branchtags(self):
379 297 if self.branchcache is not None:
380 298 return self.branchcache
381 299
382 300 self.branchcache = {} # avoid recursion in changectx
383 301 partial = self._branchtags()
384 302
385 303 # the branch cache is stored on disk as UTF-8, but in the local
386 304 # charset internally
387 305 for k, v in partial.items():
388 306 self.branchcache[util.tolocal(k)] = v
389 307 return self.branchcache
390 308
391 309 def _readbranchcache(self):
392 310 partial = {}
393 311 try:
394 312 f = self.opener("branch.cache")
395 313 lines = f.read().split('\n')
396 314 f.close()
397 315 except (IOError, OSError):
398 316 return {}, nullid, nullrev
399 317
400 318 try:
401 319 last, lrev = lines.pop(0).split(" ", 1)
402 320 last, lrev = bin(last), int(lrev)
403 321 if not (lrev < self.changelog.count() and
404 322 self.changelog.node(lrev) == last): # sanity check
405 323 # invalidate the cache
406 324 raise ValueError('Invalid branch cache: unknown tip')
407 325 for l in lines:
408 326 if not l: continue
409 327 node, label = l.split(" ", 1)
410 328 partial[label.strip()] = bin(node)
411 329 except (KeyboardInterrupt, util.SignalInterrupt):
412 330 raise
413 331 except Exception, inst:
414 332 if self.ui.debugflag:
415 333 self.ui.warn(str(inst), '\n')
416 334 partial, last, lrev = {}, nullid, nullrev
417 335 return partial, last, lrev
418 336
419 337 def _writebranchcache(self, branches, tip, tiprev):
420 338 try:
421 339 f = self.opener("branch.cache", "w", atomictemp=True)
422 340 f.write("%s %s\n" % (hex(tip), tiprev))
423 341 for label, node in branches.iteritems():
424 342 f.write("%s %s\n" % (hex(node), label))
425 343 f.rename()
426 344 except (IOError, OSError):
427 345 pass
428 346
429 347 def _updatebranchcache(self, partial, start, end):
430 348 for r in xrange(start, end):
431 349 c = self.changectx(r)
432 350 b = c.branch()
433 351 partial[b] = c.node()
434 352
435 353 def lookup(self, key):
436 354 if key == '.':
437 355 key, second = self.dirstate.parents()
438 356 if key == nullid:
439 357 raise repo.RepoError(_("no revision checked out"))
440 358 if second != nullid:
441 359 self.ui.warn(_("warning: working directory has two parents, "
442 360 "tag '.' uses the first\n"))
443 361 elif key == 'null':
444 362 return nullid
445 363 n = self.changelog._match(key)
446 364 if n:
447 365 return n
448 366 if key in self.tags():
449 367 return self.tags()[key]
450 368 if key in self.branchtags():
451 369 return self.branchtags()[key]
452 370 n = self.changelog._partialmatch(key)
453 371 if n:
454 372 return n
455 373 raise repo.RepoError(_("unknown revision '%s'") % key)
456 374
457 375 def dev(self):
458 376 return os.lstat(self.path).st_dev
459 377
460 378 def local(self):
461 379 return True
462 380
463 381 def join(self, f):
464 382 return os.path.join(self.path, f)
465 383
466 384 def sjoin(self, f):
467 385 f = self.encodefn(f)
468 386 return os.path.join(self.spath, f)
469 387
470 388 def wjoin(self, f):
471 389 return os.path.join(self.root, f)
472 390
473 391 def file(self, f):
474 392 if f[0] == '/':
475 393 f = f[1:]
476 394 return filelog.filelog(self.sopener, f)
477 395
478 396 def changectx(self, changeid=None):
479 397 return context.changectx(self, changeid)
480 398
481 399 def workingctx(self):
482 400 return context.workingctx(self)
483 401
484 402 def parents(self, changeid=None):
485 403 '''
486 404 get list of changectxs for parents of changeid or working directory
487 405 '''
488 406 if changeid is None:
489 407 pl = self.dirstate.parents()
490 408 else:
491 409 n = self.changelog.lookup(changeid)
492 410 pl = self.changelog.parents(n)
493 411 if pl[1] == nullid:
494 412 return [self.changectx(pl[0])]
495 413 return [self.changectx(pl[0]), self.changectx(pl[1])]
496 414
497 415 def filectx(self, path, changeid=None, fileid=None):
498 416 """changeid can be a changeset revision, node, or tag.
499 417 fileid can be a file revision or node."""
500 418 return context.filectx(self, path, changeid, fileid)
501 419
502 420 def getcwd(self):
503 421 return self.dirstate.getcwd()
504 422
505 423 def pathto(self, f, cwd=None):
506 424 return self.dirstate.pathto(f, cwd)
507 425
508 426 def wfile(self, f, mode='r'):
509 427 return self.wopener(f, mode)
510 428
511 429 def _link(self, f):
512 430 return os.path.islink(self.wjoin(f))
513 431
514 432 def _filter(self, filter, filename, data):
515 433 if filter not in self.filterpats:
516 434 l = []
517 435 for pat, cmd in self.ui.configitems(filter):
518 436 mf = util.matcher(self.root, "", [pat], [], [])[1]
519 437 l.append((mf, cmd))
520 438 self.filterpats[filter] = l
521 439
522 440 for mf, cmd in self.filterpats[filter]:
523 441 if mf(filename):
524 442 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
525 443 data = util.filter(data, cmd)
526 444 break
527 445
528 446 return data
529 447
530 448 def wread(self, filename):
531 449 if self._link(filename):
532 450 data = os.readlink(self.wjoin(filename))
533 451 else:
534 452 data = self.wopener(filename, 'r').read()
535 453 return self._filter("encode", filename, data)
536 454
537 455 def wwrite(self, filename, data, flags):
538 456 data = self._filter("decode", filename, data)
539 457 if "l" in flags:
540 458 f = self.wjoin(filename)
541 459 try:
542 460 os.unlink(f)
543 461 except OSError:
544 462 pass
545 463 d = os.path.dirname(f)
546 464 if not os.path.exists(d):
547 465 os.makedirs(d)
548 466 os.symlink(data, f)
549 467 else:
550 468 try:
551 469 if self._link(filename):
552 470 os.unlink(self.wjoin(filename))
553 471 except OSError:
554 472 pass
555 473 self.wopener(filename, 'w').write(data)
556 474 util.set_exec(self.wjoin(filename), "x" in flags)
557 475
558 476 def wwritedata(self, filename, data):
559 477 return self._filter("decode", filename, data)
560 478
561 479 def transaction(self):
562 480 tr = self.transhandle
563 481 if tr != None and tr.running():
564 482 return tr.nest()
565 483
566 484 # save dirstate for rollback
567 485 try:
568 486 ds = self.opener("dirstate").read()
569 487 except IOError:
570 488 ds = ""
571 489 self.opener("journal.dirstate", "w").write(ds)
572 490
573 491 renames = [(self.sjoin("journal"), self.sjoin("undo")),
574 492 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
575 493 tr = transaction.transaction(self.ui.warn, self.sopener,
576 494 self.sjoin("journal"),
577 495 aftertrans(renames))
578 496 self.transhandle = tr
579 497 return tr
580 498
581 499 def recover(self):
582 500 l = self.lock()
583 501 if os.path.exists(self.sjoin("journal")):
584 502 self.ui.status(_("rolling back interrupted transaction\n"))
585 503 transaction.rollback(self.sopener, self.sjoin("journal"))
586 504 self.invalidate()
587 505 return True
588 506 else:
589 507 self.ui.warn(_("no interrupted transaction available\n"))
590 508 return False
591 509
592 510 def rollback(self, wlock=None, lock=None):
593 511 if not wlock:
594 512 wlock = self.wlock()
595 513 if not lock:
596 514 lock = self.lock()
597 515 if os.path.exists(self.sjoin("undo")):
598 516 self.ui.status(_("rolling back last transaction\n"))
599 517 transaction.rollback(self.sopener, self.sjoin("undo"))
600 518 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
601 519 self.invalidate()
602 520 self.dirstate.invalidate()
603 521 else:
604 522 self.ui.warn(_("no rollback information available\n"))
605 523
606 524 def invalidate(self):
607 525 for a in "changelog manifest".split():
608 526 if hasattr(self, a):
609 527 self.__delattr__(a)
610 528 self.tagscache = None
611 529 self.nodetagscache = None
612 530
613 531 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
614 532 desc=None):
615 533 try:
616 534 l = lock.lock(lockname, 0, releasefn, desc=desc)
617 535 except lock.LockHeld, inst:
618 536 if not wait:
619 537 raise
620 538 self.ui.warn(_("waiting for lock on %s held by %r\n") %
621 539 (desc, inst.locker))
622 540 # default to 600 seconds timeout
623 541 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
624 542 releasefn, desc=desc)
625 543 if acquirefn:
626 544 acquirefn()
627 545 return l
628 546
629 547 def lock(self, wait=1):
630 548 return self.do_lock(self.sjoin("lock"), wait,
631 549 acquirefn=self.invalidate,
632 550 desc=_('repository %s') % self.origroot)
633 551
634 552 def wlock(self, wait=1):
635 553 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
636 554 self.dirstate.invalidate,
637 555 desc=_('working directory of %s') % self.origroot)
638 556
639 557 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
640 558 """
641 559 commit an individual file as part of a larger transaction
642 560 """
643 561
644 562 t = self.wread(fn)
645 563 fl = self.file(fn)
646 564 fp1 = manifest1.get(fn, nullid)
647 565 fp2 = manifest2.get(fn, nullid)
648 566
649 567 meta = {}
650 568 cp = self.dirstate.copied(fn)
651 569 if cp:
652 570 # Mark the new revision of this file as a copy of another
653 571 # file. This copy data will effectively act as a parent
654 572 # of this new revision. If this is a merge, the first
655 573 # parent will be the nullid (meaning "look up the copy data")
656 574 # and the second one will be the other parent. For example:
657 575 #
658 576 # 0 --- 1 --- 3 rev1 changes file foo
659 577 # \ / rev2 renames foo to bar and changes it
660 578 # \- 2 -/ rev3 should have bar with all changes and
661 579 # should record that bar descends from
662 580 # bar in rev2 and foo in rev1
663 581 #
664 582 # this allows this merge to succeed:
665 583 #
666 584 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
667 585 # \ / merging rev3 and rev4 should use bar@rev2
668 586 # \- 2 --- 4 as the merge base
669 587 #
670 588 meta["copy"] = cp
671 589 if not manifest2: # not a branch merge
672 590 meta["copyrev"] = hex(manifest1.get(cp, nullid))
673 591 fp2 = nullid
674 592 elif fp2 != nullid: # copied on remote side
675 593 meta["copyrev"] = hex(manifest1.get(cp, nullid))
676 594 elif fp1 != nullid: # copied on local side, reversed
677 595 meta["copyrev"] = hex(manifest2.get(cp))
678 596 fp2 = fp1
679 597 else: # directory rename
680 598 meta["copyrev"] = hex(manifest1.get(cp, nullid))
681 599 self.ui.debug(_(" %s: copy %s:%s\n") %
682 600 (fn, cp, meta["copyrev"]))
683 601 fp1 = nullid
684 602 elif fp2 != nullid:
685 603 # is one parent an ancestor of the other?
686 604 fpa = fl.ancestor(fp1, fp2)
687 605 if fpa == fp1:
688 606 fp1, fp2 = fp2, nullid
689 607 elif fpa == fp2:
690 608 fp2 = nullid
691 609
692 610 # is the file unmodified from the parent? report existing entry
693 611 if fp2 == nullid and not fl.cmp(fp1, t):
694 612 return fp1
695 613
696 614 changelist.append(fn)
697 615 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
698 616
699 617 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
700 618 if p1 is None:
701 619 p1, p2 = self.dirstate.parents()
702 620 return self.commit(files=files, text=text, user=user, date=date,
703 621 p1=p1, p2=p2, wlock=wlock, extra=extra)
704 622
705 623 def commit(self, files=None, text="", user=None, date=None,
706 624 match=util.always, force=False, lock=None, wlock=None,
707 625 force_editor=False, p1=None, p2=None, extra={}):
708 626
709 627 commit = []
710 628 remove = []
711 629 changed = []
712 630 use_dirstate = (p1 is None) # not rawcommit
713 631 extra = extra.copy()
714 632
715 633 if use_dirstate:
716 634 if files:
717 635 for f in files:
718 636 s = self.dirstate.state(f)
719 637 if s in 'nmai':
720 638 commit.append(f)
721 639 elif s == 'r':
722 640 remove.append(f)
723 641 else:
724 642 self.ui.warn(_("%s not tracked!\n") % f)
725 643 else:
726 644 changes = self.status(match=match)[:5]
727 645 modified, added, removed, deleted, unknown = changes
728 646 commit = modified + added
729 647 remove = removed
730 648 else:
731 649 commit = files
732 650
733 651 if use_dirstate:
734 652 p1, p2 = self.dirstate.parents()
735 653 update_dirstate = True
736 654 else:
737 655 p1, p2 = p1, p2 or nullid
738 656 update_dirstate = (self.dirstate.parents()[0] == p1)
739 657
740 658 c1 = self.changelog.read(p1)
741 659 c2 = self.changelog.read(p2)
742 660 m1 = self.manifest.read(c1[0]).copy()
743 661 m2 = self.manifest.read(c2[0])
744 662
745 663 if use_dirstate:
746 664 branchname = self.workingctx().branch()
747 665 try:
748 666 branchname = branchname.decode('UTF-8').encode('UTF-8')
749 667 except UnicodeDecodeError:
750 668 raise util.Abort(_('branch name not in UTF-8!'))
751 669 else:
752 670 branchname = ""
753 671
754 672 if use_dirstate:
755 673 oldname = c1[5].get("branch") # stored in UTF-8
756 674 if not commit and not remove and not force and p2 == nullid and \
757 675 branchname == oldname:
758 676 self.ui.status(_("nothing changed\n"))
759 677 return None
760 678
761 679 xp1 = hex(p1)
762 680 if p2 == nullid: xp2 = ''
763 681 else: xp2 = hex(p2)
764 682
765 683 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
766 684
767 685 if not wlock:
768 686 wlock = self.wlock()
769 687 if not lock:
770 688 lock = self.lock()
771 689 tr = self.transaction()
772 690
773 691 # check in files
774 692 new = {}
775 693 linkrev = self.changelog.count()
776 694 commit.sort()
777 695 is_exec = util.execfunc(self.root, m1.execf)
778 696 is_link = util.linkfunc(self.root, m1.linkf)
779 697 for f in commit:
780 698 self.ui.note(f + "\n")
781 699 try:
782 700 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
783 701 new_exec = is_exec(f)
784 702 new_link = is_link(f)
785 703 if not changed or changed[-1] != f:
786 704 # mention the file in the changelog if some flag changed,
787 705 # even if there was no content change.
788 706 old_exec = m1.execf(f)
789 707 old_link = m1.linkf(f)
790 708 if old_exec != new_exec or old_link != new_link:
791 709 changed.append(f)
792 710 m1.set(f, new_exec, new_link)
793 711 except (OSError, IOError):
794 712 if use_dirstate:
795 713 self.ui.warn(_("trouble committing %s!\n") % f)
796 714 raise
797 715 else:
798 716 remove.append(f)
799 717
800 718 # update manifest
801 719 m1.update(new)
802 720 remove.sort()
803 721 removed = []
804 722
805 723 for f in remove:
806 724 if f in m1:
807 725 del m1[f]
808 726 removed.append(f)
809 727 elif f in m2:
810 728 removed.append(f)
811 729 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
812 730
813 731 # add changeset
814 732 new = new.keys()
815 733 new.sort()
816 734
817 735 user = user or self.ui.username()
818 736 if not text or force_editor:
819 737 edittext = []
820 738 if text:
821 739 edittext.append(text)
822 740 edittext.append("")
823 741 edittext.append("HG: user: %s" % user)
824 742 if p2 != nullid:
825 743 edittext.append("HG: branch merge")
826 744 if branchname:
827 745 edittext.append("HG: branch %s" % util.tolocal(branchname))
828 746 edittext.extend(["HG: changed %s" % f for f in changed])
829 747 edittext.extend(["HG: removed %s" % f for f in removed])
830 748 if not changed and not remove:
831 749 edittext.append("HG: no files changed")
832 750 edittext.append("")
833 751 # run editor in the repository root
834 752 olddir = os.getcwd()
835 753 os.chdir(self.root)
836 754 text = self.ui.edit("\n".join(edittext), user)
837 755 os.chdir(olddir)
838 756
839 757 lines = [line.rstrip() for line in text.rstrip().splitlines()]
840 758 while lines and not lines[0]:
841 759 del lines[0]
842 760 if not lines:
843 761 return None
844 762 text = '\n'.join(lines)
845 763 if branchname:
846 764 extra["branch"] = branchname
847 765 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
848 766 user, date, extra)
849 767 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
850 768 parent2=xp2)
851 769 tr.close()
852 770
853 771 if self.branchcache and "branch" in extra:
854 772 self.branchcache[util.tolocal(extra["branch"])] = n
855 773
856 774 if use_dirstate or update_dirstate:
857 775 self.dirstate.setparents(n)
858 776 if use_dirstate:
859 777 self.dirstate.update(new, "n")
860 778 self.dirstate.forget(removed)
861 779
862 780 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
863 781 return n
864 782
865 783 def walk(self, node=None, files=[], match=util.always, badmatch=None):
866 784 '''
867 785 walk recursively through the directory tree or a given
868 786 changeset, finding all files matched by the match
869 787 function
870 788
871 789 results are yielded in a tuple (src, filename), where src
872 790 is one of:
873 791 'f' the file was found in the directory tree
874 792 'm' the file was only in the dirstate and not in the tree
875 793 'b' file was not found and matched badmatch
876 794 '''
877 795
878 796 if node:
879 797 fdict = dict.fromkeys(files)
880 798 # for dirstate.walk, files=['.'] means "walk the whole tree".
881 799 # follow that here, too
882 800 fdict.pop('.', None)
883 801 mdict = self.manifest.read(self.changelog.read(node)[0])
884 802 mfiles = mdict.keys()
885 803 mfiles.sort()
886 804 for fn in mfiles:
887 805 for ffn in fdict:
888 806 # match if the file is the exact name or a directory
889 807 if ffn == fn or fn.startswith("%s/" % ffn):
890 808 del fdict[ffn]
891 809 break
892 810 if match(fn):
893 811 yield 'm', fn
894 812 ffiles = fdict.keys()
895 813 ffiles.sort()
896 814 for fn in ffiles:
897 815 if badmatch and badmatch(fn):
898 816 if match(fn):
899 817 yield 'b', fn
900 818 else:
901 819 self.ui.warn(_('%s: No such file in rev %s\n')
902 820 % (self.pathto(fn), short(node)))
903 821 else:
904 822 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
905 823 yield src, fn
906 824
907 825 def status(self, node1=None, node2=None, files=[], match=util.always,
908 826 wlock=None, list_ignored=False, list_clean=False):
909 827 """return status of files between two nodes or node and working directory
910 828
911 829 If node1 is None, use the first dirstate parent instead.
912 830 If node2 is None, compare node1 with working directory.
913 831 """
914 832
915 833 def fcmp(fn, getnode):
916 834 t1 = self.wread(fn)
917 835 return self.file(fn).cmp(getnode(fn), t1)
918 836
919 837 def mfmatches(node):
920 838 change = self.changelog.read(node)
921 839 mf = self.manifest.read(change[0]).copy()
922 840 for fn in mf.keys():
923 841 if not match(fn):
924 842 del mf[fn]
925 843 return mf
926 844
927 845 modified, added, removed, deleted, unknown = [], [], [], [], []
928 846 ignored, clean = [], []
929 847
930 848 compareworking = False
931 849 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
932 850 compareworking = True
933 851
934 852 if not compareworking:
935 853 # read the manifest from node1 before the manifest from node2,
936 854 # so that we'll hit the manifest cache if we're going through
937 855 # all the revisions in parent->child order.
938 856 mf1 = mfmatches(node1)
939 857
940 858 mywlock = False
941 859
942 860 # are we comparing the working directory?
943 861 if not node2:
944 862 (lookup, modified, added, removed, deleted, unknown,
945 863 ignored, clean) = self.dirstate.status(files, match,
946 864 list_ignored, list_clean)
947 865
948 866 # are we comparing working dir against its parent?
949 867 if compareworking:
950 868 if lookup:
951 869 # do a full compare of any files that might have changed
952 870 mnode = self.changelog.read(self.dirstate.parents()[0])[0]
953 871 getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
954 872 nullid)
955 873 for f in lookup:
956 874 if fcmp(f, getnode):
957 875 modified.append(f)
958 876 else:
959 877 if list_clean:
960 878 clean.append(f)
961 879 if not wlock and not mywlock:
962 880 mywlock = True
963 881 try:
964 882 wlock = self.wlock(wait=0)
965 883 except lock.LockException:
966 884 pass
967 885 if wlock:
968 886 self.dirstate.update([f], "n")
969 887 else:
970 888 # we are comparing working dir against non-parent
971 889 # generate a pseudo-manifest for the working dir
972 890 # XXX: create it in dirstate.py ?
973 891 mf2 = mfmatches(self.dirstate.parents()[0])
974 892 is_exec = util.execfunc(self.root, mf2.execf)
975 893 is_link = util.linkfunc(self.root, mf2.linkf)
976 894 for f in lookup + modified + added:
977 895 mf2[f] = ""
978 896 mf2.set(f, is_exec(f), is_link(f))
979 897 for f in removed:
980 898 if f in mf2:
981 899 del mf2[f]
982 900
983 901 if mywlock and wlock:
984 902 wlock.release()
985 903 else:
986 904 # we are comparing two revisions
987 905 mf2 = mfmatches(node2)
988 906
989 907 if not compareworking:
990 908 # flush lists from dirstate before comparing manifests
991 909 modified, added, clean = [], [], []
992 910
993 911 # make sure to sort the files so we talk to the disk in a
994 912 # reasonable order
995 913 mf2keys = mf2.keys()
996 914 mf2keys.sort()
997 915 getnode = lambda fn: mf1.get(fn, nullid)
998 916 for fn in mf2keys:
999 917 if mf1.has_key(fn):
1000 918 if mf1.flags(fn) != mf2.flags(fn) or \
1001 919 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
1002 920 fcmp(fn, getnode))):
1003 921 modified.append(fn)
1004 922 elif list_clean:
1005 923 clean.append(fn)
1006 924 del mf1[fn]
1007 925 else:
1008 926 added.append(fn)
1009 927
1010 928 removed = mf1.keys()
1011 929
1012 930 # sort and return results:
1013 931 for l in modified, added, removed, deleted, unknown, ignored, clean:
1014 932 l.sort()
1015 933 return (modified, added, removed, deleted, unknown, ignored, clean)
1016 934
1017 935 def add(self, list, wlock=None):
1018 936 if not wlock:
1019 937 wlock = self.wlock()
1020 938 for f in list:
1021 939 p = self.wjoin(f)
1022 940 try:
1023 941 st = os.lstat(p)
1024 942 except:
1025 943 self.ui.warn(_("%s does not exist!\n") % f)
1026 944 continue
1027 945 if st.st_size > 10000000:
1028 946 self.ui.warn(_("%s: files over 10MB may cause memory and"
1029 947 " performance problems\n"
1030 948 "(use 'hg revert %s' to unadd the file)\n")
1031 949 % (f, f))
1032 950 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1033 951 self.ui.warn(_("%s not added: only files and symlinks "
1034 952 "supported currently\n") % f)
1035 953 elif self.dirstate.state(f) in 'an':
1036 954 self.ui.warn(_("%s already tracked!\n") % f)
1037 955 else:
1038 956 self.dirstate.update([f], "a")
1039 957
1040 958 def forget(self, list, wlock=None):
1041 959 if not wlock:
1042 960 wlock = self.wlock()
1043 961 for f in list:
1044 962 if self.dirstate.state(f) not in 'ai':
1045 963 self.ui.warn(_("%s not added!\n") % f)
1046 964 else:
1047 965 self.dirstate.forget([f])
1048 966
1049 967 def remove(self, list, unlink=False, wlock=None):
1050 968 if unlink:
1051 969 for f in list:
1052 970 try:
1053 971 util.unlink(self.wjoin(f))
1054 972 except OSError, inst:
1055 973 if inst.errno != errno.ENOENT:
1056 974 raise
1057 975 if not wlock:
1058 976 wlock = self.wlock()
1059 977 for f in list:
1060 978 if unlink and os.path.exists(self.wjoin(f)):
1061 979 self.ui.warn(_("%s still exists!\n") % f)
1062 980 elif self.dirstate.state(f) == 'a':
1063 981 self.dirstate.forget([f])
1064 982 elif f not in self.dirstate:
1065 983 self.ui.warn(_("%s not tracked!\n") % f)
1066 984 else:
1067 985 self.dirstate.update([f], "r")
1068 986
1069 987 def undelete(self, list, wlock=None):
1070 988 p = self.dirstate.parents()[0]
1071 989 mn = self.changelog.read(p)[0]
1072 990 m = self.manifest.read(mn)
1073 991 if not wlock:
1074 992 wlock = self.wlock()
1075 993 for f in list:
1076 994 if self.dirstate.state(f) not in "r":
1077 995 self.ui.warn("%s not removed!\n" % f)
1078 996 else:
1079 997 t = self.file(f).read(m[f])
1080 998 self.wwrite(f, t, m.flags(f))
1081 999 self.dirstate.update([f], "n")
1082 1000
1083 1001 def copy(self, source, dest, wlock=None):
1084 1002 p = self.wjoin(dest)
1085 1003 if not (os.path.exists(p) or os.path.islink(p)):
1086 1004 self.ui.warn(_("%s does not exist!\n") % dest)
1087 1005 elif not (os.path.isfile(p) or os.path.islink(p)):
1088 1006 self.ui.warn(_("copy failed: %s is not a file or a "
1089 1007 "symbolic link\n") % dest)
1090 1008 else:
1091 1009 if not wlock:
1092 1010 wlock = self.wlock()
1093 1011 if self.dirstate.state(dest) == '?':
1094 1012 self.dirstate.update([dest], "a")
1095 1013 self.dirstate.copy(source, dest)
1096 1014
1097 1015 def heads(self, start=None):
1098 1016 heads = self.changelog.heads(start)
1099 1017 # sort the output in rev descending order
1100 1018 heads = [(-self.changelog.rev(h), h) for h in heads]
1101 1019 heads.sort()
1102 1020 return [n for (r, n) in heads]
1103 1021
1104 1022 def branches(self, nodes):
1105 1023 if not nodes:
1106 1024 nodes = [self.changelog.tip()]
1107 1025 b = []
1108 1026 for n in nodes:
1109 1027 t = n
1110 1028 while 1:
1111 1029 p = self.changelog.parents(n)
1112 1030 if p[1] != nullid or p[0] == nullid:
1113 1031 b.append((t, n, p[0], p[1]))
1114 1032 break
1115 1033 n = p[0]
1116 1034 return b
1117 1035
1118 1036 def between(self, pairs):
1119 1037 r = []
1120 1038
1121 1039 for top, bottom in pairs:
1122 1040 n, l, i = top, [], 0
1123 1041 f = 1
1124 1042
1125 1043 while n != bottom:
1126 1044 p = self.changelog.parents(n)[0]
1127 1045 if i == f:
1128 1046 l.append(n)
1129 1047 f = f * 2
1130 1048 n = p
1131 1049 i += 1
1132 1050
1133 1051 r.append(l)
1134 1052
1135 1053 return r
1136 1054
1137 1055 def findincoming(self, remote, base=None, heads=None, force=False):
1138 1056 """Return list of roots of the subsets of missing nodes from remote
1139 1057
1140 1058 If base dict is specified, assume that these nodes and their parents
1141 1059 exist on the remote side and that no child of a node of base exists
1142 1060 in both remote and self.
1143 1061 Furthermore base will be updated to include the nodes that exists
1144 1062 in self and remote but no children exists in self and remote.
1145 1063 If a list of heads is specified, return only nodes which are heads
1146 1064 or ancestors of these heads.
1147 1065
1148 1066 All the ancestors of base are in self and in remote.
1149 1067 All the descendants of the list returned are missing in self.
1150 1068 (and so we know that the rest of the nodes are missing in remote, see
1151 1069 outgoing)
1152 1070 """
1153 1071 m = self.changelog.nodemap
1154 1072 search = []
1155 1073 fetch = {}
1156 1074 seen = {}
1157 1075 seenbranch = {}
1158 1076 if base == None:
1159 1077 base = {}
1160 1078
1161 1079 if not heads:
1162 1080 heads = remote.heads()
1163 1081
1164 1082 if self.changelog.tip() == nullid:
1165 1083 base[nullid] = 1
1166 1084 if heads != [nullid]:
1167 1085 return [nullid]
1168 1086 return []
1169 1087
1170 1088 # assume we're closer to the tip than the root
1171 1089 # and start by examining the heads
1172 1090 self.ui.status(_("searching for changes\n"))
1173 1091
1174 1092 unknown = []
1175 1093 for h in heads:
1176 1094 if h not in m:
1177 1095 unknown.append(h)
1178 1096 else:
1179 1097 base[h] = 1
1180 1098
1181 1099 if not unknown:
1182 1100 return []
1183 1101
1184 1102 req = dict.fromkeys(unknown)
1185 1103 reqcnt = 0
1186 1104
1187 1105 # search through remote branches
1188 1106 # a 'branch' here is a linear segment of history, with four parts:
1189 1107 # head, root, first parent, second parent
1190 1108 # (a branch always has two parents (or none) by definition)
1191 1109 unknown = remote.branches(unknown)
1192 1110 while unknown:
1193 1111 r = []
1194 1112 while unknown:
1195 1113 n = unknown.pop(0)
1196 1114 if n[0] in seen:
1197 1115 continue
1198 1116
1199 1117 self.ui.debug(_("examining %s:%s\n")
1200 1118 % (short(n[0]), short(n[1])))
1201 1119 if n[0] == nullid: # found the end of the branch
1202 1120 pass
1203 1121 elif n in seenbranch:
1204 1122 self.ui.debug(_("branch already found\n"))
1205 1123 continue
1206 1124 elif n[1] and n[1] in m: # do we know the base?
1207 1125 self.ui.debug(_("found incomplete branch %s:%s\n")
1208 1126 % (short(n[0]), short(n[1])))
1209 1127 search.append(n) # schedule branch range for scanning
1210 1128 seenbranch[n] = 1
1211 1129 else:
1212 1130 if n[1] not in seen and n[1] not in fetch:
1213 1131 if n[2] in m and n[3] in m:
1214 1132 self.ui.debug(_("found new changeset %s\n") %
1215 1133 short(n[1]))
1216 1134 fetch[n[1]] = 1 # earliest unknown
1217 1135 for p in n[2:4]:
1218 1136 if p in m:
1219 1137 base[p] = 1 # latest known
1220 1138
1221 1139 for p in n[2:4]:
1222 1140 if p not in req and p not in m:
1223 1141 r.append(p)
1224 1142 req[p] = 1
1225 1143 seen[n[0]] = 1
1226 1144
1227 1145 if r:
1228 1146 reqcnt += 1
1229 1147 self.ui.debug(_("request %d: %s\n") %
1230 1148 (reqcnt, " ".join(map(short, r))))
1231 1149 for p in xrange(0, len(r), 10):
1232 1150 for b in remote.branches(r[p:p+10]):
1233 1151 self.ui.debug(_("received %s:%s\n") %
1234 1152 (short(b[0]), short(b[1])))
1235 1153 unknown.append(b)
1236 1154
1237 1155 # do binary search on the branches we found
1238 1156 while search:
1239 1157 n = search.pop(0)
1240 1158 reqcnt += 1
1241 1159 l = remote.between([(n[0], n[1])])[0]
1242 1160 l.append(n[1])
1243 1161 p = n[0]
1244 1162 f = 1
1245 1163 for i in l:
1246 1164 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1247 1165 if i in m:
1248 1166 if f <= 2:
1249 1167 self.ui.debug(_("found new branch changeset %s\n") %
1250 1168 short(p))
1251 1169 fetch[p] = 1
1252 1170 base[i] = 1
1253 1171 else:
1254 1172 self.ui.debug(_("narrowed branch search to %s:%s\n")
1255 1173 % (short(p), short(i)))
1256 1174 search.append((p, i))
1257 1175 break
1258 1176 p, f = i, f * 2
1259 1177
1260 1178 # sanity check our fetch list
1261 1179 for f in fetch.keys():
1262 1180 if f in m:
1263 1181 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1264 1182
1265 1183 if base.keys() == [nullid]:
1266 1184 if force:
1267 1185 self.ui.warn(_("warning: repository is unrelated\n"))
1268 1186 else:
1269 1187 raise util.Abort(_("repository is unrelated"))
1270 1188
1271 1189 self.ui.debug(_("found new changesets starting at ") +
1272 1190 " ".join([short(f) for f in fetch]) + "\n")
1273 1191
1274 1192 self.ui.debug(_("%d total queries\n") % reqcnt)
1275 1193
1276 1194 return fetch.keys()
1277 1195
1278 1196 def findoutgoing(self, remote, base=None, heads=None, force=False):
1279 1197 """Return list of nodes that are roots of subsets not in remote
1280 1198
1281 1199 If base dict is specified, assume that these nodes and their parents
1282 1200 exist on the remote side.
1283 1201 If a list of heads is specified, return only nodes which are heads
1284 1202 or ancestors of these heads, and return a second element which
1285 1203 contains all remote heads which get new children.
1286 1204 """
1287 1205 if base == None:
1288 1206 base = {}
1289 1207 self.findincoming(remote, base, heads, force=force)
1290 1208
1291 1209 self.ui.debug(_("common changesets up to ")
1292 1210 + " ".join(map(short, base.keys())) + "\n")
1293 1211
1294 1212 remain = dict.fromkeys(self.changelog.nodemap)
1295 1213
1296 1214 # prune everything remote has from the tree
1297 1215 del remain[nullid]
1298 1216 remove = base.keys()
1299 1217 while remove:
1300 1218 n = remove.pop(0)
1301 1219 if n in remain:
1302 1220 del remain[n]
1303 1221 for p in self.changelog.parents(n):
1304 1222 remove.append(p)
1305 1223
1306 1224 # find every node whose parents have been pruned
1307 1225 subset = []
1308 1226 # find every remote head that will get new children
1309 1227 updated_heads = {}
1310 1228 for n in remain:
1311 1229 p1, p2 = self.changelog.parents(n)
1312 1230 if p1 not in remain and p2 not in remain:
1313 1231 subset.append(n)
1314 1232 if heads:
1315 1233 if p1 in heads:
1316 1234 updated_heads[p1] = True
1317 1235 if p2 in heads:
1318 1236 updated_heads[p2] = True
1319 1237
1320 1238 # this is the set of all roots we have to push
1321 1239 if heads:
1322 1240 return subset, updated_heads.keys()
1323 1241 else:
1324 1242 return subset
1325 1243
1326 1244 def pull(self, remote, heads=None, force=False, lock=None):
1327 1245 mylock = False
1328 1246 if not lock:
1329 1247 lock = self.lock()
1330 1248 mylock = True
1331 1249
1332 1250 try:
1333 1251 fetch = self.findincoming(remote, force=force)
1334 1252 if fetch == [nullid]:
1335 1253 self.ui.status(_("requesting all changes\n"))
1336 1254
1337 1255 if not fetch:
1338 1256 self.ui.status(_("no changes found\n"))
1339 1257 return 0
1340 1258
1341 1259 if heads is None:
1342 1260 cg = remote.changegroup(fetch, 'pull')
1343 1261 else:
1344 1262 if 'changegroupsubset' not in remote.capabilities:
1345 1263 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1346 1264 cg = remote.changegroupsubset(fetch, heads, 'pull')
1347 1265 return self.addchangegroup(cg, 'pull', remote.url())
1348 1266 finally:
1349 1267 if mylock:
1350 1268 lock.release()
1351 1269
1352 1270 def push(self, remote, force=False, revs=None):
1353 1271 # there are two ways to push to remote repo:
1354 1272 #
1355 1273 # addchangegroup assumes local user can lock remote
1356 1274 # repo (local filesystem, old ssh servers).
1357 1275 #
1358 1276 # unbundle assumes local user cannot lock remote repo (new ssh
1359 1277 # servers, http servers).
1360 1278
1361 1279 if remote.capable('unbundle'):
1362 1280 return self.push_unbundle(remote, force, revs)
1363 1281 return self.push_addchangegroup(remote, force, revs)
1364 1282
1365 1283 def prepush(self, remote, force, revs):
1366 1284 base = {}
1367 1285 remote_heads = remote.heads()
1368 1286 inc = self.findincoming(remote, base, remote_heads, force=force)
1369 1287
1370 1288 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1371 1289 if revs is not None:
1372 1290 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1373 1291 else:
1374 1292 bases, heads = update, self.changelog.heads()
1375 1293
1376 1294 if not bases:
1377 1295 self.ui.status(_("no changes found\n"))
1378 1296 return None, 1
1379 1297 elif not force:
1380 1298 # check if we're creating new remote heads
1381 1299 # to be a remote head after push, node must be either
1382 1300 # - unknown locally
1383 1301 # - a local outgoing head descended from update
1384 1302 # - a remote head that's known locally and not
1385 1303 # ancestral to an outgoing head
1386 1304
1387 1305 warn = 0
1388 1306
1389 1307 if remote_heads == [nullid]:
1390 1308 warn = 0
1391 1309 elif not revs and len(heads) > len(remote_heads):
1392 1310 warn = 1
1393 1311 else:
1394 1312 newheads = list(heads)
1395 1313 for r in remote_heads:
1396 1314 if r in self.changelog.nodemap:
1397 1315 desc = self.changelog.heads(r, heads)
1398 1316 l = [h for h in heads if h in desc]
1399 1317 if not l:
1400 1318 newheads.append(r)
1401 1319 else:
1402 1320 newheads.append(r)
1403 1321 if len(newheads) > len(remote_heads):
1404 1322 warn = 1
1405 1323
1406 1324 if warn:
1407 1325 self.ui.warn(_("abort: push creates new remote branches!\n"))
1408 1326 self.ui.status(_("(did you forget to merge?"
1409 1327 " use push -f to force)\n"))
1410 1328 return None, 1
1411 1329 elif inc:
1412 1330 self.ui.warn(_("note: unsynced remote changes!\n"))
1413 1331
1414 1332
1415 1333 if revs is None:
1416 1334 cg = self.changegroup(update, 'push')
1417 1335 else:
1418 1336 cg = self.changegroupsubset(update, revs, 'push')
1419 1337 return cg, remote_heads
1420 1338
1421 1339 def push_addchangegroup(self, remote, force, revs):
1422 1340 lock = remote.lock()
1423 1341
1424 1342 ret = self.prepush(remote, force, revs)
1425 1343 if ret[0] is not None:
1426 1344 cg, remote_heads = ret
1427 1345 return remote.addchangegroup(cg, 'push', self.url())
1428 1346 return ret[1]
1429 1347
1430 1348 def push_unbundle(self, remote, force, revs):
1431 1349 # local repo finds heads on server, finds out what revs it
1432 1350 # must push. once revs transferred, if server finds it has
1433 1351 # different heads (someone else won commit/push race), server
1434 1352 # aborts.
1435 1353
1436 1354 ret = self.prepush(remote, force, revs)
1437 1355 if ret[0] is not None:
1438 1356 cg, remote_heads = ret
1439 1357 if force: remote_heads = ['force']
1440 1358 return remote.unbundle(cg, remote_heads, 'push')
1441 1359 return ret[1]
1442 1360
1443 1361 def changegroupinfo(self, nodes):
1444 1362 self.ui.note(_("%d changesets found\n") % len(nodes))
1445 1363 if self.ui.debugflag:
1446 1364 self.ui.debug(_("List of changesets:\n"))
1447 1365 for node in nodes:
1448 1366 self.ui.debug("%s\n" % hex(node))
1449 1367
1450 1368 def changegroupsubset(self, bases, heads, source):
1451 1369 """This function generates a changegroup consisting of all the nodes
1452 1370 that are descendants of any of the bases, and ancestors of any of
1453 1371 the heads.
1454 1372
1455 1373 It is fairly complex as determining which filenodes and which
1456 1374 manifest nodes need to be included for the changeset to be complete
1457 1375 is non-trivial.
1458 1376
1459 1377 Another wrinkle is doing the reverse, figuring out which changeset in
1460 1378 the changegroup a particular filenode or manifestnode belongs to."""
1461 1379
1462 1380 self.hook('preoutgoing', throw=True, source=source)
1463 1381
1464 1382 # Set up some initial variables
1465 1383 # Make it easy to refer to self.changelog
1466 1384 cl = self.changelog
1467 1385 # msng is short for missing - compute the list of changesets in this
1468 1386 # changegroup.
1469 1387 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1470 1388 self.changegroupinfo(msng_cl_lst)
1471 1389 # Some bases may turn out to be superfluous, and some heads may be
1472 1390 # too. nodesbetween will return the minimal set of bases and heads
1473 1391 # necessary to re-create the changegroup.
1474 1392
1475 1393 # Known heads are the list of heads that it is assumed the recipient
1476 1394 # of this changegroup will know about.
1477 1395 knownheads = {}
1478 1396 # We assume that all parents of bases are known heads.
1479 1397 for n in bases:
1480 1398 for p in cl.parents(n):
1481 1399 if p != nullid:
1482 1400 knownheads[p] = 1
1483 1401 knownheads = knownheads.keys()
1484 1402 if knownheads:
1485 1403 # Now that we know what heads are known, we can compute which
1486 1404 # changesets are known. The recipient must know about all
1487 1405 # changesets required to reach the known heads from the null
1488 1406 # changeset.
1489 1407 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1490 1408 junk = None
1491 1409 # Transform the list into an ersatz set.
1492 1410 has_cl_set = dict.fromkeys(has_cl_set)
1493 1411 else:
1494 1412 # If there were no known heads, the recipient cannot be assumed to
1495 1413 # know about any changesets.
1496 1414 has_cl_set = {}
1497 1415
1498 1416 # Make it easy to refer to self.manifest
1499 1417 mnfst = self.manifest
1500 1418 # We don't know which manifests are missing yet
1501 1419 msng_mnfst_set = {}
1502 1420 # Nor do we know which filenodes are missing.
1503 1421 msng_filenode_set = {}
1504 1422
1505 1423 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1506 1424 junk = None
1507 1425
1508 1426 # A changeset always belongs to itself, so the changenode lookup
1509 1427 # function for a changenode is identity.
1510 1428 def identity(x):
1511 1429 return x
1512 1430
1513 1431 # A function generating function. Sets up an environment for the
1514 1432 # inner function.
1515 1433 def cmp_by_rev_func(revlog):
1516 1434 # Compare two nodes by their revision number in the environment's
1517 1435 # revision history. Since the revision number both represents the
1518 1436 # most efficient order to read the nodes in, and represents a
1519 1437 # topological sorting of the nodes, this function is often useful.
1520 1438 def cmp_by_rev(a, b):
1521 1439 return cmp(revlog.rev(a), revlog.rev(b))
1522 1440 return cmp_by_rev
1523 1441
1524 1442 # If we determine that a particular file or manifest node must be a
1525 1443 # node that the recipient of the changegroup will already have, we can
1526 1444 # also assume the recipient will have all the parents. This function
1527 1445 # prunes them from the set of missing nodes.
1528 1446 def prune_parents(revlog, hasset, msngset):
1529 1447 haslst = hasset.keys()
1530 1448 haslst.sort(cmp_by_rev_func(revlog))
1531 1449 for node in haslst:
1532 1450 parentlst = [p for p in revlog.parents(node) if p != nullid]
1533 1451 while parentlst:
1534 1452 n = parentlst.pop()
1535 1453 if n not in hasset:
1536 1454 hasset[n] = 1
1537 1455 p = [p for p in revlog.parents(n) if p != nullid]
1538 1456 parentlst.extend(p)
1539 1457 for n in hasset:
1540 1458 msngset.pop(n, None)
1541 1459
1542 1460 # This is a function generating function used to set up an environment
1543 1461 # for the inner function to execute in.
1544 1462 def manifest_and_file_collector(changedfileset):
1545 1463 # This is an information gathering function that gathers
1546 1464 # information from each changeset node that goes out as part of
1547 1465 # the changegroup. The information gathered is a list of which
1548 1466 # manifest nodes are potentially required (the recipient may
1549 1467 # already have them) and total list of all files which were
1550 1468 # changed in any changeset in the changegroup.
1551 1469 #
1552 1470 # We also remember the first changenode each manifest was
1553 1471 # referenced by, so we can later determine which changenode 'owns'
1554 1472 # the manifest.
1555 1473 def collect_manifests_and_files(clnode):
1556 1474 c = cl.read(clnode)
1557 1475 for f in c[3]:
1558 1476 # This is to make sure we only have one instance of each
1559 1477 # filename string for each filename.
1560 1478 changedfileset.setdefault(f, f)
1561 1479 msng_mnfst_set.setdefault(c[0], clnode)
1562 1480 return collect_manifests_and_files
1563 1481
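# Editor's note: the collector above relies on the shape of the tuple
# returned by cl.read(): c[0] is the manifest node and c[3] the list of
# changed files. A toy stand-in showing the same bookkeeping (field layout
# as this code assumes it; values are made up):
changedfileset_demo = {}
msng_mnfst_demo = {}
entry = ('MANIFEST-NODE', 'user', (0, 0), ['a.txt', 'b.txt'], 'message')
for f in entry[3]:
    changedfileset_demo.setdefault(f, f)             # dedupe name strings
msng_mnfst_demo.setdefault(entry[0], 'FIRST-CLNODE') # first owner wins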
1564 1482 # Figure out which manifest nodes (of the ones we think might be part
1565 1483 # of the changegroup) the recipient must know about and remove them
1566 1484 # from the changegroup.
1567 1485 def prune_manifests():
1568 1486 has_mnfst_set = {}
1569 1487 for n in msng_mnfst_set:
1570 1488 # If a 'missing' manifest thinks it belongs to a changenode
1571 1489 # the recipient is assumed to have, obviously the recipient
1572 1490 # must have that manifest.
1573 1491 linknode = cl.node(mnfst.linkrev(n))
1574 1492 if linknode in has_cl_set:
1575 1493 has_mnfst_set[n] = 1
1576 1494 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1577 1495
1578 1496 # Use the information collected in collect_manifests_and_files to say
1579 1497 # which changenode any manifestnode belongs to.
1580 1498 def lookup_manifest_link(mnfstnode):
1581 1499 return msng_mnfst_set[mnfstnode]
1582 1500
1583 1501 # A function generating function that sets up the initial environment
1584 1502 # for the inner function.
1585 1503 def filenode_collector(changedfiles):
1586 1504 next_rev = [0]
1587 1505 # This gathers information from each manifestnode included in the
1588 1506 # changegroup about which filenodes the manifest node references
1589 1507 # so we can include those in the changegroup too.
1590 1508 #
1591 1509 # It also remembers which changenode each filenode belongs to. It
1592 1510 # does this by assuming a filenode belongs to the changenode that
1593 1511 # the first manifest referencing it belongs to.
1594 1512 def collect_msng_filenodes(mnfstnode):
1595 1513 r = mnfst.rev(mnfstnode)
1596 1514 if r == next_rev[0]:
1597 1515 # If the last rev we looked at was the one just previous,
1598 1516 # we only need to see a diff.
1599 1517 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1600 1518 # For each line in the delta
1601 1519 for dline in delta.splitlines():
1602 1520 # get the filename and filenode for that line
1603 1521 f, fnode = dline.split('\0')
1604 1522 fnode = bin(fnode[:40])
1605 1523 f = changedfiles.get(f, None)
1606 1524 # And if the file is in the list of files we care
1607 1525 # about.
1608 1526 if f is not None:
1609 1527 # Get the changenode this manifest belongs to
1610 1528 clnode = msng_mnfst_set[mnfstnode]
1611 1529 # Create the set of filenodes for the file if
1612 1530 # there isn't one already.
1613 1531 ndset = msng_filenode_set.setdefault(f, {})
1614 1532 # And set the filenode's changelog node to the
1615 1533 # manifest's if it hasn't been set already.
1616 1534 ndset.setdefault(fnode, clnode)
1617 1535 else:
1618 1536 # Otherwise we need a full manifest.
1619 1537 m = mnfst.read(mnfstnode)
1620 1538 # For every file we care about.
1621 1539 for f in changedfiles:
1622 1540 fnode = m.get(f, None)
1623 1541 # If it's in the manifest
1624 1542 if fnode is not None:
1625 1543 # See comments above.
1626 1544 clnode = msng_mnfst_set[mnfstnode]
1627 1545 ndset = msng_filenode_set.setdefault(f, {})
1628 1546 ndset.setdefault(fnode, clnode)
1629 1547 # Remember the revision we hope to see next.
1630 1548 next_rev[0] = r + 1
1631 1549 return collect_msng_filenodes
1632 1550
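# Editor's note: a manifest's text is one line per file, "name\0<40-hex-node>",
# with an optional flag character after the hash -- hence the fnode[:40]
# slice above. A standalone sketch of parsing one such delta line:
import binascii

def parse_manifest_line(dline):
    f, fnode = dline.split('\0')
    node = binascii.unhexlify(fnode[:40])    # what bin() does here
    return f, node

name, node = parse_manifest_line('src/a.c\0' + '1f' * 20 + 'x')
assert name == 'src/a.c' and len(node) == 20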
1633 1551 # We have a list of filenodes we think we need for a file; let's remove
1634 1552 # all those we know the recipient must have.
1635 1553 def prune_filenodes(f, filerevlog):
1636 1554 msngset = msng_filenode_set[f]
1637 1555 hasset = {}
1638 1556 # If a 'missing' filenode thinks it belongs to a changenode we
1639 1557 # assume the recipient must have, then the recipient must have
1640 1558 # that filenode.
1641 1559 for n in msngset:
1642 1560 clnode = cl.node(filerevlog.linkrev(n))
1643 1561 if clnode in has_cl_set:
1644 1562 hasset[n] = 1
1645 1563 prune_parents(filerevlog, hasset, msngset)
1646 1564
1647 1565 # A function generating function that sets up a context for the
1648 1566 # inner function.
1649 1567 def lookup_filenode_link_func(fname):
1650 1568 msngset = msng_filenode_set[fname]
1651 1569 # Lookup the changenode the filenode belongs to.
1652 1570 def lookup_filenode_link(fnode):
1653 1571 return msngset[fnode]
1654 1572 return lookup_filenode_link
1655 1573
1656 1574 # Now that we have all these utility functions to help out and
1657 1575 # logically divide up the task, generate the group.
1658 1576 def gengroup():
1659 1577 # The set of changed files starts empty.
1660 1578 changedfiles = {}
1661 1579 # Create a changenode group generator that will call our functions
1662 1580 # back to lookup the owning changenode and collect information.
1663 1581 group = cl.group(msng_cl_lst, identity,
1664 1582 manifest_and_file_collector(changedfiles))
1665 1583 for chnk in group:
1666 1584 yield chnk
1667 1585
1668 1586 # The list of manifests has been collected by the generator
1669 1587 # calling our functions back.
1670 1588 prune_manifests()
1671 1589 msng_mnfst_lst = msng_mnfst_set.keys()
1672 1590 # Sort the manifestnodes by revision number.
1673 1591 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1674 1592 # Create a generator for the manifestnodes that calls our lookup
1675 1593 # and data collection functions back.
1676 1594 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1677 1595 filenode_collector(changedfiles))
1678 1596 for chnk in group:
1679 1597 yield chnk
1680 1598
1681 1599 # These are no longer needed, dereference and toss the memory for
1682 1600 # them.
1683 1601 msng_mnfst_lst = None
1684 1602 msng_mnfst_set.clear()
1685 1603
1686 1604 changedfiles = changedfiles.keys()
1687 1605 changedfiles.sort()
1688 1606 # Go through all our files in order sorted by name.
1689 1607 for fname in changedfiles:
1690 1608 filerevlog = self.file(fname)
1691 1609 # Toss out the filenodes that the recipient isn't really
1692 1610 # missing.
1693 1611 if msng_filenode_set.has_key(fname):
1694 1612 prune_filenodes(fname, filerevlog)
1695 1613 msng_filenode_lst = msng_filenode_set[fname].keys()
1696 1614 else:
1697 1615 msng_filenode_lst = []
1698 1616 # If any filenodes are left, generate the group for them,
1699 1617 # otherwise don't bother.
1700 1618 if len(msng_filenode_lst) > 0:
1701 1619 yield changegroup.genchunk(fname)
1702 1620 # Sort the filenodes by their revision #
1703 1621 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1704 1622 # Create a group generator and only pass in a changenode
1705 1623 # lookup function as we need to collect no information
1706 1624 # from filenodes.
1707 1625 group = filerevlog.group(msng_filenode_lst,
1708 1626 lookup_filenode_link_func(fname))
1709 1627 for chnk in group:
1710 1628 yield chnk
1711 1629 if msng_filenode_set.has_key(fname):
1712 1630 # Don't need this anymore, toss it to free memory.
1713 1631 del msng_filenode_set[fname]
1714 1632 # Signal that no more groups are left.
1715 1633 yield changegroup.closechunk()
1716 1634
1717 1635 if msng_cl_lst:
1718 1636 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1719 1637
1720 1638 return util.chunkbuffer(gengroup())
1721 1639
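# Editor's note: util.chunkbuffer turns a generator of string chunks into a
# file-like object, so the whole group is streamed lazily rather than built
# in memory. A minimal sketch of the idea (not the real implementation):
class ChunkBuffer(object):
    def __init__(self, gen):
        self.iter = iter(gen)
        self.buf = ''
    def read(self, n):
        while len(self.buf) < n:
            try:
                self.buf += next(self.iter)
            except StopIteration:
                break
        data, self.buf = self.buf[:n], self.buf[n:]
        return data

cb = ChunkBuffer(['abc', 'defg'])
assert cb.read(4) == 'abcd' and cb.read(10) == 'efg'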
1722 1640 def changegroup(self, basenodes, source):
1723 1641 """Generate a changegroup of all nodes that we have that a recipient
1724 1642 doesn't.
1725 1643
1726 1644 This is much easier than the previous function as we can assume that
1727 1645 the recipient has any changenode we aren't sending them."""
1728 1646
1729 1647 self.hook('preoutgoing', throw=True, source=source)
1730 1648
1731 1649 cl = self.changelog
1732 1650 nodes = cl.nodesbetween(basenodes, None)[0]
1733 1651 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1734 1652 self.changegroupinfo(nodes)
1735 1653
1736 1654 def identity(x):
1737 1655 return x
1738 1656
1739 1657 def gennodelst(revlog):
1740 1658 for r in xrange(0, revlog.count()):
1741 1659 n = revlog.node(r)
1742 1660 if revlog.linkrev(n) in revset:
1743 1661 yield n
1744 1662
1745 1663 def changed_file_collector(changedfileset):
1746 1664 def collect_changed_files(clnode):
1747 1665 c = cl.read(clnode)
1748 1666 for fname in c[3]:
1749 1667 changedfileset[fname] = 1
1750 1668 return collect_changed_files
1751 1669
1752 1670 def lookuprevlink_func(revlog):
1753 1671 def lookuprevlink(n):
1754 1672 return cl.node(revlog.linkrev(n))
1755 1673 return lookuprevlink
1756 1674
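# Editor's note: linkrev maps a filelog/manifest revision back to the
# changelog revision that introduced it; lookuprevlink composes that with
# cl.node() to recover the owning changenode. A toy illustration with
# plain dicts standing in for the two revlogs:
linkrevs_demo = {'fnode1': 0, 'fnode2': 2}   # filelog node -> changelog rev
clnodes_demo = {0: 'clnodeA', 2: 'clnodeC'}  # changelog rev -> changelog node

def lookuprevlink_demo(n):
    return clnodes_demo[linkrevs_demo[n]]

assert lookuprevlink_demo('fnode2') == 'clnodeC'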
1757 1675 def gengroup():
1758 1676 # construct a list of all changed files
1759 1677 changedfiles = {}
1760 1678
1761 1679 for chnk in cl.group(nodes, identity,
1762 1680 changed_file_collector(changedfiles)):
1763 1681 yield chnk
1764 1682 changedfiles = changedfiles.keys()
1765 1683 changedfiles.sort()
1766 1684
1767 1685 mnfst = self.manifest
1768 1686 nodeiter = gennodelst(mnfst)
1769 1687 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1770 1688 yield chnk
1771 1689
1772 1690 for fname in changedfiles:
1773 1691 filerevlog = self.file(fname)
1774 1692 nodeiter = gennodelst(filerevlog)
1775 1693 nodeiter = list(nodeiter)
1776 1694 if nodeiter:
1777 1695 yield changegroup.genchunk(fname)
1778 1696 lookup = lookuprevlink_func(filerevlog)
1779 1697 for chnk in filerevlog.group(nodeiter, lookup):
1780 1698 yield chnk
1781 1699
1782 1700 yield changegroup.closechunk()
1783 1701
1784 1702 if nodes:
1785 1703 self.hook('outgoing', node=hex(nodes[0]), source=source)
1786 1704
1787 1705 return util.chunkbuffer(gengroup())
1788 1706
1789 1707 def addchangegroup(self, source, srctype, url):
1790 1708 """add changegroup to repo.
1791 1709
1792 1710 return values:
1793 1711 - nothing changed or no source: 0
1794 1712 - more heads than before: 1+added heads (2..n)
1795 1713 - fewer heads than before: -1-removed heads (-2..-n)
1796 1714 - number of heads stays the same: 1
1797 1715 """
1798 1716 def csmap(x):
1799 1717 self.ui.debug(_("add changeset %s\n") % short(x))
1800 1718 return cl.count()
1801 1719
1802 1720 def revmap(x):
1803 1721 return cl.rev(x)
1804 1722
1805 1723 if not source:
1806 1724 return 0
1807 1725
1808 1726 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1809 1727
1810 1728 changesets = files = revisions = 0
1811 1729
1812 1730 tr = self.transaction()
1813 1731
1814 1732 # write changelog data to temp files so concurrent readers will not see
1815 1733 # an inconsistent view
1816 1734 cl = self.changelog
1817 1735 cl.delayupdate()
1818 1736 oldheads = len(cl.heads())
1819 1737
1820 1738 # pull off the changeset group
1821 1739 self.ui.status(_("adding changesets\n"))
1822 1740 cor = cl.count() - 1
1823 1741 chunkiter = changegroup.chunkiter(source)
1824 1742 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1825 1743 raise util.Abort(_("received changelog group is empty"))
1826 1744 cnr = cl.count() - 1
1827 1745 changesets = cnr - cor
1828 1746
1829 1747 # pull off the manifest group
1830 1748 self.ui.status(_("adding manifests\n"))
1831 1749 chunkiter = changegroup.chunkiter(source)
1832 1750 # no need to check for empty manifest group here:
1833 1751 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1834 1752 # no new manifest will be created and the manifest group will
1835 1753 # be empty during the pull
1836 1754 self.manifest.addgroup(chunkiter, revmap, tr)
1837 1755
1838 1756 # process the files
1839 1757 self.ui.status(_("adding file changes\n"))
1840 1758 while 1:
1841 1759 f = changegroup.getchunk(source)
1842 1760 if not f:
1843 1761 break
1844 1762 self.ui.debug(_("adding %s revisions\n") % f)
1845 1763 fl = self.file(f)
1846 1764 o = fl.count()
1847 1765 chunkiter = changegroup.chunkiter(source)
1848 1766 if fl.addgroup(chunkiter, revmap, tr) is None:
1849 1767 raise util.Abort(_("received file revlog group is empty"))
1850 1768 revisions += fl.count() - o
1851 1769 files += 1
1852 1770
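# Editor's note: changegroup chunks on the wire are length-prefixed: a
# 4-byte big-endian integer that counts itself plus the payload, with a
# bare zero length terminating each group (which is why the loop above
# stops on an empty chunk). A hedged sketch of the framing used by
# genchunk()/closechunk()/getchunk():
import struct

def genchunk_demo(data):
    return struct.pack(">l", len(data) + 4) + data

def closechunk_demo():
    return struct.pack(">l", 0)

def getchunk_demo(read):
    l = struct.unpack(">l", read(4))[0]
    if l <= 4:
        return ''            # terminator: end of this group
    return read(l - 4)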
1853 1771 # make changelog see real files again
1854 1772 cl.finalize(tr)
1855 1773
1856 1774 newheads = len(self.changelog.heads())
1857 1775 heads = ""
1858 1776 if oldheads and newheads != oldheads:
1859 1777 heads = _(" (%+d heads)") % (newheads - oldheads)
1860 1778
1861 1779 self.ui.status(_("added %d changesets"
1862 1780 " with %d changes to %d files%s\n")
1863 1781 % (changesets, revisions, files, heads))
1864 1782
1865 1783 if changesets > 0:
1866 1784 self.hook('pretxnchangegroup', throw=True,
1867 1785 node=hex(self.changelog.node(cor+1)), source=srctype,
1868 1786 url=url)
1869 1787
1870 1788 tr.close()
1871 1789
1872 1790 if changesets > 0:
1873 1791 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1874 1792 source=srctype, url=url)
1875 1793
1876 1794 for i in xrange(cor + 1, cnr + 1):
1877 1795 self.hook("incoming", node=hex(self.changelog.node(i)),
1878 1796 source=srctype, url=url)
1879 1797
1880 1798 # never return 0 here:
1881 1799 if newheads < oldheads:
1882 1800 return newheads - oldheads - 1
1883 1801 else:
1884 1802 return newheads - oldheads + 1
1885 1803
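# Editor's note: the return contract documented above packs "did anything
# change" and "how did the head count move" into one integer. A small
# decoder sketch for callers:
def decode_addchangegroup(ret):
    if ret == 0:
        return 'no changes'
    if ret > 0:
        return '%d head(s) added' % (ret - 1)    # 1 means count unchanged
    return '%d head(s) removed' % (-ret - 1)

assert decode_addchangegroup(1) == '0 head(s) added'
assert decode_addchangegroup(3) == '2 head(s) added'
assert decode_addchangegroup(-2) == '1 head(s) removed'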
1886 1804
1887 1805 def stream_in(self, remote):
1888 1806 fp = remote.stream_out()
1889 1807 l = fp.readline()
1890 1808 try:
1891 1809 resp = int(l)
1892 1810 except ValueError:
1893 1811 raise util.UnexpectedOutput(
1894 1812 _('Unexpected response from remote server:'), l)
1895 1813 if resp == 1:
1896 1814 raise util.Abort(_('operation forbidden by server'))
1897 1815 elif resp == 2:
1898 1816 raise util.Abort(_('locking the remote repository failed'))
1899 1817 elif resp != 0:
1900 1818 raise util.Abort(_('the server sent an unknown error code'))
1901 1819 self.ui.status(_('streaming all changes\n'))
1902 1820 l = fp.readline()
1903 1821 try:
1904 1822 total_files, total_bytes = map(int, l.split(' ', 1))
1905 1823 except (ValueError, TypeError):
1906 1824 raise util.UnexpectedOutput(
1907 1825 _('Unexpected response from remote server:'), l)
1908 1826 self.ui.status(_('%d files to transfer, %s of data\n') %
1909 1827 (total_files, util.bytecount(total_bytes)))
1910 1828 start = time.time()
1911 1829 for i in xrange(total_files):
1912 1830 # XXX doesn't support '\n' or '\r' in filenames
1913 1831 l = fp.readline()
1914 1832 try:
1915 1833 name, size = l.split('\0', 1)
1916 1834 size = int(size)
1917 1835 except (ValueError, TypeError):
1918 1836 raise util.UnexpectedOutput(
1919 1837 _('Unexpected response from remote server:'), l)
1920 1838 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1921 1839 ofp = self.sopener(name, 'w')
1922 1840 for chunk in util.filechunkiter(fp, limit=size):
1923 1841 ofp.write(chunk)
1924 1842 ofp.close()
1925 1843 elapsed = time.time() - start
1926 1844 if elapsed <= 0:
1927 1845 elapsed = 0.001
1928 1846 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1929 1847 (util.bytecount(total_bytes), elapsed,
1930 1848 util.bytecount(total_bytes / elapsed)))
1931 1849 self.invalidate()
1932 1850 return len(self.heads()) + 1
1933 1851
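# Editor's note: the stream_out wire format, as consumed above: one status
# line ('0' = OK), one "<total_files> <total_bytes>" line, then for each
# file a "<name>\0<size>" line followed by exactly <size> raw bytes. A
# hedged parser sketch against an in-memory stream (Python 2 StringIO):
from StringIO import StringIO

def parse_stream(fp):
    assert int(fp.readline()) == 0
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    out = {}
    for _i in xrange(total_files):
        name, size = fp.readline().split('\0', 1)
        out[name] = fp.read(int(size))
    return out

fp_demo = StringIO('0\n1 5\ndata/x\x005\nhello')
assert parse_stream(fp_demo) == {'data/x': 'hello'}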
1934 1852 def clone(self, remote, heads=[], stream=False):
1935 1853 '''clone remote repository.
1936 1854
1937 1855 keyword arguments:
1938 1856 heads: list of revs to clone (forces use of pull)
1939 1857 stream: use streaming clone if possible'''
1940 1858
1941 1859 # now, all clients that can request uncompressed clones can
1942 1860 # read repo formats supported by all servers that can serve
1943 1861 # them.
1944 1862
1945 1863 # if revlog format changes, client will have to check version
1946 1864 # and format flags on "stream" capability, and use
1947 1865 # uncompressed only if compatible.
1948 1866
1949 1867 if stream and not heads and remote.capable('stream'):
1950 1868 return self.stream_in(remote)
1951 1869 return self.pull(remote, heads)
1952 1870
1953 1871 # used to avoid circular references so destructors work
1954 1872 def aftertrans(files):
1955 1873 renamefiles = [tuple(t) for t in files]
1956 1874 def a():
1957 1875 for src, dest in renamefiles:
1958 1876 util.rename(src, dest)
1959 1877 return a
1960 1878
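# Editor's note: aftertrans returns a plain closure instead of a bound
# method so the transaction never holds a reference back into the
# repository object (a cycle would defeat __del__-based cleanup, per the
# comment above). A toy illustration of the rename-on-close pattern, using
# os.rename in place of util.rename:
import os, tempfile

d = tempfile.mkdtemp()
src = os.path.join(d, 'journal')
dst = os.path.join(d, 'undo')
open(src, 'w').close()

cb = aftertrans([(src, dst)])
cb()                           # e.g. invoked when the transaction closes
assert os.path.exists(dst) and not os.path.exists(src)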
1961 1879 def instance(ui, path, create):
1962 1880 return localrepository(ui, util.drop_scheme('file', path), create)
1963 1881
1964 1882 def islocal(path):
1965 1883 return True