filecommit: don't forget the local parent on a merge with a local rename
Alexis S. L. Carvalho
r4058:e7282ded default
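The functional change is a single line in localrepo.filecommit(). When a merge commit records a rename that was made on the local parent, the old code set the second filelog parent to nullid, losing the local parent entirely; later merges then computed the wrong ancestor for the renamed file. A condensed sketch of the copy-handling branch as it reads after this commit (names taken from the diff below; surrounding code omitted):

    # fp1/fp2: the file's nodes in the two parent manifests of the merge
    cp = self.dirstate.copied(fn)
    if cp:
        meta["copy"] = cp
        if not manifest2:               # not a branch merge
            meta["copyrev"] = hex(manifest1.get(cp, nullid))
            fp2 = nullid
        elif fp2 != nullid:             # copied on remote side
            meta["copyrev"] = hex(manifest1.get(cp, nullid))
        elif fp1 != nullid:             # copied on local side, reversed
            meta["copyrev"] = hex(manifest2.get(cp))
            fp2 = fp1                   # the fix: was "fp2 = nullid"
        else:                           # directory rename
            meta["copyrev"] = hex(manifest1.get(cp, nullid))
        fp1 = nullid                    # nullid p1 means "look up the copy data"

With fp2 = fp1, the filelog entry for the merge keeps the local parent as p2, so a later merge can still find the pre-merge revision of the renamed file as its ancestor. The new test below exercises both orderings of the merge.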
@@ -0,0 +1,77 b''
1 #!/bin/sh
2 # check that renames are correctly saved by a commit after a merge
3
4 HGMERGE=merge
5 export HGMERGE
6
7 # test with the merge on 3 having the rename on the local parent
8 hg init a
9 cd a
10
11 echo line1 > foo
12 hg add foo
13 hg ci -m '0: add foo' -d '0 0'
14
15 echo line2 >> foo
16 hg ci -m '1: change foo' -d '0 0'
17
18 hg up -C 0
19 hg mv foo bar
20 rm bar
21 echo line0 > bar
22 echo line1 >> bar
23 hg ci -m '2: mv foo bar; change bar' -d '0 0'
24
25 hg merge 1
26 echo '% contents of bar should be line0 line1 line2'
27 cat bar
28 hg ci -m '3: merge with local rename' -d '0 0'
29 hg debugindex .hg/store/data/bar.i
30 hg debugrename bar
31 hg debugindex .hg/store/data/foo.i
32
33 # revert the content change from rev 2
34 hg up -C 2
35 rm bar
36 echo line1 > bar
37 hg ci -m '4: revert content change from rev 2' -d '0 0'
38
39 hg log --template '#rev#:#node|short# #parents#\n'
40 echo '% this should use bar@rev2 as the ancestor'
41 hg --debug merge 3
42 echo '% contents of bar should be line1 line2'
43 cat bar
44 hg ci -m '5: merge' -d '0 0'
45 hg debugindex .hg/store/data/bar.i
46
47
48 # same thing, but with the merge on 3 having the rename on the remote parent
49 echo
50 echo
51 cd ..
52 hg clone -U -r 1 -r 2 a b
53 cd b
54
55 hg up -C 1
56 hg merge 2
57 echo '% contents of bar should be line0 line1 line2'
58 cat bar
59 hg ci -m '3: merge with remote rename' -d '0 0'
60 hg debugindex .hg/store/data/bar.i
61 hg debugrename bar
62 hg debugindex .hg/store/data/foo.i
63
64 # revert the content change from rev 2
65 hg up -C 2
66 rm bar
67 echo line1 > bar
68 hg ci -m '4: revert content change from rev 2' -d '0 0'
69
70 hg log --template '#rev#:#node|short# #parents#\n'
71 echo '% this should use bar@rev2 as the ancestor'
72 hg --debug merge 3
73 echo '% contents of bar should be line1 line2'
74 cat bar
75 hg ci -m '5: merge' -d '0 0'
76 hg debugindex .hg/store/data/bar.i
77
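The expected output encodes the fix: in the debugindex listing for bar.i, revision 1 (the merge, linkrev 3) has p1 = 000000000000, the marker that says "look up the copy data", and p2 = da78c0659611, which is bar as committed in rev 2. With the old code that p2 would also have been 000000000000, and the final merge would not have found bar@rev2 as the ancestor.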
@@ -0,0 +1,83 b''
1 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
2 merging bar and foo
3 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
4 (branch merge, don't forget to commit)
5 % contents of bar should be line0 line1 line2
6 line0
7 line1
8 line2
9 rev offset length base linkrev nodeid p1 p2
10 0 0 77 0 2 da78c0659611 000000000000 000000000000
11 1 77 76 0 3 4b358025380b 000000000000 da78c0659611
12 bar renamed from foo:9e25c27b87571a1edee5ae4dddee5687746cc8e2
13 rev offset length base linkrev nodeid p1 p2
14 0 0 7 0 0 690b295714ae 000000000000 000000000000
15 1 7 13 1 1 9e25c27b8757 690b295714ae 000000000000
16 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
17 4:2d2f9a22c82b 2:0a3ab4856510
18 3:7d3b554bfdf1 2:0a3ab4856510 1:5cd961e4045d
19 2:0a3ab4856510 0:2665aaee66e9
20 1:5cd961e4045d
21 0:2665aaee66e9
22 % this should use bar@rev2 as the ancestor
23 resolving manifests
24 overwrite None partial False
25 ancestor 0a3ab4856510 local 2d2f9a22c82b+ remote 7d3b554bfdf1
26 bar: versions differ -> m
27 merging bar
28 my bar@2d2f9a22c82b+ other bar@7d3b554bfdf1 ancestor bar@0a3ab4856510
29 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
30 (branch merge, don't forget to commit)
31 % contents of bar should be line1 line2
32 line1
33 line2
34 rev offset length base linkrev nodeid p1 p2
35 0 0 77 0 2 da78c0659611 000000000000 000000000000
36 1 77 76 0 3 4b358025380b 000000000000 da78c0659611
37 2 153 7 2 4 4defe5eec418 da78c0659611 000000000000
38 3 160 13 3 5 4663501da27b 4defe5eec418 4b358025380b
39
40
41 requesting all changes
42 adding changesets
43 adding manifests
44 adding file changes
45 added 3 changesets with 3 changes to 2 files (+1 heads)
46 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
47 merging foo and bar
48 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
49 (branch merge, don't forget to commit)
50 % contents of bar should be line0 line1 line2
51 line0
52 line1
53 line2
54 rev offset length base linkrev nodeid p1 p2
55 0 0 77 0 2 da78c0659611 000000000000 000000000000
56 1 77 76 0 3 4b358025380b 000000000000 da78c0659611
57 bar renamed from foo:9e25c27b87571a1edee5ae4dddee5687746cc8e2
58 rev offset length base linkrev nodeid p1 p2
59 0 0 7 0 0 690b295714ae 000000000000 000000000000
60 1 7 13 1 1 9e25c27b8757 690b295714ae 000000000000
61 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
62 4:2d2f9a22c82b 2:0a3ab4856510
63 3:96ab80c60897 1:5cd961e4045d 2:0a3ab4856510
64 2:0a3ab4856510 0:2665aaee66e9
65 1:5cd961e4045d
66 0:2665aaee66e9
67 % this should use bar@rev2 as the ancestor
68 resolving manifests
69 overwrite None partial False
70 ancestor 0a3ab4856510 local 2d2f9a22c82b+ remote 96ab80c60897
71 bar: versions differ -> m
72 merging bar
73 my bar@2d2f9a22c82b+ other bar@96ab80c60897 ancestor bar@0a3ab4856510
74 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
75 (branch merge, don't forget to commit)
76 % contents of bar should be line1 line2
77 line1
78 line2
79 rev offset length base linkrev nodeid p1 p2
80 0 0 77 0 2 da78c0659611 000000000000 000000000000
81 1 77 76 0 3 4b358025380b 000000000000 da78c0659611
82 2 153 7 2 4 4defe5eec418 da78c0659611 000000000000
83 3 160 13 3 5 4663501da27b 4defe5eec418 4b358025380b
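Note that after the fix both halves of the test produce an identical filelog for bar, regardless of which side of the merge carried the rename: the merge revision always gets p1 = 000000000000 with the copy metadata and p2 = da78c0659611. The localrepo.py diff follows; apart from the new explanatory comment block, the change is confined to the single fp2 assignment in filecommit().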
@@ -1,1971 +1,1989 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ('lookup', 'changegroupsubset')
19 19 supported = ('revlogv1', 'store')
20 20
21 21 def __del__(self):
22 22 self.transhandle = None
23 23 def __init__(self, parentui, path=None, create=0):
24 24 repo.repository.__init__(self)
25 25 if not path:
26 26 p = os.getcwd()
27 27 while not os.path.isdir(os.path.join(p, ".hg")):
28 28 oldp = p
29 29 p = os.path.dirname(p)
30 30 if p == oldp:
31 31 raise repo.RepoError(_("There is no Mercurial repository"
32 32 " here (.hg not found)"))
33 33 path = p
34 34
35 35 self.path = os.path.join(path, ".hg")
36 36 self.root = os.path.realpath(path)
37 37 self.origroot = path
38 38 self.opener = util.opener(self.path)
39 39 self.wopener = util.opener(self.root)
40 40
41 41 if not os.path.isdir(self.path):
42 42 if create:
43 43 if not os.path.exists(path):
44 44 os.mkdir(path)
45 45 os.mkdir(self.path)
46 46 os.mkdir(os.path.join(self.path, "store"))
47 47 requirements = ("revlogv1", "store")
48 48 reqfile = self.opener("requires", "w")
49 49 for r in requirements:
50 50 reqfile.write("%s\n" % r)
51 51 reqfile.close()
52 52 # create an invalid changelog
53 53 self.opener("00changelog.i", "a").write(
54 54 '\0\0\0\2' # represents revlogv2
55 55 ' dummy changelog to prevent using the old repo layout'
56 56 )
57 57 else:
58 58 raise repo.RepoError(_("repository %s not found") % path)
59 59 elif create:
60 60 raise repo.RepoError(_("repository %s already exists") % path)
61 61 else:
62 62 # find requirements
63 63 try:
64 64 requirements = self.opener("requires").read().splitlines()
65 65 except IOError, inst:
66 66 if inst.errno != errno.ENOENT:
67 67 raise
68 68 requirements = []
69 69 # check them
70 70 for r in requirements:
71 71 if r not in self.supported:
72 72 raise repo.RepoError(_("requirement '%s' not supported") % r)
73 73
74 74 # setup store
75 75 if "store" in requirements:
76 76 self.encodefn = util.encodefilename
77 77 self.decodefn = util.decodefilename
78 78 self.spath = os.path.join(self.path, "store")
79 79 else:
80 80 self.encodefn = lambda x: x
81 81 self.decodefn = lambda x: x
82 82 self.spath = self.path
83 83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
84 84
85 85 self.ui = ui.ui(parentui=parentui)
86 86 try:
87 87 self.ui.readconfig(self.join("hgrc"), self.root)
88 88 except IOError:
89 89 pass
90 90
91 91 v = self.ui.configrevlog()
92 92 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
93 93 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
94 94 fl = v.get('flags', None)
95 95 flags = 0
96 96 if fl != None:
97 97 for x in fl.split():
98 98 flags |= revlog.flagstr(x)
99 99 elif self.revlogv1:
100 100 flags = revlog.REVLOG_DEFAULT_FLAGS
101 101
102 102 v = self.revlogversion | flags
103 103 self.manifest = manifest.manifest(self.sopener, v)
104 104 self.changelog = changelog.changelog(self.sopener, v)
105 105
106 106 fallback = self.ui.config('ui', 'fallbackencoding')
107 107 if fallback:
108 108 util._fallbackencoding = fallback
109 109
110 110 # the changelog might not have the inline index flag
111 111 # on. If the format of the changelog is the same as found in
112 112 # .hgrc, apply any flags found in the .hgrc as well.
113 113 # Otherwise, just version from the changelog
114 114 v = self.changelog.version
115 115 if v == self.revlogversion:
116 116 v |= flags
117 117 self.revlogversion = v
118 118
119 119 self.tagscache = None
120 120 self.branchcache = None
121 121 self.nodetagscache = None
122 122 self.encodepats = None
123 123 self.decodepats = None
124 124 self.transhandle = None
125 125
126 126 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
127 127
128 128 def url(self):
129 129 return 'file:' + self.root
130 130
131 131 def hook(self, name, throw=False, **args):
132 132 def callhook(hname, funcname):
133 133 '''call python hook. hook is callable object, looked up as
134 134 name in python module. if callable returns "true", hook
135 135 fails, else passes. if hook raises exception, treated as
136 136 hook failure. exception propagates if throw is "true".
137 137
138 138 reason for "true" meaning "hook failed" is so that
139 139 unmodified commands (e.g. mercurial.commands.update) can
140 140 be run as hooks without wrappers to convert return values.'''
141 141
142 142 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
143 143 d = funcname.rfind('.')
144 144 if d == -1:
145 145 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
146 146 % (hname, funcname))
147 147 modname = funcname[:d]
148 148 try:
149 149 obj = __import__(modname)
150 150 except ImportError:
151 151 try:
152 152 # extensions are loaded with hgext_ prefix
153 153 obj = __import__("hgext_%s" % modname)
154 154 except ImportError:
155 155 raise util.Abort(_('%s hook is invalid '
156 156 '(import of "%s" failed)') %
157 157 (hname, modname))
158 158 try:
159 159 for p in funcname.split('.')[1:]:
160 160 obj = getattr(obj, p)
161 161 except AttributeError, err:
162 162 raise util.Abort(_('%s hook is invalid '
163 163 '("%s" is not defined)') %
164 164 (hname, funcname))
165 165 if not callable(obj):
166 166 raise util.Abort(_('%s hook is invalid '
167 167 '("%s" is not callable)') %
168 168 (hname, funcname))
169 169 try:
170 170 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
171 171 except (KeyboardInterrupt, util.SignalInterrupt):
172 172 raise
173 173 except Exception, exc:
174 174 if isinstance(exc, util.Abort):
175 175 self.ui.warn(_('error: %s hook failed: %s\n') %
176 176 (hname, exc.args[0]))
177 177 else:
178 178 self.ui.warn(_('error: %s hook raised an exception: '
179 179 '%s\n') % (hname, exc))
180 180 if throw:
181 181 raise
182 182 self.ui.print_exc()
183 183 return True
184 184 if r:
185 185 if throw:
186 186 raise util.Abort(_('%s hook failed') % hname)
187 187 self.ui.warn(_('warning: %s hook failed\n') % hname)
188 188 return r
189 189
190 190 def runhook(name, cmd):
191 191 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
192 192 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
193 193 r = util.system(cmd, environ=env, cwd=self.root)
194 194 if r:
195 195 desc, r = util.explain_exit(r)
196 196 if throw:
197 197 raise util.Abort(_('%s hook %s') % (name, desc))
198 198 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
199 199 return r
200 200
201 201 r = False
202 202 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
203 203 if hname.split(".", 1)[0] == name and cmd]
204 204 hooks.sort()
205 205 for hname, cmd in hooks:
206 206 if cmd.startswith('python:'):
207 207 r = callhook(hname, cmd[7:].strip()) or r
208 208 else:
209 209 r = runhook(hname, cmd) or r
210 210 return r
211 211
212 212 tag_disallowed = ':\r\n'
213 213
214 214 def tag(self, name, node, message, local, user, date):
215 215 '''tag a revision with a symbolic name.
216 216
217 217 if local is True, the tag is stored in a per-repository file.
218 218 otherwise, it is stored in the .hgtags file, and a new
219 219 changeset is committed with the change.
220 220
221 221 keyword arguments:
222 222
223 223 local: whether to store tag in non-version-controlled file
224 224 (default False)
225 225
226 226 message: commit message to use if committing
227 227
228 228 user: name of user to use if committing
229 229
230 230 date: date tuple to use if committing'''
231 231
232 232 for c in self.tag_disallowed:
233 233 if c in name:
234 234 raise util.Abort(_('%r cannot be used in a tag name') % c)
235 235
236 236 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
237 237
238 238 if local:
239 239 # local tags are stored in the current charset
240 240 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
241 241 self.hook('tag', node=hex(node), tag=name, local=local)
242 242 return
243 243
244 244 for x in self.status()[:5]:
245 245 if '.hgtags' in x:
246 246 raise util.Abort(_('working copy of .hgtags is changed '
247 247 '(please commit .hgtags manually)'))
248 248
249 249 # committed tags are stored in UTF-8
250 250 line = '%s %s\n' % (hex(node), util.fromlocal(name))
251 251 self.wfile('.hgtags', 'ab').write(line)
252 252 if self.dirstate.state('.hgtags') == '?':
253 253 self.add(['.hgtags'])
254 254
255 255 self.commit(['.hgtags'], message, user, date)
256 256 self.hook('tag', node=hex(node), tag=name, local=local)
257 257
258 258 def tags(self):
259 259 '''return a mapping of tag to node'''
260 260 if not self.tagscache:
261 261 self.tagscache = {}
262 262
263 263 def parsetag(line, context):
264 264 if not line:
265 265 return
266 266 s = line.split(" ", 1)
267 267 if len(s) != 2:
268 268 self.ui.warn(_("%s: cannot parse entry\n") % context)
269 269 return
270 270 node, key = s
271 271 key = util.tolocal(key.strip()) # stored in UTF-8
272 272 try:
273 273 bin_n = bin(node)
274 274 except TypeError:
275 275 self.ui.warn(_("%s: node '%s' is not well formed\n") %
276 276 (context, node))
277 277 return
278 278 if bin_n not in self.changelog.nodemap:
279 279 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
280 280 (context, key))
281 281 return
282 282 self.tagscache[key] = bin_n
283 283
284 284 # read the tags file from each head, ending with the tip,
285 285 # and add each tag found to the map, with "newer" ones
286 286 # taking precedence
287 287 f = None
288 288 for rev, node, fnode in self._hgtagsnodes():
289 289 f = (f and f.filectx(fnode) or
290 290 self.filectx('.hgtags', fileid=fnode))
291 291 count = 0
292 292 for l in f.data().splitlines():
293 293 count += 1
294 294 parsetag(l, _("%s, line %d") % (str(f), count))
295 295
296 296 try:
297 297 f = self.opener("localtags")
298 298 count = 0
299 299 for l in f:
300 300 # localtags are stored in the local character set
301 301 # while the internal tag table is stored in UTF-8
302 302 l = util.fromlocal(l)
303 303 count += 1
304 304 parsetag(l, _("localtags, line %d") % count)
305 305 except IOError:
306 306 pass
307 307
308 308 self.tagscache['tip'] = self.changelog.tip()
309 309
310 310 return self.tagscache
311 311
312 312 def _hgtagsnodes(self):
313 313 heads = self.heads()
314 314 heads.reverse()
315 315 last = {}
316 316 ret = []
317 317 for node in heads:
318 318 c = self.changectx(node)
319 319 rev = c.rev()
320 320 try:
321 321 fnode = c.filenode('.hgtags')
322 322 except repo.LookupError:
323 323 continue
324 324 ret.append((rev, node, fnode))
325 325 if fnode in last:
326 326 ret[last[fnode]] = None
327 327 last[fnode] = len(ret) - 1
328 328 return [item for item in ret if item]
329 329
330 330 def tagslist(self):
331 331 '''return a list of tags ordered by revision'''
332 332 l = []
333 333 for t, n in self.tags().items():
334 334 try:
335 335 r = self.changelog.rev(n)
336 336 except:
337 337 r = -2 # sort to the beginning of the list if unknown
338 338 l.append((r, t, n))
339 339 l.sort()
340 340 return [(t, n) for r, t, n in l]
341 341
342 342 def nodetags(self, node):
343 343 '''return the tags associated with a node'''
344 344 if not self.nodetagscache:
345 345 self.nodetagscache = {}
346 346 for t, n in self.tags().items():
347 347 self.nodetagscache.setdefault(n, []).append(t)
348 348 return self.nodetagscache.get(node, [])
349 349
350 350 def _branchtags(self):
351 351 partial, last, lrev = self._readbranchcache()
352 352
353 353 tiprev = self.changelog.count() - 1
354 354 if lrev != tiprev:
355 355 self._updatebranchcache(partial, lrev+1, tiprev+1)
356 356 self._writebranchcache(partial, self.changelog.tip(), tiprev)
357 357
358 358 return partial
359 359
360 360 def branchtags(self):
361 361 if self.branchcache is not None:
362 362 return self.branchcache
363 363
364 364 self.branchcache = {} # avoid recursion in changectx
365 365 partial = self._branchtags()
366 366
367 367 # the branch cache is stored on disk as UTF-8, but in the local
368 368 # charset internally
369 369 for k, v in partial.items():
370 370 self.branchcache[util.tolocal(k)] = v
371 371 return self.branchcache
372 372
373 373 def _readbranchcache(self):
374 374 partial = {}
375 375 try:
376 376 f = self.opener("branches.cache")
377 377 lines = f.read().split('\n')
378 378 f.close()
379 379 last, lrev = lines.pop(0).rstrip().split(" ", 1)
380 380 last, lrev = bin(last), int(lrev)
381 381 if not (lrev < self.changelog.count() and
382 382 self.changelog.node(lrev) == last): # sanity check
383 383 # invalidate the cache
384 384 raise ValueError('Invalid branch cache: unknown tip')
385 385 for l in lines:
386 386 if not l: continue
387 387 node, label = l.rstrip().split(" ", 1)
388 388 partial[label] = bin(node)
389 389 except (KeyboardInterrupt, util.SignalInterrupt):
390 390 raise
391 391 except Exception, inst:
392 392 if self.ui.debugflag:
393 393 self.ui.warn(str(inst), '\n')
394 394 partial, last, lrev = {}, nullid, nullrev
395 395 return partial, last, lrev
396 396
397 397 def _writebranchcache(self, branches, tip, tiprev):
398 398 try:
399 399 f = self.opener("branches.cache", "w")
400 400 f.write("%s %s\n" % (hex(tip), tiprev))
401 401 for label, node in branches.iteritems():
402 402 f.write("%s %s\n" % (hex(node), label))
403 403 except IOError:
404 404 pass
405 405
406 406 def _updatebranchcache(self, partial, start, end):
407 407 for r in xrange(start, end):
408 408 c = self.changectx(r)
409 409 b = c.branch()
410 410 if b:
411 411 partial[b] = c.node()
412 412
413 413 def lookup(self, key):
414 414 if key == '.':
415 415 key = self.dirstate.parents()[0]
416 416 if key == nullid:
417 417 raise repo.RepoError(_("no revision checked out"))
418 418 elif key == 'null':
419 419 return nullid
420 420 n = self.changelog._match(key)
421 421 if n:
422 422 return n
423 423 if key in self.tags():
424 424 return self.tags()[key]
425 425 if key in self.branchtags():
426 426 return self.branchtags()[key]
427 427 n = self.changelog._partialmatch(key)
428 428 if n:
429 429 return n
430 430 raise repo.RepoError(_("unknown revision '%s'") % key)
431 431
432 432 def dev(self):
433 433 return os.lstat(self.path).st_dev
434 434
435 435 def local(self):
436 436 return True
437 437
438 438 def join(self, f):
439 439 return os.path.join(self.path, f)
440 440
441 441 def sjoin(self, f):
442 442 f = self.encodefn(f)
443 443 return os.path.join(self.spath, f)
444 444
445 445 def wjoin(self, f):
446 446 return os.path.join(self.root, f)
447 447
448 448 def file(self, f):
449 449 if f[0] == '/':
450 450 f = f[1:]
451 451 return filelog.filelog(self.sopener, f, self.revlogversion)
452 452
453 453 def changectx(self, changeid=None):
454 454 return context.changectx(self, changeid)
455 455
456 456 def workingctx(self):
457 457 return context.workingctx(self)
458 458
459 459 def parents(self, changeid=None):
460 460 '''
461 461 get list of changectxs for parents of changeid or working directory
462 462 '''
463 463 if changeid is None:
464 464 pl = self.dirstate.parents()
465 465 else:
466 466 n = self.changelog.lookup(changeid)
467 467 pl = self.changelog.parents(n)
468 468 if pl[1] == nullid:
469 469 return [self.changectx(pl[0])]
470 470 return [self.changectx(pl[0]), self.changectx(pl[1])]
471 471
472 472 def filectx(self, path, changeid=None, fileid=None):
473 473 """changeid can be a changeset revision, node, or tag.
474 474 fileid can be a file revision or node."""
475 475 return context.filectx(self, path, changeid, fileid)
476 476
477 477 def getcwd(self):
478 478 return self.dirstate.getcwd()
479 479
480 480 def wfile(self, f, mode='r'):
481 481 return self.wopener(f, mode)
482 482
483 483 def wread(self, filename):
484 484 if self.encodepats == None:
485 485 l = []
486 486 for pat, cmd in self.ui.configitems("encode"):
487 487 mf = util.matcher(self.root, "", [pat], [], [])[1]
488 488 l.append((mf, cmd))
489 489 self.encodepats = l
490 490
491 491 data = self.wopener(filename, 'r').read()
492 492
493 493 for mf, cmd in self.encodepats:
494 494 if mf(filename):
495 495 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
496 496 data = util.filter(data, cmd)
497 497 break
498 498
499 499 return data
500 500
501 501 def wwrite(self, filename, data, fd=None):
502 502 if self.decodepats == None:
503 503 l = []
504 504 for pat, cmd in self.ui.configitems("decode"):
505 505 mf = util.matcher(self.root, "", [pat], [], [])[1]
506 506 l.append((mf, cmd))
507 507 self.decodepats = l
508 508
509 509 for mf, cmd in self.decodepats:
510 510 if mf(filename):
511 511 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
512 512 data = util.filter(data, cmd)
513 513 break
514 514
515 515 if fd:
516 516 return fd.write(data)
517 517 return self.wopener(filename, 'w').write(data)
518 518
519 519 def transaction(self):
520 520 tr = self.transhandle
521 521 if tr != None and tr.running():
522 522 return tr.nest()
523 523
524 524 # save dirstate for rollback
525 525 try:
526 526 ds = self.opener("dirstate").read()
527 527 except IOError:
528 528 ds = ""
529 529 self.opener("journal.dirstate", "w").write(ds)
530 530
531 531 renames = [(self.sjoin("journal"), self.sjoin("undo")),
532 532 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
533 533 tr = transaction.transaction(self.ui.warn, self.sopener,
534 534 self.sjoin("journal"),
535 535 aftertrans(renames))
536 536 self.transhandle = tr
537 537 return tr
538 538
539 539 def recover(self):
540 540 l = self.lock()
541 541 if os.path.exists(self.sjoin("journal")):
542 542 self.ui.status(_("rolling back interrupted transaction\n"))
543 543 transaction.rollback(self.sopener, self.sjoin("journal"))
544 544 self.reload()
545 545 return True
546 546 else:
547 547 self.ui.warn(_("no interrupted transaction available\n"))
548 548 return False
549 549
550 550 def rollback(self, wlock=None):
551 551 if not wlock:
552 552 wlock = self.wlock()
553 553 l = self.lock()
554 554 if os.path.exists(self.sjoin("undo")):
555 555 self.ui.status(_("rolling back last transaction\n"))
556 556 transaction.rollback(self.sopener, self.sjoin("undo"))
557 557 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
558 558 self.reload()
559 559 self.wreload()
560 560 else:
561 561 self.ui.warn(_("no rollback information available\n"))
562 562
563 563 def wreload(self):
564 564 self.dirstate.read()
565 565
566 566 def reload(self):
567 567 self.changelog.load()
568 568 self.manifest.load()
569 569 self.tagscache = None
570 570 self.nodetagscache = None
571 571
572 572 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
573 573 desc=None):
574 574 try:
575 575 l = lock.lock(lockname, 0, releasefn, desc=desc)
576 576 except lock.LockHeld, inst:
577 577 if not wait:
578 578 raise
579 579 self.ui.warn(_("waiting for lock on %s held by %r\n") %
580 580 (desc, inst.locker))
581 581 # default to 600 seconds timeout
582 582 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
583 583 releasefn, desc=desc)
584 584 if acquirefn:
585 585 acquirefn()
586 586 return l
587 587
588 588 def lock(self, wait=1):
589 589 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
590 590 desc=_('repository %s') % self.origroot)
591 591
592 592 def wlock(self, wait=1):
593 593 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
594 594 self.wreload,
595 595 desc=_('working directory of %s') % self.origroot)
596 596
597 597 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
598 598 """
599 599 commit an individual file as part of a larger transaction
600 600 """
601 601
602 602 t = self.wread(fn)
603 603 fl = self.file(fn)
604 604 fp1 = manifest1.get(fn, nullid)
605 605 fp2 = manifest2.get(fn, nullid)
606 606
607 607 meta = {}
608 608 cp = self.dirstate.copied(fn)
609 609 if cp:
610 # Mark the new revision of this file as a copy of another
611 # file. This copy data will effectively act as a parent
612 # of this new revision. If this is a merge, the first
613 # parent will be the nullid (meaning "look up the copy data")
614 # and the second one will be the other parent. For example:
615 #
616 # 0 --- 1 --- 3 rev1 changes file foo
617 # \ / rev2 renames foo to bar and changes it
618 # \- 2 -/ rev3 should have bar with all changes and
619 # should record that bar descends from
620 # bar in rev2 and foo in rev1
621 #
622 # this allows this merge to succeed:
623 #
624 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
625 # \ / merging rev3 and rev4 should use bar@rev2
626 # \- 2 --- 4 as the merge base
627 #
610 628 meta["copy"] = cp
611 629 if not manifest2: # not a branch merge
612 630 meta["copyrev"] = hex(manifest1.get(cp, nullid))
613 631 fp2 = nullid
614 632 elif fp2 != nullid: # copied on remote side
615 633 meta["copyrev"] = hex(manifest1.get(cp, nullid))
616 634 elif fp1 != nullid: # copied on local side, reversed
617 635 meta["copyrev"] = hex(manifest2.get(cp))
618 fp2 = nullid
636 fp2 = fp1
619 637 else: # directory rename
620 638 meta["copyrev"] = hex(manifest1.get(cp, nullid))
621 639 self.ui.debug(_(" %s: copy %s:%s\n") %
622 640 (fn, cp, meta["copyrev"]))
623 641 fp1 = nullid
624 642 elif fp2 != nullid:
625 643 # is one parent an ancestor of the other?
626 644 fpa = fl.ancestor(fp1, fp2)
627 645 if fpa == fp1:
628 646 fp1, fp2 = fp2, nullid
629 647 elif fpa == fp2:
630 648 fp2 = nullid
631 649
632 650 # is the file unmodified from the parent? report existing entry
633 651 if fp2 == nullid and not fl.cmp(fp1, t):
634 652 return fp1
635 653
636 654 changelist.append(fn)
637 655 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
638 656
639 657 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
640 658 if p1 is None:
641 659 p1, p2 = self.dirstate.parents()
642 660 return self.commit(files=files, text=text, user=user, date=date,
643 661 p1=p1, p2=p2, wlock=wlock)
644 662
645 663 def commit(self, files=None, text="", user=None, date=None,
646 664 match=util.always, force=False, lock=None, wlock=None,
647 665 force_editor=False, p1=None, p2=None, extra={}):
648 666
649 667 commit = []
650 668 remove = []
651 669 changed = []
652 670 use_dirstate = (p1 is None) # not rawcommit
653 671 extra = extra.copy()
654 672
655 673 if use_dirstate:
656 674 if files:
657 675 for f in files:
658 676 s = self.dirstate.state(f)
659 677 if s in 'nmai':
660 678 commit.append(f)
661 679 elif s == 'r':
662 680 remove.append(f)
663 681 else:
664 682 self.ui.warn(_("%s not tracked!\n") % f)
665 683 else:
666 684 changes = self.status(match=match)[:5]
667 685 modified, added, removed, deleted, unknown = changes
668 686 commit = modified + added
669 687 remove = removed
670 688 else:
671 689 commit = files
672 690
673 691 if use_dirstate:
674 692 p1, p2 = self.dirstate.parents()
675 693 update_dirstate = True
676 694 else:
677 695 p1, p2 = p1, p2 or nullid
678 696 update_dirstate = (self.dirstate.parents()[0] == p1)
679 697
680 698 c1 = self.changelog.read(p1)
681 699 c2 = self.changelog.read(p2)
682 700 m1 = self.manifest.read(c1[0]).copy()
683 701 m2 = self.manifest.read(c2[0])
684 702
685 703 if use_dirstate:
686 704 branchname = self.workingctx().branch()
687 705 try:
688 706 branchname = branchname.decode('UTF-8').encode('UTF-8')
689 707 except UnicodeDecodeError:
690 708 raise util.Abort(_('branch name not in UTF-8!'))
691 709 else:
692 710 branchname = ""
693 711
694 712 if use_dirstate:
695 713 oldname = c1[5].get("branch", "") # stored in UTF-8
696 714 if not commit and not remove and not force and p2 == nullid and \
697 715 branchname == oldname:
698 716 self.ui.status(_("nothing changed\n"))
699 717 return None
700 718
701 719 xp1 = hex(p1)
702 720 if p2 == nullid: xp2 = ''
703 721 else: xp2 = hex(p2)
704 722
705 723 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
706 724
707 725 if not wlock:
708 726 wlock = self.wlock()
709 727 if not lock:
710 728 lock = self.lock()
711 729 tr = self.transaction()
712 730
713 731 # check in files
714 732 new = {}
715 733 linkrev = self.changelog.count()
716 734 commit.sort()
717 735 for f in commit:
718 736 self.ui.note(f + "\n")
719 737 try:
720 738 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
721 739 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
722 740 except IOError:
723 741 if use_dirstate:
724 742 self.ui.warn(_("trouble committing %s!\n") % f)
725 743 raise
726 744 else:
727 745 remove.append(f)
728 746
729 747 # update manifest
730 748 m1.update(new)
731 749 remove.sort()
732 750
733 751 for f in remove:
734 752 if f in m1:
735 753 del m1[f]
736 754 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
737 755
738 756 # add changeset
739 757 new = new.keys()
740 758 new.sort()
741 759
742 760 user = user or self.ui.username()
743 761 if not text or force_editor:
744 762 edittext = []
745 763 if text:
746 764 edittext.append(text)
747 765 edittext.append("")
748 766 edittext.append("HG: user: %s" % user)
749 767 if p2 != nullid:
750 768 edittext.append("HG: branch merge")
751 769 edittext.extend(["HG: changed %s" % f for f in changed])
752 770 edittext.extend(["HG: removed %s" % f for f in remove])
753 771 if not changed and not remove:
754 772 edittext.append("HG: no files changed")
755 773 edittext.append("")
756 774 # run editor in the repository root
757 775 olddir = os.getcwd()
758 776 os.chdir(self.root)
759 777 text = self.ui.edit("\n".join(edittext), user)
760 778 os.chdir(olddir)
761 779
762 780 lines = [line.rstrip() for line in text.rstrip().splitlines()]
763 781 while lines and not lines[0]:
764 782 del lines[0]
765 783 if not lines:
766 784 return None
767 785 text = '\n'.join(lines)
768 786 if branchname:
769 787 extra["branch"] = branchname
770 788 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
771 789 user, date, extra)
772 790 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
773 791 parent2=xp2)
774 792 tr.close()
775 793
776 794 if use_dirstate or update_dirstate:
777 795 self.dirstate.setparents(n)
778 796 if use_dirstate:
779 797 self.dirstate.update(new, "n")
780 798 self.dirstate.forget(remove)
781 799
782 800 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
783 801 return n
784 802
785 803 def walk(self, node=None, files=[], match=util.always, badmatch=None):
786 804 '''
787 805 walk recursively through the directory tree or a given
788 806 changeset, finding all files matched by the match
789 807 function
790 808
791 809 results are yielded in a tuple (src, filename), where src
792 810 is one of:
793 811 'f' the file was found in the directory tree
794 812 'm' the file was only in the dirstate and not in the tree
795 813 'b' file was not found and matched badmatch
796 814 '''
797 815
798 816 if node:
799 817 fdict = dict.fromkeys(files)
800 818 for fn in self.manifest.read(self.changelog.read(node)[0]):
801 819 for ffn in fdict:
802 820 # match if the file is the exact name or a directory
803 821 if ffn == fn or fn.startswith("%s/" % ffn):
804 822 del fdict[ffn]
805 823 break
806 824 if match(fn):
807 825 yield 'm', fn
808 826 for fn in fdict:
809 827 if badmatch and badmatch(fn):
810 828 if match(fn):
811 829 yield 'b', fn
812 830 else:
813 831 self.ui.warn(_('%s: No such file in rev %s\n') % (
814 832 util.pathto(self.getcwd(), fn), short(node)))
815 833 else:
816 834 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
817 835 yield src, fn
818 836
819 837 def status(self, node1=None, node2=None, files=[], match=util.always,
820 838 wlock=None, list_ignored=False, list_clean=False):
821 839 """return status of files between two nodes or node and working directory
822 840
823 841 If node1 is None, use the first dirstate parent instead.
824 842 If node2 is None, compare node1 with working directory.
825 843 """
826 844
827 845 def fcmp(fn, mf):
828 846 t1 = self.wread(fn)
829 847 return self.file(fn).cmp(mf.get(fn, nullid), t1)
830 848
831 849 def mfmatches(node):
832 850 change = self.changelog.read(node)
833 851 mf = self.manifest.read(change[0]).copy()
834 852 for fn in mf.keys():
835 853 if not match(fn):
836 854 del mf[fn]
837 855 return mf
838 856
839 857 modified, added, removed, deleted, unknown = [], [], [], [], []
840 858 ignored, clean = [], []
841 859
842 860 compareworking = False
843 861 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
844 862 compareworking = True
845 863
846 864 if not compareworking:
847 865 # read the manifest from node1 before the manifest from node2,
848 866 # so that we'll hit the manifest cache if we're going through
849 867 # all the revisions in parent->child order.
850 868 mf1 = mfmatches(node1)
851 869
852 870 # are we comparing the working directory?
853 871 if not node2:
854 872 if not wlock:
855 873 try:
856 874 wlock = self.wlock(wait=0)
857 875 except lock.LockException:
858 876 wlock = None
859 877 (lookup, modified, added, removed, deleted, unknown,
860 878 ignored, clean) = self.dirstate.status(files, match,
861 879 list_ignored, list_clean)
862 880
863 881 # are we comparing working dir against its parent?
864 882 if compareworking:
865 883 if lookup:
866 884 # do a full compare of any files that might have changed
867 885 mf2 = mfmatches(self.dirstate.parents()[0])
868 886 for f in lookup:
869 887 if fcmp(f, mf2):
870 888 modified.append(f)
871 889 else:
872 890 clean.append(f)
873 891 if wlock is not None:
874 892 self.dirstate.update([f], "n")
875 893 else:
876 894 # we are comparing working dir against non-parent
877 895 # generate a pseudo-manifest for the working dir
878 896 # XXX: create it in dirstate.py ?
879 897 mf2 = mfmatches(self.dirstate.parents()[0])
880 898 for f in lookup + modified + added:
881 899 mf2[f] = ""
882 900 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
883 901 for f in removed:
884 902 if f in mf2:
885 903 del mf2[f]
886 904 else:
887 905 # we are comparing two revisions
888 906 mf2 = mfmatches(node2)
889 907
890 908 if not compareworking:
891 909 # flush lists from dirstate before comparing manifests
892 910 modified, added, clean = [], [], []
893 911
894 912 # make sure to sort the files so we talk to the disk in a
895 913 # reasonable order
896 914 mf2keys = mf2.keys()
897 915 mf2keys.sort()
898 916 for fn in mf2keys:
899 917 if mf1.has_key(fn):
900 918 if mf1.flags(fn) != mf2.flags(fn) or \
901 919 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
902 920 modified.append(fn)
903 921 elif list_clean:
904 922 clean.append(fn)
905 923 del mf1[fn]
906 924 else:
907 925 added.append(fn)
908 926
909 927 removed = mf1.keys()
910 928
911 929 # sort and return results:
912 930 for l in modified, added, removed, deleted, unknown, ignored, clean:
913 931 l.sort()
914 932 return (modified, added, removed, deleted, unknown, ignored, clean)
915 933
916 934 def add(self, list, wlock=None):
917 935 if not wlock:
918 936 wlock = self.wlock()
919 937 for f in list:
920 938 p = self.wjoin(f)
921 939 if not os.path.exists(p):
922 940 self.ui.warn(_("%s does not exist!\n") % f)
923 941 elif not os.path.isfile(p):
924 942 self.ui.warn(_("%s not added: only files supported currently\n")
925 943 % f)
926 944 elif self.dirstate.state(f) in 'an':
927 945 self.ui.warn(_("%s already tracked!\n") % f)
928 946 else:
929 947 self.dirstate.update([f], "a")
930 948
931 949 def forget(self, list, wlock=None):
932 950 if not wlock:
933 951 wlock = self.wlock()
934 952 for f in list:
935 953 if self.dirstate.state(f) not in 'ai':
936 954 self.ui.warn(_("%s not added!\n") % f)
937 955 else:
938 956 self.dirstate.forget([f])
939 957
940 958 def remove(self, list, unlink=False, wlock=None):
941 959 if unlink:
942 960 for f in list:
943 961 try:
944 962 util.unlink(self.wjoin(f))
945 963 except OSError, inst:
946 964 if inst.errno != errno.ENOENT:
947 965 raise
948 966 if not wlock:
949 967 wlock = self.wlock()
950 968 for f in list:
951 969 p = self.wjoin(f)
952 970 if os.path.exists(p):
953 971 self.ui.warn(_("%s still exists!\n") % f)
954 972 elif self.dirstate.state(f) == 'a':
955 973 self.dirstate.forget([f])
956 974 elif f not in self.dirstate:
957 975 self.ui.warn(_("%s not tracked!\n") % f)
958 976 else:
959 977 self.dirstate.update([f], "r")
960 978
961 979 def undelete(self, list, wlock=None):
962 980 p = self.dirstate.parents()[0]
963 981 mn = self.changelog.read(p)[0]
964 982 m = self.manifest.read(mn)
965 983 if not wlock:
966 984 wlock = self.wlock()
967 985 for f in list:
968 986 if self.dirstate.state(f) not in "r":
969 987 self.ui.warn("%s not removed!\n" % f)
970 988 else:
971 989 t = self.file(f).read(m[f])
972 990 self.wwrite(f, t)
973 991 util.set_exec(self.wjoin(f), m.execf(f))
974 992 self.dirstate.update([f], "n")
975 993
976 994 def copy(self, source, dest, wlock=None):
977 995 p = self.wjoin(dest)
978 996 if not os.path.exists(p):
979 997 self.ui.warn(_("%s does not exist!\n") % dest)
980 998 elif not os.path.isfile(p):
981 999 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
982 1000 else:
983 1001 if not wlock:
984 1002 wlock = self.wlock()
985 1003 if self.dirstate.state(dest) == '?':
986 1004 self.dirstate.update([dest], "a")
987 1005 self.dirstate.copy(source, dest)
988 1006
989 1007 def heads(self, start=None):
990 1008 heads = self.changelog.heads(start)
991 1009 # sort the output in rev descending order
992 1010 heads = [(-self.changelog.rev(h), h) for h in heads]
993 1011 heads.sort()
994 1012 return [n for (r, n) in heads]
995 1013
996 1014 # branchlookup returns a dict giving a list of branches for
997 1015 # each head. A branch is defined as the tag of a node or
998 1016 # the branch of the node's parents. If a node has multiple
999 1017 # branch tags, tags are eliminated if they are visible from other
1000 1018 # branch tags.
1001 1019 #
1002 1020 # So, for this graph: a->b->c->d->e
1003 1021 # \ /
1004 1022 # aa -----/
1005 1023 # a has tag 2.6.12
1006 1024 # d has tag 2.6.13
1007 1025 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1008 1026 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1009 1027 # from the list.
1010 1028 #
1011 1029 # It is possible that more than one head will have the same branch tag.
1012 1030 # callers need to check the result for multiple heads under the same
1013 1031 # branch tag if that is a problem for them (ie checkout of a specific
1014 1032 # branch).
1015 1033 #
1016 1034 # passing in a specific branch will limit the depth of the search
1017 1035 # through the parents. It won't limit the branches returned in the
1018 1036 # result though.
1019 1037 def branchlookup(self, heads=None, branch=None):
1020 1038 if not heads:
1021 1039 heads = self.heads()
1022 1040 headt = [ h for h in heads ]
1023 1041 chlog = self.changelog
1024 1042 branches = {}
1025 1043 merges = []
1026 1044 seenmerge = {}
1027 1045
1028 1046 # traverse the tree once for each head, recording in the branches
1029 1047 # dict which tags are visible from this head. The branches
1030 1048 # dict also records which tags are visible from each tag
1031 1049 # while we traverse.
1032 1050 while headt or merges:
1033 1051 if merges:
1034 1052 n, found = merges.pop()
1035 1053 visit = [n]
1036 1054 else:
1037 1055 h = headt.pop()
1038 1056 visit = [h]
1039 1057 found = [h]
1040 1058 seen = {}
1041 1059 while visit:
1042 1060 n = visit.pop()
1043 1061 if n in seen:
1044 1062 continue
1045 1063 pp = chlog.parents(n)
1046 1064 tags = self.nodetags(n)
1047 1065 if tags:
1048 1066 for x in tags:
1049 1067 if x == 'tip':
1050 1068 continue
1051 1069 for f in found:
1052 1070 branches.setdefault(f, {})[n] = 1
1053 1071 branches.setdefault(n, {})[n] = 1
1054 1072 break
1055 1073 if n not in found:
1056 1074 found.append(n)
1057 1075 if branch in tags:
1058 1076 continue
1059 1077 seen[n] = 1
1060 1078 if pp[1] != nullid and n not in seenmerge:
1061 1079 merges.append((pp[1], [x for x in found]))
1062 1080 seenmerge[n] = 1
1063 1081 if pp[0] != nullid:
1064 1082 visit.append(pp[0])
1065 1083 # traverse the branches dict, eliminating branch tags from each
1066 1084 # head that are visible from another branch tag for that head.
1067 1085 out = {}
1068 1086 viscache = {}
1069 1087 for h in heads:
1070 1088 def visible(node):
1071 1089 if node in viscache:
1072 1090 return viscache[node]
1073 1091 ret = {}
1074 1092 visit = [node]
1075 1093 while visit:
1076 1094 x = visit.pop()
1077 1095 if x in viscache:
1078 1096 ret.update(viscache[x])
1079 1097 elif x not in ret:
1080 1098 ret[x] = 1
1081 1099 if x in branches:
1082 1100 visit[len(visit):] = branches[x].keys()
1083 1101 viscache[node] = ret
1084 1102 return ret
1085 1103 if h not in branches:
1086 1104 continue
1087 1105 # O(n^2), but somewhat limited. This only searches the
1088 1106 # tags visible from a specific head, not all the tags in the
1089 1107 # whole repo.
1090 1108 for b in branches[h]:
1091 1109 vis = False
1092 1110 for bb in branches[h].keys():
1093 1111 if b != bb:
1094 1112 if b in visible(bb):
1095 1113 vis = True
1096 1114 break
1097 1115 if not vis:
1098 1116 l = out.setdefault(h, [])
1099 1117 l[len(l):] = self.nodetags(b)
1100 1118 return out
1101 1119
1102 1120 def branches(self, nodes):
1103 1121 if not nodes:
1104 1122 nodes = [self.changelog.tip()]
1105 1123 b = []
1106 1124 for n in nodes:
1107 1125 t = n
1108 1126 while 1:
1109 1127 p = self.changelog.parents(n)
1110 1128 if p[1] != nullid or p[0] == nullid:
1111 1129 b.append((t, n, p[0], p[1]))
1112 1130 break
1113 1131 n = p[0]
1114 1132 return b
1115 1133
1116 1134 def between(self, pairs):
1117 1135 r = []
1118 1136
1119 1137 for top, bottom in pairs:
1120 1138 n, l, i = top, [], 0
1121 1139 f = 1
1122 1140
1123 1141 while n != bottom:
1124 1142 p = self.changelog.parents(n)[0]
1125 1143 if i == f:
1126 1144 l.append(n)
1127 1145 f = f * 2
1128 1146 n = p
1129 1147 i += 1
1130 1148
1131 1149 r.append(l)
1132 1150
1133 1151 return r
1134 1152
1135 1153 def findincoming(self, remote, base=None, heads=None, force=False):
1136 1154 """Return list of roots of the subsets of missing nodes from remote
1137 1155
1138 1156 If base dict is specified, assume that these nodes and their parents
1139 1157 exist on the remote side and that no child of a node of base exists
1140 1158 in both remote and self.
1141 1159 Furthermore, base will be updated to include the nodes that exist
1142 1160 in self and remote but whose children exist in neither self nor remote.
1143 1161 If a list of heads is specified, return only nodes which are heads
1144 1162 or ancestors of these heads.
1145 1163
1146 1164 All the ancestors of base are in self and in remote.
1147 1165 All the descendants of the list returned are missing in self.
1148 1166 (and so we know that the rest of the nodes are missing in remote, see
1149 1167 outgoing)
1150 1168 """
1151 1169 m = self.changelog.nodemap
1152 1170 search = []
1153 1171 fetch = {}
1154 1172 seen = {}
1155 1173 seenbranch = {}
1156 1174 if base == None:
1157 1175 base = {}
1158 1176
1159 1177 if not heads:
1160 1178 heads = remote.heads()
1161 1179
1162 1180 if self.changelog.tip() == nullid:
1163 1181 base[nullid] = 1
1164 1182 if heads != [nullid]:
1165 1183 return [nullid]
1166 1184 return []
1167 1185
1168 1186 # assume we're closer to the tip than the root
1169 1187 # and start by examining the heads
1170 1188 self.ui.status(_("searching for changes\n"))
1171 1189
1172 1190 unknown = []
1173 1191 for h in heads:
1174 1192 if h not in m:
1175 1193 unknown.append(h)
1176 1194 else:
1177 1195 base[h] = 1
1178 1196
1179 1197 if not unknown:
1180 1198 return []
1181 1199
1182 1200 req = dict.fromkeys(unknown)
1183 1201 reqcnt = 0
1184 1202
1185 1203 # search through remote branches
1186 1204 # a 'branch' here is a linear segment of history, with four parts:
1187 1205 # head, root, first parent, second parent
1188 1206 # (a branch always has two parents (or none) by definition)
1189 1207 unknown = remote.branches(unknown)
1190 1208 while unknown:
1191 1209 r = []
1192 1210 while unknown:
1193 1211 n = unknown.pop(0)
1194 1212 if n[0] in seen:
1195 1213 continue
1196 1214
1197 1215 self.ui.debug(_("examining %s:%s\n")
1198 1216 % (short(n[0]), short(n[1])))
1199 1217 if n[0] == nullid: # found the end of the branch
1200 1218 pass
1201 1219 elif n in seenbranch:
1202 1220 self.ui.debug(_("branch already found\n"))
1203 1221 continue
1204 1222 elif n[1] and n[1] in m: # do we know the base?
1205 1223 self.ui.debug(_("found incomplete branch %s:%s\n")
1206 1224 % (short(n[0]), short(n[1])))
1207 1225 search.append(n) # schedule branch range for scanning
1208 1226 seenbranch[n] = 1
1209 1227 else:
1210 1228 if n[1] not in seen and n[1] not in fetch:
1211 1229 if n[2] in m and n[3] in m:
1212 1230 self.ui.debug(_("found new changeset %s\n") %
1213 1231 short(n[1]))
1214 1232 fetch[n[1]] = 1 # earliest unknown
1215 1233 for p in n[2:4]:
1216 1234 if p in m:
1217 1235 base[p] = 1 # latest known
1218 1236
1219 1237 for p in n[2:4]:
1220 1238 if p not in req and p not in m:
1221 1239 r.append(p)
1222 1240 req[p] = 1
1223 1241 seen[n[0]] = 1
1224 1242
1225 1243 if r:
1226 1244 reqcnt += 1
1227 1245 self.ui.debug(_("request %d: %s\n") %
1228 1246 (reqcnt, " ".join(map(short, r))))
1229 1247 for p in xrange(0, len(r), 10):
1230 1248 for b in remote.branches(r[p:p+10]):
1231 1249 self.ui.debug(_("received %s:%s\n") %
1232 1250 (short(b[0]), short(b[1])))
1233 1251 unknown.append(b)
1234 1252
1235 1253 # do binary search on the branches we found
1236 1254 while search:
1237 1255 n = search.pop(0)
1238 1256 reqcnt += 1
1239 1257 l = remote.between([(n[0], n[1])])[0]
1240 1258 l.append(n[1])
1241 1259 p = n[0]
1242 1260 f = 1
1243 1261 for i in l:
1244 1262 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1245 1263 if i in m:
1246 1264 if f <= 2:
1247 1265 self.ui.debug(_("found new branch changeset %s\n") %
1248 1266 short(p))
1249 1267 fetch[p] = 1
1250 1268 base[i] = 1
1251 1269 else:
1252 1270 self.ui.debug(_("narrowed branch search to %s:%s\n")
1253 1271 % (short(p), short(i)))
1254 1272 search.append((p, i))
1255 1273 break
1256 1274 p, f = i, f * 2
1257 1275
1258 1276 # sanity check our fetch list
1259 1277 for f in fetch.keys():
1260 1278 if f in m:
1261 1279 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1262 1280
1263 1281 if base.keys() == [nullid]:
1264 1282 if force:
1265 1283 self.ui.warn(_("warning: repository is unrelated\n"))
1266 1284 else:
1267 1285 raise util.Abort(_("repository is unrelated"))
1268 1286
1269 1287 self.ui.debug(_("found new changesets starting at ") +
1270 1288 " ".join([short(f) for f in fetch]) + "\n")
1271 1289
1272 1290 self.ui.debug(_("%d total queries\n") % reqcnt)
1273 1291
1274 1292 return fetch.keys()
1275 1293
1276 1294 def findoutgoing(self, remote, base=None, heads=None, force=False):
1277 1295 """Return list of nodes that are roots of subsets not in remote
1278 1296
1279 1297 If base dict is specified, assume that these nodes and their parents
1280 1298 exist on the remote side.
1281 1299 If a list of heads is specified, return only nodes which are heads
1282 1300 or ancestors of these heads, and return a second element which
1283 1301 contains all remote heads which get new children.
1284 1302 """
1285 1303 if base == None:
1286 1304 base = {}
1287 1305 self.findincoming(remote, base, heads, force=force)
1288 1306
1289 1307 self.ui.debug(_("common changesets up to ")
1290 1308 + " ".join(map(short, base.keys())) + "\n")
1291 1309
1292 1310 remain = dict.fromkeys(self.changelog.nodemap)
1293 1311
1294 1312 # prune everything remote has from the tree
1295 1313 del remain[nullid]
1296 1314 remove = base.keys()
1297 1315 while remove:
1298 1316 n = remove.pop(0)
1299 1317 if n in remain:
1300 1318 del remain[n]
1301 1319 for p in self.changelog.parents(n):
1302 1320 remove.append(p)
1303 1321
1304 1322 # find every node whose parents have been pruned
1305 1323 subset = []
1306 1324 # find every remote head that will get new children
1307 1325 updated_heads = {}
1308 1326 for n in remain:
1309 1327 p1, p2 = self.changelog.parents(n)
1310 1328 if p1 not in remain and p2 not in remain:
1311 1329 subset.append(n)
1312 1330 if heads:
1313 1331 if p1 in heads:
1314 1332 updated_heads[p1] = True
1315 1333 if p2 in heads:
1316 1334 updated_heads[p2] = True
1317 1335
1318 1336 # this is the set of all roots we have to push
1319 1337 if heads:
1320 1338 return subset, updated_heads.keys()
1321 1339 else:
1322 1340 return subset
1323 1341
1324 1342 def pull(self, remote, heads=None, force=False, lock=None):
1325 1343 mylock = False
1326 1344 if not lock:
1327 1345 lock = self.lock()
1328 1346 mylock = True
1329 1347
1330 1348 try:
1331 1349 fetch = self.findincoming(remote, force=force)
1332 1350 if fetch == [nullid]:
1333 1351 self.ui.status(_("requesting all changes\n"))
1334 1352
1335 1353 if not fetch:
1336 1354 self.ui.status(_("no changes found\n"))
1337 1355 return 0
1338 1356
1339 1357 if heads is None:
1340 1358 cg = remote.changegroup(fetch, 'pull')
1341 1359 else:
1342 1360 if 'changegroupsubset' not in remote.capabilities:
1343 1361 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1344 1362 cg = remote.changegroupsubset(fetch, heads, 'pull')
1345 1363 return self.addchangegroup(cg, 'pull', remote.url())
1346 1364 finally:
1347 1365 if mylock:
1348 1366 lock.release()
1349 1367
1350 1368 def push(self, remote, force=False, revs=None):
1351 1369 # there are two ways to push to remote repo:
1352 1370 #
1353 1371 # addchangegroup assumes local user can lock remote
1354 1372 # repo (local filesystem, old ssh servers).
1355 1373 #
1356 1374 # unbundle assumes local user cannot lock remote repo (new ssh
1357 1375 # servers, http servers).
1358 1376
1359 1377 if remote.capable('unbundle'):
1360 1378 return self.push_unbundle(remote, force, revs)
1361 1379 return self.push_addchangegroup(remote, force, revs)
1362 1380
1363 1381 def prepush(self, remote, force, revs):
1364 1382 base = {}
1365 1383 remote_heads = remote.heads()
1366 1384 inc = self.findincoming(remote, base, remote_heads, force=force)
1367 1385
1368 1386 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1369 1387 if revs is not None:
1370 1388 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1371 1389 else:
1372 1390 bases, heads = update, self.changelog.heads()
1373 1391
1374 1392 if not bases:
1375 1393 self.ui.status(_("no changes found\n"))
1376 1394 return None, 1
1377 1395 elif not force:
1378 1396 # check if we're creating new remote heads
1379 1397 # to be a remote head after push, node must be either
1380 1398 # - unknown locally
1381 1399 # - a local outgoing head descended from update
1382 1400 # - a remote head that's known locally and not
1383 1401 # ancestral to an outgoing head
1384 1402
1385 1403 warn = 0
1386 1404
1387 1405 if remote_heads == [nullid]:
1388 1406 warn = 0
1389 1407 elif not revs and len(heads) > len(remote_heads):
1390 1408 warn = 1
1391 1409 else:
1392 1410 newheads = list(heads)
1393 1411 for r in remote_heads:
1394 1412 if r in self.changelog.nodemap:
1395 1413 desc = self.changelog.heads(r, heads)
1396 1414 l = [h for h in heads if h in desc]
1397 1415 if not l:
1398 1416 newheads.append(r)
1399 1417 else:
1400 1418 newheads.append(r)
1401 1419 if len(newheads) > len(remote_heads):
1402 1420 warn = 1
1403 1421
1404 1422 if warn:
1405 1423 self.ui.warn(_("abort: push creates new remote branches!\n"))
1406 1424 self.ui.status(_("(did you forget to merge?"
1407 1425 " use push -f to force)\n"))
1408 1426 return None, 1
1409 1427 elif inc:
1410 1428 self.ui.warn(_("note: unsynced remote changes!\n"))
1411 1429
1412 1430
1413 1431 if revs is None:
1414 1432 cg = self.changegroup(update, 'push')
1415 1433 else:
1416 1434 cg = self.changegroupsubset(update, revs, 'push')
1417 1435 return cg, remote_heads
1418 1436
1419 1437 def push_addchangegroup(self, remote, force, revs):
1420 1438 lock = remote.lock()
1421 1439
1422 1440 ret = self.prepush(remote, force, revs)
1423 1441 if ret[0] is not None:
1424 1442 cg, remote_heads = ret
1425 1443 return remote.addchangegroup(cg, 'push', self.url())
1426 1444 return ret[1]
1427 1445
1428 1446 def push_unbundle(self, remote, force, revs):
1429 1447 # local repo finds heads on server, finds out what revs it
1430 1448 # must push. once revs transferred, if server finds it has
1431 1449 # different heads (someone else won commit/push race), server
1432 1450 # aborts.
1433 1451
1434 1452 ret = self.prepush(remote, force, revs)
1435 1453 if ret[0] is not None:
1436 1454 cg, remote_heads = ret
1437 1455 if force: remote_heads = ['force']
1438 1456 return remote.unbundle(cg, remote_heads, 'push')
1439 1457 return ret[1]
1440 1458
1441 1459 def changegroupinfo(self, nodes):
1442 1460 self.ui.note(_("%d changesets found\n") % len(nodes))
1443 1461 if self.ui.debugflag:
1444 1462 self.ui.debug(_("List of changesets:\n"))
1445 1463 for node in nodes:
1446 1464 self.ui.debug("%s\n" % hex(node))
1447 1465
1448 1466 def changegroupsubset(self, bases, heads, source):
1449 1467 """This function generates a changegroup consisting of all the nodes
1450 1468 that are descendants of any of the bases, and ancestors of any of
1451 1469 the heads.
1452 1470
1453 1471 It is fairly complex as determining which filenodes and which
1454 1472 manifest nodes need to be included for the changeset to be complete
1455 1473 is non-trivial.
1456 1474
1457 1475 Another wrinkle is doing the reverse, figuring out which changeset in
1458 1476 the changegroup a particular filenode or manifestnode belongs to."""
1459 1477
1460 1478 self.hook('preoutgoing', throw=True, source=source)
1461 1479
1462 1480 # Set up some initial variables
1463 1481 # Make it easy to refer to self.changelog
1464 1482 cl = self.changelog
1465 1483 # msng is short for missing - compute the list of changesets in this
1466 1484 # changegroup.
1467 1485 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1468 1486 self.changegroupinfo(msng_cl_lst)
1469 1487 # Some bases may turn out to be superfluous, and some heads may be
1470 1488 # too. nodesbetween will return the minimal set of bases and heads
1471 1489 # necessary to re-create the changegroup.
1472 1490
1473 1491 # Known heads are the list of heads that it is assumed the recipient
1474 1492 # of this changegroup will know about.
1475 1493 knownheads = {}
1476 1494 # We assume that all parents of bases are known heads.
1477 1495 for n in bases:
1478 1496 for p in cl.parents(n):
1479 1497 if p != nullid:
1480 1498 knownheads[p] = 1
1481 1499 knownheads = knownheads.keys()
1482 1500 if knownheads:
1483 1501 # Now that we know what heads are known, we can compute which
1484 1502 # changesets are known. The recipient must know about all
1485 1503 # changesets required to reach the known heads from the null
1486 1504 # changeset.
1487 1505 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1488 1506 junk = None
1489 1507 # Transform the list into an ersatz set.
1490 1508 has_cl_set = dict.fromkeys(has_cl_set)
1491 1509 else:
1492 1510 # If there were no known heads, the recipient cannot be assumed to
1493 1511 # know about any changesets.
1494 1512 has_cl_set = {}
1495 1513
1496 1514 # Make it easy to refer to self.manifest
1497 1515 mnfst = self.manifest
1498 1516 # We don't know which manifests are missing yet
1499 1517 msng_mnfst_set = {}
1500 1518 # Nor do we know which filenodes are missing.
1501 1519 msng_filenode_set = {}
1502 1520
1503 1521 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1504 1522 junk = None
1505 1523
1506 1524 # A changeset always belongs to itself, so the changenode lookup
1507 1525 # function for a changenode is identity.
1508 1526 def identity(x):
1509 1527 return x
1510 1528
1511 1529 # A function generating function. Sets up an environment for the
1512 1530 # inner function.
1513 1531 def cmp_by_rev_func(revlog):
1514 1532 # Compare two nodes by their revision number in the environment's
1515 1533 # revision history. Since the revision number both represents the
1516 1534 # most efficient order to read the nodes in, and represents a
1517 1535 # topological sorting of the nodes, this function is often useful.
1518 1536 def cmp_by_rev(a, b):
1519 1537 return cmp(revlog.rev(a), revlog.rev(b))
1520 1538 return cmp_by_rev
1521 1539
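# Usage note: cmp_by_rev_func is a plain closure factory, so each revlog
# gets its own comparator and one helper serves changelog, manifest and
# filelogs alike:
#
#   nodes.sort(cmp_by_rev_func(mnfst))       # manifest order
#   nodes.sort(cmp_by_rev_func(filerevlog))  # filelog order
#
# Rev-number order is both the cheapest order to read nodes from disk and
# a topological order, which is why it is reused throughout this function.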
1522 1540 # If we determine that a particular file or manifest node must be a
1523 1541 # node that the recipient of the changegroup will already have, we can
1524 1542 # also assume the recipient will have all the parents. This function
1525 1543 # prunes them from the set of missing nodes.
1526 1544 def prune_parents(revlog, hasset, msngset):
1527 1545 haslst = hasset.keys()
1528 1546 haslst.sort(cmp_by_rev_func(revlog))
1529 1547 for node in haslst:
1530 1548 parentlst = [p for p in revlog.parents(node) if p != nullid]
1531 1549 while parentlst:
1532 1550 n = parentlst.pop()
1533 1551 if n not in hasset:
1534 1552 hasset[n] = 1
1535 1553 p = [p for p in revlog.parents(n) if p != nullid]
1536 1554 parentlst.extend(p)
1537 1555 for n in hasset:
1538 1556 msngset.pop(n, None)
1539 1557
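# Toy walk-through (assumed data, illustration only): with a parent graph
# n3 -> n2 -> n1 -> null, a hasset of {n3} expands to {n3, n2, n1} and all
# three are dropped from msngset:
#
#   hasset = {'n3': 1}
#   msngset = {'n1': 'c1', 'n2': 'c2', 'n4': 'c4'}
#   prune_parents(toy_revlog, hasset, msngset)
#   assert msngset == {'n4': 'c4'}   # only nodes outside the known
#                                    # ancestor closure remain to send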
1540 1558 # This is a function generating function used to set up an environment
1541 1559 # for the inner function to execute in.
1542 1560 def manifest_and_file_collector(changedfileset):
1543 1561 # This is an information gathering function that gathers
1544 1562 # information from each changeset node that goes out as part of
1545 1563 # the changegroup. The information gathered is a list of which
1546 1564 # manifest nodes are potentially required (the recipient may
1547 1565 # already have them) and total list of all files which were
1548 1566 # changed in any changeset in the changegroup.
1549 1567 #
1550 1568 # We also remember the first changenode we saw any manifest
1551 1569 # referenced by so we can later determine which changenode 'owns'
1552 1570 # the manifest.
1553 1571 def collect_manifests_and_files(clnode):
1554 1572 c = cl.read(clnode)
1555 1573 for f in c[3]:
1556 1574 # This is to make sure we only have one instance of each
1557 1575 # filename string for each filename.
1558 1576 changedfileset.setdefault(f, f)
1559 1577 msng_mnfst_set.setdefault(c[0], clnode)
1560 1578 return collect_manifests_and_files
1561 1579
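# Side note with a minimal example: changedfileset.setdefault(f, f) also
# acts as string interning -- equal filenames from different changesets
# collapse onto a single string object:
#
#   seen = {}
#   a = 'dir/file.txt'
#   b = 'dir/' + 'file.txt'        # equal value, distinct object
#   a = seen.setdefault(a, a)
#   b = seen.setdefault(b, b)
#   assert a is b                  # both now share one instance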
1562 1580 # Figure out which manifest nodes (of the ones we think might be part
1563 1581 # of the changegroup) the recipient must know about and remove them
1564 1582 # from the changegroup.
1565 1583 def prune_manifests():
1566 1584 has_mnfst_set = {}
1567 1585 for n in msng_mnfst_set:
1568 1586 # If a 'missing' manifest thinks it belongs to a changenode
1569 1587 # the recipient is assumed to have, obviously the recipient
1570 1588 # must have that manifest.
1571 1589 linknode = cl.node(mnfst.linkrev(n))
1572 1590 if linknode in has_cl_set:
1573 1591 has_mnfst_set[n] = 1
1574 1592 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1575 1593
1576 1594 # Use the information collected in collect_manifests_and_files to say
1577 1595 # which changenode any manifestnode belongs to.
1578 1596 def lookup_manifest_link(mnfstnode):
1579 1597 return msng_mnfst_set[mnfstnode]
1580 1598
1581 1599 # A function generating function that sets up the initial environment
1582 1600 # for the inner function.
1583 1601 def filenode_collector(changedfiles):
1584 1602 next_rev = [0]
1585 1603 # This gathers information from each manifestnode included in the
1586 1604 # changegroup about which filenodes the manifest node references
1587 1605 # so we can include those in the changegroup too.
1588 1606 #
1589 1607 # It also remembers which changenode each filenode belongs to. It
1590 1608 # does this by assuming that a filenode belongs to the changenode
1591 1609 # that the first manifest referencing it belongs to.
1592 1610 def collect_msng_filenodes(mnfstnode):
1593 1611 r = mnfst.rev(mnfstnode)
1594 1612 if r == next_rev[0]:
1595 1613 # If the last rev we looked at was the one just previous,
1596 1614 # we only need to see a diff.
1597 1615 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1598 1616 # For each line in the delta
1599 1617 for dline in delta.splitlines():
1600 1618 # get the filename and filenode for that line
1601 1619 f, fnode = dline.split('\0')
1602 1620 fnode = bin(fnode[:40])
1603 1621 f = changedfiles.get(f, None)
1604 1622 # And if the file is in the list of files we care
1605 1623 # about.
1606 1624 if f is not None:
1607 1625 # Get the changenode this manifest belongs to
1608 1626 clnode = msng_mnfst_set[mnfstnode]
1609 1627 # Create the set of filenodes for the file if
1610 1628 # there isn't one already.
1611 1629 ndset = msng_filenode_set.setdefault(f, {})
1612 1630 # And set the filenode's changelog node to the
1613 1631 # manifest's if it hasn't been set already.
1614 1632 ndset.setdefault(fnode, clnode)
1615 1633 else:
1616 1634 # Otherwise we need a full manifest.
1617 1635 m = mnfst.read(mnfstnode)
1618 1636 # For every file we care about.
1619 1637 for f in changedfiles:
1620 1638 fnode = m.get(f, None)
1621 1639 # If it's in the manifest
1622 1640 if fnode is not None:
1623 1641 # See comments above.
1624 1642 clnode = msng_mnfst_set[mnfstnode]
1625 1643 ndset = msng_filenode_set.setdefault(f, {})
1626 1644 ndset.setdefault(fnode, clnode)
1627 1645 # Remember the revision we hope to see next.
1628 1646 next_rev[0] = r + 1
1629 1647 return collect_msng_filenodes
1630 1648
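# Sketch of the line format parsed above (sample values are assumptions):
# a manifest line is "filename NUL 40-hex-chars[flags]", so a delta line
# decodes as:
#
#   dline = 'src/foo.c\x00' \
#           '1234567890abcdef1234567890abcdef12345678x'
#   f, fnode = dline.split('\0')
#   fnode = bin(fnode[:40])        # 20-byte binary node
#
# The [:40] slice discards trailing flag characters such as 'x' for an
# executable file.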
1631 1649 # We have a list of filenodes we think we need for a file, let's remove
1632 1650 # all those we know the recipient must have.
1633 1651 def prune_filenodes(f, filerevlog):
1634 1652 msngset = msng_filenode_set[f]
1635 1653 hasset = {}
1636 1654 # If a 'missing' filenode thinks it belongs to a changenode we
1637 1655 # assume the recipient must have, then the recipient must have
1638 1656 # that filenode.
1639 1657 for n in msngset:
1640 1658 clnode = cl.node(filerevlog.linkrev(n))
1641 1659 if clnode in has_cl_set:
1642 1660 hasset[n] = 1
1643 1661 prune_parents(filerevlog, hasset, msngset)
1644 1662
1645 1663 # A function generating function that sets up a context for the
1646 1664 # inner function.
1647 1665 def lookup_filenode_link_func(fname):
1648 1666 msngset = msng_filenode_set[fname]
1649 1667 # Lookup the changenode the filenode belongs to.
1650 1668 def lookup_filenode_link(fnode):
1651 1669 return msngset[fnode]
1652 1670 return lookup_filenode_link
1653 1671
1654 1672 # Now that we have all these utility functions to help out and
1655 1673 # logically divide up the task, generate the group.
1656 1674 def gengroup():
1657 1675 # The set of changed files starts empty.
1658 1676 changedfiles = {}
1659 1677 # Create a changenode group generator that will call our functions
1660 1678 # back to lookup the owning changenode and collect information.
1661 1679 group = cl.group(msng_cl_lst, identity,
1662 1680 manifest_and_file_collector(changedfiles))
1663 1681 for chnk in group:
1664 1682 yield chnk
1665 1683
1666 1684 # The list of manifests has been collected by the generator
1667 1685 # calling our functions back.
1668 1686 prune_manifests()
1669 1687 msng_mnfst_lst = msng_mnfst_set.keys()
1670 1688 # Sort the manifestnodes by revision number.
1671 1689 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1672 1690 # Create a generator for the manifestnodes that calls our lookup
1673 1691 # and data collection functions back.
1674 1692 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1675 1693 filenode_collector(changedfiles))
1676 1694 for chnk in group:
1677 1695 yield chnk
1678 1696
1679 1697 # These are no longer needed, dereference and toss the memory for
1680 1698 # them.
1681 1699 msng_mnfst_lst = None
1682 1700 msng_mnfst_set.clear()
1683 1701
1684 1702 changedfiles = changedfiles.keys()
1685 1703 changedfiles.sort()
1686 1704 # Go through all our files in order sorted by name.
1687 1705 for fname in changedfiles:
1688 1706 filerevlog = self.file(fname)
1689 1707 # Toss out the filenodes that the recipient isn't really
1690 1708 # missing.
1691 1709 if msng_filenode_set.has_key(fname):
1692 1710 prune_filenodes(fname, filerevlog)
1693 1711 msng_filenode_lst = msng_filenode_set[fname].keys()
1694 1712 else:
1695 1713 msng_filenode_lst = []
1696 1714 # If any filenodes are left, generate the group for them,
1697 1715 # otherwise don't bother.
1698 1716 if len(msng_filenode_lst) > 0:
1699 1717 yield changegroup.genchunk(fname)
1700 1718 # Sort the filenodes by their revision #
1701 1719 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1702 1720 # Create a group generator and only pass in a changenode
1703 1721 # lookup function, as we don't need to collect any
1704 1722 # information from filenodes.
1705 1723 group = filerevlog.group(msng_filenode_lst,
1706 1724 lookup_filenode_link_func(fname))
1707 1725 for chnk in group:
1708 1726 yield chnk
1709 1727 if msng_filenode_set.has_key(fname):
1710 1728 # Don't need this anymore, toss it to free memory.
1711 1729 del msng_filenode_set[fname]
1712 1730 # Signal that no more groups are left.
1713 1731 yield changegroup.closechunk()
1714 1732
1715 1733 if msng_cl_lst:
1716 1734 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1717 1735
1718 1736 return util.chunkbuffer(gengroup())
1719 1737
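# Usage note with a small sketch: gengroup is a generator and
# util.chunkbuffer wraps it in a file-like object, so the changegroup is
# produced lazily while the caller just read()s:
#
#   cg = repo.changegroupsubset(bases, heads, 'push')
#   while 1:
#       data = cg.read(4096)       # pulls chunks from gengroup on demand
#       if not data:
#           break
#       transport_write(data)      # hypothetical consumer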
1720 1738 def changegroup(self, basenodes, source):
1721 1739 """Generate a changegroup of all nodes that we have that a recipient
1722 1740 doesn't.
1723 1741
1724 1742 This is much easier than the previous function as we can assume that
1725 1743 the recipient has any changenode we aren't sending them."""
1726 1744
1727 1745 self.hook('preoutgoing', throw=True, source=source)
1728 1746
1729 1747 cl = self.changelog
1730 1748 nodes = cl.nodesbetween(basenodes, None)[0]
1731 1749 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1732 1750 self.changegroupinfo(nodes)
1733 1751
1734 1752 def identity(x):
1735 1753 return x
1736 1754
1737 1755 def gennodelst(revlog):
1738 1756 for r in xrange(0, revlog.count()):
1739 1757 n = revlog.node(r)
1740 1758 if revlog.linkrev(n) in revset:
1741 1759 yield n
1742 1760
1743 1761 def changed_file_collector(changedfileset):
1744 1762 def collect_changed_files(clnode):
1745 1763 c = cl.read(clnode)
1746 1764 for fname in c[3]:
1747 1765 changedfileset[fname] = 1
1748 1766 return collect_changed_files
1749 1767
1750 1768 def lookuprevlink_func(revlog):
1751 1769 def lookuprevlink(n):
1752 1770 return cl.node(revlog.linkrev(n))
1753 1771 return lookuprevlink
1754 1772
1755 1773 def gengroup():
1756 1774 # construct a list of all changed files
1757 1775 changedfiles = {}
1758 1776
1759 1777 for chnk in cl.group(nodes, identity,
1760 1778 changed_file_collector(changedfiles)):
1761 1779 yield chnk
1762 1780 changedfiles = changedfiles.keys()
1763 1781 changedfiles.sort()
1764 1782
1765 1783 mnfst = self.manifest
1766 1784 nodeiter = gennodelst(mnfst)
1767 1785 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1768 1786 yield chnk
1769 1787
1770 1788 for fname in changedfiles:
1771 1789 filerevlog = self.file(fname)
1772 1790 nodeiter = gennodelst(filerevlog)
1773 1791 nodeiter = list(nodeiter)
1774 1792 if nodeiter:
1775 1793 yield changegroup.genchunk(fname)
1776 1794 lookup = lookuprevlink_func(filerevlog)
1777 1795 for chnk in filerevlog.group(nodeiter, lookup):
1778 1796 yield chnk
1779 1797
1780 1798 yield changegroup.closechunk()
1781 1799
1782 1800 if nodes:
1783 1801 self.hook('outgoing', node=hex(nodes[0]), source=source)
1784 1802
1785 1803 return util.chunkbuffer(gengroup())
1786 1804
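# For reference, a minimal restatement of the chunk framing that
# changegroup.genchunk/closechunk implement (believed accurate for this
# era's changegroup format): a big-endian 4-byte length that counts
# itself, then the payload; a zero length closes a group:
#
#   import struct
#   def genchunk(data):
#       return struct.pack('>l', len(data) + 4) + data
#   def closechunk():
#       return struct.pack('>l', 0)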
1787 1805 def addchangegroup(self, source, srctype, url):
1788 1806 """add changegroup to repo.
1789 1807
1790 1808 return values:
1791 1809 - nothing changed or no source: 0
1792 1810 - more heads than before: 1+added heads (2..n)
1793 1811 - fewer heads than before: -1-removed heads (-2..-n)
1794 1812 - number of heads stays the same: 1
1795 1813 """
1796 1814 def csmap(x):
1797 1815 self.ui.debug(_("add changeset %s\n") % short(x))
1798 1816 return cl.count()
1799 1817
1800 1818 def revmap(x):
1801 1819 return cl.rev(x)
1802 1820
1803 1821 if not source:
1804 1822 return 0
1805 1823
1806 1824 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1807 1825
1808 1826 changesets = files = revisions = 0
1809 1827
1810 1828 tr = self.transaction()
1811 1829
1812 1830 # write changelog data to temp files so concurrent readers will not see
1813 1831 # inconsistent view
1814 1832 cl = None
1815 1833 try:
1816 1834 cl = appendfile.appendchangelog(self.sopener,
1817 1835 self.changelog.version)
1818 1836
1819 1837 oldheads = len(cl.heads())
1820 1838
1821 1839 # pull off the changeset group
1822 1840 self.ui.status(_("adding changesets\n"))
1823 1841 cor = cl.count() - 1
1824 1842 chunkiter = changegroup.chunkiter(source)
1825 1843 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1826 1844 raise util.Abort(_("received changelog group is empty"))
1827 1845 cnr = cl.count() - 1
1828 1846 changesets = cnr - cor
1829 1847
1830 1848 # pull off the manifest group
1831 1849 self.ui.status(_("adding manifests\n"))
1832 1850 chunkiter = changegroup.chunkiter(source)
1833 1851 # no need to check for empty manifest group here:
1834 1852 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1835 1853 # no new manifest will be created and the manifest group will
1836 1854 # be empty during the pull
1837 1855 self.manifest.addgroup(chunkiter, revmap, tr)
1838 1856
1839 1857 # process the files
1840 1858 self.ui.status(_("adding file changes\n"))
1841 1859 while 1:
1842 1860 f = changegroup.getchunk(source)
1843 1861 if not f:
1844 1862 break
1845 1863 self.ui.debug(_("adding %s revisions\n") % f)
1846 1864 fl = self.file(f)
1847 1865 o = fl.count()
1848 1866 chunkiter = changegroup.chunkiter(source)
1849 1867 if fl.addgroup(chunkiter, revmap, tr) is None:
1850 1868 raise util.Abort(_("received file revlog group is empty"))
1851 1869 revisions += fl.count() - o
1852 1870 files += 1
1853 1871
1854 1872 cl.writedata()
1855 1873 finally:
1856 1874 if cl:
1857 1875 cl.cleanup()
1858 1876
1859 1877 # make changelog see real files again
1860 1878 self.changelog = changelog.changelog(self.sopener,
1861 1879 self.changelog.version)
1862 1880 self.changelog.checkinlinesize(tr)
1863 1881
1864 1882 newheads = len(self.changelog.heads())
1865 1883 heads = ""
1866 1884 if oldheads and newheads != oldheads:
1867 1885 heads = _(" (%+d heads)") % (newheads - oldheads)
1868 1886
1869 1887 self.ui.status(_("added %d changesets"
1870 1888 " with %d changes to %d files%s\n")
1871 1889 % (changesets, revisions, files, heads))
1872 1890
1873 1891 if changesets > 0:
1874 1892 self.hook('pretxnchangegroup', throw=True,
1875 1893 node=hex(self.changelog.node(cor+1)), source=srctype,
1876 1894 url=url)
1877 1895
1878 1896 tr.close()
1879 1897
1880 1898 if changesets > 0:
1881 1899 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1882 1900 source=srctype, url=url)
1883 1901
1884 1902 for i in xrange(cor + 1, cnr + 1):
1885 1903 self.hook("incoming", node=hex(self.changelog.node(i)),
1886 1904 source=srctype, url=url)
1887 1905
1888 1906 # never return 0 here:
1889 1907 if newheads < oldheads:
1890 1908 return newheads - oldheads - 1
1891 1909 else:
1892 1910 return newheads - oldheads + 1
1893 1911
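# Illustrative caller-side decoding of the return value documented in the
# docstring above (decode_modheads is a hypothetical helper):
#
#   def decode_modheads(ret):
#       if ret == 0:
#           return 'nothing changed'
#       if ret == 1:
#           return 'same number of heads'
#       if ret > 1:
#           return '%d new head(s)' % (ret - 1)
#       return '%d head(s) removed' % (-ret - 1)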
1894 1912
1895 1913 def stream_in(self, remote):
1896 1914 fp = remote.stream_out()
1897 1915 l = fp.readline()
1898 1916 try:
1899 1917 resp = int(l)
1900 1918 except ValueError:
1901 1919 raise util.UnexpectedOutput(
1902 1920 _('Unexpected response from remote server:'), l)
1903 1921 if resp == 1:
1904 1922 raise util.Abort(_('operation forbidden by server'))
1905 1923 elif resp == 2:
1906 1924 raise util.Abort(_('locking the remote repository failed'))
1907 1925 elif resp != 0:
1908 1926 raise util.Abort(_('the server sent an unknown error code'))
1909 1927 self.ui.status(_('streaming all changes\n'))
1910 1928 l = fp.readline()
1911 1929 try:
1912 1930 total_files, total_bytes = map(int, l.split(' ', 1))
1913 1931 except (ValueError, TypeError):
1914 1932 raise util.UnexpectedOutput(
1915 1933 _('Unexpected response from remote server:'), l)
1916 1934 self.ui.status(_('%d files to transfer, %s of data\n') %
1917 1935 (total_files, util.bytecount(total_bytes)))
1918 1936 start = time.time()
1919 1937 for i in xrange(total_files):
1920 1938 # XXX doesn't support '\n' or '\r' in filenames
1921 1939 l = fp.readline()
1922 1940 try:
1923 1941 name, size = l.split('\0', 1)
1924 1942 size = int(size)
1925 1943 except (ValueError, TypeError):
1926 1944 raise util.UnexpectedOutput(
1927 1945 _('Unexpected response from remote server:'), l)
1928 1946 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1929 1947 ofp = self.sopener(name, 'w')
1930 1948 for chunk in util.filechunkiter(fp, limit=size):
1931 1949 ofp.write(chunk)
1932 1950 ofp.close()
1933 1951 elapsed = time.time() - start
1934 1952 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1935 1953 (util.bytecount(total_bytes), elapsed,
1936 1954 util.bytecount(total_bytes / elapsed)))
1937 1955 self.reload()
1938 1956 return len(self.heads()) + 1
1939 1957
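# Sketch of the stream wire format consumed above (sample values are
# assumptions): a status line, a "total_files total_bytes" line, then per
# file a "name NUL size" header followed by exactly `size` raw bytes:
#
#   0                        <- status: 0 ok, 1 forbidden, 2 lock failed
#   2 8192                   <- total_files total_bytes
#   data/foo.i\x004096       <- header, then 4096 bytes of revlog data
#   data/bar.i\x004096       <- header, then 4096 bytes of revlog data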
1940 1958 def clone(self, remote, heads=[], stream=False):
1941 1959 '''clone remote repository.
1942 1960
1943 1961 keyword arguments:
1944 1962 heads: list of revs to clone (forces use of pull)
1945 1963 stream: use streaming clone if possible'''
1946 1964
1947 1965 # now, all clients that can request uncompressed clones can
1948 1966 # read repo formats supported by all servers that can serve
1949 1967 # them.
1950 1968
1951 1969 # if revlog format changes, client will have to check version
1952 1970 # and format flags on "stream" capability, and use
1953 1971 # uncompressed only if compatible.
1954 1972
1955 1973 if stream and not heads and remote.capable('stream'):
1956 1974 return self.stream_in(remote)
1957 1975 return self.pull(remote, heads)
1958 1976
1959 1977 # used to avoid circular references so destructors work
1960 1978 def aftertrans(files):
1961 1979 renamefiles = [tuple(t) for t in files]
1962 1980 def a():
1963 1981 for src, dest in renamefiles:
1964 1982 util.rename(src, dest)
1965 1983 return a
1966 1984
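# Minimal illustration of why aftertrans returns a closure: the callback
# captures only plain tuples, never the repo object, so the transaction
# that holds it cannot keep the repo alive through a reference cycle.
# Hypothetical usage:
#
#   after = aftertrans([('journal', 'undo')])
#   # ... on successful close, calling after() renames journal -> undo
#   after()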
1967 1985 def instance(ui, path, create):
1968 1986 return localrepository(ui, util.drop_scheme('file', path), create)
1969 1987
1970 1988 def islocal(path):
1971 1989 return True