Merge with crew-stable.
Alexis S. L. Carvalho
r5249:0d28d4e5 merge default
@@ -0,0 +1,32 @@
#!/bin/sh
# b51a8138292a introduced a regression where we would mention in the
# changelog executable files added by the second parent of a merge.
# Test that that doesn't happen anymore

"$TESTDIR/hghave" execbit || exit 80

hg init repo
cd repo
echo foo > foo
hg ci -qAm 'add foo' -d '0 0'

echo bar > bar
chmod +x bar
hg ci -qAm 'add bar' -d '0 0'
echo '% manifest of p2:'
hg manifest
echo

hg up -qC 0
echo >> foo
hg ci -m 'change foo' -d '0 0'
echo '% manifest of p1:'
hg manifest

hg merge
hg ci -m 'merge' -d '0 0'

echo '% this should not mention bar:'
hg tip -v

hg debugindex .hg/store/data/bar.i
@@ -0,0 +1,21 @@
% manifest of p2:
bar
foo

% manifest of p1:
foo
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
% this should not mention bar:
changeset:   3:ef2fc9b4a51b
tag:         tip
parent:      2:ed1b79f46b9a
parent:      1:d394a8db219b
user:        test
date:        Thu Jan 01 00:00:00 1970 +0000
description:
merge


   rev    offset  length   base linkrev nodeid       p1           p2
     0         0       5      0       1 b004912a8510 000000000000 000000000000
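The two hunks above add a regression test in Mercurial's shell-test style: run-tests.py executes the script and compares its output against the matching .out file, so the test fails if 'bar' ever shows up again in the merge's changelog entry. A minimal sketch of running such a test, assuming the hunks were saved as tests/test-merge-commit and tests/test-merge-commit.out (hypothetical names; the scrape does not preserve the filenames):

$ cd tests
$ python run-tests.py test-merge-commit    # diffs actual output against test-merge-commit.out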
@@ -1,1988 +1,1991 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import *
from i18n import _
import repo, changegroup
import changelog, dirstate, filelog, manifest, context, weakref
import re, lock, transaction, tempfile, stat, errno, ui
import os, revlog, time, util, extensions, hook

class localrepository(repo.repository):
    capabilities = ('lookup', 'changegroupsubset')
    supported = ('revlogv1', 'store')

    def __init__(self, parentui, path=None, create=0):
        repo.repository.__init__(self)
        self.path = path
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath),
                                          self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        self._transref = self._lockref = self._wlockref = None

    def __getattr__(self, name):
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
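            # referenced for its side effect only: loading the changelog
            # also sets sopener.defversion before the manifest is opened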
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, name, node, message, local, user, date, parent=None,
             extra={}):
        use_dirstate = parent is None

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        def writetag(fp, name, munge, prevtags):
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
            fp.close()
            self.hook('tag', node=hex(node), tag=name, local=local)

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetag(fp, name, None, prevtags)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetag(fp, name, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self._tag(name, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}

        def readtags(lines, fn):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    continue
                # we prefer the global tag if:
                #  it supersedes us OR
                #  mutual supersedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f)

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags")
        except IOError:
            pass

        self.tagscache = {}
        for k, nh in globaltags.items():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache

    def _hgtagsnodes(self):
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                continue
            ret.append((rev, node, fnode))
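            # if this filenode already appeared under an earlier head,
            # null out that earlier entry so each distinct .hgtags
            # filenode is read only once, at its tip-most position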
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self):
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchtags(self):
        if self.branchcache is not None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx
        partial = self._branchtags()

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        return self.branchcache

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self.changectx(r)
            b = c.branch()
            partial[b] = c.node()

    def lookup(self, key):
        if key == '.':
            key, second = self.dirstate.parents()
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
            if second != nullid:
                self.ui.warn(_("warning: working directory has two parents, "
                               "tag '.' uses the first\n"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)

    def dev(self):
        return os.lstat(self.path).st_dev

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def sjoin(self, f):
        f = self.encodefn(f)
        return os.path.join(self.spath, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid=None):
        return context.changectx(self, changeid)

    def workingctx(self):
        return context.workingctx(self)

    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.filterpats[filter] = l

        for mf, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        if "l" in flags:
            self.wopener.symlink(data, filename)
        else:
            try:
                if self._link(filename):
                    os.unlink(self.wjoin(filename))
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)
            util.set_exec(self.wjoin(filename), "x" in flags)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        if self._transref and self._transref():
            return self._transref().nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock

    def invalidate(self):
        for a in "changelog manifest".split():
            if hasattr(self, a):
                self.__delattr__(a)
        self.tagscache = None
        self.nodetagscache = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        if self._lockref and self._lockref():
            return self._lockref()

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
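            # neither parent has 'cp' as a plain file copy source, so the
            # copy stems from a directory rename; the hunk below picks the
            # source revision from manifest2 when the rename happened on
            # the local side, falling back to manifest1 for the remote side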
-            else: # directory rename
+            elif cp in manifest2: # directory rename on local side
+                meta["copyrev"] = hex(manifest2[cp])
+            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
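        # the 'not meta' term added below keeps revisions that carry
        # copy/rename metadata from being skipped: even if the text matches
        # fp1, committing is required or the rename record would be lost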
-        if fp2 == nullid and not fl.cmp(fp1, t):
+        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)

    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)

    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        wlock = lock = tr = None
        try:
            commit = []
            remove = []
            changed = []
            use_dirstate = (p1 is None) # not rawcommit
            extra = extra.copy()

            if use_dirstate:
                if files:
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            commit.append(f)
                        elif s == 'r':
                            remove.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    changes = self.status(match=match)[:5]
                    modified, added, removed, deleted, unknown = changes
                    commit = modified + added
                    remove = removed
            else:
                commit = files

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)

            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                branchname = self.workingctx().branch()
                try:
                    branchname = branchname.decode('UTF-8').encode('UTF-8')
                except UnicodeDecodeError:
                    raise util.Abort(_('branch name not in UTF-8!'))
            else:
                branchname = ""

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            wlock = self.wlock()
            lock = self.lock()
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            linkrev = self.changelog.count()
            commit.sort()
            is_exec = util.execfunc(self.root, m1.execf)
            is_link = util.linkfunc(self.root, m1.linkf)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
                    new_exec = is_exec(f)
                    new_link = is_link(f)
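                    # the m2.get(f) != new[f] term added below keeps files
                    # identical to the second parent (e.g. executables added
                    # by p2) out of the merge's changelog file list, which is
                    # exactly what the new test above checks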
-                    if not changed or changed[-1] != f:
+                    if ((not changed or changed[-1] != f) and
+                        m2.get(f) != new[f]):
740 # mention the file in the changelog if some
743 # mention the file in the changelog if some
741 # flag changed, even if there was no content
744 # flag changed, even if there was no content
742 # change.
745 # change.
743 old_exec = m1.execf(f)
746 old_exec = m1.execf(f)
744 old_link = m1.linkf(f)
747 old_link = m1.linkf(f)
745 if old_exec != new_exec or old_link != new_link:
748 if old_exec != new_exec or old_link != new_link:
746 changed.append(f)
749 changed.append(f)
747 m1.set(f, new_exec, new_link)
750 m1.set(f, new_exec, new_link)
748 except (OSError, IOError):
751 except (OSError, IOError):
749 if use_dirstate:
752 if use_dirstate:
750 self.ui.warn(_("trouble committing %s!\n") % f)
753 self.ui.warn(_("trouble committing %s!\n") % f)
751 raise
754 raise
752 else:
755 else:
753 remove.append(f)
756 remove.append(f)
754
757
755 # update manifest
758 # update manifest
756 m1.update(new)
759 m1.update(new)
757 remove.sort()
760 remove.sort()
758 removed = []
761 removed = []
759
762
760 for f in remove:
763 for f in remove:
761 if f in m1:
764 if f in m1:
762 del m1[f]
765 del m1[f]
763 removed.append(f)
766 removed.append(f)
764 elif f in m2:
767 elif f in m2:
765 removed.append(f)
768 removed.append(f)
766 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
769 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
767 (new, removed))
770 (new, removed))
768
771
769 # add changeset
772 # add changeset
770 new = new.keys()
773 new = new.keys()
771 new.sort()
774 new.sort()
772
775
773 user = user or self.ui.username()
776 user = user or self.ui.username()
774 if (not empty_ok and not text) or force_editor:
777 if (not empty_ok and not text) or force_editor:
775 edittext = []
778 edittext = []
776 if text:
779 if text:
777 edittext.append(text)
780 edittext.append(text)
778 edittext.append("")
781 edittext.append("")
779 edittext.append("HG: user: %s" % user)
782 edittext.append("HG: user: %s" % user)
780 if p2 != nullid:
783 if p2 != nullid:
781 edittext.append("HG: branch merge")
784 edittext.append("HG: branch merge")
782 if branchname:
785 if branchname:
783 edittext.append("HG: branch %s" % util.tolocal(branchname))
786 edittext.append("HG: branch %s" % util.tolocal(branchname))
784 edittext.extend(["HG: changed %s" % f for f in changed])
787 edittext.extend(["HG: changed %s" % f for f in changed])
785 edittext.extend(["HG: removed %s" % f for f in removed])
788 edittext.extend(["HG: removed %s" % f for f in removed])
786 if not changed and not remove:
789 if not changed and not remove:
787 edittext.append("HG: no files changed")
790 edittext.append("HG: no files changed")
788 edittext.append("")
791 edittext.append("")
789 # run editor in the repository root
792 # run editor in the repository root
790 olddir = os.getcwd()
793 olddir = os.getcwd()
791 os.chdir(self.root)
794 os.chdir(self.root)
792 text = self.ui.edit("\n".join(edittext), user)
795 text = self.ui.edit("\n".join(edittext), user)
793 os.chdir(olddir)
796 os.chdir(olddir)
794
797
795 if branchname:
798 if branchname:
796 extra["branch"] = branchname
799 extra["branch"] = branchname
797
800
798 if use_dirstate:
801 if use_dirstate:
799 lines = [line.rstrip() for line in text.rstrip().splitlines()]
802 lines = [line.rstrip() for line in text.rstrip().splitlines()]
800 while lines and not lines[0]:
803 while lines and not lines[0]:
801 del lines[0]
804 del lines[0]
802 if not lines:
805 if not lines:
803 return None
806 return None
804 text = '\n'.join(lines)
807 text = '\n'.join(lines)
805
808
806 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
809 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
807 user, date, extra)
810 user, date, extra)
808 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
811 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
809 parent2=xp2)
812 parent2=xp2)
810 tr.close()
813 tr.close()
811
814
812 if self.branchcache and "branch" in extra:
815 if self.branchcache and "branch" in extra:
813 self.branchcache[util.tolocal(extra["branch"])] = n
816 self.branchcache[util.tolocal(extra["branch"])] = n
814
817
815 if use_dirstate or update_dirstate:
818 if use_dirstate or update_dirstate:
816 self.dirstate.setparents(n)
819 self.dirstate.setparents(n)
817 if use_dirstate:
820 if use_dirstate:
818 for f in new:
821 for f in new:
819 self.dirstate.normal(f)
822 self.dirstate.normal(f)
820 for f in removed:
823 for f in removed:
821 self.dirstate.forget(f)
824 self.dirstate.forget(f)
822
825
823 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
826 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
824 return n
827 return n
825 finally:
828 finally:
826 del tr, lock, wlock
829 del tr, lock, wlock
827
830
828 def walk(self, node=None, files=[], match=util.always, badmatch=None):
831 def walk(self, node=None, files=[], match=util.always, badmatch=None):
829 '''
832 '''
830 walk recursively through the directory tree or a given
833 walk recursively through the directory tree or a given
831 changeset, finding all files matched by the match
834 changeset, finding all files matched by the match
832 function
835 function
833
836
834 results are yielded in a tuple (src, filename), where src
837 results are yielded in a tuple (src, filename), where src
835 is one of:
838 is one of:
836 'f' the file was found in the directory tree
839 'f' the file was found in the directory tree
837 'm' the file was only in the dirstate and not in the tree
840 'm' the file was only in the dirstate and not in the tree
838 'b' file was not found and matched badmatch
841 'b' file was not found and matched badmatch
839 '''
842 '''
840
843
841 if node:
844 if node:
842 fdict = dict.fromkeys(files)
845 fdict = dict.fromkeys(files)
843 # for dirstate.walk, files=['.'] means "walk the whole tree".
846 # for dirstate.walk, files=['.'] means "walk the whole tree".
844 # follow that here, too
847 # follow that here, too
845 fdict.pop('.', None)
848 fdict.pop('.', None)
846 mdict = self.manifest.read(self.changelog.read(node)[0])
849 mdict = self.manifest.read(self.changelog.read(node)[0])
847 mfiles = mdict.keys()
850 mfiles = mdict.keys()
848 mfiles.sort()
851 mfiles.sort()
849 for fn in mfiles:
852 for fn in mfiles:
850 for ffn in fdict:
853 for ffn in fdict:
851 # match if the file is the exact name or a directory
854 # match if the file is the exact name or a directory
852 if ffn == fn or fn.startswith("%s/" % ffn):
855 if ffn == fn or fn.startswith("%s/" % ffn):
853 del fdict[ffn]
856 del fdict[ffn]
854 break
857 break
855 if match(fn):
858 if match(fn):
856 yield 'm', fn
859 yield 'm', fn
857 ffiles = fdict.keys()
860 ffiles = fdict.keys()
858 ffiles.sort()
861 ffiles.sort()
859 for fn in ffiles:
862 for fn in ffiles:
860 if badmatch and badmatch(fn):
863 if badmatch and badmatch(fn):
861 if match(fn):
864 if match(fn):
862 yield 'b', fn
865 yield 'b', fn
863 else:
866 else:
864 self.ui.warn(_('%s: No such file in rev %s\n')
867 self.ui.warn(_('%s: No such file in rev %s\n')
865 % (self.pathto(fn), short(node)))
868 % (self.pathto(fn), short(node)))
866 else:
869 else:
867 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
870 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
868 yield src, fn
871 yield src, fn
869
872
870 def status(self, node1=None, node2=None, files=[], match=util.always,
873 def status(self, node1=None, node2=None, files=[], match=util.always,
871 list_ignored=False, list_clean=False):
874 list_ignored=False, list_clean=False):
872 """return status of files between two nodes or node and working directory
875 """return status of files between two nodes or node and working directory
873
876
874 If node1 is None, use the first dirstate parent instead.
877 If node1 is None, use the first dirstate parent instead.
875 If node2 is None, compare node1 with working directory.
878 If node2 is None, compare node1 with working directory.
876 """
879 """
877
880
878 def fcmp(fn, getnode):
881 def fcmp(fn, getnode):
879 t1 = self.wread(fn)
882 t1 = self.wread(fn)
880 return self.file(fn).cmp(getnode(fn), t1)
883 return self.file(fn).cmp(getnode(fn), t1)
881
884
882 def mfmatches(node):
885 def mfmatches(node):
883 change = self.changelog.read(node)
886 change = self.changelog.read(node)
884 mf = self.manifest.read(change[0]).copy()
887 mf = self.manifest.read(change[0]).copy()
885 for fn in mf.keys():
888 for fn in mf.keys():
886 if not match(fn):
889 if not match(fn):
887 del mf[fn]
890 del mf[fn]
888 return mf
891 return mf
889
892
890 modified, added, removed, deleted, unknown = [], [], [], [], []
893 modified, added, removed, deleted, unknown = [], [], [], [], []
891 ignored, clean = [], []
894 ignored, clean = [], []
892
895
893 compareworking = False
896 compareworking = False
894 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
897 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
895 compareworking = True
898 compareworking = True
896
899
897 if not compareworking:
900 if not compareworking:
898 # read the manifest from node1 before the manifest from node2,
901 # read the manifest from node1 before the manifest from node2,
899 # so that we'll hit the manifest cache if we're going through
902 # so that we'll hit the manifest cache if we're going through
900 # all the revisions in parent->child order.
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    for f in lookup:
                        if f not in ctx or ctx[f].cmp(self.wread(f)):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

    def add(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                   % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                elif self.dirstate[f] in 'amn':
                    # 'a' (added), 'm' (merged) and 'n' (normal) all mean
                    # the file is already tracked
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    # re-adding a file scheduled for removal restores it
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
        finally:
            del wlock

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            p = self.dirstate.parents()[0]
            mn = self.changelog.read(p)[0]
            m = self.manifest.read(mn)
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn("%s not removed!\n" % f)
                else:
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if dest not in self.dirstate:
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]
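        # The negated revision number above is a decorate-sort-undecorate
        # trick: tuples sort ascending, so the (-rev, node) pairs come out
        # newest-first and the tip ends up at the front of the list.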
    def branchheads(self, branch, start=None):
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that
        # can possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We
        # are guaranteed by the rules of Mercurial that we will now be
        # visiting the nodes in reverse topological order (children before
        # parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since
        # it wasn't in the ancestor list of an existing head. So add it to
        # the head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all
        # the heads that cannot be reached from startrev. There may be a
        # more efficient way to do this as part of the previous algorithm.
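        #
        # A small worked example (illustrative only): take revision 0 on
        # the default branch and revisions 1, 2, 3 on branch B, where 1's
        # parent is 0 and both 2 and 3 have parent 1. The scan starts at
        # the branch tip 3 with ancestors = {1}; rev 2 is not in the
        # ancestors set but is on B, so it becomes a second head, while
        # revs 1 and 0 are reached through the ancestors set and dropped.
        # Result: heads = [3, 2].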

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
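        # A hedged illustration of the sampling above: l collects the
        # ancestors of top at distances 1, 2, 4, 8, ... (l.append fires
        # when i == f, and f doubles each time), so a linear run of
        # twelve changesets reports only four nodes. findincoming uses
        # these logarithmically spaced samples to narrow its binary
        # search for the earliest unknown changeset.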
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but whose children do not exist in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).
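        #
        # Concretely: pushing to a local filesystem path or an old ssh
        # server takes the addchangegroup path below, while http(s) and
        # newer ssh servers advertise the 'unbundle' capability and take
        # the unbundle path, which lets the server detect a lost
        # commit/push race and abort (see push_unbundle).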

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
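            #
            # For example (illustrative only): if the remote's only head
            # is A and we push a head B that does not descend from A,
            # newheads grows to {A, B}, len(newheads) exceeds
            # len(remote_heads), and the push is refused below unless
            # forced, since it would create a new remote branch.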

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes):
        self.ui.note(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""
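        # A usage sketch (as seen in prepush above, not an extra API):
        #   cg = self.changegroupsubset(update, revs, 'push')
        # bundles exactly the changesets between the computed bases
        # ('update') and the requested heads ('revs').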

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev
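        # Illustrative use (Python 2's list.sort accepts a comparison
        # function):
        #   msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
        # orders manifest nodes oldest-revision-first, which is both the
        # cheapest disk read order and a topological order, as noted above.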

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)
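        # Note that the pruning is transitive: the worklist walk above
        # marks every ancestor of a known-present node as present too,
        # since (per the comment above) a recipient holding a node can be
        # assumed to hold all of its parents as well.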

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and the total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""
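        # A usage sketch (both calls appear in this file): prepush does
        #   cg = self.changegroup(update, 'push')
        # when pushing everything, and pull consumes the same stream
        # format via
        #   cg = remote.changegroup(fetch, 'pull')
        # before handing it to addchangegroup below.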

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            return x

        # yield only the nodes of this revlog that were introduced by a
        # changeset in revset (linkrev points back at that changeset)
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # build up, in changedfileset, the union of all files touched by
        # the changesets we send
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # map a manifest or file node back to the changeset node it
        # belongs to
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
1821         def csmap(x):
1822             self.ui.debug(_("add changeset %s\n") % short(x))
1823             return cl.count()
1824
1825         def revmap(x):
1826             return cl.rev(x)
1827
1828         if not source:
1829             return 0
1830
1831         self.hook('prechangegroup', throw=True, source=srctype, url=url)
1832
1833         changesets = files = revisions = 0
1834
1835         # write changelog data to temp files so concurrent readers
1836         # will not see an inconsistent view
1837         cl = self.changelog
1838         cl.delayupdate()
1839         oldheads = len(cl.heads())
1840
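The cl.delayupdate() call, paired with cl.finalize(trp) below, implements the write-aside idea the comment describes: appends are diverted until the whole group has arrived. A self-contained sketch of the pattern with hypothetical names (DelayedLog is not a Mercurial class):

    class DelayedLog(object):
        """Buffer appended records in memory; publish them on finalize()."""
        def __init__(self, path):
            self.path = path
            self.pending = []
        def append(self, record):
            # record (a byte string) is not yet visible to readers
            self.pending.append(record)
        def finalize(self):
            f = open(self.path, 'ab')
            try:
                for record in self.pending:
                    f.write(record)
            finally:
                f.close()
            self.pending = []

Readers that open self.path before finalize() runs see none of the buffered records.
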
1841         tr = self.transaction()
1842         try:
1843             trp = weakref.proxy(tr)
1844             # pull off the changeset group
1845             self.ui.status(_("adding changesets\n"))
1846             cor = cl.count() - 1
1847             chunkiter = changegroup.chunkiter(source)
1848             if cl.addgroup(chunkiter, csmap, trp, 1) is None:
1849                 raise util.Abort(_("received changelog group is empty"))
1850             cnr = cl.count() - 1
1851             changesets = cnr - cor
1852
1853             # pull off the manifest group
1854             self.ui.status(_("adding manifests\n"))
1855             chunkiter = changegroup.chunkiter(source)
1856             # no need to check for empty manifest group here:
1857             # if the result of the merge of 1 and 2 is the same in 3 and 4,
1858             # no new manifest will be created and the manifest group will
1859             # be empty during the pull
1860             self.manifest.addgroup(chunkiter, revmap, trp)
1861
1862             # process the files
1863             self.ui.status(_("adding file changes\n"))
1864             while 1:
1865                 f = changegroup.getchunk(source)
1866                 if not f:
1867                     break
1868                 self.ui.debug(_("adding %s revisions\n") % f)
1869                 fl = self.file(f)
1870                 o = fl.count()
1871                 chunkiter = changegroup.chunkiter(source)
1872                 if fl.addgroup(chunkiter, revmap, trp) is None:
1873                     raise util.Abort(_("received file revlog group is empty"))
1874                 revisions += fl.count() - o
1875                 files += 1
1876
1877             # make changelog see real files again
1878             cl.finalize(trp)
1879
1880             newheads = len(self.changelog.heads())
1881             heads = ""
1882             if oldheads and newheads != oldheads:
1883                 heads = _(" (%+d heads)") % (newheads - oldheads)
1884
1885             self.ui.status(_("added %d changesets"
1886                              " with %d changes to %d files%s\n")
1887                            % (changesets, revisions, files, heads))
1888
1889             if changesets > 0:
1890                 self.hook('pretxnchangegroup', throw=True,
1891                           node=hex(self.changelog.node(cor+1)), source=srctype,
1892                           url=url)
1893
1894             tr.close()
1895         finally:
1896             del tr
1897
1898         if changesets > 0:
1899             self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1900                       source=srctype, url=url)
1901
1902             for i in xrange(cor + 1, cnr + 1):
1903                 self.hook("incoming", node=hex(self.changelog.node(i)),
1904                           source=srctype, url=url)
1905
1906         # never return 0 here:
1907         if newheads < oldheads:
1908             return newheads - oldheads - 1
1909         else:
1910             return newheads - oldheads + 1
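
Given the convention documented in the docstring (and the "never return 0" note above), a caller can decode the result like this; describe_result is a hypothetical helper, not repository code:

    def describe_result(ret):
        # ret follows addchangegroup's return convention
        if ret == 0:
            return 'nothing changed or no source'
        if ret > 1:
            return '%d head(s) added' % (ret - 1)
        if ret < 0:
            return '%d head(s) removed' % (-ret - 1)
        return 'head count unchanged'          # ret == 1

    assert describe_result(3) == '2 head(s) added'
    assert describe_result(-2) == '1 head(s) removed'
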
1911
1912
1913     def stream_in(self, remote):
1914         fp = remote.stream_out()
1915         l = fp.readline()
1916         try:
1917             resp = int(l)
1918         except ValueError:
1919             raise util.UnexpectedOutput(
1920                 _('Unexpected response from remote server:'), l)
1921         if resp == 1:
1922             raise util.Abort(_('operation forbidden by server'))
1923         elif resp == 2:
1924             raise util.Abort(_('locking the remote repository failed'))
1925         elif resp != 0:
1926             raise util.Abort(_('the server sent an unknown error code'))
1927         self.ui.status(_('streaming all changes\n'))
1928         l = fp.readline()
1929         try:
1930             total_files, total_bytes = map(int, l.split(' ', 1))
1931         except (ValueError, TypeError):
1932             raise util.UnexpectedOutput(
1933                 _('Unexpected response from remote server:'), l)
1934         self.ui.status(_('%d files to transfer, %s of data\n') %
1935                        (total_files, util.bytecount(total_bytes)))
1936         start = time.time()
1937         for i in xrange(total_files):
1938             # XXX doesn't support '\n' or '\r' in filenames
1939             l = fp.readline()
1940             try:
1941                 name, size = l.split('\0', 1)
1942                 size = int(size)
1943             except (ValueError, TypeError):
1944                 raise util.UnexpectedOutput(
1945                     _('Unexpected response from remote server:'), l)
1946             self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1947             ofp = self.sopener(name, 'w')
1948             for chunk in util.filechunkiter(fp, limit=size):
1949                 ofp.write(chunk)
1950             ofp.close()
1951         elapsed = time.time() - start
1952         if elapsed <= 0:
1953             elapsed = 0.001
1954         self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1955                        (util.bytecount(total_bytes), elapsed,
1956                         util.bytecount(total_bytes / elapsed)))
1957         self.invalidate()
1958         return len(self.heads()) + 1
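
The stream_out preamble parsed above is line-oriented: a numeric status code, then '<total files> <total bytes>', then for each file a '<name>\0<size>' header followed by exactly size raw bytes. A sketch of a standalone parser for the two header lines (read_stream_header is a hypothetical name, not part of this module):

    def read_stream_header(fp):
        # status line: 0 = ok, 1 = forbidden, 2 = remote locking failed
        resp = int(fp.readline())
        if resp != 0:
            raise ValueError('server refused stream clone: %d' % resp)
        # summary line: '<total_files> <total_bytes>'
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        return total_files, total_bytes
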
1959
1960     def clone(self, remote, heads=[], stream=False):
1961         '''clone remote repository.
1962
1963         keyword arguments:
1964         heads: list of revs to clone (forces use of pull)
1965         stream: use streaming clone if possible'''
1966
1967         # now, all clients that can request uncompressed clones can
1968         # read repo formats supported by all servers that can serve
1969         # them.
1970
1971         # if revlog format changes, client will have to check version
1972         # and format flags on "stream" capability, and use
1973         # uncompressed only if compatible.
1974
1975         if stream and not heads and remote.capable('stream'):
1976             return self.stream_in(remote)
1977         return self.pull(remote, heads)
1978
1979 # used to avoid circular references so destructors work
1980 def aftertrans(files):
1981     renamefiles = [tuple(t) for t in files]
1982     def a():
1983         for src, dest in renamefiles:
1984             util.rename(src, dest)
1985     return a
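
aftertrans copies the rename list before closing over it, so the callback it returns holds no reference back to the transaction's own mutable state and the transaction can be reclaimed promptly. The snapshot behavior is easy to demonstrate in isolation (make_callback is a hypothetical stand-in for aftertrans):

    def make_callback(files):
        renamefiles = [tuple(t) for t in files]     # snapshot, not an alias
        def a():
            return renamefiles
        return a

    pending = [['journal.tmp', 'journal']]
    cb = make_callback(pending)
    pending.append(['other.tmp', 'other'])          # later mutation...
    assert cb() == [('journal.tmp', 'journal')]     # ...does not affect cb
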
1986
1987 def instance(ui, path, create):
1988     return localrepository(ui, util.drop_scheme('file', path), create)
1989
1990 def islocal(path):
1991     return True
@@ -1,32 +1,34 b''
1 #!/bin/sh
2
3 mkdir t
4 cd t
5 hg init
6
7 mkdir a
8 echo foo > a/a
9 echo bar > a/b
10
11 hg add a
12 hg ci -m "0" -d "0 0"
13
14 hg co -C 0
15 hg mv a b
16 hg ci -m "1 mv a/ b/" -d "0 0"
17
18 hg co -C 0
19 echo baz > a/c
20 hg add a/c
21 hg ci -m "2 add a/c" -d "0 0"
22
23 hg merge --debug 1
24 echo a/* b/*
25 hg st -C
26 hg ci -m "3 merge 2+1" -d "0 0"
27 hg debugrename b/c
28
29 hg co -C 1
30 hg merge --debug 2
31 echo a/* b/*
32 hg st -C
33 hg ci -m "4 merge 1+2" -d "0 0"
34 hg debugrename b/c
@@ -1,42 +1,44 b''
1 adding a/a
2 adding a/b
3 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
4 copying a/a to b/a
5 copying a/b to b/b
6 removing a/a
7 removing a/b
8 2 files updated, 0 files merged, 2 files removed, 0 files unresolved
9 resolving manifests
10 overwrite None partial False
11 ancestor f9b20c0d4c51 local ce36d17b18fb+ remote 55119e611c80
12 a/c: remote renamed directory to b/c -> d
13 a/b: other deleted -> r
14 a/a: other deleted -> r
15 b/a: remote created -> g
16 b/b: remote created -> g
17 removing a/a
18 removing a/b
19 moving a/c to b/c
20 getting b/a
21 getting b/b
22 3 files updated, 0 files merged, 2 files removed, 0 files unresolved
23 (branch merge, don't forget to commit)
24 a/* b/a b/b b/c
25 M b/a
26 M b/b
27 A b/c
28   a/c
29 R a/a
30 R a/b
31 R a/c
32 b/c renamed from a/c:354ae8da6e890359ef49ade27b68bbc361f3ca88
33 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
34 resolving manifests
35 overwrite None partial False
36 ancestor f9b20c0d4c51 local 55119e611c80+ remote ce36d17b18fb
37 None: local renamed directory to b/c -> d
38 getting a/c to b/c
39 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
40 (branch merge, don't forget to commit)
41 a/* b/a b/b b/c
42 A b/c
43   a/c
44 b/c renamed from a/c:354ae8da6e890359ef49ade27b68bbc361f3ca88