Merge with crew-stable
Patrick Mezard
r5323:46455285 merge default
@@ -0,0 +1,33 @@
#!/bin/sh

# Test issue 746: renaming files brought by the
# second parent of a merge was broken.

echo % create source repository
hg init t
cd t
echo a > a
hg ci -Am a
cd ..

echo % fork source repository
hg clone t t2
cd t2
echo b > b
hg ci -Am b

echo % update source repository
cd ../t
echo a >> a
hg ci -m a2

echo % merge repositories
hg pull ../t2
hg merge

echo % rename b as c
hg mv b c
hg st
echo % rename back c as b
hg mv c b
hg st
@@ -0,0 +1,20 @@
% create source repository
adding a
% fork source repository
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
adding b
% update source repository
% merge repositories
pulling from ../t2
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files (+1 heads)
(run 'hg heads' to see heads, 'hg merge' to merge)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
% rename b as c
A c
R b
% rename back c as b
@@ -1,1990 +1,1990 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import *
from i18n import _
import repo, changegroup
import changelog, dirstate, filelog, manifest, context, weakref
import re, lock, transaction, tempfile, stat, errno, ui
import os, revlog, time, util, extensions, hook

class localrepository(repo.repository):
    capabilities = util.set(('lookup', 'changegroupsubset'))
    supported = ('revlogv1', 'store')

    def __init__(self, parentui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath),
                                          self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        self._transref = self._lockref = self._wlockref = None

    def __getattr__(self, name):
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name

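    # Lazy attribute pattern: the first access to repo.changelog,
    # repo.manifest or repo.dirstate falls through to __getattr__ above,
    # which caches the constructed object on the instance, so later
    # lookups never re-enter this method.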
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, name, node, message, local, user, date, parent=None,
             extra={}):
        use_dirstate = parent is None

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        def writetag(fp, name, munge, prevtags):
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
            fp.close()
            self.hook('tag', node=hex(node), tag=name, local=local)

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetag(fp, name, None, prevtags)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetag(fp, name, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))


        self._tag(name, node, message, local, user, date)

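    # A usage sketch (hypothetical values): creating a local tag, which
    # writes to .hg/localtags instead of committing a changeset:
    #
    #   repo.tag('snapshot', repo.changelog.tip(), 'mark snapshot',
    #            True, 'user@example.com', None)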
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}

        def readtags(lines, fn):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    continue
                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f)

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags")
        except IOError:
            pass

        self.tagscache = {}
        for k,nh in globaltags.items():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache

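    # Tag file format: .hgtags and localtags hold one entry per line,
    # "<40-digit hex node> <tag name>", as parsed by readtags above,
    # e.g. (hypothetical node):
    #
    #   0123456789abcdef0123456789abcdef01234567 release-0.9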
    def _hgtagsnodes(self):
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self):
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchtags(self):
        if self.branchcache is not None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx
        partial = self._branchtags()

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        return self.branchcache

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

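    # branch.cache layout, mirroring _writebranchcache below: the first
    # line is "<tip hex> <tip rev>" and each further line is
    # "<head hex> <branch label>", e.g. (hypothetical values):
    #
    #   <40-digit hex of tip node> 1021
    #   <40-digit hex of branch head> default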
    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self.changectx(r)
            b = c.branch()
            partial[b] = c.node()

    def lookup(self, key):
        if key == '.':
            key, second = self.dirstate.parents()
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
            if second != nullid:
                self.ui.warn(_("warning: working directory has two parents, "
                               "tag '.' uses the first\n"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)

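    # Resolution order in lookup above: '.' (first dirstate parent),
    # 'null', an exact changelog match (revision number or full node),
    # tag names, branch names, then an unambiguous node prefix; anything
    # else raises RepoError.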
    def dev(self):
        return os.lstat(self.path).st_dev

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def sjoin(self, f):
        f = self.encodefn(f)
        return os.path.join(self.spath, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid=None):
        return context.changectx(self, changeid)

    def workingctx(self):
        return context.workingctx(self)

    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.filterpats[filter] = l

        for mf, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        if "l" in flags:
            self.wopener.symlink(data, filename)
        else:
            try:
                if self._link(filename):
                    os.unlink(self.wjoin(filename))
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)
            util.set_exec(self.wjoin(filename), "x" in flags)

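    # The flags string used by wwrite above: "l" marks a symlink, "x"
    # marks the executable bit, and "" means a plain file.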
    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        if self._transref and self._transref():
            return self._transref().nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock

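    # Transaction journaling, as wired up above: transaction() writes a
    # "journal" (plus "journal.dirstate") while a change is in flight;
    # aftertrans renames them to "undo"/"undo.dirstate" once the
    # transaction closes cleanly. recover() replays a leftover journal
    # after an interruption, while rollback() replays the saved undo
    # files and restores the dirstate as well.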
    def invalidate(self):
        for a in "changelog manifest".split():
            if hasattr(self, a):
                self.__delattr__(a)
        self.tagscache = None
        self.nodetagscache = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

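    # The wait timeout read above comes from the [ui] section of hgrc,
    # e.g.:
    #
    #   [ui]
    #   timeout = 600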
    def lock(self, wait=True):
        if self._lockref and self._lockref():
            return self._lockref()

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)

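    # The copy information recorded by filecommit above lands in the
    # filelog entry's metadata as two keys, roughly:
    #
    #   meta = {'copy': 'foo', 'copyrev': '<40-digit hex of source node>'}
    #
    # Later merges read these back to relate a renamed file to its source.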
    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)

    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        wlock = lock = tr = None
        try:
            commit = []
            remove = []
            changed = []
            use_dirstate = (p1 is None) # not rawcommit
            extra = extra.copy()

            if use_dirstate:
                if files:
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            commit.append(f)
                        elif s == 'r':
                            remove.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    changes = self.status(match=match)[:5]
                    modified, added, removed, deleted, unknown = changes
                    commit = modified + added
                    remove = removed
            else:
                commit = files

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)

            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                branchname = self.workingctx().branch()
                try:
                    branchname = branchname.decode('UTF-8').encode('UTF-8')
                except UnicodeDecodeError:
                    raise util.Abort(_('branch name not in UTF-8!'))
            else:
                branchname = ""

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            wlock = self.wlock()
            lock = self.lock()
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            linkrev = self.changelog.count()
            commit.sort()
            is_exec = util.execfunc(self.root, m1.execf)
            is_link = util.linkfunc(self.root, m1.linkf)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
                    new_exec = is_exec(f)
                    new_link = is_link(f)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        old_exec = m1.execf(f)
                        old_link = m1.linkf(f)
                        if old_exec != new_exec or old_link != new_link:
                            changed.append(f)
                    m1.set(f, new_exec, new_link)
                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            # update manifest
            m1.update(new)
            remove.sort()
            removed = []

            for f in remove:
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            new = new.keys()
            new.sort()

            user = user or self.ui.username()
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch %s" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            if branchname:
                extra["branch"] = branchname

            if use_dirstate:
                lines = [line.rstrip() for line in text.rstrip().splitlines()]
                while lines and not lines[0]:
                    del lines[0]
                if not lines:
                    return None
                text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, date, extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache and "branch" in extra:
                self.branchcache[util.tolocal(extra["branch"])] = n

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in new:
                        self.dirstate.normal(f)
                    for f in removed:
                        self.dirstate.forget(f)

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            del tr, lock, wlock

    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

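    # A usage sketch: iterating over working directory files as seen by
    # the dirstate:
    #
    #   for src, fn in repo.walk():
    #       print src, fn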
872 def status(self, node1=None, node2=None, files=[], match=util.always,
872 def status(self, node1=None, node2=None, files=[], match=util.always,
873 list_ignored=False, list_clean=False):
873 list_ignored=False, list_clean=False):
874 """return status of files between two nodes or node and working directory
874 """return status of files between two nodes or node and working directory
875
875
876 If node1 is None, use the first dirstate parent instead.
876 If node1 is None, use the first dirstate parent instead.
877 If node2 is None, compare node1 with working directory.
877 If node2 is None, compare node1 with working directory.
878 """
878 """
879
879
880 def fcmp(fn, getnode):
880 def fcmp(fn, getnode):
881 t1 = self.wread(fn)
881 t1 = self.wread(fn)
882 return self.file(fn).cmp(getnode(fn), t1)
882 return self.file(fn).cmp(getnode(fn), t1)
883
883
884 def mfmatches(node):
884 def mfmatches(node):
885 change = self.changelog.read(node)
885 change = self.changelog.read(node)
886 mf = self.manifest.read(change[0]).copy()
886 mf = self.manifest.read(change[0]).copy()
887 for fn in mf.keys():
887 for fn in mf.keys():
888 if not match(fn):
888 if not match(fn):
889 del mf[fn]
889 del mf[fn]
890 return mf
890 return mf
891
891
892 modified, added, removed, deleted, unknown = [], [], [], [], []
892 modified, added, removed, deleted, unknown = [], [], [], [], []
893 ignored, clean = [], []
893 ignored, clean = [], []
894
894
895 compareworking = False
895 compareworking = False
896 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
896 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
897 compareworking = True
897 compareworking = True
898
898
899 if not compareworking:
899 if not compareworking:
900 # read the manifest from node1 before the manifest from node2,
900 # read the manifest from node1 before the manifest from node2,
901 # so that we'll hit the manifest cache if we're going through
901 # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    for f in lookup:
                        if f not in ctx or ctx[f].cmp(self.wread(f)):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

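    # Editorial example (not part of the original source): callers unpack
    # the seven sorted lists returned above.  A minimal sketch, assuming
    # `repo` is a localrepository and that status() exposes the list_*
    # keyword flags used in the body above:
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(list_ignored=True,
    #                                           list_clean=True)
    #
    # unknown, ignored and clean stay empty unless the corresponding
    # list_* flag is passed, which is why the body guards on list_clean.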
    def add(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
        finally:
            del wlock

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn("%s not removed!\n" % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

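    # Editorial sketch (not part of the original source): undelete() now
    # reads the manifests of *both* dirstate parents, so after a merge a
    # file brought in by the second parent can still be restored.  The
    # selection expression above picks whichever parent manifest actually
    # contains the file; with hypothetical manifests m0 and m1:
    #
    #   m0, m1 = {'a': 'node-a'}, {'b': 'node-b'}
    #   f = 'b'
    #   m = f in m0 and m0 or m1    # 'b' not in m0, so m is m1
    #   assert m is m1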
    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if dest not in self.dirstate:
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

    def branchheads(self, branch, start=None):
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that
        # can possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We
        # are guaranteed by the rules of Mercurial that we will now be
        # visiting the nodes in reverse topological order (children before
        # parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since
        # it wasn't in the ancestor list of an existing head. So add it to
        # the head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch, ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all
        # the heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

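        # Editorial sketch (not part of the original source): on a tiny
        # graph 0 <- 1 <- 2 and 1 <- 3, all on branch 'b' with tip at rev 3,
        # the walk starts with heads = [3] and ancestors = {1, nullrev}.
        # Rev 2 is on the branch and not in ancestors, so it becomes a
        # second head; rev 1 is then consumed as a known ancestor of both
        # heads, giving heads = [3, 2].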
        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

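    # Editorial sketch (not part of the original source): between() samples
    # the first-parent chain at exponentially growing distances from `top`,
    # so for a linear chain running ten steps from top down to bottom it
    # records the nodes 1, 2, 4 and 8 steps below top.  That spacing is
    # what lets findincoming() below narrow a branch range with a
    # logarithmic number of round trips.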
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but whose children do not exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

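    # Editorial sketch (not part of the original source): the new-head
    # check in prepush() counts prospective remote heads.  If the remote
    # has heads {A} and we push an outgoing head B that does not descend
    # from A, then A is kept in newheads alongside B, newheads grows from
    # one entry to two, and the push is refused without -f with the
    # "push creates new remote branches!" abort above.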
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes):
        self.ui.note(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed
            # to know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we
        # can also assume the recipient will have all the parents. This
        # function prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an
        # environment for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and the total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # that the first manifest referencing it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file; let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Look up the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our
            # functions back to lookup the owning changenode and collect
            # information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

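    # Editorial sketch (not part of the original source): both generators
    # above emit the same stream layout, which the receiving side replays
    # in addchangegroup() below:
    #
    #   changelog chunks
    #   manifest chunks
    #   for each changed file:
    #       filename chunk
    #       filenode chunks
    #   close chunk (end of stream)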
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

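        # Worked example (added for clarity, not in the original): pulling a
        # changeset that creates a second head gives oldheads=1, newheads=2,
        # so the code below returns 2; merging away a head gives oldheads=2,
        # newheads=1 and returns -2; an unchanged head count returns 1.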
        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

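    # Wire format consumed below (described here for clarity; not a comment
    # in the original): stream_out sends one status line ('0' on success,
    # '1' if forbidden, '2' if locking failed), then a line of the form
    # '<file count> <total bytes>', then for each file a header line
    # '<name>\0<size>' followed by exactly <size> bytes of raw revlog data.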
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''
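        # Illustrative usage (not part of the original code): a hypothetical
        # caller would request a full streaming clone with
        # repo.clone(remote, stream=True); when streaming is impossible
        # (specific heads requested, or no 'stream' capability), the method
        # falls back to a pull-based clone automatically.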

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

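# Illustrative usage (not part of the original code): a transaction opener
# might register the callback returned by aftertrans() so journal files are
# renamed only after the transaction object is torn down, e.g. something
# like:
#
#     tr = transaction.transaction(ui.warn, opener, journalpath,
#                                  aftertrans(renames))
#
# The call site above is hypothetical; the point is that aftertrans()
# returns a plain closure holding no reference back to the repository.
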
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True