Do not abort rollback if undo.branch isn't available, but warn.
Thomas Arendsen Hein
r6058:88b4d726 default
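Before this change, rollback() read .hg/undo.branch unconditionally, so rollback aborted when the file was missing (for example, undo data written by a version that did not yet journal the branch). The hunk below wraps the read in a try/except IOError and only warns that the named branch could not be reset.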
@@ -1,2081 +1,2086 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import *
from i18n import _
import repo, changegroup
import changelog, dirstate, filelog, manifest, context, weakref
import re, lock, transaction, tempfile, stat, errno, ui
import os, revlog, time, util, extensions, hook, inspect

class localrepository(repo.repository):
    capabilities = util.set(('lookup', 'changegroupsubset'))
    supported = ('revlogv1', 'store')

    def __init__(self, parentui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath),
                                          self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

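    # changelog, manifest and dirstate are created lazily on first access
    # and cached on the instance by ordinary attribute assignment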
    def __getattr__(self, name):
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            # touch the changelog first so sopener.defversion is set
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

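    # backend for tag(): a local tag is appended to .hg/localtags, a global
    # tag is written to .hgtags and committed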
    def _tag(self, name, node, message, local, user, date, parent=None,
             extra={}):
        use_dirstate = parent is None

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        def writetag(fp, name, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetag(fp, name, None, prevtags)
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetag(fp, name, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))


        self._tag(name, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k,nh in globaltags.items():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagstypecache.get(tagname)

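    # collect the .hgtags filenodes to read, walking the heads from oldest
    # to tip and keeping only the last occurrence of each filenode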
    def _hgtagsnodes(self):
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

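    # the branch cache maps each branch name to its tip-most changeset and
    # is persisted in .hg/branch.cache so it can be updated incrementally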
    def _branchtags(self):
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchtags(self):
        if self.branchcache is not None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx
        partial = self._branchtags()

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        return self.branchcache

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self.changectx(r)
            b = c.branch()
            partial[b] = c.node()

    def lookup(self, key):
        if key == '.':
            key, second = self.dirstate.parents()
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
            if second != nullid:
                self.ui.warn(_("warning: working directory has two parents, "
                               "tag '.' uses the first\n"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)

    def dev(self):
        return os.lstat(self.path).st_dev

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def sjoin(self, f):
        f = self.encodefn(f)
        return os.path.join(self.spath, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid=None):
        return context.changectx(self, changeid)

    def workingctx(self):
        return context.workingctx(self)

    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

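    # run file data through the first matching filter from the [encode] or
    # [decode] configuration section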
    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, cmd))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        self.wopener(filename, 'w').write(data)
        util.set_flags(self.wjoin(filename), flags)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

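    # start a transaction; the dirstate and current branch are journaled so
    # that rollback() can later restore them from the undo.* files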
    def transaction(self):
        if self._transref and self._transref():
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l

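    # undo the last transaction: replay the undo journal, then restore the
    # dirstate and named branch saved by transaction()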
    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
-               branch = self.opener("undo.branch").read()
-               self.dirstate.setbranch(branch)
+               try:
+                   branch = self.opener("undo.branch").read()
+                   self.dirstate.setbranch(branch)
+               except IOError:
+                   self.ui.warn(_("Named branch could not be reset, "
+                                  "current branch still is: %s\n")
+                                % util.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock

    def invalidate(self):
        for a in "changelog manifest".split():
            if hasattr(self, a):
                self.__delattr__(a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None

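    # lock() guards the store, wlock() the working directory; both use this
    # helper, which warns and retries with the ui.timeout when the lock is
    # already held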
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        if self._lockref and self._lockref():
            return self._lockref()

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)

    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)

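    # commit the given files (or everything changed, when files is None)
    # from the working directory, or create a raw changeset between the
    # explicit parents p1 and p2 when called via rawcommit()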
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        wlock = lock = tr = None
        valid = 0 # don't save the dirstate if this isn't set
        if files:
            files = util.unique(files)
        try:
            commit = []
            remove = []
            changed = []
            use_dirstate = (p1 is None) # not rawcommit
            extra = extra.copy()

            if use_dirstate:
                if files:
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            commit.append(f)
                        elif s == 'r':
                            remove.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    changes = self.status(match=match)[:5]
                    modified, added, removed, deleted, unknown = changes
                    commit = modified + added
                    remove = removed
            else:
                commit = files

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)

            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                branchname = self.workingctx().branch()
                try:
                    branchname = branchname.decode('UTF-8').encode('UTF-8')
                except UnicodeDecodeError:
                    raise util.Abort(_('branch name not in UTF-8!'))
            else:
                branchname = ""

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            wlock = self.wlock()
            lock = self.lock()
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            linkrev = self.changelog.count()
            commit.sort()
            is_exec = util.execfunc(self.root, m1.execf)
            is_link = util.linkfunc(self.root, m1.linkf)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
                    new_exec = is_exec(f)
                    new_link = is_link(f)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        old_exec = m1.execf(f)
                        old_link = m1.linkf(f)
                        if old_exec != new_exec or old_link != new_link:
                            changed.append(f)
                    m1.set(f, new_exec, new_link)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            # update manifest
            m1.update(new)
            remove.sort()
            removed = []

            for f in remove:
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            new = new.keys()
            new.sort()

            user = user or self.ui.username()
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            if branchname:
                extra["branch"] = branchname

            if use_dirstate:
                lines = [line.rstrip() for line in text.rstrip().splitlines()]
                while lines and not lines[0]:
                    del lines[0]
                if not lines:
                    raise util.Abort(_("empty commit message"))
                text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, date, extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache and "branch" in extra:
                self.branchcache[util.tolocal(extra["branch"])] = n

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in removed:
                        self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr, lock, wlock

    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

    def status(self, node1=None, node2=None, files=[], match=util.always,
               list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, getnode):
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    for f in lookup:
                        if f not in ctx or ctx[f].cmp(self.wread(f)):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

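    # Usage sketch (hypothetical caller): the seven result lists are
    # unpacked positionally, e.g.
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(list_ignored=True, list_clean=True)
    # 'ignored' and 'clean' stay empty unless the corresponding list_*
    # flag is passed.
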
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            del wlock

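    # e.g. rejected = repo.add(['foo.txt', 'sub/bar']) starts tracking both
    # paths and returns the names that could not be added ('repo' being any
    # localrepository instance)
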
    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if dest not in self.dirstate:
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
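        # e.g. heads at revs 5, 9 and 12 decorate to
        # [(-5, n5), (-9, n9), (-12, n12)] and sort below to
        # [(-12, n12), (-9, n9), (-5, n5)], i.e. newest first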
        heads.sort()
        return [n for (r, n) in heads]

    def branchheads(self, branch, start=None):
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

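        # Worked example on a hypothetical graph: revs 0-1-2-3 on 'default',
        # rev 4 on 'stable' with parent rev 2. The sweep starts at rev 4
        # (the 'stable' tip, a guaranteed head) with ancestors = {2}; rev 3
        # is ignored (wrong branch, not an ancestor), and revs 2, 1 and 0
        # are swept through the ancestors set, so only rev 4 is returned.
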
        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

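            # walk first parents from top towards bottom, keeping the nodes
            # whose distance from top is a power of two (i == f, with f
            # doubling on each hit); e.g. a range twelve changesets deep
            # samples the nodes at distances 1, 2, 4 and 8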
            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist in
        both self and remote but whose children do not exist in both self
        and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

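        # Illustrative walk-through (hypothetical numbers): for an
        # incomplete branch segment spanning ~100 changesets with only its
        # root known locally, each remote.between() round samples the
        # segment at power-of-two distances, so the boundary between known
        # and unknown changesets is pinned down in a handful of rounds
        # rather than one query per changeset.
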
        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

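    # prepush() below shows the typical pairing: findincoming() fills
    # 'base' with the common nodes, then findoutgoing() turns that into the
    # roots that must be pushed plus the remote heads that will gain
    # children.
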
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("partial pull cannot be done because "
                                       "the other repository doesn't support "
                                       "changegroupsubset"))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

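    # Note on the contract: prepush() returns (changegroup, remote_heads)
    # when there is something to push, and (None, status) when the push
    # should stop early, which is why both callers below test ret[0]
    # before transferring anything.
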
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """
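        # The extranodes mapping, when given, looks like (hypothetical
        # values):
        #   {1: [(manifest_node, linknode)],
        #    'foo.txt': [(filenode, linknode)]}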

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

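        # e.g. lst.sort(cmp_by_rev_func(cl)) orders changelog nodes oldest
        # first, the cheapest order in which to read them back
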
        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

1751 # Now that we have all theses utility functions to help out and
1756 # Now that we have all theses utility functions to help out and
1752 # logically divide up the task, generate the group.
1757 # logically divide up the task, generate the group.
1753 def gengroup():
1758 def gengroup():
1754 # The set of changed files starts empty.
1759 # The set of changed files starts empty.
1755 changedfiles = {}
1760 changedfiles = {}
1756 # Create a changenode group generator that will call our functions
1761 # Create a changenode group generator that will call our functions
1757 # back to lookup the owning changenode and collect information.
1762 # back to lookup the owning changenode and collect information.
1758 group = cl.group(msng_cl_lst, identity,
1763 group = cl.group(msng_cl_lst, identity,
1759 manifest_and_file_collector(changedfiles))
1764 manifest_and_file_collector(changedfiles))
1760 for chnk in group:
1765 for chnk in group:
1761 yield chnk
1766 yield chnk
1762
1767
1763 # The list of manifests has been collected by the generator
1768 # The list of manifests has been collected by the generator
1764 # calling our functions back.
1769 # calling our functions back.
1765 prune_manifests()
1770 prune_manifests()
1766 add_extra_nodes(1, msng_mnfst_set)
1771 add_extra_nodes(1, msng_mnfst_set)
1767 msng_mnfst_lst = msng_mnfst_set.keys()
1772 msng_mnfst_lst = msng_mnfst_set.keys()
1768 # Sort the manifestnodes by revision number.
1773 # Sort the manifestnodes by revision number.
1769 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1774 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()
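            # At this point the stream so far is: one changelog group, one
            # manifest group, then for every changed file a chunkheader
            # carrying len(fname), the name itself, and that file's delta
            # group; the close chunk above marks the end of the file groups.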

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n
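        # gennodelst yields only the nodes whose linkrev (the changelog
        # revision that introduced them) is in the outgoing revset; per the
        # docstring, everything else can be assumed present on the recipient.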

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
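        # A worked example of the encoding above: going from 1 head to 3
        # returns 3 - 1 + 1 = 3, going from 3 heads to 1 returns
        # 1 - 3 - 1 = -3, and an unchanged head count returns 1, so a
        # successful call never yields 0.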
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
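        # (cl.finalize(trp) below makes the delayed changelog data visible
        # once the manifests and file revlogs have been added)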
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchcache = None
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
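        # The remainder of the stream, as parsed below, is one
        # "total_files total_bytes" line, then per file a "name\0size"
        # line followed by exactly size bytes of raw revlog data.
        # A hypothetical two-entry stream could look like:
        #   2 8192\n
        #   data/a.i\x004096\n<4096 bytes>data/b.i\x004096\n<4096 bytes>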
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)
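    # Hypothetical caller sketch (not part of this file): a full clone that
    # prefers streaming would call
    #   repo.clone(remote, heads=[], stream=True)
    # and silently fall back to pull() if the server does not advertise
    # the 'stream' capability.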

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
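# Note that a() closes over renamefiles, a plain list of (src, dest)
# tuples copied from 'files', rather than over the transaction object
# itself; that is what keeps the reference cycle from forming.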

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
@@ -1,21 +1,28 @@
 #!/bin/sh
 
 mkdir t
 cd t
 hg init
 echo a > a
 hg add a
 hg commit -m "test" -d "1000000 0"
 hg verify
 hg parents
 hg status
 hg rollback
 hg verify
 hg parents
 hg status
 
-# Test issue 902
+echo % Test issue 902
 hg commit -m "test"
 hg branch test
 hg rollback
 hg branch
+
+echo % Test rollback of hg before issue 902 was fixed
+hg commit -m "test"
+hg branch test
+rm .hg/undo.branch
+hg rollback
+hg branch
@@ -1,21 +1,27 @@
 checking changesets
 checking manifests
 crosschecking files in changesets and manifests
 checking files
 1 files, 1 changesets, 1 total revisions
 changeset:   0:0acdaf898367
 tag:         tip
 user:        test
 date:        Mon Jan 12 13:46:40 1970 +0000
 summary:     test
 
 rolling back last transaction
 checking changesets
 checking manifests
 crosschecking files in changesets and manifests
 checking files
 0 files, 0 changesets, 0 total revisions
 A a
+% Test issue 902
 marked working directory as branch test
 rolling back last transaction
 default
+% Test rollback of hg before issue 902 was fixed
+marked working directory as branch test
+rolling back last transaction
+Named branch could not be reset, current branch still is: test
+test