##// END OF EJS Templates
restore branch after rollback (issue 902)
Alexandre Vassalotti -
r5814:dd5a501c default
parent child Browse files
Show More
@@ -1,2026 +1,2030 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import re, lock, transaction, tempfile, stat, errno, ui
12 import re, lock, transaction, tempfile, stat, errno, ui
13 import os, revlog, time, util, extensions, hook
13 import os, revlog, time, util, extensions, hook
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = util.set(('lookup', 'changegroupsubset'))
16 capabilities = util.set(('lookup', 'changegroupsubset'))
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __init__(self, parentui, path=None, create=0):
19 def __init__(self, parentui, path=None, create=0):
20 repo.repository.__init__(self)
20 repo.repository.__init__(self)
21 self.root = os.path.realpath(path)
21 self.root = os.path.realpath(path)
22 self.path = os.path.join(self.root, ".hg")
22 self.path = os.path.join(self.root, ".hg")
23 self.origroot = path
23 self.origroot = path
24 self.opener = util.opener(self.path)
24 self.opener = util.opener(self.path)
25 self.wopener = util.opener(self.root)
25 self.wopener = util.opener(self.root)
26
26
27 if not os.path.isdir(self.path):
27 if not os.path.isdir(self.path):
28 if create:
28 if create:
29 if not os.path.exists(path):
29 if not os.path.exists(path):
30 os.mkdir(path)
30 os.mkdir(path)
31 os.mkdir(self.path)
31 os.mkdir(self.path)
32 requirements = ["revlogv1"]
32 requirements = ["revlogv1"]
33 if parentui.configbool('format', 'usestore', True):
33 if parentui.configbool('format', 'usestore', True):
34 os.mkdir(os.path.join(self.path, "store"))
34 os.mkdir(os.path.join(self.path, "store"))
35 requirements.append("store")
35 requirements.append("store")
36 # create an invalid changelog
36 # create an invalid changelog
37 self.opener("00changelog.i", "a").write(
37 self.opener("00changelog.i", "a").write(
38 '\0\0\0\2' # represents revlogv2
38 '\0\0\0\2' # represents revlogv2
39 ' dummy changelog to prevent using the old repo layout'
39 ' dummy changelog to prevent using the old repo layout'
40 )
40 )
41 reqfile = self.opener("requires", "w")
41 reqfile = self.opener("requires", "w")
42 for r in requirements:
42 for r in requirements:
43 reqfile.write("%s\n" % r)
43 reqfile.write("%s\n" % r)
44 reqfile.close()
44 reqfile.close()
45 else:
45 else:
46 raise repo.RepoError(_("repository %s not found") % path)
46 raise repo.RepoError(_("repository %s not found") % path)
47 elif create:
47 elif create:
48 raise repo.RepoError(_("repository %s already exists") % path)
48 raise repo.RepoError(_("repository %s already exists") % path)
49 else:
49 else:
50 # find requirements
50 # find requirements
51 try:
51 try:
52 requirements = self.opener("requires").read().splitlines()
52 requirements = self.opener("requires").read().splitlines()
53 except IOError, inst:
53 except IOError, inst:
54 if inst.errno != errno.ENOENT:
54 if inst.errno != errno.ENOENT:
55 raise
55 raise
56 requirements = []
56 requirements = []
57 # check them
57 # check them
58 for r in requirements:
58 for r in requirements:
59 if r not in self.supported:
59 if r not in self.supported:
60 raise repo.RepoError(_("requirement '%s' not supported") % r)
60 raise repo.RepoError(_("requirement '%s' not supported") % r)
61
61
62 # setup store
62 # setup store
63 if "store" in requirements:
63 if "store" in requirements:
64 self.encodefn = util.encodefilename
64 self.encodefn = util.encodefilename
65 self.decodefn = util.decodefilename
65 self.decodefn = util.decodefilename
66 self.spath = os.path.join(self.path, "store")
66 self.spath = os.path.join(self.path, "store")
67 else:
67 else:
68 self.encodefn = lambda x: x
68 self.encodefn = lambda x: x
69 self.decodefn = lambda x: x
69 self.decodefn = lambda x: x
70 self.spath = self.path
70 self.spath = self.path
71 self.sopener = util.encodedopener(util.opener(self.spath),
71 self.sopener = util.encodedopener(util.opener(self.spath),
72 self.encodefn)
72 self.encodefn)
73
73
74 self.ui = ui.ui(parentui=parentui)
74 self.ui = ui.ui(parentui=parentui)
75 try:
75 try:
76 self.ui.readconfig(self.join("hgrc"), self.root)
76 self.ui.readconfig(self.join("hgrc"), self.root)
77 extensions.loadall(self.ui)
77 extensions.loadall(self.ui)
78 except IOError:
78 except IOError:
79 pass
79 pass
80
80
81 self.tagscache = None
81 self.tagscache = None
82 self._tagstypecache = None
82 self._tagstypecache = None
83 self.branchcache = None
83 self.branchcache = None
84 self.nodetagscache = None
84 self.nodetagscache = None
85 self.filterpats = {}
85 self.filterpats = {}
86 self._transref = self._lockref = self._wlockref = None
86 self._transref = self._lockref = self._wlockref = None
87
87
88 def __getattr__(self, name):
88 def __getattr__(self, name):
89 if name == 'changelog':
89 if name == 'changelog':
90 self.changelog = changelog.changelog(self.sopener)
90 self.changelog = changelog.changelog(self.sopener)
91 self.sopener.defversion = self.changelog.version
91 self.sopener.defversion = self.changelog.version
92 return self.changelog
92 return self.changelog
93 if name == 'manifest':
93 if name == 'manifest':
94 self.changelog
94 self.changelog
95 self.manifest = manifest.manifest(self.sopener)
95 self.manifest = manifest.manifest(self.sopener)
96 return self.manifest
96 return self.manifest
97 if name == 'dirstate':
97 if name == 'dirstate':
98 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
98 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
99 return self.dirstate
99 return self.dirstate
100 else:
100 else:
101 raise AttributeError, name
101 raise AttributeError, name
102
102
103 def url(self):
103 def url(self):
104 return 'file:' + self.root
104 return 'file:' + self.root
105
105
106 def hook(self, name, throw=False, **args):
106 def hook(self, name, throw=False, **args):
107 return hook.hook(self.ui, self, name, throw, **args)
107 return hook.hook(self.ui, self, name, throw, **args)
108
108
109 tag_disallowed = ':\r\n'
109 tag_disallowed = ':\r\n'
110
110
111 def _tag(self, name, node, message, local, user, date, parent=None,
111 def _tag(self, name, node, message, local, user, date, parent=None,
112 extra={}):
112 extra={}):
113 use_dirstate = parent is None
113 use_dirstate = parent is None
114
114
115 for c in self.tag_disallowed:
115 for c in self.tag_disallowed:
116 if c in name:
116 if c in name:
117 raise util.Abort(_('%r cannot be used in a tag name') % c)
117 raise util.Abort(_('%r cannot be used in a tag name') % c)
118
118
119 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
119 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
120
120
121 def writetag(fp, name, munge, prevtags):
121 def writetag(fp, name, munge, prevtags):
122 if prevtags and prevtags[-1] != '\n':
122 if prevtags and prevtags[-1] != '\n':
123 fp.write('\n')
123 fp.write('\n')
124 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
124 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
125 fp.close()
125 fp.close()
126
126
127 prevtags = ''
127 prevtags = ''
128 if local:
128 if local:
129 try:
129 try:
130 fp = self.opener('localtags', 'r+')
130 fp = self.opener('localtags', 'r+')
131 except IOError, err:
131 except IOError, err:
132 fp = self.opener('localtags', 'a')
132 fp = self.opener('localtags', 'a')
133 else:
133 else:
134 prevtags = fp.read()
134 prevtags = fp.read()
135
135
136 # local tags are stored in the current charset
136 # local tags are stored in the current charset
137 writetag(fp, name, None, prevtags)
137 writetag(fp, name, None, prevtags)
138 self.hook('tag', node=hex(node), tag=name, local=local)
138 self.hook('tag', node=hex(node), tag=name, local=local)
139 return
139 return
140
140
141 if use_dirstate:
141 if use_dirstate:
142 try:
142 try:
143 fp = self.wfile('.hgtags', 'rb+')
143 fp = self.wfile('.hgtags', 'rb+')
144 except IOError, err:
144 except IOError, err:
145 fp = self.wfile('.hgtags', 'ab')
145 fp = self.wfile('.hgtags', 'ab')
146 else:
146 else:
147 prevtags = fp.read()
147 prevtags = fp.read()
148 else:
148 else:
149 try:
149 try:
150 prevtags = self.filectx('.hgtags', parent).data()
150 prevtags = self.filectx('.hgtags', parent).data()
151 except revlog.LookupError:
151 except revlog.LookupError:
152 pass
152 pass
153 fp = self.wfile('.hgtags', 'wb')
153 fp = self.wfile('.hgtags', 'wb')
154 if prevtags:
154 if prevtags:
155 fp.write(prevtags)
155 fp.write(prevtags)
156
156
157 # committed tags are stored in UTF-8
157 # committed tags are stored in UTF-8
158 writetag(fp, name, util.fromlocal, prevtags)
158 writetag(fp, name, util.fromlocal, prevtags)
159
159
160 if use_dirstate and '.hgtags' not in self.dirstate:
160 if use_dirstate and '.hgtags' not in self.dirstate:
161 self.add(['.hgtags'])
161 self.add(['.hgtags'])
162
162
163 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
163 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
164 extra=extra)
164 extra=extra)
165
165
166 self.hook('tag', node=hex(node), tag=name, local=local)
166 self.hook('tag', node=hex(node), tag=name, local=local)
167
167
168 return tagnode
168 return tagnode
169
169
170 def tag(self, name, node, message, local, user, date):
170 def tag(self, name, node, message, local, user, date):
171 '''tag a revision with a symbolic name.
171 '''tag a revision with a symbolic name.
172
172
173 if local is True, the tag is stored in a per-repository file.
173 if local is True, the tag is stored in a per-repository file.
174 otherwise, it is stored in the .hgtags file, and a new
174 otherwise, it is stored in the .hgtags file, and a new
175 changeset is committed with the change.
175 changeset is committed with the change.
176
176
177 keyword arguments:
177 keyword arguments:
178
178
179 local: whether to store tag in non-version-controlled file
179 local: whether to store tag in non-version-controlled file
180 (default False)
180 (default False)
181
181
182 message: commit message to use if committing
182 message: commit message to use if committing
183
183
184 user: name of user to use if committing
184 user: name of user to use if committing
185
185
186 date: date tuple to use if committing'''
186 date: date tuple to use if committing'''
187
187
188 for x in self.status()[:5]:
188 for x in self.status()[:5]:
189 if '.hgtags' in x:
189 if '.hgtags' in x:
190 raise util.Abort(_('working copy of .hgtags is changed '
190 raise util.Abort(_('working copy of .hgtags is changed '
191 '(please commit .hgtags manually)'))
191 '(please commit .hgtags manually)'))
192
192
193
193
194 self._tag(name, node, message, local, user, date)
194 self._tag(name, node, message, local, user, date)
195
195
196 def tags(self):
196 def tags(self):
197 '''return a mapping of tag to node'''
197 '''return a mapping of tag to node'''
198 if self.tagscache:
198 if self.tagscache:
199 return self.tagscache
199 return self.tagscache
200
200
201 globaltags = {}
201 globaltags = {}
202 tagtypes = {}
202 tagtypes = {}
203
203
204 def readtags(lines, fn, tagtype):
204 def readtags(lines, fn, tagtype):
205 filetags = {}
205 filetags = {}
206 count = 0
206 count = 0
207
207
208 def warn(msg):
208 def warn(msg):
209 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
209 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
210
210
211 for l in lines:
211 for l in lines:
212 count += 1
212 count += 1
213 if not l:
213 if not l:
214 continue
214 continue
215 s = l.split(" ", 1)
215 s = l.split(" ", 1)
216 if len(s) != 2:
216 if len(s) != 2:
217 warn(_("cannot parse entry"))
217 warn(_("cannot parse entry"))
218 continue
218 continue
219 node, key = s
219 node, key = s
220 key = util.tolocal(key.strip()) # stored in UTF-8
220 key = util.tolocal(key.strip()) # stored in UTF-8
221 try:
221 try:
222 bin_n = bin(node)
222 bin_n = bin(node)
223 except TypeError:
223 except TypeError:
224 warn(_("node '%s' is not well formed") % node)
224 warn(_("node '%s' is not well formed") % node)
225 continue
225 continue
226 if bin_n not in self.changelog.nodemap:
226 if bin_n not in self.changelog.nodemap:
227 warn(_("tag '%s' refers to unknown node") % key)
227 warn(_("tag '%s' refers to unknown node") % key)
228 continue
228 continue
229
229
230 h = []
230 h = []
231 if key in filetags:
231 if key in filetags:
232 n, h = filetags[key]
232 n, h = filetags[key]
233 h.append(n)
233 h.append(n)
234 filetags[key] = (bin_n, h)
234 filetags[key] = (bin_n, h)
235
235
236 for k, nh in filetags.items():
236 for k, nh in filetags.items():
237 if k not in globaltags:
237 if k not in globaltags:
238 globaltags[k] = nh
238 globaltags[k] = nh
239 tagtypes[k] = tagtype
239 tagtypes[k] = tagtype
240 continue
240 continue
241
241
242 # we prefer the global tag if:
242 # we prefer the global tag if:
243 # it supercedes us OR
243 # it supercedes us OR
244 # mutual supercedes and it has a higher rank
244 # mutual supercedes and it has a higher rank
245 # otherwise we win because we're tip-most
245 # otherwise we win because we're tip-most
246 an, ah = nh
246 an, ah = nh
247 bn, bh = globaltags[k]
247 bn, bh = globaltags[k]
248 if (bn != an and an in bh and
248 if (bn != an and an in bh and
249 (bn not in ah or len(bh) > len(ah))):
249 (bn not in ah or len(bh) > len(ah))):
250 an = bn
250 an = bn
251 ah.extend([n for n in bh if n not in ah])
251 ah.extend([n for n in bh if n not in ah])
252 globaltags[k] = an, ah
252 globaltags[k] = an, ah
253 tagtypes[k] = tagtype
253 tagtypes[k] = tagtype
254
254
255 # read the tags file from each head, ending with the tip
255 # read the tags file from each head, ending with the tip
256 f = None
256 f = None
257 for rev, node, fnode in self._hgtagsnodes():
257 for rev, node, fnode in self._hgtagsnodes():
258 f = (f and f.filectx(fnode) or
258 f = (f and f.filectx(fnode) or
259 self.filectx('.hgtags', fileid=fnode))
259 self.filectx('.hgtags', fileid=fnode))
260 readtags(f.data().splitlines(), f, "global")
260 readtags(f.data().splitlines(), f, "global")
261
261
262 try:
262 try:
263 data = util.fromlocal(self.opener("localtags").read())
263 data = util.fromlocal(self.opener("localtags").read())
264 # localtags are stored in the local character set
264 # localtags are stored in the local character set
265 # while the internal tag table is stored in UTF-8
265 # while the internal tag table is stored in UTF-8
266 readtags(data.splitlines(), "localtags", "local")
266 readtags(data.splitlines(), "localtags", "local")
267 except IOError:
267 except IOError:
268 pass
268 pass
269
269
270 self.tagscache = {}
270 self.tagscache = {}
271 self._tagstypecache = {}
271 self._tagstypecache = {}
272 for k,nh in globaltags.items():
272 for k,nh in globaltags.items():
273 n = nh[0]
273 n = nh[0]
274 if n != nullid:
274 if n != nullid:
275 self.tagscache[k] = n
275 self.tagscache[k] = n
276 self._tagstypecache[k] = tagtypes[k]
276 self._tagstypecache[k] = tagtypes[k]
277 self.tagscache['tip'] = self.changelog.tip()
277 self.tagscache['tip'] = self.changelog.tip()
278
278
279 return self.tagscache
279 return self.tagscache
280
280
281 def tagtype(self, tagname):
281 def tagtype(self, tagname):
282 '''
282 '''
283 return the type of the given tag. result can be:
283 return the type of the given tag. result can be:
284
284
285 'local' : a local tag
285 'local' : a local tag
286 'global' : a global tag
286 'global' : a global tag
287 None : tag does not exist
287 None : tag does not exist
288 '''
288 '''
289
289
290 self.tags()
290 self.tags()
291
291
292 return self._tagstypecache.get(tagname)
292 return self._tagstypecache.get(tagname)
293
293
294 def _hgtagsnodes(self):
294 def _hgtagsnodes(self):
295 heads = self.heads()
295 heads = self.heads()
296 heads.reverse()
296 heads.reverse()
297 last = {}
297 last = {}
298 ret = []
298 ret = []
299 for node in heads:
299 for node in heads:
300 c = self.changectx(node)
300 c = self.changectx(node)
301 rev = c.rev()
301 rev = c.rev()
302 try:
302 try:
303 fnode = c.filenode('.hgtags')
303 fnode = c.filenode('.hgtags')
304 except revlog.LookupError:
304 except revlog.LookupError:
305 continue
305 continue
306 ret.append((rev, node, fnode))
306 ret.append((rev, node, fnode))
307 if fnode in last:
307 if fnode in last:
308 ret[last[fnode]] = None
308 ret[last[fnode]] = None
309 last[fnode] = len(ret) - 1
309 last[fnode] = len(ret) - 1
310 return [item for item in ret if item]
310 return [item for item in ret if item]
311
311
312 def tagslist(self):
312 def tagslist(self):
313 '''return a list of tags ordered by revision'''
313 '''return a list of tags ordered by revision'''
314 l = []
314 l = []
315 for t, n in self.tags().items():
315 for t, n in self.tags().items():
316 try:
316 try:
317 r = self.changelog.rev(n)
317 r = self.changelog.rev(n)
318 except:
318 except:
319 r = -2 # sort to the beginning of the list if unknown
319 r = -2 # sort to the beginning of the list if unknown
320 l.append((r, t, n))
320 l.append((r, t, n))
321 l.sort()
321 l.sort()
322 return [(t, n) for r, t, n in l]
322 return [(t, n) for r, t, n in l]
323
323
324 def nodetags(self, node):
324 def nodetags(self, node):
325 '''return the tags associated with a node'''
325 '''return the tags associated with a node'''
326 if not self.nodetagscache:
326 if not self.nodetagscache:
327 self.nodetagscache = {}
327 self.nodetagscache = {}
328 for t, n in self.tags().items():
328 for t, n in self.tags().items():
329 self.nodetagscache.setdefault(n, []).append(t)
329 self.nodetagscache.setdefault(n, []).append(t)
330 return self.nodetagscache.get(node, [])
330 return self.nodetagscache.get(node, [])
331
331
332 def _branchtags(self):
332 def _branchtags(self):
333 partial, last, lrev = self._readbranchcache()
333 partial, last, lrev = self._readbranchcache()
334
334
335 tiprev = self.changelog.count() - 1
335 tiprev = self.changelog.count() - 1
336 if lrev != tiprev:
336 if lrev != tiprev:
337 self._updatebranchcache(partial, lrev+1, tiprev+1)
337 self._updatebranchcache(partial, lrev+1, tiprev+1)
338 self._writebranchcache(partial, self.changelog.tip(), tiprev)
338 self._writebranchcache(partial, self.changelog.tip(), tiprev)
339
339
340 return partial
340 return partial
341
341
342 def branchtags(self):
342 def branchtags(self):
343 if self.branchcache is not None:
343 if self.branchcache is not None:
344 return self.branchcache
344 return self.branchcache
345
345
346 self.branchcache = {} # avoid recursion in changectx
346 self.branchcache = {} # avoid recursion in changectx
347 partial = self._branchtags()
347 partial = self._branchtags()
348
348
349 # the branch cache is stored on disk as UTF-8, but in the local
349 # the branch cache is stored on disk as UTF-8, but in the local
350 # charset internally
350 # charset internally
351 for k, v in partial.items():
351 for k, v in partial.items():
352 self.branchcache[util.tolocal(k)] = v
352 self.branchcache[util.tolocal(k)] = v
353 return self.branchcache
353 return self.branchcache
354
354
355 def _readbranchcache(self):
355 def _readbranchcache(self):
356 partial = {}
356 partial = {}
357 try:
357 try:
358 f = self.opener("branch.cache")
358 f = self.opener("branch.cache")
359 lines = f.read().split('\n')
359 lines = f.read().split('\n')
360 f.close()
360 f.close()
361 except (IOError, OSError):
361 except (IOError, OSError):
362 return {}, nullid, nullrev
362 return {}, nullid, nullrev
363
363
364 try:
364 try:
365 last, lrev = lines.pop(0).split(" ", 1)
365 last, lrev = lines.pop(0).split(" ", 1)
366 last, lrev = bin(last), int(lrev)
366 last, lrev = bin(last), int(lrev)
367 if not (lrev < self.changelog.count() and
367 if not (lrev < self.changelog.count() and
368 self.changelog.node(lrev) == last): # sanity check
368 self.changelog.node(lrev) == last): # sanity check
369 # invalidate the cache
369 # invalidate the cache
370 raise ValueError('Invalid branch cache: unknown tip')
370 raise ValueError('Invalid branch cache: unknown tip')
371 for l in lines:
371 for l in lines:
372 if not l: continue
372 if not l: continue
373 node, label = l.split(" ", 1)
373 node, label = l.split(" ", 1)
374 partial[label.strip()] = bin(node)
374 partial[label.strip()] = bin(node)
375 except (KeyboardInterrupt, util.SignalInterrupt):
375 except (KeyboardInterrupt, util.SignalInterrupt):
376 raise
376 raise
377 except Exception, inst:
377 except Exception, inst:
378 if self.ui.debugflag:
378 if self.ui.debugflag:
379 self.ui.warn(str(inst), '\n')
379 self.ui.warn(str(inst), '\n')
380 partial, last, lrev = {}, nullid, nullrev
380 partial, last, lrev = {}, nullid, nullrev
381 return partial, last, lrev
381 return partial, last, lrev
382
382
383 def _writebranchcache(self, branches, tip, tiprev):
383 def _writebranchcache(self, branches, tip, tiprev):
384 try:
384 try:
385 f = self.opener("branch.cache", "w", atomictemp=True)
385 f = self.opener("branch.cache", "w", atomictemp=True)
386 f.write("%s %s\n" % (hex(tip), tiprev))
386 f.write("%s %s\n" % (hex(tip), tiprev))
387 for label, node in branches.iteritems():
387 for label, node in branches.iteritems():
388 f.write("%s %s\n" % (hex(node), label))
388 f.write("%s %s\n" % (hex(node), label))
389 f.rename()
389 f.rename()
390 except (IOError, OSError):
390 except (IOError, OSError):
391 pass
391 pass
392
392
393 def _updatebranchcache(self, partial, start, end):
393 def _updatebranchcache(self, partial, start, end):
394 for r in xrange(start, end):
394 for r in xrange(start, end):
395 c = self.changectx(r)
395 c = self.changectx(r)
396 b = c.branch()
396 b = c.branch()
397 partial[b] = c.node()
397 partial[b] = c.node()
398
398
399 def lookup(self, key):
399 def lookup(self, key):
400 if key == '.':
400 if key == '.':
401 key, second = self.dirstate.parents()
401 key, second = self.dirstate.parents()
402 if key == nullid:
402 if key == nullid:
403 raise repo.RepoError(_("no revision checked out"))
403 raise repo.RepoError(_("no revision checked out"))
404 if second != nullid:
404 if second != nullid:
405 self.ui.warn(_("warning: working directory has two parents, "
405 self.ui.warn(_("warning: working directory has two parents, "
406 "tag '.' uses the first\n"))
406 "tag '.' uses the first\n"))
407 elif key == 'null':
407 elif key == 'null':
408 return nullid
408 return nullid
409 n = self.changelog._match(key)
409 n = self.changelog._match(key)
410 if n:
410 if n:
411 return n
411 return n
412 if key in self.tags():
412 if key in self.tags():
413 return self.tags()[key]
413 return self.tags()[key]
414 if key in self.branchtags():
414 if key in self.branchtags():
415 return self.branchtags()[key]
415 return self.branchtags()[key]
416 n = self.changelog._partialmatch(key)
416 n = self.changelog._partialmatch(key)
417 if n:
417 if n:
418 return n
418 return n
419 try:
419 try:
420 if len(key) == 20:
420 if len(key) == 20:
421 key = hex(key)
421 key = hex(key)
422 except:
422 except:
423 pass
423 pass
424 raise repo.RepoError(_("unknown revision '%s'") % key)
424 raise repo.RepoError(_("unknown revision '%s'") % key)
425
425
426 def dev(self):
426 def dev(self):
427 return os.lstat(self.path).st_dev
427 return os.lstat(self.path).st_dev
428
428
429 def local(self):
429 def local(self):
430 return True
430 return True
431
431
432 def join(self, f):
432 def join(self, f):
433 return os.path.join(self.path, f)
433 return os.path.join(self.path, f)
434
434
435 def sjoin(self, f):
435 def sjoin(self, f):
436 f = self.encodefn(f)
436 f = self.encodefn(f)
437 return os.path.join(self.spath, f)
437 return os.path.join(self.spath, f)
438
438
439 def wjoin(self, f):
439 def wjoin(self, f):
440 return os.path.join(self.root, f)
440 return os.path.join(self.root, f)
441
441
442 def file(self, f):
442 def file(self, f):
443 if f[0] == '/':
443 if f[0] == '/':
444 f = f[1:]
444 f = f[1:]
445 return filelog.filelog(self.sopener, f)
445 return filelog.filelog(self.sopener, f)
446
446
447 def changectx(self, changeid=None):
447 def changectx(self, changeid=None):
448 return context.changectx(self, changeid)
448 return context.changectx(self, changeid)
449
449
450 def workingctx(self):
450 def workingctx(self):
451 return context.workingctx(self)
451 return context.workingctx(self)
452
452
453 def parents(self, changeid=None):
453 def parents(self, changeid=None):
454 '''
454 '''
455 get list of changectxs for parents of changeid or working directory
455 get list of changectxs for parents of changeid or working directory
456 '''
456 '''
457 if changeid is None:
457 if changeid is None:
458 pl = self.dirstate.parents()
458 pl = self.dirstate.parents()
459 else:
459 else:
460 n = self.changelog.lookup(changeid)
460 n = self.changelog.lookup(changeid)
461 pl = self.changelog.parents(n)
461 pl = self.changelog.parents(n)
462 if pl[1] == nullid:
462 if pl[1] == nullid:
463 return [self.changectx(pl[0])]
463 return [self.changectx(pl[0])]
464 return [self.changectx(pl[0]), self.changectx(pl[1])]
464 return [self.changectx(pl[0]), self.changectx(pl[1])]
465
465
466 def filectx(self, path, changeid=None, fileid=None):
466 def filectx(self, path, changeid=None, fileid=None):
467 """changeid can be a changeset revision, node, or tag.
467 """changeid can be a changeset revision, node, or tag.
468 fileid can be a file revision or node."""
468 fileid can be a file revision or node."""
469 return context.filectx(self, path, changeid, fileid)
469 return context.filectx(self, path, changeid, fileid)
470
470
471 def getcwd(self):
471 def getcwd(self):
472 return self.dirstate.getcwd()
472 return self.dirstate.getcwd()
473
473
474 def pathto(self, f, cwd=None):
474 def pathto(self, f, cwd=None):
475 return self.dirstate.pathto(f, cwd)
475 return self.dirstate.pathto(f, cwd)
476
476
477 def wfile(self, f, mode='r'):
477 def wfile(self, f, mode='r'):
478 return self.wopener(f, mode)
478 return self.wopener(f, mode)
479
479
480 def _link(self, f):
480 def _link(self, f):
481 return os.path.islink(self.wjoin(f))
481 return os.path.islink(self.wjoin(f))
482
482
483 def _filter(self, filter, filename, data):
483 def _filter(self, filter, filename, data):
484 if filter not in self.filterpats:
484 if filter not in self.filterpats:
485 l = []
485 l = []
486 for pat, cmd in self.ui.configitems(filter):
486 for pat, cmd in self.ui.configitems(filter):
487 mf = util.matcher(self.root, "", [pat], [], [])[1]
487 mf = util.matcher(self.root, "", [pat], [], [])[1]
488 l.append((mf, cmd))
488 l.append((mf, cmd))
489 self.filterpats[filter] = l
489 self.filterpats[filter] = l
490
490
491 for mf, cmd in self.filterpats[filter]:
491 for mf, cmd in self.filterpats[filter]:
492 if mf(filename):
492 if mf(filename):
493 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
493 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
494 data = util.filter(data, cmd)
494 data = util.filter(data, cmd)
495 break
495 break
496
496
497 return data
497 return data
498
498
499 def wread(self, filename):
499 def wread(self, filename):
500 if self._link(filename):
500 if self._link(filename):
501 data = os.readlink(self.wjoin(filename))
501 data = os.readlink(self.wjoin(filename))
502 else:
502 else:
503 data = self.wopener(filename, 'r').read()
503 data = self.wopener(filename, 'r').read()
504 return self._filter("encode", filename, data)
504 return self._filter("encode", filename, data)
505
505
506 def wwrite(self, filename, data, flags):
506 def wwrite(self, filename, data, flags):
507 data = self._filter("decode", filename, data)
507 data = self._filter("decode", filename, data)
508 try:
508 try:
509 os.unlink(self.wjoin(filename))
509 os.unlink(self.wjoin(filename))
510 except OSError:
510 except OSError:
511 pass
511 pass
512 self.wopener(filename, 'w').write(data)
512 self.wopener(filename, 'w').write(data)
513 util.set_flags(self.wjoin(filename), flags)
513 util.set_flags(self.wjoin(filename), flags)
514
514
515 def wwritedata(self, filename, data):
515 def wwritedata(self, filename, data):
516 return self._filter("decode", filename, data)
516 return self._filter("decode", filename, data)
517
517
518 def transaction(self):
518 def transaction(self):
519 if self._transref and self._transref():
519 if self._transref and self._transref():
520 return self._transref().nest()
520 return self._transref().nest()
521
521
522 # save dirstate for rollback
522 # save dirstate for rollback
523 try:
523 try:
524 ds = self.opener("dirstate").read()
524 ds = self.opener("dirstate").read()
525 except IOError:
525 except IOError:
526 ds = ""
526 ds = ""
527 self.opener("journal.dirstate", "w").write(ds)
527 self.opener("journal.dirstate", "w").write(ds)
528 self.opener("journal.branch", "w").write(self.dirstate.branch())
528
529
529 renames = [(self.sjoin("journal"), self.sjoin("undo")),
530 renames = [(self.sjoin("journal"), self.sjoin("undo")),
530 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
531 (self.join("journal.dirstate"), self.join("undo.dirstate")),
532 (self.join("journal.branch"), self.join("undo.branch"))]
531 tr = transaction.transaction(self.ui.warn, self.sopener,
533 tr = transaction.transaction(self.ui.warn, self.sopener,
532 self.sjoin("journal"),
534 self.sjoin("journal"),
533 aftertrans(renames))
535 aftertrans(renames))
534 self._transref = weakref.ref(tr)
536 self._transref = weakref.ref(tr)
535 return tr
537 return tr
536
538
537 def recover(self):
539 def recover(self):
538 l = self.lock()
540 l = self.lock()
539 try:
541 try:
540 if os.path.exists(self.sjoin("journal")):
542 if os.path.exists(self.sjoin("journal")):
541 self.ui.status(_("rolling back interrupted transaction\n"))
543 self.ui.status(_("rolling back interrupted transaction\n"))
542 transaction.rollback(self.sopener, self.sjoin("journal"))
544 transaction.rollback(self.sopener, self.sjoin("journal"))
543 self.invalidate()
545 self.invalidate()
544 return True
546 return True
545 else:
547 else:
546 self.ui.warn(_("no interrupted transaction available\n"))
548 self.ui.warn(_("no interrupted transaction available\n"))
547 return False
549 return False
548 finally:
550 finally:
549 del l
551 del l
550
552
551 def rollback(self):
553 def rollback(self):
552 wlock = lock = None
554 wlock = lock = None
553 try:
555 try:
554 wlock = self.wlock()
556 wlock = self.wlock()
555 lock = self.lock()
557 lock = self.lock()
556 if os.path.exists(self.sjoin("undo")):
558 if os.path.exists(self.sjoin("undo")):
557 self.ui.status(_("rolling back last transaction\n"))
559 self.ui.status(_("rolling back last transaction\n"))
558 transaction.rollback(self.sopener, self.sjoin("undo"))
560 transaction.rollback(self.sopener, self.sjoin("undo"))
559 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
561 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
562 branch = self.opener("undo.branch").read()
563 self.dirstate.setbranch(branch)
560 self.invalidate()
564 self.invalidate()
561 self.dirstate.invalidate()
565 self.dirstate.invalidate()
562 else:
566 else:
563 self.ui.warn(_("no rollback information available\n"))
567 self.ui.warn(_("no rollback information available\n"))
564 finally:
568 finally:
565 del lock, wlock
569 del lock, wlock
566
570
567 def invalidate(self):
571 def invalidate(self):
568 for a in "changelog manifest".split():
572 for a in "changelog manifest".split():
569 if hasattr(self, a):
573 if hasattr(self, a):
570 self.__delattr__(a)
574 self.__delattr__(a)
571 self.tagscache = None
575 self.tagscache = None
572 self._tagstypecache = None
576 self._tagstypecache = None
573 self.nodetagscache = None
577 self.nodetagscache = None
574
578
575 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
579 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
576 try:
580 try:
577 l = lock.lock(lockname, 0, releasefn, desc=desc)
581 l = lock.lock(lockname, 0, releasefn, desc=desc)
578 except lock.LockHeld, inst:
582 except lock.LockHeld, inst:
579 if not wait:
583 if not wait:
580 raise
584 raise
581 self.ui.warn(_("waiting for lock on %s held by %r\n") %
585 self.ui.warn(_("waiting for lock on %s held by %r\n") %
582 (desc, inst.locker))
586 (desc, inst.locker))
583 # default to 600 seconds timeout
587 # default to 600 seconds timeout
584 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
588 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
585 releasefn, desc=desc)
589 releasefn, desc=desc)
586 if acquirefn:
590 if acquirefn:
587 acquirefn()
591 acquirefn()
588 return l
592 return l
589
593
590 def lock(self, wait=True):
594 def lock(self, wait=True):
591 if self._lockref and self._lockref():
595 if self._lockref and self._lockref():
592 return self._lockref()
596 return self._lockref()
593
597
594 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
598 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
595 _('repository %s') % self.origroot)
599 _('repository %s') % self.origroot)
596 self._lockref = weakref.ref(l)
600 self._lockref = weakref.ref(l)
597 return l
601 return l
598
602
599 def wlock(self, wait=True):
603 def wlock(self, wait=True):
600 if self._wlockref and self._wlockref():
604 if self._wlockref and self._wlockref():
601 return self._wlockref()
605 return self._wlockref()
602
606
603 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
607 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
604 self.dirstate.invalidate, _('working directory of %s') %
608 self.dirstate.invalidate, _('working directory of %s') %
605 self.origroot)
609 self.origroot)
606 self._wlockref = weakref.ref(l)
610 self._wlockref = weakref.ref(l)
607 return l
611 return l
608
612
609 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
613 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
610 """
614 """
611 commit an individual file as part of a larger transaction
615 commit an individual file as part of a larger transaction
612 """
616 """
613
617
614 t = self.wread(fn)
618 t = self.wread(fn)
615 fl = self.file(fn)
619 fl = self.file(fn)
616 fp1 = manifest1.get(fn, nullid)
620 fp1 = manifest1.get(fn, nullid)
617 fp2 = manifest2.get(fn, nullid)
621 fp2 = manifest2.get(fn, nullid)
618
622
619 meta = {}
623 meta = {}
620 cp = self.dirstate.copied(fn)
624 cp = self.dirstate.copied(fn)
621 if cp:
625 if cp:
622 # Mark the new revision of this file as a copy of another
626 # Mark the new revision of this file as a copy of another
623 # file. This copy data will effectively act as a parent
627 # file. This copy data will effectively act as a parent
624 # of this new revision. If this is a merge, the first
628 # of this new revision. If this is a merge, the first
625 # parent will be the nullid (meaning "look up the copy data")
629 # parent will be the nullid (meaning "look up the copy data")
626 # and the second one will be the other parent. For example:
630 # and the second one will be the other parent. For example:
627 #
631 #
628 # 0 --- 1 --- 3 rev1 changes file foo
632 # 0 --- 1 --- 3 rev1 changes file foo
629 # \ / rev2 renames foo to bar and changes it
633 # \ / rev2 renames foo to bar and changes it
630 # \- 2 -/ rev3 should have bar with all changes and
634 # \- 2 -/ rev3 should have bar with all changes and
631 # should record that bar descends from
635 # should record that bar descends from
632 # bar in rev2 and foo in rev1
636 # bar in rev2 and foo in rev1
633 #
637 #
634 # this allows this merge to succeed:
638 # this allows this merge to succeed:
635 #
639 #
636 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
640 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
637 # \ / merging rev3 and rev4 should use bar@rev2
641 # \ / merging rev3 and rev4 should use bar@rev2
638 # \- 2 --- 4 as the merge base
642 # \- 2 --- 4 as the merge base
639 #
643 #
640 meta["copy"] = cp
644 meta["copy"] = cp
641 if not manifest2: # not a branch merge
645 if not manifest2: # not a branch merge
642 meta["copyrev"] = hex(manifest1.get(cp, nullid))
646 meta["copyrev"] = hex(manifest1.get(cp, nullid))
643 fp2 = nullid
647 fp2 = nullid
644 elif fp2 != nullid: # copied on remote side
648 elif fp2 != nullid: # copied on remote side
645 meta["copyrev"] = hex(manifest1.get(cp, nullid))
649 meta["copyrev"] = hex(manifest1.get(cp, nullid))
646 elif fp1 != nullid: # copied on local side, reversed
650 elif fp1 != nullid: # copied on local side, reversed
647 meta["copyrev"] = hex(manifest2.get(cp))
651 meta["copyrev"] = hex(manifest2.get(cp))
648 fp2 = fp1
652 fp2 = fp1
649 elif cp in manifest2: # directory rename on local side
653 elif cp in manifest2: # directory rename on local side
650 meta["copyrev"] = hex(manifest2[cp])
654 meta["copyrev"] = hex(manifest2[cp])
651 else: # directory rename on remote side
655 else: # directory rename on remote side
652 meta["copyrev"] = hex(manifest1.get(cp, nullid))
656 meta["copyrev"] = hex(manifest1.get(cp, nullid))
653 self.ui.debug(_(" %s: copy %s:%s\n") %
657 self.ui.debug(_(" %s: copy %s:%s\n") %
654 (fn, cp, meta["copyrev"]))
658 (fn, cp, meta["copyrev"]))
655 fp1 = nullid
659 fp1 = nullid
656 elif fp2 != nullid:
660 elif fp2 != nullid:
657 # is one parent an ancestor of the other?
661 # is one parent an ancestor of the other?
658 fpa = fl.ancestor(fp1, fp2)
662 fpa = fl.ancestor(fp1, fp2)
659 if fpa == fp1:
663 if fpa == fp1:
660 fp1, fp2 = fp2, nullid
664 fp1, fp2 = fp2, nullid
661 elif fpa == fp2:
665 elif fpa == fp2:
662 fp2 = nullid
666 fp2 = nullid
663
667
664 # is the file unmodified from the parent? report existing entry
668 # is the file unmodified from the parent? report existing entry
665 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
669 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
666 return fp1
670 return fp1
667
671
668 changelist.append(fn)
672 changelist.append(fn)
669 return fl.add(t, meta, tr, linkrev, fp1, fp2)
673 return fl.add(t, meta, tr, linkrev, fp1, fp2)
670
674
671 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
675 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
672 if p1 is None:
676 if p1 is None:
673 p1, p2 = self.dirstate.parents()
677 p1, p2 = self.dirstate.parents()
674 return self.commit(files=files, text=text, user=user, date=date,
678 return self.commit(files=files, text=text, user=user, date=date,
675 p1=p1, p2=p2, extra=extra, empty_ok=True)
679 p1=p1, p2=p2, extra=extra, empty_ok=True)
676
680
677 def commit(self, files=None, text="", user=None, date=None,
681 def commit(self, files=None, text="", user=None, date=None,
678 match=util.always, force=False, force_editor=False,
682 match=util.always, force=False, force_editor=False,
679 p1=None, p2=None, extra={}, empty_ok=False):
683 p1=None, p2=None, extra={}, empty_ok=False):
680 wlock = lock = tr = None
684 wlock = lock = tr = None
681 valid = 0 # don't save the dirstate if this isn't set
685 valid = 0 # don't save the dirstate if this isn't set
682 try:
686 try:
683 commit = []
687 commit = []
684 remove = []
688 remove = []
685 changed = []
689 changed = []
686 use_dirstate = (p1 is None) # not rawcommit
690 use_dirstate = (p1 is None) # not rawcommit
687 extra = extra.copy()
691 extra = extra.copy()
688
692
689 if use_dirstate:
693 if use_dirstate:
690 if files:
694 if files:
691 for f in files:
695 for f in files:
692 s = self.dirstate[f]
696 s = self.dirstate[f]
693 if s in 'nma':
697 if s in 'nma':
694 commit.append(f)
698 commit.append(f)
695 elif s == 'r':
699 elif s == 'r':
696 remove.append(f)
700 remove.append(f)
697 else:
701 else:
698 self.ui.warn(_("%s not tracked!\n") % f)
702 self.ui.warn(_("%s not tracked!\n") % f)
699 else:
703 else:
700 changes = self.status(match=match)[:5]
704 changes = self.status(match=match)[:5]
701 modified, added, removed, deleted, unknown = changes
705 modified, added, removed, deleted, unknown = changes
702 commit = modified + added
706 commit = modified + added
703 remove = removed
707 remove = removed
704 else:
708 else:
705 commit = files
709 commit = files
706
710
707 if use_dirstate:
711 if use_dirstate:
708 p1, p2 = self.dirstate.parents()
712 p1, p2 = self.dirstate.parents()
709 update_dirstate = True
713 update_dirstate = True
710 else:
714 else:
711 p1, p2 = p1, p2 or nullid
715 p1, p2 = p1, p2 or nullid
712 update_dirstate = (self.dirstate.parents()[0] == p1)
716 update_dirstate = (self.dirstate.parents()[0] == p1)
713
717
714 c1 = self.changelog.read(p1)
718 c1 = self.changelog.read(p1)
715 c2 = self.changelog.read(p2)
719 c2 = self.changelog.read(p2)
716 m1 = self.manifest.read(c1[0]).copy()
720 m1 = self.manifest.read(c1[0]).copy()
717 m2 = self.manifest.read(c2[0])
721 m2 = self.manifest.read(c2[0])
718
722
719 if use_dirstate:
723 if use_dirstate:
720 branchname = self.workingctx().branch()
724 branchname = self.workingctx().branch()
721 try:
725 try:
722 branchname = branchname.decode('UTF-8').encode('UTF-8')
726 branchname = branchname.decode('UTF-8').encode('UTF-8')
723 except UnicodeDecodeError:
727 except UnicodeDecodeError:
724 raise util.Abort(_('branch name not in UTF-8!'))
728 raise util.Abort(_('branch name not in UTF-8!'))
725 else:
729 else:
726 branchname = ""
730 branchname = ""
727
731
728 if use_dirstate:
732 if use_dirstate:
729 oldname = c1[5].get("branch") # stored in UTF-8
733 oldname = c1[5].get("branch") # stored in UTF-8
730 if (not commit and not remove and not force and p2 == nullid
734 if (not commit and not remove and not force and p2 == nullid
731 and branchname == oldname):
735 and branchname == oldname):
732 self.ui.status(_("nothing changed\n"))
736 self.ui.status(_("nothing changed\n"))
733 return None
737 return None
734
738
735 xp1 = hex(p1)
739 xp1 = hex(p1)
736 if p2 == nullid: xp2 = ''
740 if p2 == nullid: xp2 = ''
737 else: xp2 = hex(p2)
741 else: xp2 = hex(p2)
738
742
739 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
743 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
740
744
741 wlock = self.wlock()
745 wlock = self.wlock()
742 lock = self.lock()
746 lock = self.lock()
743 tr = self.transaction()
747 tr = self.transaction()
744 trp = weakref.proxy(tr)
748 trp = weakref.proxy(tr)
745
749
746 # check in files
750 # check in files
747 new = {}
751 new = {}
748 linkrev = self.changelog.count()
752 linkrev = self.changelog.count()
749 commit.sort()
753 commit.sort()
750 is_exec = util.execfunc(self.root, m1.execf)
754 is_exec = util.execfunc(self.root, m1.execf)
751 is_link = util.linkfunc(self.root, m1.linkf)
755 is_link = util.linkfunc(self.root, m1.linkf)
752 for f in commit:
756 for f in commit:
753 self.ui.note(f + "\n")
757 self.ui.note(f + "\n")
754 try:
758 try:
755 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
759 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
756 new_exec = is_exec(f)
760 new_exec = is_exec(f)
757 new_link = is_link(f)
761 new_link = is_link(f)
758 if ((not changed or changed[-1] != f) and
762 if ((not changed or changed[-1] != f) and
759 m2.get(f) != new[f]):
763 m2.get(f) != new[f]):
760 # mention the file in the changelog if some
764 # mention the file in the changelog if some
761 # flag changed, even if there was no content
765 # flag changed, even if there was no content
762 # change.
766 # change.
763 old_exec = m1.execf(f)
767 old_exec = m1.execf(f)
764 old_link = m1.linkf(f)
768 old_link = m1.linkf(f)
765 if old_exec != new_exec or old_link != new_link:
769 if old_exec != new_exec or old_link != new_link:
766 changed.append(f)
770 changed.append(f)
767 m1.set(f, new_exec, new_link)
771 m1.set(f, new_exec, new_link)
768 if use_dirstate:
772 if use_dirstate:
769 self.dirstate.normal(f)
773 self.dirstate.normal(f)
770
774
771 except (OSError, IOError):
775 except (OSError, IOError):
772 if use_dirstate:
776 if use_dirstate:
773 self.ui.warn(_("trouble committing %s!\n") % f)
777 self.ui.warn(_("trouble committing %s!\n") % f)
774 raise
778 raise
775 else:
779 else:
776 remove.append(f)
780 remove.append(f)
777
781
778 # update manifest
782 # update manifest
779 m1.update(new)
783 m1.update(new)
780 remove.sort()
784 remove.sort()
781 removed = []
785 removed = []
782
786
783 for f in remove:
787 for f in remove:
784 if f in m1:
788 if f in m1:
785 del m1[f]
789 del m1[f]
786 removed.append(f)
790 removed.append(f)
787 elif f in m2:
791 elif f in m2:
788 removed.append(f)
792 removed.append(f)
789 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
793 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
790 (new, removed))
794 (new, removed))
791
795
792 # add changeset
796 # add changeset
793 new = new.keys()
797 new = new.keys()
794 new.sort()
798 new.sort()
795
799
796 user = user or self.ui.username()
800 user = user or self.ui.username()
797 if (not empty_ok and not text) or force_editor:
801 if (not empty_ok and not text) or force_editor:
798 edittext = []
802 edittext = []
799 if text:
803 if text:
800 edittext.append(text)
804 edittext.append(text)
801 edittext.append("")
805 edittext.append("")
802 edittext.append(_("HG: Enter commit message."
806 edittext.append(_("HG: Enter commit message."
803 " Lines beginning with 'HG:' are removed."))
807 " Lines beginning with 'HG:' are removed."))
804 edittext.append("HG: --")
808 edittext.append("HG: --")
805 edittext.append("HG: user: %s" % user)
809 edittext.append("HG: user: %s" % user)
806 if p2 != nullid:
810 if p2 != nullid:
807 edittext.append("HG: branch merge")
811 edittext.append("HG: branch merge")
808 if branchname:
812 if branchname:
809 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
813 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
810 edittext.extend(["HG: changed %s" % f for f in changed])
814 edittext.extend(["HG: changed %s" % f for f in changed])
811 edittext.extend(["HG: removed %s" % f for f in removed])
815 edittext.extend(["HG: removed %s" % f for f in removed])
812 if not changed and not remove:
816 if not changed and not remove:
813 edittext.append("HG: no files changed")
817 edittext.append("HG: no files changed")
814 edittext.append("")
818 edittext.append("")
815 # run editor in the repository root
819 # run editor in the repository root
816 olddir = os.getcwd()
820 olddir = os.getcwd()
817 os.chdir(self.root)
821 os.chdir(self.root)
818 text = self.ui.edit("\n".join(edittext), user)
822 text = self.ui.edit("\n".join(edittext), user)
819 os.chdir(olddir)
823 os.chdir(olddir)
820
824
821 if branchname:
825 if branchname:
822 extra["branch"] = branchname
826 extra["branch"] = branchname
823
827
824 if use_dirstate:
828 if use_dirstate:
825 lines = [line.rstrip() for line in text.rstrip().splitlines()]
829 lines = [line.rstrip() for line in text.rstrip().splitlines()]
826 while lines and not lines[0]:
830 while lines and not lines[0]:
827 del lines[0]
831 del lines[0]
828 if not lines:
832 if not lines:
829 raise util.Abort(_("empty commit message"))
833 raise util.Abort(_("empty commit message"))
830 text = '\n'.join(lines)
834 text = '\n'.join(lines)
831
835
832 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
836 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
833 user, date, extra)
837 user, date, extra)
834 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
838 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
835 parent2=xp2)
839 parent2=xp2)
836 tr.close()
840 tr.close()
837
841
838 if self.branchcache and "branch" in extra:
842 if self.branchcache and "branch" in extra:
839 self.branchcache[util.tolocal(extra["branch"])] = n
843 self.branchcache[util.tolocal(extra["branch"])] = n
840
844
841 if use_dirstate or update_dirstate:
845 if use_dirstate or update_dirstate:
842 self.dirstate.setparents(n)
846 self.dirstate.setparents(n)
843 if use_dirstate:
847 if use_dirstate:
844 for f in removed:
848 for f in removed:
845 self.dirstate.forget(f)
849 self.dirstate.forget(f)
846 valid = 1 # our dirstate updates are complete
850 valid = 1 # our dirstate updates are complete
847
851
848 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
852 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
849 return n
853 return n
850 finally:
854 finally:
851 if not valid: # don't save our updated dirstate
855 if not valid: # don't save our updated dirstate
852 self.dirstate.invalidate()
856 self.dirstate.invalidate()
853 del tr, lock, wlock
857 del tr, lock, wlock
854
858
855 def walk(self, node=None, files=[], match=util.always, badmatch=None):
859 def walk(self, node=None, files=[], match=util.always, badmatch=None):
856 '''
860 '''
857 walk recursively through the directory tree or a given
861 walk recursively through the directory tree or a given
858 changeset, finding all files matched by the match
862 changeset, finding all files matched by the match
859 function
863 function
860
864
861 results are yielded in a tuple (src, filename), where src
865 results are yielded in a tuple (src, filename), where src
862 is one of:
866 is one of:
863 'f' the file was found in the directory tree
867 'f' the file was found in the directory tree
864 'm' the file was only in the dirstate and not in the tree
868 'm' the file was only in the dirstate and not in the tree
865 'b' file was not found and matched badmatch
869 'b' file was not found and matched badmatch
866 '''
870 '''
867
871
868 if node:
872 if node:
869 fdict = dict.fromkeys(files)
873 fdict = dict.fromkeys(files)
870 # for dirstate.walk, files=['.'] means "walk the whole tree".
874 # for dirstate.walk, files=['.'] means "walk the whole tree".
871 # follow that here, too
875 # follow that here, too
872 fdict.pop('.', None)
876 fdict.pop('.', None)
873 mdict = self.manifest.read(self.changelog.read(node)[0])
877 mdict = self.manifest.read(self.changelog.read(node)[0])
874 mfiles = mdict.keys()
878 mfiles = mdict.keys()
875 mfiles.sort()
879 mfiles.sort()
876 for fn in mfiles:
880 for fn in mfiles:
877 for ffn in fdict:
881 for ffn in fdict:
878 # match if the file is the exact name or a directory
882 # match if the file is the exact name or a directory
879 if ffn == fn or fn.startswith("%s/" % ffn):
883 if ffn == fn or fn.startswith("%s/" % ffn):
880 del fdict[ffn]
884 del fdict[ffn]
881 break
885 break
882 if match(fn):
886 if match(fn):
883 yield 'm', fn
887 yield 'm', fn
884 ffiles = fdict.keys()
888 ffiles = fdict.keys()
885 ffiles.sort()
889 ffiles.sort()
886 for fn in ffiles:
890 for fn in ffiles:
887 if badmatch and badmatch(fn):
891 if badmatch and badmatch(fn):
888 if match(fn):
892 if match(fn):
889 yield 'b', fn
893 yield 'b', fn
890 else:
894 else:
891 self.ui.warn(_('%s: No such file in rev %s\n')
895 self.ui.warn(_('%s: No such file in rev %s\n')
892 % (self.pathto(fn), short(node)))
896 % (self.pathto(fn), short(node)))
893 else:
897 else:
894 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
898 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
895 yield src, fn
899 yield src, fn
896
900
897 def status(self, node1=None, node2=None, files=[], match=util.always,
901 def status(self, node1=None, node2=None, files=[], match=util.always,
898 list_ignored=False, list_clean=False):
902 list_ignored=False, list_clean=False):
899 """return status of files between two nodes or node and working directory
903 """return status of files between two nodes or node and working directory
900
904
901 If node1 is None, use the first dirstate parent instead.
905 If node1 is None, use the first dirstate parent instead.
902 If node2 is None, compare node1 with working directory.
906 If node2 is None, compare node1 with working directory.
903 """
907 """
904
908
905 def fcmp(fn, getnode):
909 def fcmp(fn, getnode):
906 t1 = self.wread(fn)
910 t1 = self.wread(fn)
907 return self.file(fn).cmp(getnode(fn), t1)
911 return self.file(fn).cmp(getnode(fn), t1)
908
912
909 def mfmatches(node):
913 def mfmatches(node):
910 change = self.changelog.read(node)
914 change = self.changelog.read(node)
911 mf = self.manifest.read(change[0]).copy()
915 mf = self.manifest.read(change[0]).copy()
912 for fn in mf.keys():
916 for fn in mf.keys():
913 if not match(fn):
917 if not match(fn):
914 del mf[fn]
918 del mf[fn]
915 return mf
919 return mf
916
920
917 modified, added, removed, deleted, unknown = [], [], [], [], []
921 modified, added, removed, deleted, unknown = [], [], [], [], []
918 ignored, clean = [], []
922 ignored, clean = [], []
919
923
920 compareworking = False
924 compareworking = False
921 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
925 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
922 compareworking = True
926 compareworking = True
923
927
924 if not compareworking:
928 if not compareworking:
925 # read the manifest from node1 before the manifest from node2,
929 # read the manifest from node1 before the manifest from node2,
926 # so that we'll hit the manifest cache if we're going through
930 # so that we'll hit the manifest cache if we're going through
927 # all the revisions in parent->child order.
931 # all the revisions in parent->child order.
928 mf1 = mfmatches(node1)
932 mf1 = mfmatches(node1)
929
933
930 # are we comparing the working directory?
934 # are we comparing the working directory?
931 if not node2:
935 if not node2:
932 (lookup, modified, added, removed, deleted, unknown,
936 (lookup, modified, added, removed, deleted, unknown,
933 ignored, clean) = self.dirstate.status(files, match,
937 ignored, clean) = self.dirstate.status(files, match,
934 list_ignored, list_clean)
938 list_ignored, list_clean)
935
939
936 # are we comparing working dir against its parent?
940 # are we comparing working dir against its parent?
937 if compareworking:
941 if compareworking:
938 if lookup:
942 if lookup:
939 fixup = []
943 fixup = []
940 # do a full compare of any files that might have changed
944 # do a full compare of any files that might have changed
941 ctx = self.changectx()
945 ctx = self.changectx()
942 for f in lookup:
946 for f in lookup:
943 if f not in ctx or ctx[f].cmp(self.wread(f)):
947 if f not in ctx or ctx[f].cmp(self.wread(f)):
944 modified.append(f)
948 modified.append(f)
945 else:
949 else:
946 fixup.append(f)
950 fixup.append(f)
947 if list_clean:
951 if list_clean:
948 clean.append(f)
952 clean.append(f)
949
953
950 # update dirstate for files that are actually clean
954 # update dirstate for files that are actually clean
951 if fixup:
955 if fixup:
952 wlock = None
956 wlock = None
953 try:
957 try:
954 try:
958 try:
955 wlock = self.wlock(False)
959 wlock = self.wlock(False)
956 except lock.LockException:
960 except lock.LockException:
957 pass
961 pass
958 if wlock:
962 if wlock:
959 for f in fixup:
963 for f in fixup:
960 self.dirstate.normal(f)
964 self.dirstate.normal(f)
961 finally:
965 finally:
962 del wlock
966 del wlock
963 else:
967 else:
964 # we are comparing working dir against non-parent
968 # we are comparing working dir against non-parent
965 # generate a pseudo-manifest for the working dir
969 # generate a pseudo-manifest for the working dir
966 # XXX: create it in dirstate.py ?
970 # XXX: create it in dirstate.py ?
967 mf2 = mfmatches(self.dirstate.parents()[0])
971 mf2 = mfmatches(self.dirstate.parents()[0])
968 is_exec = util.execfunc(self.root, mf2.execf)
972 is_exec = util.execfunc(self.root, mf2.execf)
969 is_link = util.linkfunc(self.root, mf2.linkf)
973 is_link = util.linkfunc(self.root, mf2.linkf)
970 for f in lookup + modified + added:
974 for f in lookup + modified + added:
971 mf2[f] = ""
975 mf2[f] = ""
972 mf2.set(f, is_exec(f), is_link(f))
976 mf2.set(f, is_exec(f), is_link(f))
973 for f in removed:
977 for f in removed:
974 if f in mf2:
978 if f in mf2:
975 del mf2[f]
979 del mf2[f]
976
980
977 else:
981 else:
978 # we are comparing two revisions
982 # we are comparing two revisions
979 mf2 = mfmatches(node2)
983 mf2 = mfmatches(node2)
980
984
981 if not compareworking:
985 if not compareworking:
982 # flush lists from dirstate before comparing manifests
986 # flush lists from dirstate before comparing manifests
983 modified, added, clean = [], [], []
987 modified, added, clean = [], [], []
984
988
985 # make sure to sort the files so we talk to the disk in a
989 # make sure to sort the files so we talk to the disk in a
986 # reasonable order
990 # reasonable order
987 mf2keys = mf2.keys()
991 mf2keys = mf2.keys()
988 mf2keys.sort()
992 mf2keys.sort()
989 getnode = lambda fn: mf1.get(fn, nullid)
993 getnode = lambda fn: mf1.get(fn, nullid)
990 for fn in mf2keys:
994 for fn in mf2keys:
991 if mf1.has_key(fn):
995 if mf1.has_key(fn):
992 if (mf1.flags(fn) != mf2.flags(fn) or
996 if (mf1.flags(fn) != mf2.flags(fn) or
993 (mf1[fn] != mf2[fn] and
997 (mf1[fn] != mf2[fn] and
994 (mf2[fn] != "" or fcmp(fn, getnode)))):
998 (mf2[fn] != "" or fcmp(fn, getnode)))):
995 modified.append(fn)
999 modified.append(fn)
996 elif list_clean:
1000 elif list_clean:
997 clean.append(fn)
1001 clean.append(fn)
998 del mf1[fn]
1002 del mf1[fn]
999 else:
1003 else:
1000 added.append(fn)
1004 added.append(fn)
1001
1005
1002 removed = mf1.keys()
1006 removed = mf1.keys()
1003
1007
1004 # sort and return results:
1008 # sort and return results:
1005 for l in modified, added, removed, deleted, unknown, ignored, clean:
1009 for l in modified, added, removed, deleted, unknown, ignored, clean:
1006 l.sort()
1010 l.sort()
1007 return (modified, added, removed, deleted, unknown, ignored, clean)
1011 return (modified, added, removed, deleted, unknown, ignored, clean)
1008
1012
1009 def add(self, list):
1013 def add(self, list):
1010 wlock = self.wlock()
1014 wlock = self.wlock()
1011 try:
1015 try:
1012 rejected = []
1016 rejected = []
1013 for f in list:
1017 for f in list:
1014 p = self.wjoin(f)
1018 p = self.wjoin(f)
1015 try:
1019 try:
1016 st = os.lstat(p)
1020 st = os.lstat(p)
1017 except:
1021 except:
1018 self.ui.warn(_("%s does not exist!\n") % f)
1022 self.ui.warn(_("%s does not exist!\n") % f)
1019 rejected.append(f)
1023 rejected.append(f)
1020 continue
1024 continue
1021 if st.st_size > 10000000:
1025 if st.st_size > 10000000:
1022 self.ui.warn(_("%s: files over 10MB may cause memory and"
1026 self.ui.warn(_("%s: files over 10MB may cause memory and"
1023 " performance problems\n"
1027 " performance problems\n"
1024 "(use 'hg revert %s' to unadd the file)\n")
1028 "(use 'hg revert %s' to unadd the file)\n")
1025 % (f, f))
1029 % (f, f))
1026 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1030 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1027 self.ui.warn(_("%s not added: only files and symlinks "
1031 self.ui.warn(_("%s not added: only files and symlinks "
1028 "supported currently\n") % f)
1032 "supported currently\n") % f)
1029 rejected.append(p)
1033 rejected.append(p)
1030 elif self.dirstate[f] in 'amn':
1034 elif self.dirstate[f] in 'amn':
1031 self.ui.warn(_("%s already tracked!\n") % f)
1035 self.ui.warn(_("%s already tracked!\n") % f)
1032 elif self.dirstate[f] == 'r':
1036 elif self.dirstate[f] == 'r':
1033 self.dirstate.normallookup(f)
1037 self.dirstate.normallookup(f)
1034 else:
1038 else:
1035 self.dirstate.add(f)
1039 self.dirstate.add(f)
1036 return rejected
1040 return rejected
1037 finally:
1041 finally:
1038 del wlock
1042 del wlock
1039
1043
1040 def forget(self, list):
1044 def forget(self, list):
1041 wlock = self.wlock()
1045 wlock = self.wlock()
1042 try:
1046 try:
1043 for f in list:
1047 for f in list:
1044 if self.dirstate[f] != 'a':
1048 if self.dirstate[f] != 'a':
1045 self.ui.warn(_("%s not added!\n") % f)
1049 self.ui.warn(_("%s not added!\n") % f)
1046 else:
1050 else:
1047 self.dirstate.forget(f)
1051 self.dirstate.forget(f)
1048 finally:
1052 finally:
1049 del wlock
1053 del wlock
1050
1054
1051 def remove(self, list, unlink=False):
1055 def remove(self, list, unlink=False):
1052 wlock = None
1056 wlock = None
1053 try:
1057 try:
1054 if unlink:
1058 if unlink:
1055 for f in list:
1059 for f in list:
1056 try:
1060 try:
1057 util.unlink(self.wjoin(f))
1061 util.unlink(self.wjoin(f))
1058 except OSError, inst:
1062 except OSError, inst:
1059 if inst.errno != errno.ENOENT:
1063 if inst.errno != errno.ENOENT:
1060 raise
1064 raise
1061 wlock = self.wlock()
1065 wlock = self.wlock()
1062 for f in list:
1066 for f in list:
1063 if unlink and os.path.exists(self.wjoin(f)):
1067 if unlink and os.path.exists(self.wjoin(f)):
1064 self.ui.warn(_("%s still exists!\n") % f)
1068 self.ui.warn(_("%s still exists!\n") % f)
1065 elif self.dirstate[f] == 'a':
1069 elif self.dirstate[f] == 'a':
1066 self.dirstate.forget(f)
1070 self.dirstate.forget(f)
1067 elif f not in self.dirstate:
1071 elif f not in self.dirstate:
1068 self.ui.warn(_("%s not tracked!\n") % f)
1072 self.ui.warn(_("%s not tracked!\n") % f)
1069 else:
1073 else:
1070 self.dirstate.remove(f)
1074 self.dirstate.remove(f)
1071 finally:
1075 finally:
1072 del wlock
1076 del wlock
1073
1077
1074 def undelete(self, list):
1078 def undelete(self, list):
1075 wlock = None
1079 wlock = None
1076 try:
1080 try:
1077 manifests = [self.manifest.read(self.changelog.read(p)[0])
1081 manifests = [self.manifest.read(self.changelog.read(p)[0])
1078 for p in self.dirstate.parents() if p != nullid]
1082 for p in self.dirstate.parents() if p != nullid]
1079 wlock = self.wlock()
1083 wlock = self.wlock()
1080 for f in list:
1084 for f in list:
1081 if self.dirstate[f] != 'r':
1085 if self.dirstate[f] != 'r':
1082 self.ui.warn("%s not removed!\n" % f)
1086 self.ui.warn("%s not removed!\n" % f)
1083 else:
1087 else:
1084 m = f in manifests[0] and manifests[0] or manifests[1]
1088 m = f in manifests[0] and manifests[0] or manifests[1]
1085 t = self.file(f).read(m[f])
1089 t = self.file(f).read(m[f])
1086 self.wwrite(f, t, m.flags(f))
1090 self.wwrite(f, t, m.flags(f))
1087 self.dirstate.normal(f)
1091 self.dirstate.normal(f)
1088 finally:
1092 finally:
1089 del wlock
1093 del wlock
1090
1094
1091 def copy(self, source, dest):
1095 def copy(self, source, dest):
1092 wlock = None
1096 wlock = None
1093 try:
1097 try:
1094 p = self.wjoin(dest)
1098 p = self.wjoin(dest)
1095 if not (os.path.exists(p) or os.path.islink(p)):
1099 if not (os.path.exists(p) or os.path.islink(p)):
1096 self.ui.warn(_("%s does not exist!\n") % dest)
1100 self.ui.warn(_("%s does not exist!\n") % dest)
1097 elif not (os.path.isfile(p) or os.path.islink(p)):
1101 elif not (os.path.isfile(p) or os.path.islink(p)):
1098 self.ui.warn(_("copy failed: %s is not a file or a "
1102 self.ui.warn(_("copy failed: %s is not a file or a "
1099 "symbolic link\n") % dest)
1103 "symbolic link\n") % dest)
1100 else:
1104 else:
1101 wlock = self.wlock()
1105 wlock = self.wlock()
1102 if dest not in self.dirstate:
1106 if dest not in self.dirstate:
1103 self.dirstate.add(dest)
1107 self.dirstate.add(dest)
1104 self.dirstate.copy(source, dest)
1108 self.dirstate.copy(source, dest)
1105 finally:
1109 finally:
1106 del wlock
1110 del wlock
1107
1111
def heads(self, start=None):
    """Return the repository heads, highest revision first."""
    nodes = self.changelog.heads(start)
    # decorate-sort-undecorate: negating the revision number makes an
    # ascending sort produce descending revision order
    decorated = [(-self.changelog.rev(n), n) for n in nodes]
    decorated.sort()
    return [n for (negrev, n) in decorated]
1114
1118
def branchheads(self, branch, start=None):
    """Return the head nodes of the named branch.

    If start is given, only heads reachable from start are returned.
    Returns an empty list for an unknown branch name.
    """
    branches = self.branchtags()
    if branch not in branches:
        return []
    # The basic algorithm is this:
    #
    # Start from the branch tip since there are no later revisions that can
    # possibly be in this branch, and the tip is a guaranteed head.
    #
    # Remember the tip's parents as the first ancestors, since these by
    # definition are not heads.
    #
    # Step backwards from the branch tip through all the revisions. We are
    # guaranteed by the rules of Mercurial that we will now be visiting the
    # nodes in reverse topological order (children before parents).
    #
    # If a revision is one of the ancestors of a head then we can toss it
    # out of the ancestors set (we've already found it and won't be
    # visiting it again) and put its parents in the ancestors set.
    #
    # Otherwise, if a revision is in the branch it's another head, since it
    # wasn't in the ancestor list of an existing head. So add it to the
    # head list, and add its parents to the ancestor list.
    #
    # If it is not in the branch ignore it.
    #
    # Once we have a list of heads, use nodesbetween to filter out all the
    # heads that cannot be reached from startrev. There may be a more
    # efficient way to do this as part of the previous algorithm.

    set = util.set
    heads = [self.changelog.rev(branches[branch])]
    # Don't care if ancestors contains nullrev or not.
    ancestors = set(self.changelog.parentrevs(heads[0]))
    for rev in xrange(heads[0] - 1, nullrev, -1):
        if rev in ancestors:
            ancestors.update(self.changelog.parentrevs(rev))
            ancestors.remove(rev)
        elif self.changectx(rev).branch() == branch:
            heads.append(rev)
            ancestors.update(self.changelog.parentrevs(rev))
    heads = [self.changelog.node(rev) for rev in heads]
    if start is not None:
        heads = self.changelog.nodesbetween([start], heads)[2]
    return heads
1160
1164
def branches(self, nodes):
    """Trace each node back along first parents to a branch point.

    A linear segment ends at a merge (second parent set) or a root
    (first parent null).  Returns, for each starting node, the tuple
    (starting node, segment end, first parent, second parent).
    Defaults to the changelog tip when nodes is empty.
    """
    if not nodes:
        nodes = [self.changelog.tip()]
    result = []
    for start in nodes:
        node = start
        while True:
            parents = self.changelog.parents(node)
            if parents[1] != nullid or parents[0] == nullid:
                result.append((start, node, parents[0], parents[1]))
                break
            node = parents[0]
    return result
1174
1178
def between(self, pairs):
    """Sample nodes along first-parent chains for discovery.

    For each (top, bottom) pair, walk the first-parent chain from
    top towards bottom, collecting the nodes at exponentially
    growing distances (1, 2, 4, ...) from top.  The wire protocol's
    binary search over unknown history uses these samples.
    """
    result = []
    for top, bottom in pairs:
        samples = []
        node, distance, target = top, 0, 1
        while node != bottom:
            parent = self.changelog.parents(node)[0]
            if distance == target:
                samples.append(node)
                target *= 2
            node = parent
            distance += 1
        result.append(samples)
    return result
1193
1197
def findincoming(self, remote, base=None, heads=None, force=False):
    """Return list of roots of the subsets of missing nodes from remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side and that no child of a node of base exists
    in both remote and self.
    Furthermore base will be updated to include the nodes that exists
    in self and remote but no children exists in self and remote.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads.

    All the ancestors of base are in self and in remote.
    All the descendants of the list returned are missing in self.
    (and so we know that the rest of the nodes are missing in remote, see
    outgoing)
    """
    m = self.changelog.nodemap
    search = []
    fetch = {}
    seen = {}
    seenbranch = {}
    # FIX: identity comparison with None (was `base == None`)
    if base is None:
        base = {}

    if not heads:
        heads = remote.heads()

    if self.changelog.tip() == nullid:
        # local repository is empty: everything reachable from the
        # remote heads is missing
        base[nullid] = 1
        if heads != [nullid]:
            return [nullid]
        return []

    # assume we're closer to the tip than the root
    # and start by examining the heads
    self.ui.status(_("searching for changes\n"))

    unknown = []
    for h in heads:
        if h not in m:
            unknown.append(h)
        else:
            base[h] = 1

    if not unknown:
        return []

    req = dict.fromkeys(unknown)
    reqcnt = 0

    # search through remote branches
    # a 'branch' here is a linear segment of history, with four parts:
    # head, root, first parent, second parent
    # (a branch always has two parents (or none) by definition)
    unknown = remote.branches(unknown)
    while unknown:
        r = []
        while unknown:
            n = unknown.pop(0)
            if n[0] in seen:
                continue

            self.ui.debug(_("examining %s:%s\n")
                          % (short(n[0]), short(n[1])))
            if n[0] == nullid: # found the end of the branch
                pass
            elif n in seenbranch:
                self.ui.debug(_("branch already found\n"))
                continue
            elif n[1] and n[1] in m: # do we know the base?
                self.ui.debug(_("found incomplete branch %s:%s\n")
                              % (short(n[0]), short(n[1])))
                search.append(n) # schedule branch range for scanning
                seenbranch[n] = 1
            else:
                if n[1] not in seen and n[1] not in fetch:
                    if n[2] in m and n[3] in m:
                        self.ui.debug(_("found new changeset %s\n") %
                                      short(n[1]))
                        fetch[n[1]] = 1 # earliest unknown
                    for p in n[2:4]:
                        if p in m:
                            base[p] = 1 # latest known

                # queue unknown parents for the next request batch
                for p in n[2:4]:
                    if p not in req and p not in m:
                        r.append(p)
                        req[p] = 1
            seen[n[0]] = 1

        if r:
            reqcnt += 1
            self.ui.debug(_("request %d: %s\n") %
                          (reqcnt, " ".join(map(short, r))))
            # ask about parents in batches of 10 to bound request size
            for p in xrange(0, len(r), 10):
                for b in remote.branches(r[p:p+10]):
                    self.ui.debug(_("received %s:%s\n") %
                                  (short(b[0]), short(b[1])))
                    unknown.append(b)

    # do binary search on the branches we found
    while search:
        n = search.pop(0)
        reqcnt += 1
        l = remote.between([(n[0], n[1])])[0]
        l.append(n[1])
        p = n[0]
        f = 1
        for i in l:
            self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
            if i in m:
                if f <= 2:
                    self.ui.debug(_("found new branch changeset %s\n") %
                                  short(p))
                    fetch[p] = 1
                    base[i] = 1
                else:
                    self.ui.debug(_("narrowed branch search to %s:%s\n")
                                  % (short(p), short(i)))
                    search.append((p, i))
                break
            p, f = i, f * 2

    # sanity check our fetch list
    for f in fetch.keys():
        if f in m:
            # FIX: was short(f[:4]), which displayed a hash of only a
            # 4-byte node prefix; use the standard short form of the
            # full node instead
            raise repo.RepoError(_("already have changeset ") + short(f))

    if base.keys() == [nullid]:
        if force:
            self.ui.warn(_("warning: repository is unrelated\n"))
        else:
            raise util.Abort(_("repository is unrelated"))

    self.ui.debug(_("found new changesets starting at ") +
                  " ".join([short(f) for f in fetch]) + "\n")

    self.ui.debug(_("%d total queries\n") % reqcnt)

    return fetch.keys()
1334
1338
def findoutgoing(self, remote, base=None, heads=None, force=False):
    """Return list of nodes that are roots of subsets not in remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads, and return a second element which
    contains all remote heads which get new children.
    """
    # FIX: identity comparison with None (was `base == None`)
    if base is None:
        # no common-node information supplied: run discovery to fill it
        base = {}
        self.findincoming(remote, base, heads, force=force)

    self.ui.debug(_("common changesets up to ")
                  + " ".join(map(short, base.keys())) + "\n")

    remain = dict.fromkeys(self.changelog.nodemap)

    # prune everything remote has from the tree
    del remain[nullid]
    remove = base.keys()
    while remove:
        n = remove.pop(0)
        if n in remain:
            del remain[n]
            for p in self.changelog.parents(n):
                remove.append(p)

    # find every node whose parents have been pruned
    subset = []
    # find every remote head that will get new children
    updated_heads = {}
    for n in remain:
        p1, p2 = self.changelog.parents(n)
        if p1 not in remain and p2 not in remain:
            subset.append(n)
        if heads:
            if p1 in heads:
                updated_heads[p1] = True
            if p2 in heads:
                updated_heads[p2] = True

    # this is the set of all roots we have to push
    if heads:
        return subset, updated_heads.keys()
    else:
        return subset
1382
1386
def pull(self, remote, heads=None, force=False):
    """Pull missing changesets from remote into this repository.

    Returns the result of addchangegroup, or 0 when nothing was
    found to pull.  Partial pulls (heads given) require the remote
    to advertise the changegroupsubset capability.
    """
    lock = self.lock()
    try:
        fetch = self.findincoming(remote, heads=heads, force=force)
        if fetch == [nullid]:
            self.ui.status(_("requesting all changes\n"))
        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 0

        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        else:
            if 'changegroupsubset' not in remote.capabilities:
                raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg, 'pull', remote.url())
    finally:
        del lock
1403
1407
def push(self, remote, force=False, revs=None):
    """Push changes to remote, picking the appropriate transport.

    There are two ways to push to a remote repo: addchangegroup
    assumes the local user can lock the remote repo (local
    filesystem, old ssh servers), while unbundle assumes they cannot
    (new ssh servers, http servers).  Prefer unbundle whenever the
    remote advertises it.
    """
    if remote.capable('unbundle'):
        return self.push_unbundle(remote, force, revs)
    return self.push_addchangegroup(remote, force, revs)
1416
1420
def prepush(self, remote, force, revs):
    """Compute the changegroup for a push and vet it.

    Returns (changegroup, remote heads) on success, or (None, code)
    when there is nothing to push or the push would create new
    remote heads without force.
    """
    base = {}
    remote_heads = remote.heads()
    inc = self.findincoming(remote, base, remote_heads, force=force)

    update, updated_heads = self.findoutgoing(remote, base, remote_heads)
    if revs is not None:
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
    else:
        bases, heads = update, self.changelog.heads()

    if not bases:
        self.ui.status(_("no changes found\n"))
        return None, 1
    elif not force:
        # check if we're creating new remote heads
        # to be a remote head after push, node must be either
        # - unknown locally
        # - a local outgoing head descended from update
        # - a remote head that's known locally and not
        #   ancestral to an outgoing head

        warn = 0

        if remote_heads == [nullid]:
            # remote is empty: nothing can be a new head
            warn = 0
        elif not revs and len(heads) > len(remote_heads):
            warn = 1
        else:
            newheads = list(heads)
            for r in remote_heads:
                if r in self.changelog.nodemap:
                    desc = self.changelog.heads(r, heads)
                    l = [h for h in heads if h in desc]
                    if not l:
                        newheads.append(r)
                else:
                    newheads.append(r)
            if len(newheads) > len(remote_heads):
                warn = 1

        if warn:
            self.ui.warn(_("abort: push creates new remote branches!\n"))
            self.ui.status(_("(did you forget to merge?"
                             " use push -f to force)\n"))
            return None, 1
    elif inc:
        self.ui.warn(_("note: unsynced remote changes!\n"))

    if revs is None:
        cg = self.changegroup(update, 'push')
    else:
        cg = self.changegroupsubset(update, revs, 'push')
    return cg, remote_heads
1472
1476
def push_addchangegroup(self, remote, force, revs):
    """Push by taking the remote lock and applying a changegroup.

    Used with transports where the local user can lock the remote
    repository directly.
    """
    lock = remote.lock()
    try:
        ret = self.prepush(remote, force, revs)
        if ret[0] is None:
            # nothing to push (or push refused); propagate the code
            return ret[1]
        cg, remote_heads = ret
        return remote.addchangegroup(cg, 'push', self.url())
    finally:
        del lock
1483
1487
def push_unbundle(self, remote, force, revs):
    """Push via the unbundle protocol.

    The local repo finds heads on the server and works out which
    revs it must push.  Once the revs are transferred, the server
    aborts if it meanwhile grew different heads (someone else won a
    commit/push race) — unless force is set, in which case the
    sentinel 'force' is sent instead of the expected head list.
    """
    ret = self.prepush(remote, force, revs)
    if ret[0] is None:
        return ret[1]
    cg, remote_heads = ret
    if force:
        remote_heads = ['force']
    return remote.unbundle(cg, remote_heads, 'push')
1496
1500
def changegroupinfo(self, nodes, source):
    """Report how many changesets a changegroup carries.

    Prints the count when verbose or when writing a bundle, and the
    full changeset list when debugging.
    """
    if self.ui.verbose or source == 'bundle':
        self.ui.status(_("%d changesets found\n") % len(nodes))
    if self.ui.debugflag:
        self.ui.debug(_("List of changesets:\n"))
        for node in nodes:
            self.ui.debug("%s\n" % hex(node))
1504
1508
1505 def changegroupsubset(self, bases, heads, source):
1509 def changegroupsubset(self, bases, heads, source):
1506 """This function generates a changegroup consisting of all the nodes
1510 """This function generates a changegroup consisting of all the nodes
1507 that are descendents of any of the bases, and ancestors of any of
1511 that are descendents of any of the bases, and ancestors of any of
1508 the heads.
1512 the heads.
1509
1513
1510 It is fairly complex as determining which filenodes and which
1514 It is fairly complex as determining which filenodes and which
1511 manifest nodes need to be included for the changeset to be complete
1515 manifest nodes need to be included for the changeset to be complete
1512 is non-trivial.
1516 is non-trivial.
1513
1517
1514 Another wrinkle is doing the reverse, figuring out which changeset in
1518 Another wrinkle is doing the reverse, figuring out which changeset in
1515 the changegroup a particular filenode or manifestnode belongs to."""
1519 the changegroup a particular filenode or manifestnode belongs to."""
1516
1520
1517 self.hook('preoutgoing', throw=True, source=source)
1521 self.hook('preoutgoing', throw=True, source=source)
1518
1522
1519 # Set up some initial variables
1523 # Set up some initial variables
1520 # Make it easy to refer to self.changelog
1524 # Make it easy to refer to self.changelog
1521 cl = self.changelog
1525 cl = self.changelog
1522 # msng is short for missing - compute the list of changesets in this
1526 # msng is short for missing - compute the list of changesets in this
1523 # changegroup.
1527 # changegroup.
1524 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1528 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1525 self.changegroupinfo(msng_cl_lst, source)
1529 self.changegroupinfo(msng_cl_lst, source)
1526 # Some bases may turn out to be superfluous, and some heads may be
1530 # Some bases may turn out to be superfluous, and some heads may be
1527 # too. nodesbetween will return the minimal set of bases and heads
1531 # too. nodesbetween will return the minimal set of bases and heads
1528 # necessary to re-create the changegroup.
1532 # necessary to re-create the changegroup.
1529
1533
1530 # Known heads are the list of heads that it is assumed the recipient
1534 # Known heads are the list of heads that it is assumed the recipient
1531 # of this changegroup will know about.
1535 # of this changegroup will know about.
1532 knownheads = {}
1536 knownheads = {}
1533 # We assume that all parents of bases are known heads.
1537 # We assume that all parents of bases are known heads.
1534 for n in bases:
1538 for n in bases:
1535 for p in cl.parents(n):
1539 for p in cl.parents(n):
1536 if p != nullid:
1540 if p != nullid:
1537 knownheads[p] = 1
1541 knownheads[p] = 1
1538 knownheads = knownheads.keys()
1542 knownheads = knownheads.keys()
1539 if knownheads:
1543 if knownheads:
1540 # Now that we know what heads are known, we can compute which
1544 # Now that we know what heads are known, we can compute which
1541 # changesets are known. The recipient must know about all
1545 # changesets are known. The recipient must know about all
1542 # changesets required to reach the known heads from the null
1546 # changesets required to reach the known heads from the null
1543 # changeset.
1547 # changeset.
1544 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1548 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1545 junk = None
1549 junk = None
1546 # Transform the list into an ersatz set.
1550 # Transform the list into an ersatz set.
1547 has_cl_set = dict.fromkeys(has_cl_set)
1551 has_cl_set = dict.fromkeys(has_cl_set)
1548 else:
1552 else:
1549 # If there were no known heads, the recipient cannot be assumed to
1553 # If there were no known heads, the recipient cannot be assumed to
1550 # know about any changesets.
1554 # know about any changesets.
1551 has_cl_set = {}
1555 has_cl_set = {}
1552
1556
1553 # Make it easy to refer to self.manifest
1557 # Make it easy to refer to self.manifest
1554 mnfst = self.manifest
1558 mnfst = self.manifest
1555 # We don't know which manifests are missing yet
1559 # We don't know which manifests are missing yet
1556 msng_mnfst_set = {}
1560 msng_mnfst_set = {}
1557 # Nor do we know which filenodes are missing.
1561 # Nor do we know which filenodes are missing.
1558 msng_filenode_set = {}
1562 msng_filenode_set = {}
1559
1563
1560 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1564 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1561 junk = None
1565 junk = None
1562
1566
1563 # A changeset always belongs to itself, so the changenode lookup
1567 # A changeset always belongs to itself, so the changenode lookup
1564 # function for a changenode is identity.
1568 # function for a changenode is identity.
1565 def identity(x):
1569 def identity(x):
1566 return x
1570 return x
1567
1571
1568 # A function generating function. Sets up an environment for the
1572 # A function generating function. Sets up an environment for the
1569 # inner function.
1573 # inner function.
1570 def cmp_by_rev_func(revlog):
1574 def cmp_by_rev_func(revlog):
1571 # Compare two nodes by their revision number in the environment's
1575 # Compare two nodes by their revision number in the environment's
1572 # revision history. Since the revision number both represents the
1576 # revision history. Since the revision number both represents the
1573 # most efficient order to read the nodes in, and represents a
1577 # most efficient order to read the nodes in, and represents a
1574 # topological sorting of the nodes, this function is often useful.
1578 # topological sorting of the nodes, this function is often useful.
1575 def cmp_by_rev(a, b):
1579 def cmp_by_rev(a, b):
1576 return cmp(revlog.rev(a), revlog.rev(b))
1580 return cmp(revlog.rev(a), revlog.rev(b))
1577 return cmp_by_rev
1581 return cmp_by_rev
1578
1582
1579 # If we determine that a particular file or manifest node must be a
1583 # If we determine that a particular file or manifest node must be a
1580 # node that the recipient of the changegroup will already have, we can
1584 # node that the recipient of the changegroup will already have, we can
1581 # also assume the recipient will have all the parents. This function
1585 # also assume the recipient will have all the parents. This function
1582 # prunes them from the set of missing nodes.
1586 # prunes them from the set of missing nodes.
1583 def prune_parents(revlog, hasset, msngset):
1587 def prune_parents(revlog, hasset, msngset):
1584 haslst = hasset.keys()
1588 haslst = hasset.keys()
1585 haslst.sort(cmp_by_rev_func(revlog))
1589 haslst.sort(cmp_by_rev_func(revlog))
1586 for node in haslst:
1590 for node in haslst:
1587 parentlst = [p for p in revlog.parents(node) if p != nullid]
1591 parentlst = [p for p in revlog.parents(node) if p != nullid]
1588 while parentlst:
1592 while parentlst:
1589 n = parentlst.pop()
1593 n = parentlst.pop()
1590 if n not in hasset:
1594 if n not in hasset:
1591 hasset[n] = 1
1595 hasset[n] = 1
1592 p = [p for p in revlog.parents(n) if p != nullid]
1596 p = [p for p in revlog.parents(n) if p != nullid]
1593 parentlst.extend(p)
1597 parentlst.extend(p)
1594 for n in hasset:
1598 for n in hasset:
1595 msngset.pop(n, None)
1599 msngset.pop(n, None)
1596
1600
1597 # This is a function generating function used to set up an environment
1601 # This is a function generating function used to set up an environment
1598 # for the inner function to execute in.
1602 # for the inner function to execute in.
1599 def manifest_and_file_collector(changedfileset):
1603 def manifest_and_file_collector(changedfileset):
1600 # This is an information gathering function that gathers
1604 # This is an information gathering function that gathers
1601 # information from each changeset node that goes out as part of
1605 # information from each changeset node that goes out as part of
1602 # the changegroup. The information gathered is a list of which
1606 # the changegroup. The information gathered is a list of which
1603 # manifest nodes are potentially required (the recipient may
1607 # manifest nodes are potentially required (the recipient may
1604 # already have them) and total list of all files which were
1608 # already have them) and total list of all files which were
1605 # changed in any changeset in the changegroup.
1609 # changed in any changeset in the changegroup.
1606 #
1610 #
1607 # We also remember the first changenode we saw any manifest
1611 # We also remember the first changenode we saw any manifest
1608 # referenced by so we can later determine which changenode 'owns'
1612 # referenced by so we can later determine which changenode 'owns'
1609 # the manifest.
1613 # the manifest.
1610 def collect_manifests_and_files(clnode):
1614 def collect_manifests_and_files(clnode):
1611 c = cl.read(clnode)
1615 c = cl.read(clnode)
1612 for f in c[3]:
1616 for f in c[3]:
1613 # This is to make sure we only have one instance of each
1617 # This is to make sure we only have one instance of each
1614 # filename string for each filename.
1618 # filename string for each filename.
1615 changedfileset.setdefault(f, f)
1619 changedfileset.setdefault(f, f)
1616 msng_mnfst_set.setdefault(c[0], clnode)
1620 msng_mnfst_set.setdefault(c[0], clnode)
1617 return collect_manifests_and_files
1621 return collect_manifests_and_files
1618
1622
1619 # Figure out which manifest nodes (of the ones we think might be part
1623 # Figure out which manifest nodes (of the ones we think might be part
1620 # of the changegroup) the recipient must know about and remove them
1624 # of the changegroup) the recipient must know about and remove them
1621 # from the changegroup.
1625 # from the changegroup.
1622 def prune_manifests():
1626 def prune_manifests():
1623 has_mnfst_set = {}
1627 has_mnfst_set = {}
1624 for n in msng_mnfst_set:
1628 for n in msng_mnfst_set:
1625 # If a 'missing' manifest thinks it belongs to a changenode
1629 # If a 'missing' manifest thinks it belongs to a changenode
1626 # the recipient is assumed to have, obviously the recipient
1630 # the recipient is assumed to have, obviously the recipient
1627 # must have that manifest.
1631 # must have that manifest.
1628 linknode = cl.node(mnfst.linkrev(n))
1632 linknode = cl.node(mnfst.linkrev(n))
1629 if linknode in has_cl_set:
1633 if linknode in has_cl_set:
1630 has_mnfst_set[n] = 1
1634 has_mnfst_set[n] = 1
1631 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1635 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1632
1636
1633 # Use the information collected in collect_manifests_and_files to say
1637 # Use the information collected in collect_manifests_and_files to say
1634 # which changenode any manifestnode belongs to.
1638 # which changenode any manifestnode belongs to.
1635 def lookup_manifest_link(mnfstnode):
1639 def lookup_manifest_link(mnfstnode):
1636 return msng_mnfst_set[mnfstnode]
1640 return msng_mnfst_set[mnfstnode]
1637
1641
1638 # A function generating function that sets up the initial environment
1642 # A function generating function that sets up the initial environment
1639 # the inner function.
1643 # the inner function.
1640 def filenode_collector(changedfiles):
1644 def filenode_collector(changedfiles):
1641 next_rev = [0]
1645 next_rev = [0]
1642 # This gathers information from each manifestnode included in the
1646 # This gathers information from each manifestnode included in the
1643 # changegroup about which filenodes the manifest node references
1647 # changegroup about which filenodes the manifest node references
1644 # so we can include those in the changegroup too.
1648 # so we can include those in the changegroup too.
1645 #
1649 #
1646 # It also remembers which changenode each filenode belongs to. It
1650 # It also remembers which changenode each filenode belongs to. It
1647 # does this by assuming the a filenode belongs to the changenode
1651 # does this by assuming the a filenode belongs to the changenode
1648 # the first manifest that references it belongs to.
1652 # the first manifest that references it belongs to.
1649 def collect_msng_filenodes(mnfstnode):
1653 def collect_msng_filenodes(mnfstnode):
1650 r = mnfst.rev(mnfstnode)
1654 r = mnfst.rev(mnfstnode)
1651 if r == next_rev[0]:
1655 if r == next_rev[0]:
1652 # If the last rev we looked at was the one just previous,
1656 # If the last rev we looked at was the one just previous,
1653 # we only need to see a diff.
1657 # we only need to see a diff.
1654 deltamf = mnfst.readdelta(mnfstnode)
1658 deltamf = mnfst.readdelta(mnfstnode)
1655 # For each line in the delta
1659 # For each line in the delta
1656 for f, fnode in deltamf.items():
1660 for f, fnode in deltamf.items():
1657 f = changedfiles.get(f, None)
1661 f = changedfiles.get(f, None)
1658 # And if the file is in the list of files we care
1662 # And if the file is in the list of files we care
1659 # about.
1663 # about.
1660 if f is not None:
1664 if f is not None:
1661 # Get the changenode this manifest belongs to
1665 # Get the changenode this manifest belongs to
1662 clnode = msng_mnfst_set[mnfstnode]
1666 clnode = msng_mnfst_set[mnfstnode]
1663 # Create the set of filenodes for the file if
1667 # Create the set of filenodes for the file if
1664 # there isn't one already.
1668 # there isn't one already.
1665 ndset = msng_filenode_set.setdefault(f, {})
1669 ndset = msng_filenode_set.setdefault(f, {})
1666 # And set the filenode's changelog node to the
1670 # And set the filenode's changelog node to the
1667 # manifest's if it hasn't been set already.
1671 # manifest's if it hasn't been set already.
1668 ndset.setdefault(fnode, clnode)
1672 ndset.setdefault(fnode, clnode)
1669 else:
1673 else:
1670 # Otherwise we need a full manifest.
1674 # Otherwise we need a full manifest.
1671 m = mnfst.read(mnfstnode)
1675 m = mnfst.read(mnfstnode)
1672 # For every file in we care about.
1676 # For every file in we care about.
1673 for f in changedfiles:
1677 for f in changedfiles:
1674 fnode = m.get(f, None)
1678 fnode = m.get(f, None)
1675 # If it's in the manifest
1679 # If it's in the manifest
1676 if fnode is not None:
1680 if fnode is not None:
1677 # See comments above.
1681 # See comments above.
1678 clnode = msng_mnfst_set[mnfstnode]
1682 clnode = msng_mnfst_set[mnfstnode]
1679 ndset = msng_filenode_set.setdefault(f, {})
1683 ndset = msng_filenode_set.setdefault(f, {})
1680 ndset.setdefault(fnode, clnode)
1684 ndset.setdefault(fnode, clnode)
1681 # Remember the revision we hope to see next.
1685 # Remember the revision we hope to see next.
1682 next_rev[0] = r + 1
1686 next_rev[0] = r + 1
1683 return collect_msng_filenodes
1687 return collect_msng_filenodes
1684
1688
1685 # We have a list of filenodes we think we need for a file, lets remove
1689 # We have a list of filenodes we think we need for a file, lets remove
1686 # all those we now the recipient must have.
1690 # all those we now the recipient must have.
1687 def prune_filenodes(f, filerevlog):
1691 def prune_filenodes(f, filerevlog):
1688 msngset = msng_filenode_set[f]
1692 msngset = msng_filenode_set[f]
1689 hasset = {}
1693 hasset = {}
1690 # If a 'missing' filenode thinks it belongs to a changenode we
1694 # If a 'missing' filenode thinks it belongs to a changenode we
1691 # assume the recipient must have, then the recipient must have
1695 # assume the recipient must have, then the recipient must have
1692 # that filenode.
1696 # that filenode.
1693 for n in msngset:
1697 for n in msngset:
1694 clnode = cl.node(filerevlog.linkrev(n))
1698 clnode = cl.node(filerevlog.linkrev(n))
1695 if clnode in has_cl_set:
1699 if clnode in has_cl_set:
1696 hasset[n] = 1
1700 hasset[n] = 1
1697 prune_parents(filerevlog, hasset, msngset)
1701 prune_parents(filerevlog, hasset, msngset)
1698
1702
1699 # A function generator function that sets up the a context for the
1703 # A function generator function that sets up the a context for the
1700 # inner function.
1704 # inner function.
1701 def lookup_filenode_link_func(fname):
1705 def lookup_filenode_link_func(fname):
1702 msngset = msng_filenode_set[fname]
1706 msngset = msng_filenode_set[fname]
1703 # Lookup the changenode the filenode belongs to.
1707 # Lookup the changenode the filenode belongs to.
1704 def lookup_filenode_link(fnode):
1708 def lookup_filenode_link(fnode):
1705 return msngset[fnode]
1709 return msngset[fnode]
1706 return lookup_filenode_link
1710 return lookup_filenode_link
1707
1711
1708 # Now that we have all theses utility functions to help out and
1712 # Now that we have all theses utility functions to help out and
1709 # logically divide up the task, generate the group.
1713 # logically divide up the task, generate the group.
1710 def gengroup():
1714 def gengroup():
1711 # The set of changed files starts empty.
1715 # The set of changed files starts empty.
1712 changedfiles = {}
1716 changedfiles = {}
1713 # Create a changenode group generator that will call our functions
1717 # Create a changenode group generator that will call our functions
1714 # back to lookup the owning changenode and collect information.
1718 # back to lookup the owning changenode and collect information.
1715 group = cl.group(msng_cl_lst, identity,
1719 group = cl.group(msng_cl_lst, identity,
1716 manifest_and_file_collector(changedfiles))
1720 manifest_and_file_collector(changedfiles))
1717 for chnk in group:
1721 for chnk in group:
1718 yield chnk
1722 yield chnk
1719
1723
1720 # The list of manifests has been collected by the generator
1724 # The list of manifests has been collected by the generator
1721 # calling our functions back.
1725 # calling our functions back.
1722 prune_manifests()
1726 prune_manifests()
1723 msng_mnfst_lst = msng_mnfst_set.keys()
1727 msng_mnfst_lst = msng_mnfst_set.keys()
1724 # Sort the manifestnodes by revision number.
1728 # Sort the manifestnodes by revision number.
1725 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1729 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1726 # Create a generator for the manifestnodes that calls our lookup
1730 # Create a generator for the manifestnodes that calls our lookup
1727 # and data collection functions back.
1731 # and data collection functions back.
1728 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1732 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1729 filenode_collector(changedfiles))
1733 filenode_collector(changedfiles))
1730 for chnk in group:
1734 for chnk in group:
1731 yield chnk
1735 yield chnk
1732
1736
1733 # These are no longer needed, dereference and toss the memory for
1737 # These are no longer needed, dereference and toss the memory for
1734 # them.
1738 # them.
1735 msng_mnfst_lst = None
1739 msng_mnfst_lst = None
1736 msng_mnfst_set.clear()
1740 msng_mnfst_set.clear()
1737
1741
1738 changedfiles = changedfiles.keys()
1742 changedfiles = changedfiles.keys()
1739 changedfiles.sort()
1743 changedfiles.sort()
1740 # Go through all our files in order sorted by name.
1744 # Go through all our files in order sorted by name.
1741 for fname in changedfiles:
1745 for fname in changedfiles:
1742 filerevlog = self.file(fname)
1746 filerevlog = self.file(fname)
1743 if filerevlog.count() == 0:
1747 if filerevlog.count() == 0:
1744 raise util.Abort(_("empty or missing revlog for %s") % fname)
1748 raise util.Abort(_("empty or missing revlog for %s") % fname)
1745 # Toss out the filenodes that the recipient isn't really
1749 # Toss out the filenodes that the recipient isn't really
1746 # missing.
1750 # missing.
1747 if msng_filenode_set.has_key(fname):
1751 if msng_filenode_set.has_key(fname):
1748 prune_filenodes(fname, filerevlog)
1752 prune_filenodes(fname, filerevlog)
1749 msng_filenode_lst = msng_filenode_set[fname].keys()
1753 msng_filenode_lst = msng_filenode_set[fname].keys()
1750 else:
1754 else:
1751 msng_filenode_lst = []
1755 msng_filenode_lst = []
1752 # If any filenodes are left, generate the group for them,
1756 # If any filenodes are left, generate the group for them,
1753 # otherwise don't bother.
1757 # otherwise don't bother.
1754 if len(msng_filenode_lst) > 0:
1758 if len(msng_filenode_lst) > 0:
1755 yield changegroup.chunkheader(len(fname))
1759 yield changegroup.chunkheader(len(fname))
1756 yield fname
1760 yield fname
1757 # Sort the filenodes by their revision #
1761 # Sort the filenodes by their revision #
1758 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1762 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1759 # Create a group generator and only pass in a changenode
1763 # Create a group generator and only pass in a changenode
1760 # lookup function as we need to collect no information
1764 # lookup function as we need to collect no information
1761 # from filenodes.
1765 # from filenodes.
1762 group = filerevlog.group(msng_filenode_lst,
1766 group = filerevlog.group(msng_filenode_lst,
1763 lookup_filenode_link_func(fname))
1767 lookup_filenode_link_func(fname))
1764 for chnk in group:
1768 for chnk in group:
1765 yield chnk
1769 yield chnk
1766 if msng_filenode_set.has_key(fname):
1770 if msng_filenode_set.has_key(fname):
1767 # Don't need this anymore, toss it to free memory.
1771 # Don't need this anymore, toss it to free memory.
1768 del msng_filenode_set[fname]
1772 del msng_filenode_set[fname]
1769 # Signal that no more groups are left.
1773 # Signal that no more groups are left.
1770 yield changegroup.closechunk()
1774 yield changegroup.closechunk()
1771
1775
1772 if msng_cl_lst:
1776 if msng_cl_lst:
1773 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1777 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1774
1778
1775 return util.chunkbuffer(gengroup())
1779 return util.chunkbuffer(gengroup())
1776
1780
def changegroup(self, basenodes, source):
    """Generate a changegroup of all nodes that we have that a recipient
    doesn't.

    Simpler than changegroupsubset: we may assume the recipient already
    has every changenode we aren't sending."""

    # Give pre-send hooks a chance to veto the operation.
    self.hook('preoutgoing', throw=True, source=source)

    cl = self.changelog
    nodes = cl.nodesbetween(basenodes, None)[0]
    # Changelog revision numbers going out; used below to decide which
    # manifest and file revisions belong in the group.
    revset = dict.fromkeys([cl.rev(n) for n in nodes])
    self.changegroupinfo(nodes, source)

    def ident(x):
        # Changelog nodes are their own link nodes.
        return x

    def linkednodes(revlog):
        # Yield, in revision order, the nodes of revlog whose linkrev
        # falls inside the outgoing revision set.
        for rev in xrange(0, revlog.count()):
            node = revlog.node(rev)
            if revlog.linkrev(node) in revset:
                yield node

    def changed_file_collector(changedfileset):
        # Record every file touched by each outgoing changeset.
        def collect_changed_files(clnode):
            c = cl.read(clnode)
            for fname in c[3]:
                changedfileset[fname] = 1
        return collect_changed_files

    def lookuprevlink_func(revlog):
        # Map a node of revlog back to the changelog node that
        # introduced it.
        def lookuprevlink(n):
            return cl.node(revlog.linkrev(n))
        return lookuprevlink

    def gengroup():
        # construct a list of all changed files
        changedfiles = {}

        for chunk in cl.group(nodes, ident,
                              changed_file_collector(changedfiles)):
            yield chunk
        changedfiles = changedfiles.keys()
        changedfiles.sort()

        mnfst = self.manifest
        for chunk in mnfst.group(linkednodes(mnfst),
                                 lookuprevlink_func(mnfst)):
            yield chunk

        for fname in changedfiles:
            filerevlog = self.file(fname)
            if filerevlog.count() == 0:
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            nodeiter = list(linkednodes(filerevlog))
            if nodeiter:
                yield changegroup.chunkheader(len(fname))
                yield fname
                lookup = lookuprevlink_func(filerevlog)
                for chunk in filerevlog.group(nodeiter, lookup):
                    yield chunk

        yield changegroup.closechunk()

    if nodes:
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    return util.chunkbuffer(gengroup())
1846
1850
def addchangegroup(self, source, srctype, url):
    """add changegroup to repo.

    return values:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - less heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    # Progress/link callback for changelog nodes: reports each changeset
    # and returns the revision number it will be stored at.
    def csmap(x):
        self.ui.debug(_("add changeset %s\n") % short(x))
        return cl.count()

    # Link callback for manifest/file nodes: node -> changelog revision.
    def revmap(x):
        return cl.rev(x)

    if not source:
        return 0

    self.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0

    # write changelog data to temp files so concurrent readers will not
    # see an inconsistent view
    cl = self.changelog
    cl.delayupdate()
    oldheads = len(cl.heads())

    tr = self.transaction()
    try:
        trp = weakref.proxy(tr)

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        oldtip = cl.count() - 1
        chunkiter = changegroup.chunkiter(source)
        if cl.addgroup(chunkiter, csmap, trp, 1) is None:
            raise util.Abort(_("received changelog group is empty"))
        newtip = cl.count() - 1
        changesets = newtip - oldtip

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        chunkiter = changegroup.chunkiter(source)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(chunkiter, revmap, trp)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while True:
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            revcount = fl.count()
            chunkiter = changegroup.chunkiter(source)
            if fl.addgroup(chunkiter, revmap, trp) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += fl.count() - revcount
            files += 1

        # make changelog see real files again
        cl.finalize(trp)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(oldtip + 1)),
                      source=srctype, url=url)

        tr.close()
    finally:
        # drop the transaction reference so its destructor (and any
        # pending rollback) runs before the post-transaction hooks
        del tr

    if changesets > 0:
        self.hook("changegroup", node=hex(self.changelog.node(oldtip + 1)),
                  source=srctype, url=url)

        for i in xrange(oldtip + 1, newtip + 1):
            self.hook("incoming", node=hex(self.changelog.node(i)),
                      source=srctype, url=url)

    # never return 0 here:
    if newheads < oldheads:
        return newheads - oldheads - 1
    else:
        return newheads - oldheads + 1
1946
1950
1947
1951
def stream_in(self, remote):
    """Perform a streaming clone by copying raw store files from remote.

    Reads the stream_out wire protocol: a status code line, a
    "<file count> <byte count>" line, then for each file a
    "<name>\\0<size>" header followed by the raw file data.

    Returns len(self.heads()) + 1 so the result is shaped like the
    head-count return value of a pull-based clone.
    Raises util.Abort on a server-reported error and
    util.UnexpectedOutput on a malformed response.
    """
    fp = remote.stream_out()
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    if resp == 1:
        raise util.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise util.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise util.Abort(_('the server sent an unknown error code'))
    self.ui.status(_('streaming all changes\n'))
    l = fp.readline()
    try:
        total_files, total_bytes = map(int, l.split(' ', 1))
    # BUG FIX: the original read 'except ValueError, TypeError:', which
    # only catches ValueError and rebinds the *name* TypeError to the
    # exception instance; a parenthesized tuple is required to catch both.
    except (ValueError, TypeError):
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    self.ui.status(_('%d files to transfer, %s of data\n') %
                   (total_files, util.bytecount(total_bytes)))
    start = time.time()
    for i in xrange(total_files):
        # XXX doesn't support '\n' or '\r' in filenames
        l = fp.readline()
        try:
            name, size = l.split('\0', 1)
            size = int(size)
        # BUG FIX: same tuple-form fix as above.
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
        ofp = self.sopener(name, 'w')
        for chunk in util.filechunkiter(fp, limit=size):
            ofp.write(chunk)
        ofp.close()
    elapsed = time.time() - start
    if elapsed <= 0:
        # guard against a zero/negative clock delta in the rate report
        elapsed = 0.001
    self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                   (util.bytecount(total_bytes), elapsed,
                    util.bytecount(total_bytes / elapsed)))
    self.invalidate()
    return len(self.heads()) + 1
1994
1998
def clone(self, remote, heads=None, stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible'''

    # BUG-PRONE DEFAULT FIX: the original signature used a shared mutable
    # default (heads=[]). Use the None sentinel and normalize to a fresh
    # list, which preserves the observable behavior for all callers.
    if heads is None:
        heads = []

    # now, all clients that can request uncompressed clones can
    # read repo formats supported by all servers that can serve
    # them.

    # if revlog format changes, client will have to check version
    # and format flags on "stream" capability, and use
    # uncompressed only if compatible.

    if stream and not heads and remote.capable('stream'):
        return self.stream_in(remote)
    return self.pull(remote, heads)
2013
2017
2014 # used to avoid circular references so destructors work
2018 # used to avoid circular references so destructors work
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callable that performs the given (src, dest) renames.

    The pairs are copied into tuples up front so later mutation of the
    caller's list cannot change what gets renamed.
    """
    pending = [tuple(pair) for pair in files]
    def renameall():
        for source, target in pending:
            util.rename(source, target)
    return renameall
2021
2025
def instance(ui, path, create):
    """Open (or create, when *create* is true) the local repository at
    *path*, after stripping any leading 'file:' scheme."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2024
2028
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
@@ -1,7 +1,7 b''
1 0
1 0
2 0
2 0
3 adding changesets
3 adding changesets
4 transaction abort!
4 transaction abort!
5 rollback completed
5 rollback completed
6 killed!
6 killed!
7 .hg/00changelog.i .hg/journal.dirstate .hg/requires .hg/store .hg/store/00changelog.i .hg/store/00changelog.i.a
7 .hg/00changelog.i .hg/journal.branch .hg/journal.dirstate .hg/requires .hg/store .hg/store/00changelog.i .hg/store/00changelog.i.a
@@ -1,15 +1,21 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 mkdir t
3 mkdir t
4 cd t
4 cd t
5 hg init
5 hg init
6 echo a > a
6 echo a > a
7 hg add a
7 hg add a
8 hg commit -m "test" -d "1000000 0"
8 hg commit -m "test" -d "1000000 0"
9 hg verify
9 hg verify
10 hg parents
10 hg parents
11 hg status
11 hg status
12 hg rollback
12 hg rollback
13 hg verify
13 hg verify
14 hg parents
14 hg parents
15 hg status
15 hg status
16
17 # Test issue 902
18 hg commit -m "test"
19 hg branch test
20 hg rollback
21 hg branch
@@ -1,18 +1,21 b''
1 checking changesets
1 checking changesets
2 checking manifests
2 checking manifests
3 crosschecking files in changesets and manifests
3 crosschecking files in changesets and manifests
4 checking files
4 checking files
5 1 files, 1 changesets, 1 total revisions
5 1 files, 1 changesets, 1 total revisions
6 changeset: 0:0acdaf898367
6 changeset: 0:0acdaf898367
7 tag: tip
7 tag: tip
8 user: test
8 user: test
9 date: Mon Jan 12 13:46:40 1970 +0000
9 date: Mon Jan 12 13:46:40 1970 +0000
10 summary: test
10 summary: test
11
11
12 rolling back last transaction
12 rolling back last transaction
13 checking changesets
13 checking changesets
14 checking manifests
14 checking manifests
15 crosschecking files in changesets and manifests
15 crosschecking files in changesets and manifests
16 checking files
16 checking files
17 0 files, 0 changesets, 0 total revisions
17 0 files, 0 changesets, 0 total revisions
18 A a
18 A a
19 marked working directory as branch test
20 rolling back last transaction
21 default
General Comments 0
You need to be logged in to leave comments. Login now