addchangegroup: add option to skip check for empty changelog group
Alexis S. L. Carvalho
r5907:afb7bdf1 default
@@ -1,2034 +1,2034 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import *
from i18n import _
import repo, changegroup
import changelog, dirstate, filelog, manifest, context, weakref
import re, lock, transaction, tempfile, stat, errno, ui
import os, revlog, time, util, extensions, hook

class localrepository(repo.repository):
    capabilities = util.set(('lookup', 'changegroupsubset'))
    supported = ('revlogv1', 'store')

    def __init__(self, parentui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath),
                                          self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        self._transref = self._lockref = self._wlockref = None

    def __getattr__(self, name):
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            # force the changelog to load first so sopener.defversion is set
            # before the manifest revlog is opened
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name

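    # A usage sketch for the lazy loading above (illustrative, not part of
    # this module): __getattr__ only fires for missing attributes, so the
    # first access to repo.changelog, repo.manifest or repo.dirstate builds
    # the object and caches it as an ordinary instance attribute; later
    # accesses never reach __getattr__ again.
    #
    #   r = localrepository(parentui, '/path/to/repo') # opens no revlogs
    #   tip = r.changelog.tip() # first touch parses 00changelog.i
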
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, name, node, message, local, user, date, parent=None,
             extra={}):
        use_dirstate = parent is None

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        def writetag(fp, name, munge, prevtags):
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetag(fp, name, None, prevtags)
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetag(fp, name, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self._tag(name, node, message, local, user, date)

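    # Format note for the tag files handled above (inferred from writetag
    # and readtags): both .hgtags and .hg/localtags hold one entry per
    # line, a 40-character hex changeset node, one space, then the tag
    # name, e.g.:
    #
    #   0123456789abcdef0123456789abcdef01234567 release-1.0
    #
    # Committed .hgtags entries are stored in UTF-8 (writetag munges with
    # util.fromlocal); localtags stay in the local charset.
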
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supersedes us OR
                #  mutual supersedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k, nh in globaltags.items():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache

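    # Usage sketch (illustrative): tags() maps tag name to binary node and
    # always includes 'tip', so repo.tags()['tip'] == repo.changelog.tip().
    # A tag whose winning entry is nullid is treated as removed and left
    # out of tagscache, though its type is still recorded in
    # _tagstypecache.
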
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagstypecache.get(tagname)

    def _hgtagsnodes(self):
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self):
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchtags(self):
        if self.branchcache is not None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx
        partial = self._branchtags()

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        return self.branchcache

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self.changectx(r)
            b = c.branch()
            partial[b] = c.node()

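    # On-disk layout of .hg/branch.cache as read and written above:
    #
    #   <40-hex tip node> <tip rev>       (sanity-check line)
    #   <40-hex head node> <branch name>  (one line per branch)
    #
    # Any parse problem simply discards the cache; _branchtags() then
    # rebuilds the stale range via _updatebranchcache and rewrites it.
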
    def lookup(self, key):
        if key == '.':
            key, second = self.dirstate.parents()
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
            if second != nullid:
                self.ui.warn(_("warning: working directory has two parents, "
                               "tag '.' uses the first\n"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)

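    # Resolution order in lookup() above: '.' (first dirstate parent), the
    # literal 'null', an exact changelog match (local rev number, binary
    # node, or full hex), tag names, branch names, then an unambiguous hex
    # prefix via _partialmatch. So a branch named like an existing tag is
    # shadowed by the tag, and a tag named '12' is shadowed by rev 12.
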
    def dev(self):
        return os.lstat(self.path).st_dev

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def sjoin(self, f):
        f = self.encodefn(f)
        return os.path.join(self.spath, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid=None):
        return context.changectx(self, changeid)

    def workingctx(self):
        return context.workingctx(self)

    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.filterpats[filter] = l

        for mf, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        self.wopener(filename, 'w').write(data)
        util.set_flags(self.wjoin(filename), flags)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        if self._transref and self._transref():
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self._transref = weakref.ref(tr)
        return tr

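    # Transaction bookkeeping sketch: while a transaction is open, rollback
    # data lives in .hg/store/journal plus the journal.dirstate and
    # journal.branch snapshots written above. On tr.close() the
    # aftertrans() renames turn journal* into undo*, which rollback() below
    # replays; a crash instead leaves journal* behind for recover().
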
    def recover(self):
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                branch = self.opener("undo.branch").read()
                self.dirstate.setbranch(branch)
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock

    def invalidate(self):
        for a in "changelog manifest".split():
            if hasattr(self, a):
                self.__delattr__(a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        if self._lockref and self._lockref():
            return self._lockref()

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

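    # Lock ordering note: callers needing both locks take wlock() (working
    # directory, .hg/wlock) before lock() (store, protecting the revlogs),
    # as commit() and rollback() do; taking them in the opposite order can
    # deadlock against another process. Both methods hand back a live lock
    # through the weakref if this process already holds one.
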
    def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)

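    # Metadata sketch for filecommit(): a rename or copy is recorded inside
    # the new filelog revision as two metadata keys, "copy" (source path)
    # and "copyrev" (hex source filenode). The merge machinery treats that
    # pair as a virtual parent, which is why fp1 is forced to nullid in the
    # copy branch above.
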
    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)

    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        wlock = lock = tr = None
        valid = 0 # don't save the dirstate if this isn't set
        try:
            commit = []
            remove = []
            changed = []
            use_dirstate = (p1 is None) # not rawcommit
            extra = extra.copy()

            if use_dirstate:
                if files:
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            commit.append(f)
                        elif s == 'r':
                            remove.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    changes = self.status(match=match)[:5]
                    modified, added, removed, deleted, unknown = changes
                    commit = modified + added
                    remove = removed
            else:
                commit = files

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)

            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                branchname = self.workingctx().branch()
                try:
                    branchname = branchname.decode('UTF-8').encode('UTF-8')
                except UnicodeDecodeError:
                    raise util.Abort(_('branch name not in UTF-8!'))
            else:
                branchname = ""

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            wlock = self.wlock()
            lock = self.lock()
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            linkrev = self.changelog.count()
            commit.sort()
            is_exec = util.execfunc(self.root, m1.execf)
            is_link = util.linkfunc(self.root, m1.linkf)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
                    new_exec = is_exec(f)
                    new_link = is_link(f)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        old_exec = m1.execf(f)
                        old_link = m1.linkf(f)
                        if old_exec != new_exec or old_link != new_link:
                            changed.append(f)
                    m1.set(f, new_exec, new_link)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            # update manifest
            m1.update(new)
            remove.sort()
            removed = []

            for f in remove:
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            new = new.keys()
            new.sort()

            user = user or self.ui.username()
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            if branchname:
                extra["branch"] = branchname

            if use_dirstate:
                lines = [line.rstrip() for line in text.rstrip().splitlines()]
                while lines and not lines[0]:
                    del lines[0]
                if not lines:
                    raise util.Abort(_("empty commit message"))
                text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, date, extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache and "branch" in extra:
                self.branchcache[util.tolocal(extra["branch"])] = n

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in removed:
                        self.dirstate.forget(f)
                valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr, lock, wlock

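    # Hook sequence in commit() above: "precommit" fires before anything is
    # written, "pretxncommit" fires with the new node while the transaction
    # is still open (raising there aborts and rolls back), and "commit"
    # fires only after tr.close(). An illustrative hgrc wiring (the hook
    # name and module are hypothetical):
    #
    #   [hooks]
    #   pretxncommit.checkmsg = python:myhooks.checkmessage
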
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

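    # Usage sketch for walk() (illustrative; process() is hypothetical):
    # with a node it yields ('m', fn) for manifest entries matching the
    # match function and ('b', fn) for badmatch hits; without a node it
    # defers to dirstate.walk, whose sources include 'f' for files found
    # in the working tree:
    #
    #   for src, fn in repo.walk(files=['.']):
    #       process(src, fn)
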
905 def status(self, node1=None, node2=None, files=[], match=util.always,
905 def status(self, node1=None, node2=None, files=[], match=util.always,
906 list_ignored=False, list_clean=False):
906 list_ignored=False, list_clean=False):
907 """return status of files between two nodes or node and working directory
907 """return status of files between two nodes or node and working directory
908
908
909 If node1 is None, use the first dirstate parent instead.
909 If node1 is None, use the first dirstate parent instead.
910 If node2 is None, compare node1 with working directory.
910 If node2 is None, compare node1 with working directory.
911 """
911 """
912
912
913 def fcmp(fn, getnode):
913 def fcmp(fn, getnode):
914 t1 = self.wread(fn)
914 t1 = self.wread(fn)
915 return self.file(fn).cmp(getnode(fn), t1)
915 return self.file(fn).cmp(getnode(fn), t1)
916
916
917 def mfmatches(node):
        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    for f in lookup:
                        if f not in ctx or ctx[f].cmp(self.wread(f)):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

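    # Illustrative sketch (not part of localrepo.py): how a caller typically
    # consumes the seven lists returned by status() above.  The keyword
    # parameter names are assumed from the variables used in the body.
    def _status_usage_sketch(repo):
        # compare the working directory against its first parent, the
        # default comparison performed by 'hg status'
        modified, added, removed, deleted, unknown, ignored, clean = \
            repo.status(list_ignored=True, list_clean=True)
        for prefix, names in zip('MAR!?IC', (modified, added, removed,
                                             deleted, unknown, ignored,
                                             clean)):
            for name in names:
                print '%s %s' % (prefix, name)
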
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    # report the repo-relative name, consistent with the
                    # other rejection paths
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            del wlock

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if dest not in self.dirstate:
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

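    # Illustrative sketch (not part of localrepo.py): the negated-revision
    # decorate-sort-undecorate idiom used by heads() above, shown on plain
    # integers.  Sorting (-rev, item) pairs yields items in descending
    # revision order without needing a custom comparison function.
    def _descending_sort_sketch(revs):
        decorated = [(-r, r) for r in revs]
        decorated.sort()
        return [r for (neg, r) in decorated]
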
    def branchheads(self, branch, start=None):
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch, ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

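    # Illustrative sketch (not part of localrepo.py): the reverse-topological
    # walk described in the comment block above, on a toy history.
    # 'parentrevs' and 'branchof' are hypothetical stand-ins for the
    # changelog and changectx lookups, not Mercurial APIs.
    def _branchheads_sketch(tiprev, parentrevs, branchof, branch):
        heads = [tiprev]
        ancestors = dict.fromkeys(parentrevs(tiprev))
        for rev in xrange(tiprev - 1, -1, -1):
            if rev in ancestors:
                # already known not to be a head; replace it by its parents
                for p in parentrevs(rev):
                    ancestors[p] = 1
                del ancestors[rev]
            elif branchof(rev) == branch:
                # in the branch but not an ancestor of a known head
                heads.append(rev)
                for p in parentrevs(rev):
                    ancestors[p] = 1
        return heads
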
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

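    # Illustrative sketch (not part of localrepo.py): between() records nodes
    # at exponentially growing distances (1, 2, 4, 8, ...) while walking first
    # parents from top towards bottom; the discovery code later binary
    # searches inside these ever-larger gaps.  Toy version on a linear
    # history where the parent of rev n is simply n - 1.
    def _between_sampling_sketch(top, bottom):
        n, l, i, f = top, [], 0, 1
        while n != bottom:
            if i == f:
                l.append(n)   # sample at distances 1, 2, 4, 8, ...
                f *= 2
            n -= 1            # step to the (first) parent
            i += 1
        return l              # e.g. top=16, bottom=0 -> [15, 14, 12, 8]
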
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but have no children in either.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

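    # Illustrative sketch (not part of localrepo.py): the narrowing loop
    # above, reduced to a linear history of integer revs.  'known' is a dict
    # whose keys are the revs the local side already has (playing the role
    # of the nodemap), 'bottom' must be such a rev, and the sample list
    # mimics what remote.between() returns: revs at distances 1, 2, 4, ...
    # below the unknown head.  Each pass either pins down the earliest
    # unknown rev or halves the gap left to search.
    def _narrowing_sketch(known, head, bottom):
        search = [(head, bottom)]
        fetch = {}
        while search:
            top, bot = search.pop(0)
            samples = [top - d for d in (1, 2, 4, 8, 16) if top - d > bot]
            samples.append(bot)
            p, f = top, 1
            for i in samples:
                if i in known:
                    if f <= 2:
                        fetch[p] = 1           # earliest unknown rev found
                    else:
                        search.append((p, i))  # keep narrowing this gap
                    break
                p, f = i, f * 2
        return fetch.keys()
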
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

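    # Illustrative sketch (not part of localrepo.py): the pruning pass above,
    # on a toy DAG given as a 'parents' callable returning the (possibly
    # empty) parent list of a node.  Everything reachable from 'common' is
    # removed; whatever remains with no remaining parent is a root that must
    # be pushed.
    def _findoutgoing_sketch(parents, allnodes, common):
        remain = dict.fromkeys(allnodes)
        remove = list(common)
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                remove.extend(parents(n))
        return [n for n in remain
                if not [p for p in parents(n) if p in remain]]
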
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset"))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

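    # Illustrative sketch (not part of localrepo.py): the new-head check in
    # prepush(), on sets of integer revs.  'descends(h, r)' is a hypothetical
    # predicate that is True when local head h is a descendant of r (and
    # False when r is unknown locally, which folds that case in).  A remote
    # head stays a head after the push unless some outgoing head descends
    # from it, so the push is flagged when the combined head count grows.
    def _new_remote_heads_sketch(local_heads, remote_heads, descends):
        newheads = list(local_heads)
        for r in remote_heads:
            if not [h for h in local_heads if descends(h, r)]:
                newheads.append(r)
        return len(newheads) > len(remote_heads)
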
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

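        # Worked example (not in the original): with a linear file history
        # n1 <- n2 <- n3 and hasset = {n3: 1}, the walk above adds n2 and n1
        # to hasset (ancestors of a node the recipient has), so all three are
        # popped from msngset and none of them is re-sent.
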
        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

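    # Illustrative sketch (not part of localrepo.py): the stream produced by
    # gengroup() above comes in three sections -- changelog deltas, manifest
    # deltas, then one (filename, deltas) group per changed file -- each
    # group terminated by an empty chunk.  Assuming 'source' is positioned
    # just past the changelog and manifest sections, a consumer can walk the
    # per-file sections like this (changegroup.getchunk/chunkiter usage as
    # in addchangegroup below):
    def _filegroup_reader_sketch(source):
        files = []
        while 1:
            f = changegroup.getchunk(source)   # next filename, or "" at end
            if not f:
                break
            files.append(f)
            for chunk in changegroup.chunkiter(source):
                pass                           # deltas for this file's revlog
        return files
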
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(revlog):
            # yield the nodes of this revlog that belong to the outgoing
            # changesets, in revision order
            for r in xrange(revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = list(gennodelst(filerevlog))
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

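    # Illustrative sketch, not part of this file: roughly how a pull pairs
    # changegroup() on the source with addchangegroup() on the destination.
    # 'remote' and 'fetch' are assumed to come from the usual discovery step.
    #
    #   cg = remote.changegroup(fetch, 'pull')
    #   modheads = self.addchangegroup(cg, 'pull', remote.url())
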
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1

        if emptyok is true, an empty incoming changelog group is accepted
        instead of raising an error.
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

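    # Illustrative sketch, not part of this file: interpreting the return
    # value at a call site ('repo', 'cg' and 'url' are assumed to exist):
    #
    #   modheads = repo.addchangegroup(cg, 'pull', url)
    #   if modheads == 0:
    #       pass    # nothing was added
    #   elif modheads > 1:
    #       pass    # modheads - 1 new heads; a merge may be needed
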
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

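    # Wire format consumed by stream_in() above (a summary derived from the
    # parsing code, not a normative spec):
    #
    #   <resp>\n                      0 = ok, 1 = forbidden, 2 = lock failed
    #   <total_files> <total_bytes>\n
    #   then, for each file:
    #   <name>\0<size>\n              followed by exactly <size> bytes of
    #                                 raw store data for that file
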
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

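    # Minimal usage sketch (the repository path and URL are made up, and the
    # hg module API of this era is assumed):
    #
    #   from mercurial import ui, hg
    #   u = ui.ui()
    #   src = hg.repository(u, 'http://example.com/hg/repo')
    #   dest = hg.repository(u, '/tmp/mirror', create=1)
    #   dest.clone(src, stream=True)
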
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

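# Sketch of how aftertrans() is expected to be wired up by
# localrepository.transaction() elsewhere in this file (names abbreviated,
# exact signature assumed):
#
#   renames = [(sjoin("journal"), sjoin("undo")), ...]
#   tr = transaction.transaction(self.ui.warn, self.sopener,
#                                sjoin("journal"), aftertrans(renames))
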
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True