localrepo.invalidate: invalidate branchcache
Alexis S. L. Carvalho
r6118:b69a39ab default
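This changeset adds one line to localrepository.invalidate(): the in-memory branchcache is now reset to None along with tagscache, _tagstypecache, and nodetagscache. invalidate() runs whenever the changelog may have changed underneath the process (recover() and rollback() call it directly, and lock() passes it as the acquire callback), so a branch-to-head mapping memoized before that point could still refer to changesets that no longer exist; dropping it forces the next branchtags() call to recompute from the changelog.

A minimal standalone sketch of the memoize-then-invalidate pattern at work (a simplified stand-in class, not the actual Mercurial API):

    class CacheDemo(object):
        """Toy model of localrepository's branchcache handling."""

        def __init__(self):
            self.branchcache = None  # memoized {branch: tip node} mapping

        def _computebranches(self):
            # stands in for the changelog walk done by _branchtags()
            return {'default': 'deadbeef'}

        def branchtags(self):
            # first call fills the cache; later calls reuse it
            if self.branchcache is None:
                self.branchcache = self._computebranches()
            return self.branchcache

        def invalidate(self):
            # the fix: drop the memoized mapping so the next
            # branchtags() call recomputes it from current data
            self.branchcache = None

Without that last reset, a rollback that strips the tip would leave branchtags() serving the stale, pre-rollback mapping for the rest of the process's life.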
@@ -1,2102 +1,2103 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import re, lock, transaction, tempfile, stat, errno, ui
12 import re, lock, transaction, tempfile, stat, errno, ui
13 import os, revlog, time, util, extensions, hook, inspect
13 import os, revlog, time, util, extensions, hook, inspect
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = util.set(('lookup', 'changegroupsubset'))
16 capabilities = util.set(('lookup', 'changegroupsubset'))
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __init__(self, parentui, path=None, create=0):
19 def __init__(self, parentui, path=None, create=0):
20 repo.repository.__init__(self)
20 repo.repository.__init__(self)
21 self.root = os.path.realpath(path)
21 self.root = os.path.realpath(path)
22 self.path = os.path.join(self.root, ".hg")
22 self.path = os.path.join(self.root, ".hg")
23 self.origroot = path
23 self.origroot = path
24 self.opener = util.opener(self.path)
24 self.opener = util.opener(self.path)
25 self.wopener = util.opener(self.root)
25 self.wopener = util.opener(self.root)
26
26
27 if not os.path.isdir(self.path):
27 if not os.path.isdir(self.path):
28 if create:
28 if create:
29 if not os.path.exists(path):
29 if not os.path.exists(path):
30 os.mkdir(path)
30 os.mkdir(path)
31 os.mkdir(self.path)
31 os.mkdir(self.path)
32 requirements = ["revlogv1"]
32 requirements = ["revlogv1"]
33 if parentui.configbool('format', 'usestore', True):
33 if parentui.configbool('format', 'usestore', True):
34 os.mkdir(os.path.join(self.path, "store"))
34 os.mkdir(os.path.join(self.path, "store"))
35 requirements.append("store")
35 requirements.append("store")
36 # create an invalid changelog
36 # create an invalid changelog
37 self.opener("00changelog.i", "a").write(
37 self.opener("00changelog.i", "a").write(
38 '\0\0\0\2' # represents revlogv2
38 '\0\0\0\2' # represents revlogv2
39 ' dummy changelog to prevent using the old repo layout'
39 ' dummy changelog to prevent using the old repo layout'
40 )
40 )
41 reqfile = self.opener("requires", "w")
41 reqfile = self.opener("requires", "w")
42 for r in requirements:
42 for r in requirements:
43 reqfile.write("%s\n" % r)
43 reqfile.write("%s\n" % r)
44 reqfile.close()
44 reqfile.close()
45 else:
45 else:
46 raise repo.RepoError(_("repository %s not found") % path)
46 raise repo.RepoError(_("repository %s not found") % path)
47 elif create:
47 elif create:
48 raise repo.RepoError(_("repository %s already exists") % path)
48 raise repo.RepoError(_("repository %s already exists") % path)
49 else:
49 else:
50 # find requirements
50 # find requirements
51 try:
51 try:
52 requirements = self.opener("requires").read().splitlines()
52 requirements = self.opener("requires").read().splitlines()
53 except IOError, inst:
53 except IOError, inst:
54 if inst.errno != errno.ENOENT:
54 if inst.errno != errno.ENOENT:
55 raise
55 raise
56 requirements = []
56 requirements = []
57 # check them
57 # check them
58 for r in requirements:
58 for r in requirements:
59 if r not in self.supported:
59 if r not in self.supported:
60 raise repo.RepoError(_("requirement '%s' not supported") % r)
60 raise repo.RepoError(_("requirement '%s' not supported") % r)
61
61
62 # setup store
62 # setup store
63 if "store" in requirements:
63 if "store" in requirements:
64 self.encodefn = util.encodefilename
64 self.encodefn = util.encodefilename
65 self.decodefn = util.decodefilename
65 self.decodefn = util.decodefilename
66 self.spath = os.path.join(self.path, "store")
66 self.spath = os.path.join(self.path, "store")
67 else:
67 else:
68 self.encodefn = lambda x: x
68 self.encodefn = lambda x: x
69 self.decodefn = lambda x: x
69 self.decodefn = lambda x: x
70 self.spath = self.path
70 self.spath = self.path
71
71
72 try:
72 try:
73 # files in .hg/ will be created using this mode
73 # files in .hg/ will be created using this mode
74 mode = os.stat(self.spath).st_mode
74 mode = os.stat(self.spath).st_mode
75 # avoid some useless chmods
75 # avoid some useless chmods
76 if (0777 & ~util._umask) == (0777 & mode):
76 if (0777 & ~util._umask) == (0777 & mode):
77 mode = None
77 mode = None
78 except OSError:
78 except OSError:
79 mode = None
79 mode = None
80
80
81 self._createmode = mode
81 self._createmode = mode
82 self.opener.createmode = mode
82 self.opener.createmode = mode
83 sopener = util.opener(self.spath)
83 sopener = util.opener(self.spath)
84 sopener.createmode = mode
84 sopener.createmode = mode
85 self.sopener = util.encodedopener(sopener, self.encodefn)
85 self.sopener = util.encodedopener(sopener, self.encodefn)
86
86
87 self.ui = ui.ui(parentui=parentui)
87 self.ui = ui.ui(parentui=parentui)
88 try:
88 try:
89 self.ui.readconfig(self.join("hgrc"), self.root)
89 self.ui.readconfig(self.join("hgrc"), self.root)
90 extensions.loadall(self.ui)
90 extensions.loadall(self.ui)
91 except IOError:
91 except IOError:
92 pass
92 pass
93
93
94 self.tagscache = None
94 self.tagscache = None
95 self._tagstypecache = None
95 self._tagstypecache = None
96 self.branchcache = None
96 self.branchcache = None
97 self.nodetagscache = None
97 self.nodetagscache = None
98 self.filterpats = {}
98 self.filterpats = {}
99 self._datafilters = {}
99 self._datafilters = {}
100 self._transref = self._lockref = self._wlockref = None
100 self._transref = self._lockref = self._wlockref = None
101
101
102 def __getattr__(self, name):
102 def __getattr__(self, name):
103 if name == 'changelog':
103 if name == 'changelog':
104 self.changelog = changelog.changelog(self.sopener)
104 self.changelog = changelog.changelog(self.sopener)
105 self.sopener.defversion = self.changelog.version
105 self.sopener.defversion = self.changelog.version
106 return self.changelog
106 return self.changelog
107 if name == 'manifest':
107 if name == 'manifest':
108 self.changelog
108 self.changelog
109 self.manifest = manifest.manifest(self.sopener)
109 self.manifest = manifest.manifest(self.sopener)
110 return self.manifest
110 return self.manifest
111 if name == 'dirstate':
111 if name == 'dirstate':
112 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
112 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
113 return self.dirstate
113 return self.dirstate
114 else:
114 else:
115 raise AttributeError, name
115 raise AttributeError, name
116
116
117 def url(self):
117 def url(self):
118 return 'file:' + self.root
118 return 'file:' + self.root
119
119
120 def hook(self, name, throw=False, **args):
120 def hook(self, name, throw=False, **args):
121 return hook.hook(self.ui, self, name, throw, **args)
121 return hook.hook(self.ui, self, name, throw, **args)
122
122
123 tag_disallowed = ':\r\n'
123 tag_disallowed = ':\r\n'
124
124
125 def _tag(self, name, node, message, local, user, date, parent=None,
125 def _tag(self, name, node, message, local, user, date, parent=None,
126 extra={}):
126 extra={}):
127 use_dirstate = parent is None
127 use_dirstate = parent is None
128
128
129 for c in self.tag_disallowed:
129 for c in self.tag_disallowed:
130 if c in name:
130 if c in name:
131 raise util.Abort(_('%r cannot be used in a tag name') % c)
131 raise util.Abort(_('%r cannot be used in a tag name') % c)
132
132
133 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
133 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
134
134
135 def writetag(fp, name, munge, prevtags):
135 def writetag(fp, name, munge, prevtags):
136 fp.seek(0, 2)
136 fp.seek(0, 2)
137 if prevtags and prevtags[-1] != '\n':
137 if prevtags and prevtags[-1] != '\n':
138 fp.write('\n')
138 fp.write('\n')
139 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
139 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
140 fp.close()
140 fp.close()
141
141
142 prevtags = ''
142 prevtags = ''
143 if local:
143 if local:
144 try:
144 try:
145 fp = self.opener('localtags', 'r+')
145 fp = self.opener('localtags', 'r+')
146 except IOError, err:
146 except IOError, err:
147 fp = self.opener('localtags', 'a')
147 fp = self.opener('localtags', 'a')
148 else:
148 else:
149 prevtags = fp.read()
149 prevtags = fp.read()
150
150
151 # local tags are stored in the current charset
151 # local tags are stored in the current charset
152 writetag(fp, name, None, prevtags)
152 writetag(fp, name, None, prevtags)
153 self.hook('tag', node=hex(node), tag=name, local=local)
153 self.hook('tag', node=hex(node), tag=name, local=local)
154 return
154 return
155
155
156 if use_dirstate:
156 if use_dirstate:
157 try:
157 try:
158 fp = self.wfile('.hgtags', 'rb+')
158 fp = self.wfile('.hgtags', 'rb+')
159 except IOError, err:
159 except IOError, err:
160 fp = self.wfile('.hgtags', 'ab')
160 fp = self.wfile('.hgtags', 'ab')
161 else:
161 else:
162 prevtags = fp.read()
162 prevtags = fp.read()
163 else:
163 else:
164 try:
164 try:
165 prevtags = self.filectx('.hgtags', parent).data()
165 prevtags = self.filectx('.hgtags', parent).data()
166 except revlog.LookupError:
166 except revlog.LookupError:
167 pass
167 pass
168 fp = self.wfile('.hgtags', 'wb')
168 fp = self.wfile('.hgtags', 'wb')
169 if prevtags:
169 if prevtags:
170 fp.write(prevtags)
170 fp.write(prevtags)
171
171
172 # committed tags are stored in UTF-8
172 # committed tags are stored in UTF-8
173 writetag(fp, name, util.fromlocal, prevtags)
173 writetag(fp, name, util.fromlocal, prevtags)
174
174
175 if use_dirstate and '.hgtags' not in self.dirstate:
175 if use_dirstate and '.hgtags' not in self.dirstate:
176 self.add(['.hgtags'])
176 self.add(['.hgtags'])
177
177
178 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
178 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
179 extra=extra)
179 extra=extra)
180
180
181 self.hook('tag', node=hex(node), tag=name, local=local)
181 self.hook('tag', node=hex(node), tag=name, local=local)
182
182
183 return tagnode
183 return tagnode
184
184
185 def tag(self, name, node, message, local, user, date):
185 def tag(self, name, node, message, local, user, date):
186 '''tag a revision with a symbolic name.
186 '''tag a revision with a symbolic name.
187
187
188 if local is True, the tag is stored in a per-repository file.
188 if local is True, the tag is stored in a per-repository file.
189 otherwise, it is stored in the .hgtags file, and a new
189 otherwise, it is stored in the .hgtags file, and a new
190 changeset is committed with the change.
190 changeset is committed with the change.
191
191
192 keyword arguments:
192 keyword arguments:
193
193
194 local: whether to store tag in non-version-controlled file
194 local: whether to store tag in non-version-controlled file
195 (default False)
195 (default False)
196
196
197 message: commit message to use if committing
197 message: commit message to use if committing
198
198
199 user: name of user to use if committing
199 user: name of user to use if committing
200
200
201 date: date tuple to use if committing'''
201 date: date tuple to use if committing'''
202
202
203 for x in self.status()[:5]:
203 for x in self.status()[:5]:
204 if '.hgtags' in x:
204 if '.hgtags' in x:
205 raise util.Abort(_('working copy of .hgtags is changed '
205 raise util.Abort(_('working copy of .hgtags is changed '
206 '(please commit .hgtags manually)'))
206 '(please commit .hgtags manually)'))
207
207
208
208
209 self._tag(name, node, message, local, user, date)
209 self._tag(name, node, message, local, user, date)
210
210
211 def tags(self):
211 def tags(self):
212 '''return a mapping of tag to node'''
212 '''return a mapping of tag to node'''
213 if self.tagscache:
213 if self.tagscache:
214 return self.tagscache
214 return self.tagscache
215
215
216 globaltags = {}
216 globaltags = {}
217 tagtypes = {}
217 tagtypes = {}
218
218
219 def readtags(lines, fn, tagtype):
219 def readtags(lines, fn, tagtype):
220 filetags = {}
220 filetags = {}
221 count = 0
221 count = 0
222
222
223 def warn(msg):
223 def warn(msg):
224 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
224 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
225
225
226 for l in lines:
226 for l in lines:
227 count += 1
227 count += 1
228 if not l:
228 if not l:
229 continue
229 continue
230 s = l.split(" ", 1)
230 s = l.split(" ", 1)
231 if len(s) != 2:
231 if len(s) != 2:
232 warn(_("cannot parse entry"))
232 warn(_("cannot parse entry"))
233 continue
233 continue
234 node, key = s
234 node, key = s
235 key = util.tolocal(key.strip()) # stored in UTF-8
235 key = util.tolocal(key.strip()) # stored in UTF-8
236 try:
236 try:
237 bin_n = bin(node)
237 bin_n = bin(node)
238 except TypeError:
238 except TypeError:
239 warn(_("node '%s' is not well formed") % node)
239 warn(_("node '%s' is not well formed") % node)
240 continue
240 continue
241 if bin_n not in self.changelog.nodemap:
241 if bin_n not in self.changelog.nodemap:
242 warn(_("tag '%s' refers to unknown node") % key)
242 warn(_("tag '%s' refers to unknown node") % key)
243 continue
243 continue
244
244
245 h = []
245 h = []
246 if key in filetags:
246 if key in filetags:
247 n, h = filetags[key]
247 n, h = filetags[key]
248 h.append(n)
248 h.append(n)
249 filetags[key] = (bin_n, h)
249 filetags[key] = (bin_n, h)
250
250
251 for k, nh in filetags.items():
251 for k, nh in filetags.items():
252 if k not in globaltags:
252 if k not in globaltags:
253 globaltags[k] = nh
253 globaltags[k] = nh
254 tagtypes[k] = tagtype
254 tagtypes[k] = tagtype
255 continue
255 continue
256
256
257 # we prefer the global tag if:
257 # we prefer the global tag if:
258 # it supercedes us OR
258 # it supercedes us OR
259 # mutual supercedes and it has a higher rank
259 # mutual supercedes and it has a higher rank
260 # otherwise we win because we're tip-most
260 # otherwise we win because we're tip-most
261 an, ah = nh
261 an, ah = nh
262 bn, bh = globaltags[k]
262 bn, bh = globaltags[k]
263 if (bn != an and an in bh and
263 if (bn != an and an in bh and
264 (bn not in ah or len(bh) > len(ah))):
264 (bn not in ah or len(bh) > len(ah))):
265 an = bn
265 an = bn
266 ah.extend([n for n in bh if n not in ah])
266 ah.extend([n for n in bh if n not in ah])
267 globaltags[k] = an, ah
267 globaltags[k] = an, ah
268 tagtypes[k] = tagtype
268 tagtypes[k] = tagtype
269
269
270 # read the tags file from each head, ending with the tip
270 # read the tags file from each head, ending with the tip
271 f = None
271 f = None
272 for rev, node, fnode in self._hgtagsnodes():
272 for rev, node, fnode in self._hgtagsnodes():
273 f = (f and f.filectx(fnode) or
273 f = (f and f.filectx(fnode) or
274 self.filectx('.hgtags', fileid=fnode))
274 self.filectx('.hgtags', fileid=fnode))
275 readtags(f.data().splitlines(), f, "global")
275 readtags(f.data().splitlines(), f, "global")
276
276
277 try:
277 try:
278 data = util.fromlocal(self.opener("localtags").read())
278 data = util.fromlocal(self.opener("localtags").read())
279 # localtags are stored in the local character set
279 # localtags are stored in the local character set
280 # while the internal tag table is stored in UTF-8
280 # while the internal tag table is stored in UTF-8
281 readtags(data.splitlines(), "localtags", "local")
281 readtags(data.splitlines(), "localtags", "local")
282 except IOError:
282 except IOError:
283 pass
283 pass
284
284
285 self.tagscache = {}
285 self.tagscache = {}
286 self._tagstypecache = {}
286 self._tagstypecache = {}
287 for k,nh in globaltags.items():
287 for k,nh in globaltags.items():
288 n = nh[0]
288 n = nh[0]
289 if n != nullid:
289 if n != nullid:
290 self.tagscache[k] = n
290 self.tagscache[k] = n
291 self._tagstypecache[k] = tagtypes[k]
291 self._tagstypecache[k] = tagtypes[k]
292 self.tagscache['tip'] = self.changelog.tip()
292 self.tagscache['tip'] = self.changelog.tip()
293
293
294 return self.tagscache
294 return self.tagscache
295
295
296 def tagtype(self, tagname):
296 def tagtype(self, tagname):
297 '''
297 '''
298 return the type of the given tag. result can be:
298 return the type of the given tag. result can be:
299
299
300 'local' : a local tag
300 'local' : a local tag
301 'global' : a global tag
301 'global' : a global tag
302 None : tag does not exist
302 None : tag does not exist
303 '''
303 '''
304
304
305 self.tags()
305 self.tags()
306
306
307 return self._tagstypecache.get(tagname)
307 return self._tagstypecache.get(tagname)
308
308
309 def _hgtagsnodes(self):
309 def _hgtagsnodes(self):
310 heads = self.heads()
310 heads = self.heads()
311 heads.reverse()
311 heads.reverse()
312 last = {}
312 last = {}
313 ret = []
313 ret = []
314 for node in heads:
314 for node in heads:
315 c = self.changectx(node)
315 c = self.changectx(node)
316 rev = c.rev()
316 rev = c.rev()
317 try:
317 try:
318 fnode = c.filenode('.hgtags')
318 fnode = c.filenode('.hgtags')
319 except revlog.LookupError:
319 except revlog.LookupError:
320 continue
320 continue
321 ret.append((rev, node, fnode))
321 ret.append((rev, node, fnode))
322 if fnode in last:
322 if fnode in last:
323 ret[last[fnode]] = None
323 ret[last[fnode]] = None
324 last[fnode] = len(ret) - 1
324 last[fnode] = len(ret) - 1
325 return [item for item in ret if item]
325 return [item for item in ret if item]
326
326
327 def tagslist(self):
327 def tagslist(self):
328 '''return a list of tags ordered by revision'''
328 '''return a list of tags ordered by revision'''
329 l = []
329 l = []
330 for t, n in self.tags().items():
330 for t, n in self.tags().items():
331 try:
331 try:
332 r = self.changelog.rev(n)
332 r = self.changelog.rev(n)
333 except:
333 except:
334 r = -2 # sort to the beginning of the list if unknown
334 r = -2 # sort to the beginning of the list if unknown
335 l.append((r, t, n))
335 l.append((r, t, n))
336 l.sort()
336 l.sort()
337 return [(t, n) for r, t, n in l]
337 return [(t, n) for r, t, n in l]
338
338
339 def nodetags(self, node):
339 def nodetags(self, node):
340 '''return the tags associated with a node'''
340 '''return the tags associated with a node'''
341 if not self.nodetagscache:
341 if not self.nodetagscache:
342 self.nodetagscache = {}
342 self.nodetagscache = {}
343 for t, n in self.tags().items():
343 for t, n in self.tags().items():
344 self.nodetagscache.setdefault(n, []).append(t)
344 self.nodetagscache.setdefault(n, []).append(t)
345 return self.nodetagscache.get(node, [])
345 return self.nodetagscache.get(node, [])
346
346
347 def _branchtags(self):
347 def _branchtags(self):
348 partial, last, lrev = self._readbranchcache()
348 partial, last, lrev = self._readbranchcache()
349
349
350 tiprev = self.changelog.count() - 1
350 tiprev = self.changelog.count() - 1
351 if lrev != tiprev:
351 if lrev != tiprev:
352 self._updatebranchcache(partial, lrev+1, tiprev+1)
352 self._updatebranchcache(partial, lrev+1, tiprev+1)
353 self._writebranchcache(partial, self.changelog.tip(), tiprev)
353 self._writebranchcache(partial, self.changelog.tip(), tiprev)
354
354
355 return partial
355 return partial
356
356
357 def branchtags(self):
357 def branchtags(self):
358 if self.branchcache is not None:
358 if self.branchcache is not None:
359 return self.branchcache
359 return self.branchcache
360
360
361 self.branchcache = {} # avoid recursion in changectx
361 self.branchcache = {} # avoid recursion in changectx
362 partial = self._branchtags()
362 partial = self._branchtags()
363
363
364 # the branch cache is stored on disk as UTF-8, but in the local
364 # the branch cache is stored on disk as UTF-8, but in the local
365 # charset internally
365 # charset internally
366 for k, v in partial.items():
366 for k, v in partial.items():
367 self.branchcache[util.tolocal(k)] = v
367 self.branchcache[util.tolocal(k)] = v
368 return self.branchcache
368 return self.branchcache
369
369
370 def _readbranchcache(self):
370 def _readbranchcache(self):
371 partial = {}
371 partial = {}
372 try:
372 try:
373 f = self.opener("branch.cache")
373 f = self.opener("branch.cache")
374 lines = f.read().split('\n')
374 lines = f.read().split('\n')
375 f.close()
375 f.close()
376 except (IOError, OSError):
376 except (IOError, OSError):
377 return {}, nullid, nullrev
377 return {}, nullid, nullrev
378
378
379 try:
379 try:
380 last, lrev = lines.pop(0).split(" ", 1)
380 last, lrev = lines.pop(0).split(" ", 1)
381 last, lrev = bin(last), int(lrev)
381 last, lrev = bin(last), int(lrev)
382 if not (lrev < self.changelog.count() and
382 if not (lrev < self.changelog.count() and
383 self.changelog.node(lrev) == last): # sanity check
383 self.changelog.node(lrev) == last): # sanity check
384 # invalidate the cache
384 # invalidate the cache
385 raise ValueError('invalidating branch cache (tip differs)')
385 raise ValueError('invalidating branch cache (tip differs)')
386 for l in lines:
386 for l in lines:
387 if not l: continue
387 if not l: continue
388 node, label = l.split(" ", 1)
388 node, label = l.split(" ", 1)
389 partial[label.strip()] = bin(node)
389 partial[label.strip()] = bin(node)
390 except (KeyboardInterrupt, util.SignalInterrupt):
390 except (KeyboardInterrupt, util.SignalInterrupt):
391 raise
391 raise
392 except Exception, inst:
392 except Exception, inst:
393 if self.ui.debugflag:
393 if self.ui.debugflag:
394 self.ui.warn(str(inst), '\n')
394 self.ui.warn(str(inst), '\n')
395 partial, last, lrev = {}, nullid, nullrev
395 partial, last, lrev = {}, nullid, nullrev
396 return partial, last, lrev
396 return partial, last, lrev
397
397
398 def _writebranchcache(self, branches, tip, tiprev):
398 def _writebranchcache(self, branches, tip, tiprev):
399 try:
399 try:
400 f = self.opener("branch.cache", "w", atomictemp=True)
400 f = self.opener("branch.cache", "w", atomictemp=True)
401 f.write("%s %s\n" % (hex(tip), tiprev))
401 f.write("%s %s\n" % (hex(tip), tiprev))
402 for label, node in branches.iteritems():
402 for label, node in branches.iteritems():
403 f.write("%s %s\n" % (hex(node), label))
403 f.write("%s %s\n" % (hex(node), label))
404 f.rename()
404 f.rename()
405 except (IOError, OSError):
405 except (IOError, OSError):
406 pass
406 pass
407
407
408 def _updatebranchcache(self, partial, start, end):
408 def _updatebranchcache(self, partial, start, end):
409 for r in xrange(start, end):
409 for r in xrange(start, end):
410 c = self.changectx(r)
410 c = self.changectx(r)
411 b = c.branch()
411 b = c.branch()
412 partial[b] = c.node()
412 partial[b] = c.node()
413
413
414 def lookup(self, key):
414 def lookup(self, key):
415 if key == '.':
415 if key == '.':
416 key, second = self.dirstate.parents()
416 key, second = self.dirstate.parents()
417 if key == nullid:
417 if key == nullid:
418 raise repo.RepoError(_("no revision checked out"))
418 raise repo.RepoError(_("no revision checked out"))
419 if second != nullid:
419 if second != nullid:
420 self.ui.warn(_("warning: working directory has two parents, "
420 self.ui.warn(_("warning: working directory has two parents, "
421 "tag '.' uses the first\n"))
421 "tag '.' uses the first\n"))
422 elif key == 'null':
422 elif key == 'null':
423 return nullid
423 return nullid
424 n = self.changelog._match(key)
424 n = self.changelog._match(key)
425 if n:
425 if n:
426 return n
426 return n
427 if key in self.tags():
427 if key in self.tags():
428 return self.tags()[key]
428 return self.tags()[key]
429 if key in self.branchtags():
429 if key in self.branchtags():
430 return self.branchtags()[key]
430 return self.branchtags()[key]
431 n = self.changelog._partialmatch(key)
431 n = self.changelog._partialmatch(key)
432 if n:
432 if n:
433 return n
433 return n
434 try:
434 try:
435 if len(key) == 20:
435 if len(key) == 20:
436 key = hex(key)
436 key = hex(key)
437 except:
437 except:
438 pass
438 pass
439 raise repo.RepoError(_("unknown revision '%s'") % key)
439 raise repo.RepoError(_("unknown revision '%s'") % key)
440
440
441 def dev(self):
441 def dev(self):
442 return os.lstat(self.path).st_dev
442 return os.lstat(self.path).st_dev
443
443
444 def local(self):
444 def local(self):
445 return True
445 return True
446
446
447 def join(self, f):
447 def join(self, f):
448 return os.path.join(self.path, f)
448 return os.path.join(self.path, f)
449
449
450 def sjoin(self, f):
450 def sjoin(self, f):
451 f = self.encodefn(f)
451 f = self.encodefn(f)
452 return os.path.join(self.spath, f)
452 return os.path.join(self.spath, f)
453
453
454 def wjoin(self, f):
454 def wjoin(self, f):
455 return os.path.join(self.root, f)
455 return os.path.join(self.root, f)
456
456
457 def file(self, f):
457 def file(self, f):
458 if f[0] == '/':
458 if f[0] == '/':
459 f = f[1:]
459 f = f[1:]
460 return filelog.filelog(self.sopener, f)
460 return filelog.filelog(self.sopener, f)
461
461
462 def changectx(self, changeid=None):
462 def changectx(self, changeid=None):
463 return context.changectx(self, changeid)
463 return context.changectx(self, changeid)
464
464
465 def workingctx(self):
465 def workingctx(self):
466 return context.workingctx(self)
466 return context.workingctx(self)
467
467
468 def parents(self, changeid=None):
468 def parents(self, changeid=None):
469 '''
469 '''
470 get list of changectxs for parents of changeid or working directory
470 get list of changectxs for parents of changeid or working directory
471 '''
471 '''
472 if changeid is None:
472 if changeid is None:
473 pl = self.dirstate.parents()
473 pl = self.dirstate.parents()
474 else:
474 else:
475 n = self.changelog.lookup(changeid)
475 n = self.changelog.lookup(changeid)
476 pl = self.changelog.parents(n)
476 pl = self.changelog.parents(n)
477 if pl[1] == nullid:
477 if pl[1] == nullid:
478 return [self.changectx(pl[0])]
478 return [self.changectx(pl[0])]
479 return [self.changectx(pl[0]), self.changectx(pl[1])]
479 return [self.changectx(pl[0]), self.changectx(pl[1])]
480
480
481 def filectx(self, path, changeid=None, fileid=None):
481 def filectx(self, path, changeid=None, fileid=None):
482 """changeid can be a changeset revision, node, or tag.
482 """changeid can be a changeset revision, node, or tag.
483 fileid can be a file revision or node."""
483 fileid can be a file revision or node."""
484 return context.filectx(self, path, changeid, fileid)
484 return context.filectx(self, path, changeid, fileid)
485
485
486 def getcwd(self):
486 def getcwd(self):
487 return self.dirstate.getcwd()
487 return self.dirstate.getcwd()
488
488
489 def pathto(self, f, cwd=None):
489 def pathto(self, f, cwd=None):
490 return self.dirstate.pathto(f, cwd)
490 return self.dirstate.pathto(f, cwd)
491
491
492 def wfile(self, f, mode='r'):
492 def wfile(self, f, mode='r'):
493 return self.wopener(f, mode)
493 return self.wopener(f, mode)
494
494
495 def _link(self, f):
495 def _link(self, f):
496 return os.path.islink(self.wjoin(f))
496 return os.path.islink(self.wjoin(f))
497
497
498 def _filter(self, filter, filename, data):
498 def _filter(self, filter, filename, data):
499 if filter not in self.filterpats:
499 if filter not in self.filterpats:
500 l = []
500 l = []
501 for pat, cmd in self.ui.configitems(filter):
501 for pat, cmd in self.ui.configitems(filter):
502 mf = util.matcher(self.root, "", [pat], [], [])[1]
502 mf = util.matcher(self.root, "", [pat], [], [])[1]
503 fn = None
503 fn = None
504 params = cmd
504 params = cmd
505 for name, filterfn in self._datafilters.iteritems():
505 for name, filterfn in self._datafilters.iteritems():
506 if cmd.startswith(name):
506 if cmd.startswith(name):
507 fn = filterfn
507 fn = filterfn
508 params = cmd[len(name):].lstrip()
508 params = cmd[len(name):].lstrip()
509 break
509 break
510 if not fn:
510 if not fn:
511 fn = lambda s, c, **kwargs: util.filter(s, c)
511 fn = lambda s, c, **kwargs: util.filter(s, c)
512 # Wrap old filters not supporting keyword arguments
512 # Wrap old filters not supporting keyword arguments
513 if not inspect.getargspec(fn)[2]:
513 if not inspect.getargspec(fn)[2]:
514 oldfn = fn
514 oldfn = fn
515 fn = lambda s, c, **kwargs: oldfn(s, c)
515 fn = lambda s, c, **kwargs: oldfn(s, c)
516 l.append((mf, fn, params))
516 l.append((mf, fn, params))
517 self.filterpats[filter] = l
517 self.filterpats[filter] = l
518
518
519 for mf, fn, cmd in self.filterpats[filter]:
519 for mf, fn, cmd in self.filterpats[filter]:
520 if mf(filename):
520 if mf(filename):
521 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
521 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
522 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
522 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
523 break
523 break
524
524
525 return data
525 return data
526
526
527 def adddatafilter(self, name, filter):
527 def adddatafilter(self, name, filter):
528 self._datafilters[name] = filter
528 self._datafilters[name] = filter
529
529
530 def wread(self, filename):
530 def wread(self, filename):
531 if self._link(filename):
531 if self._link(filename):
532 data = os.readlink(self.wjoin(filename))
532 data = os.readlink(self.wjoin(filename))
533 else:
533 else:
534 data = self.wopener(filename, 'r').read()
534 data = self.wopener(filename, 'r').read()
535 return self._filter("encode", filename, data)
535 return self._filter("encode", filename, data)
536
536
537 def wwrite(self, filename, data, flags):
537 def wwrite(self, filename, data, flags):
538 data = self._filter("decode", filename, data)
538 data = self._filter("decode", filename, data)
539 try:
539 try:
540 os.unlink(self.wjoin(filename))
540 os.unlink(self.wjoin(filename))
541 except OSError:
541 except OSError:
542 pass
542 pass
543 self.wopener(filename, 'w').write(data)
543 self.wopener(filename, 'w').write(data)
544 util.set_flags(self.wjoin(filename), flags)
544 util.set_flags(self.wjoin(filename), flags)
545
545
546 def wwritedata(self, filename, data):
546 def wwritedata(self, filename, data):
547 return self._filter("decode", filename, data)
547 return self._filter("decode", filename, data)
548
548
549 def transaction(self):
549 def transaction(self):
550 if self._transref and self._transref():
550 if self._transref and self._transref():
551 return self._transref().nest()
551 return self._transref().nest()
552
552
553 # abort here if the journal already exists
553 # abort here if the journal already exists
554 if os.path.exists(self.sjoin("journal")):
554 if os.path.exists(self.sjoin("journal")):
555 raise repo.RepoError(_("journal already exists - run hg recover"))
555 raise repo.RepoError(_("journal already exists - run hg recover"))
556
556
557 # save dirstate for rollback
557 # save dirstate for rollback
558 try:
558 try:
559 ds = self.opener("dirstate").read()
559 ds = self.opener("dirstate").read()
560 except IOError:
560 except IOError:
561 ds = ""
561 ds = ""
562 self.opener("journal.dirstate", "w").write(ds)
562 self.opener("journal.dirstate", "w").write(ds)
563 self.opener("journal.branch", "w").write(self.dirstate.branch())
563 self.opener("journal.branch", "w").write(self.dirstate.branch())
564
564
565 renames = [(self.sjoin("journal"), self.sjoin("undo")),
565 renames = [(self.sjoin("journal"), self.sjoin("undo")),
566 (self.join("journal.dirstate"), self.join("undo.dirstate")),
566 (self.join("journal.dirstate"), self.join("undo.dirstate")),
567 (self.join("journal.branch"), self.join("undo.branch"))]
567 (self.join("journal.branch"), self.join("undo.branch"))]
568 tr = transaction.transaction(self.ui.warn, self.sopener,
568 tr = transaction.transaction(self.ui.warn, self.sopener,
569 self.sjoin("journal"),
569 self.sjoin("journal"),
570 aftertrans(renames),
570 aftertrans(renames),
571 self._createmode)
571 self._createmode)
572 self._transref = weakref.ref(tr)
572 self._transref = weakref.ref(tr)
573 return tr
573 return tr
574
574
575 def recover(self):
575 def recover(self):
576 l = self.lock()
576 l = self.lock()
577 try:
577 try:
578 if os.path.exists(self.sjoin("journal")):
578 if os.path.exists(self.sjoin("journal")):
579 self.ui.status(_("rolling back interrupted transaction\n"))
579 self.ui.status(_("rolling back interrupted transaction\n"))
580 transaction.rollback(self.sopener, self.sjoin("journal"))
580 transaction.rollback(self.sopener, self.sjoin("journal"))
581 self.invalidate()
581 self.invalidate()
582 return True
582 return True
583 else:
583 else:
584 self.ui.warn(_("no interrupted transaction available\n"))
584 self.ui.warn(_("no interrupted transaction available\n"))
585 return False
585 return False
586 finally:
586 finally:
587 del l
587 del l
588
588
589 def rollback(self):
589 def rollback(self):
590 wlock = lock = None
590 wlock = lock = None
591 try:
591 try:
592 wlock = self.wlock()
592 wlock = self.wlock()
593 lock = self.lock()
593 lock = self.lock()
594 if os.path.exists(self.sjoin("undo")):
594 if os.path.exists(self.sjoin("undo")):
595 self.ui.status(_("rolling back last transaction\n"))
595 self.ui.status(_("rolling back last transaction\n"))
596 transaction.rollback(self.sopener, self.sjoin("undo"))
596 transaction.rollback(self.sopener, self.sjoin("undo"))
597 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
597 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
598 try:
598 try:
599 branch = self.opener("undo.branch").read()
599 branch = self.opener("undo.branch").read()
600 self.dirstate.setbranch(branch)
600 self.dirstate.setbranch(branch)
601 except IOError:
601 except IOError:
602 self.ui.warn(_("Named branch could not be reset, "
602 self.ui.warn(_("Named branch could not be reset, "
603 "current branch still is: %s\n")
603 "current branch still is: %s\n")
604 % util.tolocal(self.dirstate.branch()))
604 % util.tolocal(self.dirstate.branch()))
605 self.invalidate()
605 self.invalidate()
606 self.dirstate.invalidate()
606 self.dirstate.invalidate()
607 else:
607 else:
608 self.ui.warn(_("no rollback information available\n"))
608 self.ui.warn(_("no rollback information available\n"))
609 finally:
609 finally:
610 del lock, wlock
610 del lock, wlock
611
611
612 def invalidate(self):
612 def invalidate(self):
613 for a in "changelog manifest".split():
613 for a in "changelog manifest".split():
614 if hasattr(self, a):
614 if hasattr(self, a):
615 self.__delattr__(a)
615 self.__delattr__(a)
616 self.tagscache = None
616 self.tagscache = None
617 self._tagstypecache = None
617 self._tagstypecache = None
618 self.nodetagscache = None
618 self.nodetagscache = None
619 self.branchcache = None
619
620
620 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
621 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
621 try:
622 try:
622 l = lock.lock(lockname, 0, releasefn, desc=desc)
623 l = lock.lock(lockname, 0, releasefn, desc=desc)
623 except lock.LockHeld, inst:
624 except lock.LockHeld, inst:
624 if not wait:
625 if not wait:
625 raise
626 raise
626 self.ui.warn(_("waiting for lock on %s held by %r\n") %
627 self.ui.warn(_("waiting for lock on %s held by %r\n") %
627 (desc, inst.locker))
628 (desc, inst.locker))
628 # default to 600 seconds timeout
629 # default to 600 seconds timeout
629 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
630 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
630 releasefn, desc=desc)
631 releasefn, desc=desc)
631 if acquirefn:
632 if acquirefn:
632 acquirefn()
633 acquirefn()
633 return l
634 return l
634
635
635 def lock(self, wait=True):
636 def lock(self, wait=True):
636 if self._lockref and self._lockref():
637 if self._lockref and self._lockref():
637 return self._lockref()
638 return self._lockref()
638
639
639 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
640 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
640 _('repository %s') % self.origroot)
641 _('repository %s') % self.origroot)
641 self._lockref = weakref.ref(l)
642 self._lockref = weakref.ref(l)
642 return l
643 return l
643
644
644 def wlock(self, wait=True):
645 def wlock(self, wait=True):
645 if self._wlockref and self._wlockref():
646 if self._wlockref and self._wlockref():
646 return self._wlockref()
647 return self._wlockref()
647
648
648 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
649 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
649 self.dirstate.invalidate, _('working directory of %s') %
650 self.dirstate.invalidate, _('working directory of %s') %
650 self.origroot)
651 self.origroot)
651 self._wlockref = weakref.ref(l)
652 self._wlockref = weakref.ref(l)
652 return l
653 return l
653
654
654 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
655 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
655 """
656 """
656 commit an individual file as part of a larger transaction
657 commit an individual file as part of a larger transaction
657 """
658 """
658
659
659 t = self.wread(fn)
660 t = self.wread(fn)
660 fl = self.file(fn)
661 fl = self.file(fn)
661 fp1 = manifest1.get(fn, nullid)
662 fp1 = manifest1.get(fn, nullid)
662 fp2 = manifest2.get(fn, nullid)
663 fp2 = manifest2.get(fn, nullid)
663
664
664 meta = {}
665 meta = {}
665 cp = self.dirstate.copied(fn)
666 cp = self.dirstate.copied(fn)
666 if cp:
667 if cp:
667 # Mark the new revision of this file as a copy of another
668 # Mark the new revision of this file as a copy of another
668 # file. This copy data will effectively act as a parent
669 # file. This copy data will effectively act as a parent
669 # of this new revision. If this is a merge, the first
670 # of this new revision. If this is a merge, the first
670 # parent will be the nullid (meaning "look up the copy data")
671 # parent will be the nullid (meaning "look up the copy data")
671 # and the second one will be the other parent. For example:
672 # and the second one will be the other parent. For example:
672 #
673 #
673 # 0 --- 1 --- 3 rev1 changes file foo
674 # 0 --- 1 --- 3 rev1 changes file foo
674 # \ / rev2 renames foo to bar and changes it
675 # \ / rev2 renames foo to bar and changes it
675 # \- 2 -/ rev3 should have bar with all changes and
676 # \- 2 -/ rev3 should have bar with all changes and
676 # should record that bar descends from
677 # should record that bar descends from
677 # bar in rev2 and foo in rev1
678 # bar in rev2 and foo in rev1
678 #
679 #
679 # this allows this merge to succeed:
680 # this allows this merge to succeed:
680 #
681 #
681 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
682 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
682 # \ / merging rev3 and rev4 should use bar@rev2
683 # \ / merging rev3 and rev4 should use bar@rev2
683 # \- 2 --- 4 as the merge base
684 # \- 2 --- 4 as the merge base
684 #
685 #
685 meta["copy"] = cp
686 meta["copy"] = cp
686 if not manifest2: # not a branch merge
687 if not manifest2: # not a branch merge
687 meta["copyrev"] = hex(manifest1.get(cp, nullid))
688 meta["copyrev"] = hex(manifest1.get(cp, nullid))
688 fp2 = nullid
689 fp2 = nullid
689 elif fp2 != nullid: # copied on remote side
690 elif fp2 != nullid: # copied on remote side
690 meta["copyrev"] = hex(manifest1.get(cp, nullid))
691 meta["copyrev"] = hex(manifest1.get(cp, nullid))
691 elif fp1 != nullid: # copied on local side, reversed
692 elif fp1 != nullid: # copied on local side, reversed
692 meta["copyrev"] = hex(manifest2.get(cp))
693 meta["copyrev"] = hex(manifest2.get(cp))
693 fp2 = fp1
694 fp2 = fp1
694 elif cp in manifest2: # directory rename on local side
695 elif cp in manifest2: # directory rename on local side
695 meta["copyrev"] = hex(manifest2[cp])
696 meta["copyrev"] = hex(manifest2[cp])
696 else: # directory rename on remote side
697 else: # directory rename on remote side
697 meta["copyrev"] = hex(manifest1.get(cp, nullid))
698 meta["copyrev"] = hex(manifest1.get(cp, nullid))
698 self.ui.debug(_(" %s: copy %s:%s\n") %
699 self.ui.debug(_(" %s: copy %s:%s\n") %
699 (fn, cp, meta["copyrev"]))
700 (fn, cp, meta["copyrev"]))
700 fp1 = nullid
701 fp1 = nullid
701 elif fp2 != nullid:
702 elif fp2 != nullid:
702 # is one parent an ancestor of the other?
703 # is one parent an ancestor of the other?
703 fpa = fl.ancestor(fp1, fp2)
704 fpa = fl.ancestor(fp1, fp2)
704 if fpa == fp1:
705 if fpa == fp1:
705 fp1, fp2 = fp2, nullid
706 fp1, fp2 = fp2, nullid
706 elif fpa == fp2:
707 elif fpa == fp2:
707 fp2 = nullid
708 fp2 = nullid
708
709
709 # is the file unmodified from the parent? report existing entry
710 # is the file unmodified from the parent? report existing entry
710 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
711 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
711 return fp1
712 return fp1
712
713
713 changelist.append(fn)
714 changelist.append(fn)
714 return fl.add(t, meta, tr, linkrev, fp1, fp2)
715 return fl.add(t, meta, tr, linkrev, fp1, fp2)
715
716
716 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
717 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
717 if p1 is None:
718 if p1 is None:
718 p1, p2 = self.dirstate.parents()
719 p1, p2 = self.dirstate.parents()
719 return self.commit(files=files, text=text, user=user, date=date,
720 return self.commit(files=files, text=text, user=user, date=date,
720 p1=p1, p2=p2, extra=extra, empty_ok=True)
721 p1=p1, p2=p2, extra=extra, empty_ok=True)
721
722
722 def commit(self, files=None, text="", user=None, date=None,
723 def commit(self, files=None, text="", user=None, date=None,
723 match=util.always, force=False, force_editor=False,
724 match=util.always, force=False, force_editor=False,
724 p1=None, p2=None, extra={}, empty_ok=False):
725 p1=None, p2=None, extra={}, empty_ok=False):
725 wlock = lock = tr = None
726 wlock = lock = tr = None
726 valid = 0 # don't save the dirstate if this isn't set
727 valid = 0 # don't save the dirstate if this isn't set
727 if files:
728 if files:
728 files = util.unique(files)
729 files = util.unique(files)
729 try:
730 try:
730 commit = []
731 commit = []
731 remove = []
732 remove = []
732 changed = []
733 changed = []
733 use_dirstate = (p1 is None) # not rawcommit
734 use_dirstate = (p1 is None) # not rawcommit
734 extra = extra.copy()
735 extra = extra.copy()
735
736
736 if use_dirstate:
737 if use_dirstate:
737 if files:
738 if files:
738 for f in files:
739 for f in files:
739 s = self.dirstate[f]
740 s = self.dirstate[f]
740 if s in 'nma':
741 if s in 'nma':
741 commit.append(f)
742 commit.append(f)
742 elif s == 'r':
743 elif s == 'r':
743 remove.append(f)
744 remove.append(f)
744 else:
745 else:
745 self.ui.warn(_("%s not tracked!\n") % f)
746 self.ui.warn(_("%s not tracked!\n") % f)
746 else:
747 else:
747 changes = self.status(match=match)[:5]
748 changes = self.status(match=match)[:5]
748 modified, added, removed, deleted, unknown = changes
749 modified, added, removed, deleted, unknown = changes
749 commit = modified + added
750 commit = modified + added
750 remove = removed
751 remove = removed
751 else:
752 else:
752 commit = files
753 commit = files
753
754
754 if use_dirstate:
755 if use_dirstate:
755 p1, p2 = self.dirstate.parents()
756 p1, p2 = self.dirstate.parents()
756 update_dirstate = True
757 update_dirstate = True
757 else:
758 else:
758 p1, p2 = p1, p2 or nullid
759 p1, p2 = p1, p2 or nullid
759 update_dirstate = (self.dirstate.parents()[0] == p1)
760 update_dirstate = (self.dirstate.parents()[0] == p1)
760
761
761 c1 = self.changelog.read(p1)
762 c1 = self.changelog.read(p1)
762 c2 = self.changelog.read(p2)
763 c2 = self.changelog.read(p2)
763 m1 = self.manifest.read(c1[0]).copy()
764 m1 = self.manifest.read(c1[0]).copy()
764 m2 = self.manifest.read(c2[0])
765 m2 = self.manifest.read(c2[0])
765
766
766 if use_dirstate:
767 if use_dirstate:
767 branchname = self.workingctx().branch()
768 branchname = self.workingctx().branch()
768 try:
769 try:
769 branchname = branchname.decode('UTF-8').encode('UTF-8')
770 branchname = branchname.decode('UTF-8').encode('UTF-8')
770 except UnicodeDecodeError:
771 except UnicodeDecodeError:
771 raise util.Abort(_('branch name not in UTF-8!'))
772 raise util.Abort(_('branch name not in UTF-8!'))
772 else:
773 else:
773 branchname = ""
774 branchname = ""
774
775
775 if use_dirstate:
776 if use_dirstate:
776 oldname = c1[5].get("branch") # stored in UTF-8
777 oldname = c1[5].get("branch") # stored in UTF-8
777 if (not commit and not remove and not force and p2 == nullid
778 if (not commit and not remove and not force and p2 == nullid
778 and branchname == oldname):
779 and branchname == oldname):
779 self.ui.status(_("nothing changed\n"))
780 self.ui.status(_("nothing changed\n"))
780 return None
781 return None
781
782
782 xp1 = hex(p1)
783 xp1 = hex(p1)
783 if p2 == nullid: xp2 = ''
784 if p2 == nullid: xp2 = ''
784 else: xp2 = hex(p2)
785 else: xp2 = hex(p2)
785
786
786 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
787 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
787
788
788 wlock = self.wlock()
789 wlock = self.wlock()
789 lock = self.lock()
790 lock = self.lock()
790 tr = self.transaction()
791 tr = self.transaction()
791 trp = weakref.proxy(tr)
792 trp = weakref.proxy(tr)
792
793
793 # check in files
794 # check in files
794 new = {}
795 new = {}
795 linkrev = self.changelog.count()
796 linkrev = self.changelog.count()
796 commit.sort()
797 commit.sort()
797 is_exec = util.execfunc(self.root, m1.execf)
798 is_exec = util.execfunc(self.root, m1.execf)
798 is_link = util.linkfunc(self.root, m1.linkf)
799 is_link = util.linkfunc(self.root, m1.linkf)
799 for f in commit:
800 for f in commit:
800 self.ui.note(f + "\n")
801 self.ui.note(f + "\n")
801 try:
802 try:
802 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
803 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
803 new_exec = is_exec(f)
804 new_exec = is_exec(f)
804 new_link = is_link(f)
805 new_link = is_link(f)
805 if ((not changed or changed[-1] != f) and
806 if ((not changed or changed[-1] != f) and
806 m2.get(f) != new[f]):
807 m2.get(f) != new[f]):
807 # mention the file in the changelog if some
808 # mention the file in the changelog if some
808 # flag changed, even if there was no content
809 # flag changed, even if there was no content
809 # change.
810 # change.
810 old_exec = m1.execf(f)
811 old_exec = m1.execf(f)
811 old_link = m1.linkf(f)
812 old_link = m1.linkf(f)
812 if old_exec != new_exec or old_link != new_link:
813 if old_exec != new_exec or old_link != new_link:
813 changed.append(f)
814 changed.append(f)
814 m1.set(f, new_exec, new_link)
815 m1.set(f, new_exec, new_link)
815 if use_dirstate:
816 if use_dirstate:
816 self.dirstate.normal(f)
817 self.dirstate.normal(f)
817
818
818 except (OSError, IOError):
819 except (OSError, IOError):
819 if use_dirstate:
820 if use_dirstate:
820 self.ui.warn(_("trouble committing %s!\n") % f)
821 self.ui.warn(_("trouble committing %s!\n") % f)
821 raise
822 raise
822 else:
823 else:
823 remove.append(f)
824 remove.append(f)
824
825
825 # update manifest
826 # update manifest
826 m1.update(new)
827 m1.update(new)
827 remove.sort()
828 remove.sort()
828 removed = []
829 removed = []
829
830
830 for f in remove:
831 for f in remove:
831 if f in m1:
832 if f in m1:
832 del m1[f]
833 del m1[f]
833 removed.append(f)
834 removed.append(f)
834 elif f in m2:
835 elif f in m2:
835 removed.append(f)
836 removed.append(f)
836 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
837 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
837 (new, removed))
838 (new, removed))
838
839
839 # add changeset
840 # add changeset
840 new = new.keys()
841 new = new.keys()
841 new.sort()
842 new.sort()
842
843
843 user = user or self.ui.username()
844 user = user or self.ui.username()
844 if (not empty_ok and not text) or force_editor:
845 if (not empty_ok and not text) or force_editor:
845 edittext = []
846 edittext = []
846 if text:
847 if text:
847 edittext.append(text)
848 edittext.append(text)
848 edittext.append("")
849 edittext.append("")
849 edittext.append(_("HG: Enter commit message."
850 edittext.append(_("HG: Enter commit message."
850 " Lines beginning with 'HG:' are removed."))
851 " Lines beginning with 'HG:' are removed."))
851 edittext.append("HG: --")
852 edittext.append("HG: --")
852 edittext.append("HG: user: %s" % user)
853 edittext.append("HG: user: %s" % user)
853 if p2 != nullid:
854 if p2 != nullid:
854 edittext.append("HG: branch merge")
855 edittext.append("HG: branch merge")
855 if branchname:
856 if branchname:
856 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
857 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
857 edittext.extend(["HG: changed %s" % f for f in changed])
858 edittext.extend(["HG: changed %s" % f for f in changed])
858 edittext.extend(["HG: removed %s" % f for f in removed])
859 edittext.extend(["HG: removed %s" % f for f in removed])
859 if not changed and not remove:
860 if not changed and not remove:
860 edittext.append("HG: no files changed")
861 edittext.append("HG: no files changed")
861 edittext.append("")
862 edittext.append("")
862 # run editor in the repository root
863 # run editor in the repository root
863 olddir = os.getcwd()
864 olddir = os.getcwd()
864 os.chdir(self.root)
865 os.chdir(self.root)
865 text = self.ui.edit("\n".join(edittext), user)
866 text = self.ui.edit("\n".join(edittext), user)
866 os.chdir(olddir)
867 os.chdir(olddir)
867
868
868 if branchname:
869 if branchname:
869 extra["branch"] = branchname
870 extra["branch"] = branchname
870
871
871 if use_dirstate:
872 if use_dirstate:
872 lines = [line.rstrip() for line in text.rstrip().splitlines()]
873 lines = [line.rstrip() for line in text.rstrip().splitlines()]
873 while lines and not lines[0]:
874 while lines and not lines[0]:
874 del lines[0]
875 del lines[0]
875 if not lines:
876 if not lines:
876 raise util.Abort(_("empty commit message"))
877 raise util.Abort(_("empty commit message"))
877 text = '\n'.join(lines)
878 text = '\n'.join(lines)
878
879
879 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
880 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
880 user, date, extra)
881 user, date, extra)
881 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
882 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
882 parent2=xp2)
883 parent2=xp2)
883 tr.close()
884 tr.close()
884
885
885 if self.branchcache and "branch" in extra:
886 if self.branchcache and "branch" in extra:
886 self.branchcache[util.tolocal(extra["branch"])] = n
887 self.branchcache[util.tolocal(extra["branch"])] = n
887
888
888 if use_dirstate or update_dirstate:
889 if use_dirstate or update_dirstate:
889 self.dirstate.setparents(n)
890 self.dirstate.setparents(n)
890 if use_dirstate:
891 if use_dirstate:
891 for f in removed:
892 for f in removed:
892 self.dirstate.forget(f)
893 self.dirstate.forget(f)
893 valid = 1 # our dirstate updates are complete
894 valid = 1 # our dirstate updates are complete
894
895
895 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
896 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
896 return n
897 return n
897 finally:
898 finally:
898 if not valid: # don't save our updated dirstate
899 if not valid: # don't save our updated dirstate
899 self.dirstate.invalidate()
900 self.dirstate.invalidate()
900 del tr, lock, wlock
901 del tr, lock, wlock
901
902
902 def walk(self, node=None, files=[], match=util.always, badmatch=None):
903 def walk(self, node=None, files=[], match=util.always, badmatch=None):
903 '''
904 '''
904 walk recursively through the directory tree or a given
905 walk recursively through the directory tree or a given
905 changeset, finding all files matched by the match
906 changeset, finding all files matched by the match
906 function
907 function
907
908
908 results are yielded in a tuple (src, filename), where src
909 results are yielded in a tuple (src, filename), where src
909 is one of:
910 is one of:
910 'f' the file was found in the directory tree
911 'f' the file was found in the directory tree
911 'm' the file was only in the dirstate and not in the tree
912 'm' the file was only in the dirstate and not in the tree
912 'b' file was not found and matched badmatch
913 'b' file was not found and matched badmatch
913 '''
914 '''
914
915
915 if node:
916 if node:
916 fdict = dict.fromkeys(files)
917 fdict = dict.fromkeys(files)
917 # for dirstate.walk, files=['.'] means "walk the whole tree".
918 # for dirstate.walk, files=['.'] means "walk the whole tree".
918 # follow that here, too
919 # follow that here, too
919 fdict.pop('.', None)
920 fdict.pop('.', None)
920 mdict = self.manifest.read(self.changelog.read(node)[0])
921 mdict = self.manifest.read(self.changelog.read(node)[0])
921 mfiles = mdict.keys()
922 mfiles = mdict.keys()
922 mfiles.sort()
923 mfiles.sort()
923 for fn in mfiles:
924 for fn in mfiles:
924 for ffn in fdict:
925 for ffn in fdict:
925 # match if the file is the exact name or a directory
926 # match if the file is the exact name or a directory
926 if ffn == fn or fn.startswith("%s/" % ffn):
927 if ffn == fn or fn.startswith("%s/" % ffn):
927 del fdict[ffn]
928 del fdict[ffn]
928 break
929 break
929 if match(fn):
930 if match(fn):
930 yield 'm', fn
931 yield 'm', fn
931 ffiles = fdict.keys()
932 ffiles = fdict.keys()
932 ffiles.sort()
933 ffiles.sort()
933 for fn in ffiles:
934 for fn in ffiles:
934 if badmatch and badmatch(fn):
935 if badmatch and badmatch(fn):
935 if match(fn):
936 if match(fn):
936 yield 'b', fn
937 yield 'b', fn
937 else:
938 else:
938 self.ui.warn(_('%s: No such file in rev %s\n')
939 self.ui.warn(_('%s: No such file in rev %s\n')
939 % (self.pathto(fn), short(node)))
940 % (self.pathto(fn), short(node)))
940 else:
941 else:
941 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
942 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
942 yield src, fn
943 yield src, fn
943
944
    def status(self, node1=None, node2=None, files=[], match=util.always,
               list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, getnode):
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    for f in lookup:
                        if f not in ctx or ctx[f].cmp(self.wread(f)):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

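    # Illustrative sketch (added comment, not part of the original module):
    # how a caller might consume the 7-tuple status() returns, assuming
    # `repo` is a localrepository instance:
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(list_clean=True)
    #   for f in modified:
    #       repo.ui.write("M %s\n" % f)
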
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(p)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            del wlock

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn("%s not removed!\n" % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if dest not in self.dirstate:
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

    def branchheads(self, branch, start=None):
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

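    # A small worked example of the walk above (added comment, not in the
    # original source): with branch revisions 0 <- 1 <- 2 and 1 <- 3, and 3
    # as the branch tip, the scan starts with heads = [3] and ancestors =
    # {1}.  Rev 2 is not in ancestors and is on the branch, so it becomes a
    # second head; rev 1 is in ancestors, so it is swapped for its parent 0.
    # The result is the node ids of revs [3, 2].
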
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

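    # Added note (not in the original source): l samples the chain at
    # exponentially growing distances 1, 2, 4, 8, ... below top, which is
    # what lets findincoming narrow a branch range with roughly logarithmic
    # round trips.  Using revision numbers for readability, walking from
    # rev 9 down to rev 0 would record revs 8, 7, 5 and 1.
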
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but whose children do not exist in both self
        and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ")
                                     + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

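    # Illustrative sketch (added comment, not in the original source) of the
    # calling convention: base is filled in as a side effect, so a caller
    # that also needs the common nodes passes its own dict, as prepush does:
    #
    #   base = {}
    #   fetch = repo.findincoming(remote, base, remote.heads())
    #   # fetch: roots of the missing subsets; base: known-common nodes
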
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

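    # Added note (not in the original source): prepush returns (None, 1)
    # when there is nothing to push or when the push would create new remote
    # heads without force, and (changegroup, remote_heads) otherwise; both
    # push_addchangegroup and push_unbundle below rely on this convention.
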
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

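        # Illustrative sketch (added comment, not in the original source) of
        # the extranodes shape described above, with hypothetical values:
        #
        #   extranodes = {
        #       'foo/bar.txt': [(filenode, linknode)],
        #       1: [(manifestnode, linknode)],  # key 1 addresses the manifest
        #   }
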
        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

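        # Worked example (added comment, not in the original source): if the
        # recipient is known to have node C with ancestry A <- B <- C, then
        # prune_parents marks B and A as "had" too and drops all three from
        # msngset, leaving only genuinely missing nodes behind.
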
        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and a total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

1772 # Now that we have all theses utility functions to help out and
1773 # Now that we have all theses utility functions to help out and
1773 # logically divide up the task, generate the group.
1774 # logically divide up the task, generate the group.
1774 def gengroup():
1775 def gengroup():
1775 # The set of changed files starts empty.
1776 # The set of changed files starts empty.
1776 changedfiles = {}
1777 changedfiles = {}
1777 # Create a changenode group generator that will call our functions
1778 # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

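    # A sketch of the wire framing produced by the generator above, assuming
    # the changegroup module's helpers behave as their names suggest:
    # chunkheader() is a 4-byte big-endian length prefix that counts itself,
    # and closechunk() is an empty chunk terminating a group.
    #
    #     >>> import struct
    #     >>> struct.pack(">l", len(data) + 4) + data  # one chunk; 'data' is hypothetical
    #     >>> struct.pack(">l", 0)                     # group terminator
    #
    # The full stream is therefore: changelog group, manifest group, then a
    # filename chunk plus filenode group per changed file, each group closed
    # by the zero chunk.
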
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

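    # Minimal usage sketch, with hedged names ('remote' stands for any peer
    # repository): a push builds the group from the nodes the remote lacks
    # and hands it to the receiving side, roughly:
    #
    #     >>> update = self.findoutgoing(remote)
    #     >>> cg = self.changegroup(update, 'push')
    #     >>> remote.addchangegroup(cg, 'push', self.url())
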
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchcache = None
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

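    # Worked example of the return-value contract above, following the
    # arithmetic at the end of addchangegroup(): 1 head before and 3 after
    # gives 3 - 1 + 1 = 3; 3 before and 2 after gives 2 - 3 - 1 = -2; an
    # unchanged head count gives 1.  Only "nothing added at all" returns 0.
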
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

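    # Shape of the stream consumed above, reconstructed from the reads that
    # stream_in() performs (the producing side lives in the wire-protocol
    # code):
    #
    #     <resp>\n                      "0" ok, "1" forbidden, "2" lock failed
    #     <total_files> <total_bytes>\n
    #     per file:
    #     <name>\0<size>\n              then exactly <size> raw bytes
    #
    # which is also why filenames containing '\n' or '\r' are unsupported.
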
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

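    # Usage sketch (hedged): hg.clone() reaches this method for local
    # destinations; requesting specific heads forces the pull path because
    # a streaming clone can only copy the whole store verbatim.
    #
    #     >>> dest.clone(remote, stream=True)   # stream if server allows
    #     >>> dest.clone(remote, heads=[h])     # always pulls
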
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

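# Example, assuming the ('journal', 'undo') pair that
# localrepository.transaction passes in: the returned closure renames the
# journal to undo when the transaction is cleaned up, and holds no reference
# back to the repository, so destructors can still run.
#
#     >>> after = aftertrans([('journal', 'undo')])
#     >>> after()   # performs util.rename('journal', 'undo')
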
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
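
# instance() and islocal() are the factory hooks that mercurial.hg uses to
# pick a repository class from a URL scheme; a minimal sketch, assuming an
# existing repository at /path/to/repo:
#
#     >>> from mercurial import ui as uimod, hg
#     >>> repo = hg.repository(uimod.ui(), '/path/to/repo')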