##// END OF EJS Templates
Add missing catch of a TypeError
Bernhard Leiner -
r7063:be2daa32 default
parent child Browse files
Show More
@@ -1,2158 +1,2158 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import lock, transaction, stat, errno, ui
12 import lock, transaction, stat, errno, ui
13 import os, revlog, time, util, extensions, hook, inspect
13 import os, revlog, time, util, extensions, hook, inspect
14
14
class localrepository(repo.repository):
    """A repository whose history is stored on the local filesystem."""
    # wire-protocol capabilities this repository class advertises to peers
    capabilities = util.set(('lookup', 'changegroupsubset'))
    # on-disk format requirements this class knows how to read
    supported = ('revlogv1', 'store')
18
18
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        parentui - ui object the repository's ui is derived from
        path     - root of the working directory
        create   - when true, create a new repository on disk

        Raises repo.RepoError if the repository is missing, already
        exists (with create), or declares an unsupported requirement.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)    # opener rooted at .hg
        self.wopener = util.opener(self.root)   # opener rooted at working dir

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                # a missing requires file means a plain old-format repo;
                # any other I/O error is fatal
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            # old layout: store files live directly under .hg, unencoded
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path

        try:
            # files in .hg/ will be created using this mode
            mode = os.stat(self.spath).st_mode
            # avoid some useless chmods
            if (0777 & ~util._umask) == (0777 & mode):
                mode = None
        except OSError:
            mode = None

        self._createmode = mode
        self.opener.createmode = mode
        sopener = util.opener(self.spath)
        sopener.createmode = mode
        self.sopener = util.encodedopener(sopener, self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no per-repo hgrc; keep the parent ui configuration
            pass

        # lazily populated caches; see tags()/branchtags()/nodetags()
        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None
103
103
    def __getattr__(self, name):
        """Create the expensive changelog/manifest/dirstate objects
        lazily on first access and cache them on the instance."""
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            # the changelog's revlog version becomes the default version
            # for other files opened through the store opener
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            # force changelog creation first so defversion is set
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name
118
118
    def url(self):
        """Return this repository's URL (always a file: URL)."""
        return 'file:' + self.root
121
121
    def hook(self, name, throw=False, **args):
        """Run the named hook; delegates to hook.hook with this repo's ui."""
        return hook.hook(self.ui, self, name, throw, **args)
124
124
    # characters that may never appear in a tag name
    tag_disallowed = ':\r\n'
126
126
    def _tag(self, names, node, message, local, user, date, parent=None,
             extra={}):
        """Low-level tagging helper shared with tag().

        names  - a tag name or a sequence of tag names to attach to node
        parent - commit parent for the .hgtags commit; None means commit
                 on top of the dirstate parent
        NOTE(review): extra={} is a mutable default argument; it is only
        passed through to commit() here and never mutated, but callers
        should not rely on that.
        Returns the node of the .hgtags commit, or None for local tags.
        """
        use_dirstate = parent is None

        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            # append entries to fp, ensuring the existing content ends
            # with a newline first
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                # no .hgtags file at that revision yet
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetags(fp, names, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
196
196
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # refuse to run if .hgtags is locally modified in any way
        # (modified/added/removed/deleted/unknown): the implicit commit
        # below would otherwise pick up unrelated changes
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self._tag(names, node, message, local, user, date)
224
224
    def tags(self):
        '''return a mapping of tag to node'''
        # serve from cache when already computed
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            # parse one tags file (fn is used for warnings only) and
            # merge its entries into globaltags/tagtypes
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                # h accumulates the older nodes this entry supersedes
                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            # reuse the previous filectx where possible to share parsing
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            # no localtags file
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k,nh in globaltags.items():
            n = nh[0]
            # a nullid winner means the tag was deleted; keep its type
            # but drop it from the visible mapping
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
309
309
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local' : a local tag
        'global' : a global tag
        None : tag does not exist
        '''

        # calling tags() populates self._tagstypecache as a side effect
        self.tags()

        return self._tagstypecache.get(tagname)
322
322
    def _hgtagsnodes(self):
        """Return (rev, node, fnode) for the .hgtags file of each head,
        ending with the tip; heads whose .hgtags file node was already
        seen on a later head are pruned."""
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # same file node seen before: drop the earlier entry
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
340
340
341 def tagslist(self):
341 def tagslist(self):
342 '''return a list of tags ordered by revision'''
342 '''return a list of tags ordered by revision'''
343 l = []
343 l = []
344 for t, n in self.tags().items():
344 for t, n in self.tags().items():
345 try:
345 try:
346 r = self.changelog.rev(n)
346 r = self.changelog.rev(n)
347 except:
347 except:
348 r = -2 # sort to the beginning of the list if unknown
348 r = -2 # sort to the beginning of the list if unknown
349 l.append((r, t, n))
349 l.append((r, t, n))
350 l.sort()
350 l.sort()
351 return [(t, n) for r, t, n in l]
351 return [(t, n) for r, t, n in l]
352
352
353 def nodetags(self, node):
353 def nodetags(self, node):
354 '''return the tags associated with a node'''
354 '''return the tags associated with a node'''
355 if not self.nodetagscache:
355 if not self.nodetagscache:
356 self.nodetagscache = {}
356 self.nodetagscache = {}
357 for t, n in self.tags().items():
357 for t, n in self.tags().items():
358 self.nodetagscache.setdefault(n, []).append(t)
358 self.nodetagscache.setdefault(n, []).append(t)
359 return self.nodetagscache.get(node, [])
359 return self.nodetagscache.get(node, [])
360
360
    def _branchtags(self, partial, lrev):
        """Bring the branch cache 'partial' (valid through rev lrev) up
        to the current tip and write the refreshed cache to disk."""
        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
368
368
    def branchtags(self):
        """Return a dict of branch name -> tip node, in the local charset.

        The result is cached on the instance and reused until the
        changelog tip changes.
        """
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable previous state (e.g. after a strip): reload from disk
            partial, last, lrev = self._readbranchcache()
        else:
            # old tip still exists: update incrementally from there
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        self._ubranchcache = partial
        return self.branchcache
394
394
    def _readbranchcache(self):
        """Parse .hg/branch.cache from disk.

        Returns (partial, last, lrev): partial maps branch name (UTF-8)
        to node; last/lrev are the tip node and rev the cache was valid
        for. A missing, corrupt, or stale cache yields ({}, nullid,
        nullrev) so callers simply rebuild from scratch.
        """
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line: "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines: "<node hex> <branch label>"
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # any parse failure just discards the cache; only mention it
            # in debug mode
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
422
422
    def _writebranchcache(self, branches, tip, tiprev):
        """Atomically rewrite .hg/branch.cache; I/O errors are ignored
        because the cache is only an optimization."""
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            # header line identifies the tip this cache is valid for
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass
432
432
433 def _updatebranchcache(self, partial, start, end):
433 def _updatebranchcache(self, partial, start, end):
434 for r in xrange(start, end):
434 for r in xrange(start, end):
435 c = self.changectx(r)
435 c = self.changectx(r)
436 b = c.branch()
436 b = c.branch()
437 partial[b] = c.node()
437 partial[b] = c.node()
438
438
439 def lookup(self, key):
439 def lookup(self, key):
440 if key == '.':
440 if key == '.':
441 key, second = self.dirstate.parents()
441 key, second = self.dirstate.parents()
442 if key == nullid:
442 if key == nullid:
443 raise repo.RepoError(_("no revision checked out"))
443 raise repo.RepoError(_("no revision checked out"))
444 if second != nullid:
444 if second != nullid:
445 self.ui.warn(_("warning: working directory has two parents, "
445 self.ui.warn(_("warning: working directory has two parents, "
446 "tag '.' uses the first\n"))
446 "tag '.' uses the first\n"))
447 elif key == 'null':
447 elif key == 'null':
448 return nullid
448 return nullid
449 n = self.changelog._match(key)
449 n = self.changelog._match(key)
450 if n:
450 if n:
451 return n
451 return n
452 if key in self.tags():
452 if key in self.tags():
453 return self.tags()[key]
453 return self.tags()[key]
454 if key in self.branchtags():
454 if key in self.branchtags():
455 return self.branchtags()[key]
455 return self.branchtags()[key]
456 n = self.changelog._partialmatch(key)
456 n = self.changelog._partialmatch(key)
457 if n:
457 if n:
458 return n
458 return n
459 try:
459 try:
460 if len(key) == 20:
460 if len(key) == 20:
461 key = hex(key)
461 key = hex(key)
462 except:
462 except:
463 pass
463 pass
464 raise repo.RepoError(_("unknown revision '%s'") % key)
464 raise repo.RepoError(_("unknown revision '%s'") % key)
465
465
    def local(self):
        """True: this is a local repository (not a remote peer)."""
        return True
468
468
    def join(self, f):
        """Join f onto the .hg directory path."""
        return os.path.join(self.path, f)
471
471
472 def sjoin(self, f):
472 def sjoin(self, f):
473 f = self.encodefn(f)
473 f = self.encodefn(f)
474 return os.path.join(self.spath, f)
474 return os.path.join(self.spath, f)
475
475
    def wjoin(self, f):
        """Join f onto the working directory root."""
        return os.path.join(self.root, f)
478
478
    def file(self, f):
        """Return the filelog for tracked file f; a single leading '/'
        is stripped."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)
483
483
    def changectx(self, changeid=None):
        """Return a context.changectx for changeid (interpretation of
        changeid, including None, is handled by context.changectx)."""
        return context.changectx(self, changeid)
486
486
    def workingctx(self):
        """Return a context object for the working directory."""
        return context.workingctx(self)
489
489
    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        # a nullid second parent means there is only one real parent
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]
502
502
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
507
507
    def getcwd(self):
        """Return the current working directory as seen by the dirstate."""
        return self.dirstate.getcwd()
510
510
    def pathto(self, f, cwd=None):
        """Return f rendered relative to cwd; delegates to the dirstate."""
        return self.dirstate.pathto(f, cwd)
513
513
    def wfile(self, f, mode='r'):
        """Open file f in the working directory with the given mode."""
        return self.wopener(f, mode)
516
516
    def _link(self, f):
        """True if working-directory file f is a symbolic link."""
        return os.path.islink(self.wjoin(f))
519
519
    def _filter(self, filter, filename, data):
        """Run data through the filters configured in the given config
        section ("encode"/"decode" at the call sites in this class).

        filename is matched against each configured pattern; the first
        matching filter transforms data and the result is returned.
        Compiled (pattern, function, params) triples are cached in
        self.filterpats.
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                # a command starting with a registered data filter name
                # uses that Python filter instead of a shell pipe
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # default: run cmd as an external filter command
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
548
548
    def adddatafilter(self, name, filter):
        """Register a named Python data filter usable from the
        encode/decode configuration (see _filter)."""
        self._datafilters[name] = filter
551
551
552 def wread(self, filename):
552 def wread(self, filename):
553 if self._link(filename):
553 if self._link(filename):
554 data = os.readlink(self.wjoin(filename))
554 data = os.readlink(self.wjoin(filename))
555 else:
555 else:
556 data = self.wopener(filename, 'r').read()
556 data = self.wopener(filename, 'r').read()
557 return self._filter("encode", filename, data)
557 return self._filter("encode", filename, data)
558
558
    def wwrite(self, filename, data, flags):
        """Write data to filename in the working directory after 'decode'
        filtering, honoring the 'l' (symlink) and 'x' (executable) flags."""
        data = self._filter("decode", filename, data)
        try:
            # remove any existing file or symlink so the new entry gets
            # a clean type and fresh flags
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)
571
571
572 def wwritedata(self, filename, data):
572 def wwritedata(self, filename, data):
573 return self._filter("decode", filename, data)
573 return self._filter("decode", filename, data)
574
574
575 def transaction(self):
575 def transaction(self):
576 if self._transref and self._transref():
576 if self._transref and self._transref():
577 return self._transref().nest()
577 return self._transref().nest()
578
578
579 # abort here if the journal already exists
579 # abort here if the journal already exists
580 if os.path.exists(self.sjoin("journal")):
580 if os.path.exists(self.sjoin("journal")):
581 raise repo.RepoError(_("journal already exists - run hg recover"))
581 raise repo.RepoError(_("journal already exists - run hg recover"))
582
582
583 # save dirstate for rollback
583 # save dirstate for rollback
584 try:
584 try:
585 ds = self.opener("dirstate").read()
585 ds = self.opener("dirstate").read()
586 except IOError:
586 except IOError:
587 ds = ""
587 ds = ""
588 self.opener("journal.dirstate", "w").write(ds)
588 self.opener("journal.dirstate", "w").write(ds)
589 self.opener("journal.branch", "w").write(self.dirstate.branch())
589 self.opener("journal.branch", "w").write(self.dirstate.branch())
590
590
591 renames = [(self.sjoin("journal"), self.sjoin("undo")),
591 renames = [(self.sjoin("journal"), self.sjoin("undo")),
592 (self.join("journal.dirstate"), self.join("undo.dirstate")),
592 (self.join("journal.dirstate"), self.join("undo.dirstate")),
593 (self.join("journal.branch"), self.join("undo.branch"))]
593 (self.join("journal.branch"), self.join("undo.branch"))]
594 tr = transaction.transaction(self.ui.warn, self.sopener,
594 tr = transaction.transaction(self.ui.warn, self.sopener,
595 self.sjoin("journal"),
595 self.sjoin("journal"),
596 aftertrans(renames),
596 aftertrans(renames),
597 self._createmode)
597 self._createmode)
598 self._transref = weakref.ref(tr)
598 self._transref = weakref.ref(tr)
599 return tr
599 return tr
600
600
601 def recover(self):
601 def recover(self):
602 l = self.lock()
602 l = self.lock()
603 try:
603 try:
604 if os.path.exists(self.sjoin("journal")):
604 if os.path.exists(self.sjoin("journal")):
605 self.ui.status(_("rolling back interrupted transaction\n"))
605 self.ui.status(_("rolling back interrupted transaction\n"))
606 transaction.rollback(self.sopener, self.sjoin("journal"))
606 transaction.rollback(self.sopener, self.sjoin("journal"))
607 self.invalidate()
607 self.invalidate()
608 return True
608 return True
609 else:
609 else:
610 self.ui.warn(_("no interrupted transaction available\n"))
610 self.ui.warn(_("no interrupted transaction available\n"))
611 return False
611 return False
612 finally:
612 finally:
613 del l
613 del l
614
614
615 def rollback(self):
615 def rollback(self):
616 wlock = lock = None
616 wlock = lock = None
617 try:
617 try:
618 wlock = self.wlock()
618 wlock = self.wlock()
619 lock = self.lock()
619 lock = self.lock()
620 if os.path.exists(self.sjoin("undo")):
620 if os.path.exists(self.sjoin("undo")):
621 self.ui.status(_("rolling back last transaction\n"))
621 self.ui.status(_("rolling back last transaction\n"))
622 transaction.rollback(self.sopener, self.sjoin("undo"))
622 transaction.rollback(self.sopener, self.sjoin("undo"))
623 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
623 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
624 try:
624 try:
625 branch = self.opener("undo.branch").read()
625 branch = self.opener("undo.branch").read()
626 self.dirstate.setbranch(branch)
626 self.dirstate.setbranch(branch)
627 except IOError:
627 except IOError:
628 self.ui.warn(_("Named branch could not be reset, "
628 self.ui.warn(_("Named branch could not be reset, "
629 "current branch still is: %s\n")
629 "current branch still is: %s\n")
630 % util.tolocal(self.dirstate.branch()))
630 % util.tolocal(self.dirstate.branch()))
631 self.invalidate()
631 self.invalidate()
632 self.dirstate.invalidate()
632 self.dirstate.invalidate()
633 else:
633 else:
634 self.ui.warn(_("no rollback information available\n"))
634 self.ui.warn(_("no rollback information available\n"))
635 finally:
635 finally:
636 del lock, wlock
636 del lock, wlock
637
637
638 def invalidate(self):
638 def invalidate(self):
639 for a in "changelog manifest".split():
639 for a in "changelog manifest".split():
640 if a in self.__dict__:
640 if a in self.__dict__:
641 delattr(self, a)
641 delattr(self, a)
642 self.tagscache = None
642 self.tagscache = None
643 self._tagstypecache = None
643 self._tagstypecache = None
644 self.nodetagscache = None
644 self.nodetagscache = None
645 self.branchcache = None
645 self.branchcache = None
646 self._ubranchcache = None
646 self._ubranchcache = None
647 self._branchcachetip = None
647 self._branchcachetip = None
648
648
649 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
649 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
650 try:
650 try:
651 l = lock.lock(lockname, 0, releasefn, desc=desc)
651 l = lock.lock(lockname, 0, releasefn, desc=desc)
652 except lock.LockHeld, inst:
652 except lock.LockHeld, inst:
653 if not wait:
653 if not wait:
654 raise
654 raise
655 self.ui.warn(_("waiting for lock on %s held by %r\n") %
655 self.ui.warn(_("waiting for lock on %s held by %r\n") %
656 (desc, inst.locker))
656 (desc, inst.locker))
657 # default to 600 seconds timeout
657 # default to 600 seconds timeout
658 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
658 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
659 releasefn, desc=desc)
659 releasefn, desc=desc)
660 if acquirefn:
660 if acquirefn:
661 acquirefn()
661 acquirefn()
662 return l
662 return l
663
663
664 def lock(self, wait=True):
664 def lock(self, wait=True):
665 if self._lockref and self._lockref():
665 if self._lockref and self._lockref():
666 return self._lockref()
666 return self._lockref()
667
667
668 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
668 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
669 _('repository %s') % self.origroot)
669 _('repository %s') % self.origroot)
670 self._lockref = weakref.ref(l)
670 self._lockref = weakref.ref(l)
671 return l
671 return l
672
672
673 def wlock(self, wait=True):
673 def wlock(self, wait=True):
674 if self._wlockref and self._wlockref():
674 if self._wlockref and self._wlockref():
675 return self._wlockref()
675 return self._wlockref()
676
676
677 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
677 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
678 self.dirstate.invalidate, _('working directory of %s') %
678 self.dirstate.invalidate, _('working directory of %s') %
679 self.origroot)
679 self.origroot)
680 self._wlockref = weakref.ref(l)
680 self._wlockref = weakref.ref(l)
681 return l
681 return l
682
682
683 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
683 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
684 """
684 """
685 commit an individual file as part of a larger transaction
685 commit an individual file as part of a larger transaction
686 """
686 """
687
687
688 t = self.wread(fn)
688 t = self.wread(fn)
689 fl = self.file(fn)
689 fl = self.file(fn)
690 fp1 = manifest1.get(fn, nullid)
690 fp1 = manifest1.get(fn, nullid)
691 fp2 = manifest2.get(fn, nullid)
691 fp2 = manifest2.get(fn, nullid)
692
692
693 meta = {}
693 meta = {}
694 cf = self.dirstate.copied(fn)
694 cf = self.dirstate.copied(fn)
695 if cf and cf != fn:
695 if cf and cf != fn:
696 # Mark the new revision of this file as a copy of another
696 # Mark the new revision of this file as a copy of another
697 # file. This copy data will effectively act as a parent
697 # file. This copy data will effectively act as a parent
698 # of this new revision. If this is a merge, the first
698 # of this new revision. If this is a merge, the first
699 # parent will be the nullid (meaning "look up the copy data")
699 # parent will be the nullid (meaning "look up the copy data")
700 # and the second one will be the other parent. For example:
700 # and the second one will be the other parent. For example:
701 #
701 #
702 # 0 --- 1 --- 3 rev1 changes file foo
702 # 0 --- 1 --- 3 rev1 changes file foo
703 # \ / rev2 renames foo to bar and changes it
703 # \ / rev2 renames foo to bar and changes it
704 # \- 2 -/ rev3 should have bar with all changes and
704 # \- 2 -/ rev3 should have bar with all changes and
705 # should record that bar descends from
705 # should record that bar descends from
706 # bar in rev2 and foo in rev1
706 # bar in rev2 and foo in rev1
707 #
707 #
708 # this allows this merge to succeed:
708 # this allows this merge to succeed:
709 #
709 #
710 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
710 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
711 # \ / merging rev3 and rev4 should use bar@rev2
711 # \ / merging rev3 and rev4 should use bar@rev2
712 # \- 2 --- 4 as the merge base
712 # \- 2 --- 4 as the merge base
713 #
713 #
714
714
715 cr = manifest1.get(cf)
715 cr = manifest1.get(cf)
716 nfp = fp2
716 nfp = fp2
717
717
718 if manifest2: # branch merge
718 if manifest2: # branch merge
719 if fp2 == nullid: # copied on remote side
719 if fp2 == nullid: # copied on remote side
720 if fp1 != nullid or cf in manifest2:
720 if fp1 != nullid or cf in manifest2:
721 cr = manifest2[cf]
721 cr = manifest2[cf]
722 nfp = fp1
722 nfp = fp1
723
723
724 # find source in nearest ancestor if we've lost track
724 # find source in nearest ancestor if we've lost track
725 if not cr:
725 if not cr:
726 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
726 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
727 (fn, cf))
727 (fn, cf))
728 p1 = self.dirstate.parents()[0]
728 p1 = self.dirstate.parents()[0]
729 rev = self.changelog.rev(p1)
729 rev = self.changelog.rev(p1)
730 seen = {-1:None}
730 seen = {-1:None}
731 visit = [rev]
731 visit = [rev]
732 while visit:
732 while visit:
733 for p in self.changelog.parentrevs(visit.pop(0)):
733 for p in self.changelog.parentrevs(visit.pop(0)):
734 if p not in seen:
734 if p not in seen:
735 seen[p] = True
735 seen[p] = True
736 visit.append(p)
736 visit.append(p)
737 ctx = self.changectx(p)
737 ctx = self.changectx(p)
738 if cf in ctx:
738 if cf in ctx:
739 cr = ctx[cf].filenode()
739 cr = ctx[cf].filenode()
740 break
740 break
741
741
742 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
742 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
743 meta["copy"] = cf
743 meta["copy"] = cf
744 meta["copyrev"] = hex(cr)
744 meta["copyrev"] = hex(cr)
745 fp1, fp2 = nullid, nfp
745 fp1, fp2 = nullid, nfp
746 elif fp2 != nullid:
746 elif fp2 != nullid:
747 # is one parent an ancestor of the other?
747 # is one parent an ancestor of the other?
748 fpa = fl.ancestor(fp1, fp2)
748 fpa = fl.ancestor(fp1, fp2)
749 if fpa == fp1:
749 if fpa == fp1:
750 fp1, fp2 = fp2, nullid
750 fp1, fp2 = fp2, nullid
751 elif fpa == fp2:
751 elif fpa == fp2:
752 fp2 = nullid
752 fp2 = nullid
753
753
754 # is the file unmodified from the parent? report existing entry
754 # is the file unmodified from the parent? report existing entry
755 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
755 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
756 return fp1
756 return fp1
757
757
758 changelist.append(fn)
758 changelist.append(fn)
759 return fl.add(t, meta, tr, linkrev, fp1, fp2)
759 return fl.add(t, meta, tr, linkrev, fp1, fp2)
760
760
761 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
761 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
762 if p1 is None:
762 if p1 is None:
763 p1, p2 = self.dirstate.parents()
763 p1, p2 = self.dirstate.parents()
764 return self.commit(files=files, text=text, user=user, date=date,
764 return self.commit(files=files, text=text, user=user, date=date,
765 p1=p1, p2=p2, extra=extra, empty_ok=True)
765 p1=p1, p2=p2, extra=extra, empty_ok=True)
766
766
767 def commit(self, files=None, text="", user=None, date=None,
767 def commit(self, files=None, text="", user=None, date=None,
768 match=util.always, force=False, force_editor=False,
768 match=util.always, force=False, force_editor=False,
769 p1=None, p2=None, extra={}, empty_ok=False):
769 p1=None, p2=None, extra={}, empty_ok=False):
770 wlock = lock = tr = None
770 wlock = lock = tr = None
771 valid = 0 # don't save the dirstate if this isn't set
771 valid = 0 # don't save the dirstate if this isn't set
772 if files:
772 if files:
773 files = util.unique(files)
773 files = util.unique(files)
774 try:
774 try:
775 wlock = self.wlock()
775 wlock = self.wlock()
776 lock = self.lock()
776 lock = self.lock()
777 commit = []
777 commit = []
778 remove = []
778 remove = []
779 changed = []
779 changed = []
780 use_dirstate = (p1 is None) # not rawcommit
780 use_dirstate = (p1 is None) # not rawcommit
781 extra = extra.copy()
781 extra = extra.copy()
782
782
783 if use_dirstate:
783 if use_dirstate:
784 if files:
784 if files:
785 for f in files:
785 for f in files:
786 s = self.dirstate[f]
786 s = self.dirstate[f]
787 if s in 'nma':
787 if s in 'nma':
788 commit.append(f)
788 commit.append(f)
789 elif s == 'r':
789 elif s == 'r':
790 remove.append(f)
790 remove.append(f)
791 else:
791 else:
792 self.ui.warn(_("%s not tracked!\n") % f)
792 self.ui.warn(_("%s not tracked!\n") % f)
793 else:
793 else:
794 changes = self.status(match=match)[:5]
794 changes = self.status(match=match)[:5]
795 modified, added, removed, deleted, unknown = changes
795 modified, added, removed, deleted, unknown = changes
796 commit = modified + added
796 commit = modified + added
797 remove = removed
797 remove = removed
798 else:
798 else:
799 commit = files
799 commit = files
800
800
801 if use_dirstate:
801 if use_dirstate:
802 p1, p2 = self.dirstate.parents()
802 p1, p2 = self.dirstate.parents()
803 update_dirstate = True
803 update_dirstate = True
804
804
805 if (not force and p2 != nullid and
805 if (not force and p2 != nullid and
806 (files or match != util.always)):
806 (files or match != util.always)):
807 raise util.Abort(_('cannot partially commit a merge '
807 raise util.Abort(_('cannot partially commit a merge '
808 '(do not specify files or patterns)'))
808 '(do not specify files or patterns)'))
809 else:
809 else:
810 p1, p2 = p1, p2 or nullid
810 p1, p2 = p1, p2 or nullid
811 update_dirstate = (self.dirstate.parents()[0] == p1)
811 update_dirstate = (self.dirstate.parents()[0] == p1)
812
812
813 c1 = self.changelog.read(p1)
813 c1 = self.changelog.read(p1)
814 c2 = self.changelog.read(p2)
814 c2 = self.changelog.read(p2)
815 m1 = self.manifest.read(c1[0]).copy()
815 m1 = self.manifest.read(c1[0]).copy()
816 m2 = self.manifest.read(c2[0])
816 m2 = self.manifest.read(c2[0])
817
817
818 if use_dirstate:
818 if use_dirstate:
819 branchname = self.workingctx().branch()
819 branchname = self.workingctx().branch()
820 try:
820 try:
821 branchname = branchname.decode('UTF-8').encode('UTF-8')
821 branchname = branchname.decode('UTF-8').encode('UTF-8')
822 except UnicodeDecodeError:
822 except UnicodeDecodeError:
823 raise util.Abort(_('branch name not in UTF-8!'))
823 raise util.Abort(_('branch name not in UTF-8!'))
824 else:
824 else:
825 branchname = ""
825 branchname = ""
826
826
827 if use_dirstate:
827 if use_dirstate:
828 oldname = c1[5].get("branch") # stored in UTF-8
828 oldname = c1[5].get("branch") # stored in UTF-8
829 if (not commit and not remove and not force and p2 == nullid
829 if (not commit and not remove and not force and p2 == nullid
830 and branchname == oldname):
830 and branchname == oldname):
831 self.ui.status(_("nothing changed\n"))
831 self.ui.status(_("nothing changed\n"))
832 return None
832 return None
833
833
834 xp1 = hex(p1)
834 xp1 = hex(p1)
835 if p2 == nullid: xp2 = ''
835 if p2 == nullid: xp2 = ''
836 else: xp2 = hex(p2)
836 else: xp2 = hex(p2)
837
837
838 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
838 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
839
839
840 tr = self.transaction()
840 tr = self.transaction()
841 trp = weakref.proxy(tr)
841 trp = weakref.proxy(tr)
842
842
843 # check in files
843 # check in files
844 new = {}
844 new = {}
845 linkrev = self.changelog.count()
845 linkrev = self.changelog.count()
846 commit.sort()
846 commit.sort()
847 is_exec = util.execfunc(self.root, m1.execf)
847 is_exec = util.execfunc(self.root, m1.execf)
848 is_link = util.linkfunc(self.root, m1.linkf)
848 is_link = util.linkfunc(self.root, m1.linkf)
849 for f in commit:
849 for f in commit:
850 self.ui.note(f + "\n")
850 self.ui.note(f + "\n")
851 try:
851 try:
852 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
852 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
853 new_exec = is_exec(f)
853 new_exec = is_exec(f)
854 new_link = is_link(f)
854 new_link = is_link(f)
855 if ((not changed or changed[-1] != f) and
855 if ((not changed or changed[-1] != f) and
856 m2.get(f) != new[f]):
856 m2.get(f) != new[f]):
857 # mention the file in the changelog if some
857 # mention the file in the changelog if some
858 # flag changed, even if there was no content
858 # flag changed, even if there was no content
859 # change.
859 # change.
860 old_exec = m1.execf(f)
860 old_exec = m1.execf(f)
861 old_link = m1.linkf(f)
861 old_link = m1.linkf(f)
862 if old_exec != new_exec or old_link != new_link:
862 if old_exec != new_exec or old_link != new_link:
863 changed.append(f)
863 changed.append(f)
864 m1.set(f, new_exec, new_link)
864 m1.set(f, new_exec, new_link)
865 if use_dirstate:
865 if use_dirstate:
866 self.dirstate.normal(f)
866 self.dirstate.normal(f)
867
867
868 except (OSError, IOError):
868 except (OSError, IOError):
869 if use_dirstate:
869 if use_dirstate:
870 self.ui.warn(_("trouble committing %s!\n") % f)
870 self.ui.warn(_("trouble committing %s!\n") % f)
871 raise
871 raise
872 else:
872 else:
873 remove.append(f)
873 remove.append(f)
874
874
875 # update manifest
875 # update manifest
876 m1.update(new)
876 m1.update(new)
877 remove.sort()
877 remove.sort()
878 removed = []
878 removed = []
879
879
880 for f in remove:
880 for f in remove:
881 if f in m1:
881 if f in m1:
882 del m1[f]
882 del m1[f]
883 removed.append(f)
883 removed.append(f)
884 elif f in m2:
884 elif f in m2:
885 removed.append(f)
885 removed.append(f)
886 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
886 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
887 (new, removed))
887 (new, removed))
888
888
889 # add changeset
889 # add changeset
890 new = new.keys()
890 new = new.keys()
891 new.sort()
891 new.sort()
892
892
893 user = user or self.ui.username()
893 user = user or self.ui.username()
894 if (not empty_ok and not text) or force_editor:
894 if (not empty_ok and not text) or force_editor:
895 edittext = []
895 edittext = []
896 if text:
896 if text:
897 edittext.append(text)
897 edittext.append(text)
898 edittext.append("")
898 edittext.append("")
899 edittext.append(_("HG: Enter commit message."
899 edittext.append(_("HG: Enter commit message."
900 " Lines beginning with 'HG:' are removed."))
900 " Lines beginning with 'HG:' are removed."))
901 edittext.append("HG: --")
901 edittext.append("HG: --")
902 edittext.append("HG: user: %s" % user)
902 edittext.append("HG: user: %s" % user)
903 if p2 != nullid:
903 if p2 != nullid:
904 edittext.append("HG: branch merge")
904 edittext.append("HG: branch merge")
905 if branchname:
905 if branchname:
906 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
906 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
907 edittext.extend(["HG: changed %s" % f for f in changed])
907 edittext.extend(["HG: changed %s" % f for f in changed])
908 edittext.extend(["HG: removed %s" % f for f in removed])
908 edittext.extend(["HG: removed %s" % f for f in removed])
909 if not changed and not remove:
909 if not changed and not remove:
910 edittext.append("HG: no files changed")
910 edittext.append("HG: no files changed")
911 edittext.append("")
911 edittext.append("")
912 # run editor in the repository root
912 # run editor in the repository root
913 olddir = os.getcwd()
913 olddir = os.getcwd()
914 os.chdir(self.root)
914 os.chdir(self.root)
915 text = self.ui.edit("\n".join(edittext), user)
915 text = self.ui.edit("\n".join(edittext), user)
916 os.chdir(olddir)
916 os.chdir(olddir)
917
917
918 if branchname:
918 if branchname:
919 extra["branch"] = branchname
919 extra["branch"] = branchname
920
920
921 lines = [line.rstrip() for line in text.rstrip().splitlines()]
921 lines = [line.rstrip() for line in text.rstrip().splitlines()]
922 while lines and not lines[0]:
922 while lines and not lines[0]:
923 del lines[0]
923 del lines[0]
924 if not lines and use_dirstate:
924 if not lines and use_dirstate:
925 raise util.Abort(_("empty commit message"))
925 raise util.Abort(_("empty commit message"))
926 text = '\n'.join(lines)
926 text = '\n'.join(lines)
927
927
928 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
928 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
929 user, date, extra)
929 user, date, extra)
930 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
930 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
931 parent2=xp2)
931 parent2=xp2)
932 tr.close()
932 tr.close()
933
933
934 if self.branchcache:
934 if self.branchcache:
935 self.branchtags()
935 self.branchtags()
936
936
937 if use_dirstate or update_dirstate:
937 if use_dirstate or update_dirstate:
938 self.dirstate.setparents(n)
938 self.dirstate.setparents(n)
939 if use_dirstate:
939 if use_dirstate:
940 for f in removed:
940 for f in removed:
941 self.dirstate.forget(f)
941 self.dirstate.forget(f)
942 valid = 1 # our dirstate updates are complete
942 valid = 1 # our dirstate updates are complete
943
943
944 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
944 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
945 return n
945 return n
946 finally:
946 finally:
947 if not valid: # don't save our updated dirstate
947 if not valid: # don't save our updated dirstate
948 self.dirstate.invalidate()
948 self.dirstate.invalidate()
949 del tr, lock, wlock
949 del tr, lock, wlock
950
950
951 def walk(self, node=None, files=[], match=util.always, badmatch=None):
951 def walk(self, node=None, files=[], match=util.always, badmatch=None):
952 '''
952 '''
953 walk recursively through the directory tree or a given
953 walk recursively through the directory tree or a given
954 changeset, finding all files matched by the match
954 changeset, finding all files matched by the match
955 function
955 function
956
956
957 results are yielded in a tuple (src, filename), where src
957 results are yielded in a tuple (src, filename), where src
958 is one of:
958 is one of:
959 'f' the file was found in the directory tree
959 'f' the file was found in the directory tree
960 'm' the file was only in the dirstate and not in the tree
960 'm' the file was only in the dirstate and not in the tree
961 'b' file was not found and matched badmatch
961 'b' file was not found and matched badmatch
962 '''
962 '''
963
963
964 if node:
964 if node:
965 fdict = dict.fromkeys(files)
965 fdict = dict.fromkeys(files)
966 # for dirstate.walk, files=['.'] means "walk the whole tree".
966 # for dirstate.walk, files=['.'] means "walk the whole tree".
967 # follow that here, too
967 # follow that here, too
968 fdict.pop('.', None)
968 fdict.pop('.', None)
969 mdict = self.manifest.read(self.changelog.read(node)[0])
969 mdict = self.manifest.read(self.changelog.read(node)[0])
970 mfiles = mdict.keys()
970 mfiles = mdict.keys()
971 mfiles.sort()
971 mfiles.sort()
972 for fn in mfiles:
972 for fn in mfiles:
973 for ffn in fdict:
973 for ffn in fdict:
974 # match if the file is the exact name or a directory
974 # match if the file is the exact name or a directory
975 if ffn == fn or fn.startswith("%s/" % ffn):
975 if ffn == fn or fn.startswith("%s/" % ffn):
976 del fdict[ffn]
976 del fdict[ffn]
977 break
977 break
978 if match(fn):
978 if match(fn):
979 yield 'm', fn
979 yield 'm', fn
980 ffiles = fdict.keys()
980 ffiles = fdict.keys()
981 ffiles.sort()
981 ffiles.sort()
982 for fn in ffiles:
982 for fn in ffiles:
983 if badmatch and badmatch(fn):
983 if badmatch and badmatch(fn):
984 if match(fn):
984 if match(fn):
985 yield 'b', fn
985 yield 'b', fn
986 else:
986 else:
987 self.ui.warn(_('%s: No such file in rev %s\n')
987 self.ui.warn(_('%s: No such file in rev %s\n')
988 % (self.pathto(fn), short(node)))
988 % (self.pathto(fn), short(node)))
989 else:
989 else:
990 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
990 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
991 yield src, fn
991 yield src, fn
992
992
993 def status(self, node1=None, node2=None, files=[], match=util.always,
993 def status(self, node1=None, node2=None, files=[], match=util.always,
994 list_ignored=False, list_clean=False, list_unknown=True):
994 list_ignored=False, list_clean=False, list_unknown=True):
995 """return status of files between two nodes or node and working directory
995 """return status of files between two nodes or node and working directory
996
996
997 If node1 is None, use the first dirstate parent instead.
997 If node1 is None, use the first dirstate parent instead.
998 If node2 is None, compare node1 with working directory.
998 If node2 is None, compare node1 with working directory.
999 """
999 """
1000
1000
1001 def fcmp(fn, getnode):
1001 def fcmp(fn, getnode):
1002 t1 = self.wread(fn)
1002 t1 = self.wread(fn)
1003 return self.file(fn).cmp(getnode(fn), t1)
1003 return self.file(fn).cmp(getnode(fn), t1)
1004
1004
1005 def mfmatches(node):
1005 def mfmatches(node):
1006 change = self.changelog.read(node)
1006 change = self.changelog.read(node)
1007 mf = self.manifest.read(change[0]).copy()
1007 mf = self.manifest.read(change[0]).copy()
1008 for fn in mf.keys():
1008 for fn in mf.keys():
1009 if not match(fn):
1009 if not match(fn):
1010 del mf[fn]
1010 del mf[fn]
1011 return mf
1011 return mf
1012
1012
1013 modified, added, removed, deleted, unknown = [], [], [], [], []
1013 modified, added, removed, deleted, unknown = [], [], [], [], []
1014 ignored, clean = [], []
1014 ignored, clean = [], []
1015
1015
1016 compareworking = False
1016 compareworking = False
1017 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
1017 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
1018 compareworking = True
1018 compareworking = True
1019
1019
1020 if not compareworking:
1020 if not compareworking:
1021 # read the manifest from node1 before the manifest from node2,
1021 # read the manifest from node1 before the manifest from node2,
1022 # so that we'll hit the manifest cache if we're going through
1022 # so that we'll hit the manifest cache if we're going through
1023 # all the revisions in parent->child order.
1023 # all the revisions in parent->child order.
1024 mf1 = mfmatches(node1)
1024 mf1 = mfmatches(node1)
1025
1025
1026 # are we comparing the working directory?
1026 # are we comparing the working directory?
1027 if not node2:
1027 if not node2:
1028 (lookup, modified, added, removed, deleted, unknown,
1028 (lookup, modified, added, removed, deleted, unknown,
1029 ignored, clean) = self.dirstate.status(files, match,
1029 ignored, clean) = self.dirstate.status(files, match,
1030 list_ignored, list_clean,
1030 list_ignored, list_clean,
1031 list_unknown)
1031 list_unknown)
1032
1032
1033 # are we comparing working dir against its parent?
1033 # are we comparing working dir against its parent?
1034 if compareworking:
1034 if compareworking:
1035 if lookup:
1035 if lookup:
1036 fixup = []
1036 fixup = []
1037 # do a full compare of any files that might have changed
1037 # do a full compare of any files that might have changed
1038 ctx = self.changectx()
1038 ctx = self.changectx()
1039 mexec = lambda f: 'x' in ctx.fileflags(f)
1039 mexec = lambda f: 'x' in ctx.fileflags(f)
1040 mlink = lambda f: 'l' in ctx.fileflags(f)
1040 mlink = lambda f: 'l' in ctx.fileflags(f)
1041 is_exec = util.execfunc(self.root, mexec)
1041 is_exec = util.execfunc(self.root, mexec)
1042 is_link = util.linkfunc(self.root, mlink)
1042 is_link = util.linkfunc(self.root, mlink)
1043 def flags(f):
1043 def flags(f):
1044 return is_link(f) and 'l' or is_exec(f) and 'x' or ''
1044 return is_link(f) and 'l' or is_exec(f) and 'x' or ''
1045 for f in lookup:
1045 for f in lookup:
1046 if (f not in ctx or flags(f) != ctx.fileflags(f)
1046 if (f not in ctx or flags(f) != ctx.fileflags(f)
1047 or ctx[f].cmp(self.wread(f))):
1047 or ctx[f].cmp(self.wread(f))):
1048 modified.append(f)
1048 modified.append(f)
1049 else:
1049 else:
1050 fixup.append(f)
1050 fixup.append(f)
1051 if list_clean:
1051 if list_clean:
1052 clean.append(f)
1052 clean.append(f)
1053
1053
1054 # update dirstate for files that are actually clean
1054 # update dirstate for files that are actually clean
1055 if fixup:
1055 if fixup:
1056 wlock = None
1056 wlock = None
1057 try:
1057 try:
1058 try:
1058 try:
1059 wlock = self.wlock(False)
1059 wlock = self.wlock(False)
1060 except lock.LockException:
1060 except lock.LockException:
1061 pass
1061 pass
1062 if wlock:
1062 if wlock:
1063 for f in fixup:
1063 for f in fixup:
1064 self.dirstate.normal(f)
1064 self.dirstate.normal(f)
1065 finally:
1065 finally:
1066 del wlock
1066 del wlock
1067 else:
1067 else:
1068 # we are comparing working dir against non-parent
1068 # we are comparing working dir against non-parent
1069 # generate a pseudo-manifest for the working dir
1069 # generate a pseudo-manifest for the working dir
1070 # XXX: create it in dirstate.py ?
1070 # XXX: create it in dirstate.py ?
1071 mf2 = mfmatches(self.dirstate.parents()[0])
1071 mf2 = mfmatches(self.dirstate.parents()[0])
1072 is_exec = util.execfunc(self.root, mf2.execf)
1072 is_exec = util.execfunc(self.root, mf2.execf)
1073 is_link = util.linkfunc(self.root, mf2.linkf)
1073 is_link = util.linkfunc(self.root, mf2.linkf)
1074 for f in lookup + modified + added:
1074 for f in lookup + modified + added:
1075 mf2[f] = ""
1075 mf2[f] = ""
1076 mf2.set(f, is_exec(f), is_link(f))
1076 mf2.set(f, is_exec(f), is_link(f))
1077 for f in removed:
1077 for f in removed:
1078 if f in mf2:
1078 if f in mf2:
1079 del mf2[f]
1079 del mf2[f]
1080
1080
1081 else:
1081 else:
1082 # we are comparing two revisions
1082 # we are comparing two revisions
1083 mf2 = mfmatches(node2)
1083 mf2 = mfmatches(node2)
1084
1084
1085 if not compareworking:
1085 if not compareworking:
1086 # flush lists from dirstate before comparing manifests
1086 # flush lists from dirstate before comparing manifests
1087 modified, added, clean = [], [], []
1087 modified, added, clean = [], [], []
1088
1088
1089 # make sure to sort the files so we talk to the disk in a
1089 # make sure to sort the files so we talk to the disk in a
1090 # reasonable order
1090 # reasonable order
1091 mf2keys = mf2.keys()
1091 mf2keys = mf2.keys()
1092 mf2keys.sort()
1092 mf2keys.sort()
1093 getnode = lambda fn: mf1.get(fn, nullid)
1093 getnode = lambda fn: mf1.get(fn, nullid)
1094 for fn in mf2keys:
1094 for fn in mf2keys:
1095 if fn in mf1:
1095 if fn in mf1:
1096 if (mf1.flags(fn) != mf2.flags(fn) or
1096 if (mf1.flags(fn) != mf2.flags(fn) or
1097 (mf1[fn] != mf2[fn] and
1097 (mf1[fn] != mf2[fn] and
1098 (mf2[fn] != "" or fcmp(fn, getnode)))):
1098 (mf2[fn] != "" or fcmp(fn, getnode)))):
1099 modified.append(fn)
1099 modified.append(fn)
1100 elif list_clean:
1100 elif list_clean:
1101 clean.append(fn)
1101 clean.append(fn)
1102 del mf1[fn]
1102 del mf1[fn]
1103 else:
1103 else:
1104 added.append(fn)
1104 added.append(fn)
1105
1105
1106 removed = mf1.keys()
1106 removed = mf1.keys()
1107
1107
1108 # sort and return results:
1108 # sort and return results:
1109 for l in modified, added, removed, deleted, unknown, ignored, clean:
1109 for l in modified, added, removed, deleted, unknown, ignored, clean:
1110 l.sort()
1110 l.sort()
1111 return (modified, added, removed, deleted, unknown, ignored, clean)
1111 return (modified, added, removed, deleted, unknown, ignored, clean)
1112
1112
1113 def add(self, list):
1113 def add(self, list):
1114 wlock = self.wlock()
1114 wlock = self.wlock()
1115 try:
1115 try:
1116 rejected = []
1116 rejected = []
1117 for f in list:
1117 for f in list:
1118 p = self.wjoin(f)
1118 p = self.wjoin(f)
1119 try:
1119 try:
1120 st = os.lstat(p)
1120 st = os.lstat(p)
1121 except:
1121 except:
1122 self.ui.warn(_("%s does not exist!\n") % f)
1122 self.ui.warn(_("%s does not exist!\n") % f)
1123 rejected.append(f)
1123 rejected.append(f)
1124 continue
1124 continue
1125 if st.st_size > 10000000:
1125 if st.st_size > 10000000:
1126 self.ui.warn(_("%s: files over 10MB may cause memory and"
1126 self.ui.warn(_("%s: files over 10MB may cause memory and"
1127 " performance problems\n"
1127 " performance problems\n"
1128 "(use 'hg revert %s' to unadd the file)\n")
1128 "(use 'hg revert %s' to unadd the file)\n")
1129 % (f, f))
1129 % (f, f))
1130 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1130 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1131 self.ui.warn(_("%s not added: only files and symlinks "
1131 self.ui.warn(_("%s not added: only files and symlinks "
1132 "supported currently\n") % f)
1132 "supported currently\n") % f)
1133 rejected.append(p)
1133 rejected.append(p)
1134 elif self.dirstate[f] in 'amn':
1134 elif self.dirstate[f] in 'amn':
1135 self.ui.warn(_("%s already tracked!\n") % f)
1135 self.ui.warn(_("%s already tracked!\n") % f)
1136 elif self.dirstate[f] == 'r':
1136 elif self.dirstate[f] == 'r':
1137 self.dirstate.normallookup(f)
1137 self.dirstate.normallookup(f)
1138 else:
1138 else:
1139 self.dirstate.add(f)
1139 self.dirstate.add(f)
1140 return rejected
1140 return rejected
1141 finally:
1141 finally:
1142 del wlock
1142 del wlock
1143
1143
1144 def forget(self, list):
1144 def forget(self, list):
1145 wlock = self.wlock()
1145 wlock = self.wlock()
1146 try:
1146 try:
1147 for f in list:
1147 for f in list:
1148 if self.dirstate[f] != 'a':
1148 if self.dirstate[f] != 'a':
1149 self.ui.warn(_("%s not added!\n") % f)
1149 self.ui.warn(_("%s not added!\n") % f)
1150 else:
1150 else:
1151 self.dirstate.forget(f)
1151 self.dirstate.forget(f)
1152 finally:
1152 finally:
1153 del wlock
1153 del wlock
1154
1154
1155 def remove(self, list, unlink=False):
1155 def remove(self, list, unlink=False):
1156 wlock = None
1156 wlock = None
1157 try:
1157 try:
1158 if unlink:
1158 if unlink:
1159 for f in list:
1159 for f in list:
1160 try:
1160 try:
1161 util.unlink(self.wjoin(f))
1161 util.unlink(self.wjoin(f))
1162 except OSError, inst:
1162 except OSError, inst:
1163 if inst.errno != errno.ENOENT:
1163 if inst.errno != errno.ENOENT:
1164 raise
1164 raise
1165 wlock = self.wlock()
1165 wlock = self.wlock()
1166 for f in list:
1166 for f in list:
1167 if unlink and os.path.exists(self.wjoin(f)):
1167 if unlink and os.path.exists(self.wjoin(f)):
1168 self.ui.warn(_("%s still exists!\n") % f)
1168 self.ui.warn(_("%s still exists!\n") % f)
1169 elif self.dirstate[f] == 'a':
1169 elif self.dirstate[f] == 'a':
1170 self.dirstate.forget(f)
1170 self.dirstate.forget(f)
1171 elif f not in self.dirstate:
1171 elif f not in self.dirstate:
1172 self.ui.warn(_("%s not tracked!\n") % f)
1172 self.ui.warn(_("%s not tracked!\n") % f)
1173 else:
1173 else:
1174 self.dirstate.remove(f)
1174 self.dirstate.remove(f)
1175 finally:
1175 finally:
1176 del wlock
1176 del wlock
1177
1177
1178 def undelete(self, list):
1178 def undelete(self, list):
1179 wlock = None
1179 wlock = None
1180 try:
1180 try:
1181 manifests = [self.manifest.read(self.changelog.read(p)[0])
1181 manifests = [self.manifest.read(self.changelog.read(p)[0])
1182 for p in self.dirstate.parents() if p != nullid]
1182 for p in self.dirstate.parents() if p != nullid]
1183 wlock = self.wlock()
1183 wlock = self.wlock()
1184 for f in list:
1184 for f in list:
1185 if self.dirstate[f] != 'r':
1185 if self.dirstate[f] != 'r':
1186 self.ui.warn("%s not removed!\n" % f)
1186 self.ui.warn("%s not removed!\n" % f)
1187 else:
1187 else:
1188 m = f in manifests[0] and manifests[0] or manifests[1]
1188 m = f in manifests[0] and manifests[0] or manifests[1]
1189 t = self.file(f).read(m[f])
1189 t = self.file(f).read(m[f])
1190 self.wwrite(f, t, m.flags(f))
1190 self.wwrite(f, t, m.flags(f))
1191 self.dirstate.normal(f)
1191 self.dirstate.normal(f)
1192 finally:
1192 finally:
1193 del wlock
1193 del wlock
1194
1194
1195 def copy(self, source, dest):
1195 def copy(self, source, dest):
1196 wlock = None
1196 wlock = None
1197 try:
1197 try:
1198 p = self.wjoin(dest)
1198 p = self.wjoin(dest)
1199 if not (os.path.exists(p) or os.path.islink(p)):
1199 if not (os.path.exists(p) or os.path.islink(p)):
1200 self.ui.warn(_("%s does not exist!\n") % dest)
1200 self.ui.warn(_("%s does not exist!\n") % dest)
1201 elif not (os.path.isfile(p) or os.path.islink(p)):
1201 elif not (os.path.isfile(p) or os.path.islink(p)):
1202 self.ui.warn(_("copy failed: %s is not a file or a "
1202 self.ui.warn(_("copy failed: %s is not a file or a "
1203 "symbolic link\n") % dest)
1203 "symbolic link\n") % dest)
1204 else:
1204 else:
1205 wlock = self.wlock()
1205 wlock = self.wlock()
1206 if dest not in self.dirstate:
1206 if dest not in self.dirstate:
1207 self.dirstate.add(dest)
1207 self.dirstate.add(dest)
1208 self.dirstate.copy(source, dest)
1208 self.dirstate.copy(source, dest)
1209 finally:
1209 finally:
1210 del wlock
1210 del wlock
1211
1211
1212 def heads(self, start=None):
1212 def heads(self, start=None):
1213 heads = self.changelog.heads(start)
1213 heads = self.changelog.heads(start)
1214 # sort the output in rev descending order
1214 # sort the output in rev descending order
1215 heads = [(-self.changelog.rev(h), h) for h in heads]
1215 heads = [(-self.changelog.rev(h), h) for h in heads]
1216 heads.sort()
1216 heads.sort()
1217 return [n for (r, n) in heads]
1217 return [n for (r, n) in heads]
1218
1218
1219 def branchheads(self, branch, start=None):
1219 def branchheads(self, branch, start=None):
1220 branches = self.branchtags()
1220 branches = self.branchtags()
1221 if branch not in branches:
1221 if branch not in branches:
1222 return []
1222 return []
1223 # The basic algorithm is this:
1223 # The basic algorithm is this:
1224 #
1224 #
1225 # Start from the branch tip since there are no later revisions that can
1225 # Start from the branch tip since there are no later revisions that can
1226 # possibly be in this branch, and the tip is a guaranteed head.
1226 # possibly be in this branch, and the tip is a guaranteed head.
1227 #
1227 #
1228 # Remember the tip's parents as the first ancestors, since these by
1228 # Remember the tip's parents as the first ancestors, since these by
1229 # definition are not heads.
1229 # definition are not heads.
1230 #
1230 #
1231 # Step backwards from the brach tip through all the revisions. We are
1231 # Step backwards from the brach tip through all the revisions. We are
1232 # guaranteed by the rules of Mercurial that we will now be visiting the
1232 # guaranteed by the rules of Mercurial that we will now be visiting the
1233 # nodes in reverse topological order (children before parents).
1233 # nodes in reverse topological order (children before parents).
1234 #
1234 #
1235 # If a revision is one of the ancestors of a head then we can toss it
1235 # If a revision is one of the ancestors of a head then we can toss it
1236 # out of the ancestors set (we've already found it and won't be
1236 # out of the ancestors set (we've already found it and won't be
1237 # visiting it again) and put its parents in the ancestors set.
1237 # visiting it again) and put its parents in the ancestors set.
1238 #
1238 #
1239 # Otherwise, if a revision is in the branch it's another head, since it
1239 # Otherwise, if a revision is in the branch it's another head, since it
1240 # wasn't in the ancestor list of an existing head. So add it to the
1240 # wasn't in the ancestor list of an existing head. So add it to the
1241 # head list, and add its parents to the ancestor list.
1241 # head list, and add its parents to the ancestor list.
1242 #
1242 #
1243 # If it is not in the branch ignore it.
1243 # If it is not in the branch ignore it.
1244 #
1244 #
1245 # Once we have a list of heads, use nodesbetween to filter out all the
1245 # Once we have a list of heads, use nodesbetween to filter out all the
1246 # heads that cannot be reached from startrev. There may be a more
1246 # heads that cannot be reached from startrev. There may be a more
1247 # efficient way to do this as part of the previous algorithm.
1247 # efficient way to do this as part of the previous algorithm.
1248
1248
1249 set = util.set
1249 set = util.set
1250 heads = [self.changelog.rev(branches[branch])]
1250 heads = [self.changelog.rev(branches[branch])]
1251 # Don't care if ancestors contains nullrev or not.
1251 # Don't care if ancestors contains nullrev or not.
1252 ancestors = set(self.changelog.parentrevs(heads[0]))
1252 ancestors = set(self.changelog.parentrevs(heads[0]))
1253 for rev in xrange(heads[0] - 1, nullrev, -1):
1253 for rev in xrange(heads[0] - 1, nullrev, -1):
1254 if rev in ancestors:
1254 if rev in ancestors:
1255 ancestors.update(self.changelog.parentrevs(rev))
1255 ancestors.update(self.changelog.parentrevs(rev))
1256 ancestors.remove(rev)
1256 ancestors.remove(rev)
1257 elif self.changectx(rev).branch() == branch:
1257 elif self.changectx(rev).branch() == branch:
1258 heads.append(rev)
1258 heads.append(rev)
1259 ancestors.update(self.changelog.parentrevs(rev))
1259 ancestors.update(self.changelog.parentrevs(rev))
1260 heads = [self.changelog.node(rev) for rev in heads]
1260 heads = [self.changelog.node(rev) for rev in heads]
1261 if start is not None:
1261 if start is not None:
1262 heads = self.changelog.nodesbetween([start], heads)[2]
1262 heads = self.changelog.nodesbetween([start], heads)[2]
1263 return heads
1263 return heads
1264
1264
1265 def branches(self, nodes):
1265 def branches(self, nodes):
1266 if not nodes:
1266 if not nodes:
1267 nodes = [self.changelog.tip()]
1267 nodes = [self.changelog.tip()]
1268 b = []
1268 b = []
1269 for n in nodes:
1269 for n in nodes:
1270 t = n
1270 t = n
1271 while 1:
1271 while 1:
1272 p = self.changelog.parents(n)
1272 p = self.changelog.parents(n)
1273 if p[1] != nullid or p[0] == nullid:
1273 if p[1] != nullid or p[0] == nullid:
1274 b.append((t, n, p[0], p[1]))
1274 b.append((t, n, p[0], p[1]))
1275 break
1275 break
1276 n = p[0]
1276 n = p[0]
1277 return b
1277 return b
1278
1278
1279 def between(self, pairs):
1279 def between(self, pairs):
1280 r = []
1280 r = []
1281
1281
1282 for top, bottom in pairs:
1282 for top, bottom in pairs:
1283 n, l, i = top, [], 0
1283 n, l, i = top, [], 0
1284 f = 1
1284 f = 1
1285
1285
1286 while n != bottom:
1286 while n != bottom:
1287 p = self.changelog.parents(n)[0]
1287 p = self.changelog.parents(n)[0]
1288 if i == f:
1288 if i == f:
1289 l.append(n)
1289 l.append(n)
1290 f = f * 2
1290 f = f * 2
1291 n = p
1291 n = p
1292 i += 1
1292 i += 1
1293
1293
1294 r.append(l)
1294 r.append(l)
1295
1295
1296 return r
1296 return r
1297
1297
1298 def findincoming(self, remote, base=None, heads=None, force=False):
1298 def findincoming(self, remote, base=None, heads=None, force=False):
1299 """Return list of roots of the subsets of missing nodes from remote
1299 """Return list of roots of the subsets of missing nodes from remote
1300
1300
1301 If base dict is specified, assume that these nodes and their parents
1301 If base dict is specified, assume that these nodes and their parents
1302 exist on the remote side and that no child of a node of base exists
1302 exist on the remote side and that no child of a node of base exists
1303 in both remote and self.
1303 in both remote and self.
1304 Furthermore base will be updated to include the nodes that exists
1304 Furthermore base will be updated to include the nodes that exists
1305 in self and remote but no children exists in self and remote.
1305 in self and remote but no children exists in self and remote.
1306 If a list of heads is specified, return only nodes which are heads
1306 If a list of heads is specified, return only nodes which are heads
1307 or ancestors of these heads.
1307 or ancestors of these heads.
1308
1308
1309 All the ancestors of base are in self and in remote.
1309 All the ancestors of base are in self and in remote.
1310 All the descendants of the list returned are missing in self.
1310 All the descendants of the list returned are missing in self.
1311 (and so we know that the rest of the nodes are missing in remote, see
1311 (and so we know that the rest of the nodes are missing in remote, see
1312 outgoing)
1312 outgoing)
1313 """
1313 """
1314 m = self.changelog.nodemap
1314 m = self.changelog.nodemap
1315 search = []
1315 search = []
1316 fetch = {}
1316 fetch = {}
1317 seen = {}
1317 seen = {}
1318 seenbranch = {}
1318 seenbranch = {}
1319 if base == None:
1319 if base == None:
1320 base = {}
1320 base = {}
1321
1321
1322 if not heads:
1322 if not heads:
1323 heads = remote.heads()
1323 heads = remote.heads()
1324
1324
1325 if self.changelog.tip() == nullid:
1325 if self.changelog.tip() == nullid:
1326 base[nullid] = 1
1326 base[nullid] = 1
1327 if heads != [nullid]:
1327 if heads != [nullid]:
1328 return [nullid]
1328 return [nullid]
1329 return []
1329 return []
1330
1330
1331 # assume we're closer to the tip than the root
1331 # assume we're closer to the tip than the root
1332 # and start by examining the heads
1332 # and start by examining the heads
1333 self.ui.status(_("searching for changes\n"))
1333 self.ui.status(_("searching for changes\n"))
1334
1334
1335 unknown = []
1335 unknown = []
1336 for h in heads:
1336 for h in heads:
1337 if h not in m:
1337 if h not in m:
1338 unknown.append(h)
1338 unknown.append(h)
1339 else:
1339 else:
1340 base[h] = 1
1340 base[h] = 1
1341
1341
1342 if not unknown:
1342 if not unknown:
1343 return []
1343 return []
1344
1344
1345 req = dict.fromkeys(unknown)
1345 req = dict.fromkeys(unknown)
1346 reqcnt = 0
1346 reqcnt = 0
1347
1347
1348 # search through remote branches
1348 # search through remote branches
1349 # a 'branch' here is a linear segment of history, with four parts:
1349 # a 'branch' here is a linear segment of history, with four parts:
1350 # head, root, first parent, second parent
1350 # head, root, first parent, second parent
1351 # (a branch always has two parents (or none) by definition)
1351 # (a branch always has two parents (or none) by definition)
1352 unknown = remote.branches(unknown)
1352 unknown = remote.branches(unknown)
1353 while unknown:
1353 while unknown:
1354 r = []
1354 r = []
1355 while unknown:
1355 while unknown:
1356 n = unknown.pop(0)
1356 n = unknown.pop(0)
1357 if n[0] in seen:
1357 if n[0] in seen:
1358 continue
1358 continue
1359
1359
1360 self.ui.debug(_("examining %s:%s\n")
1360 self.ui.debug(_("examining %s:%s\n")
1361 % (short(n[0]), short(n[1])))
1361 % (short(n[0]), short(n[1])))
1362 if n[0] == nullid: # found the end of the branch
1362 if n[0] == nullid: # found the end of the branch
1363 pass
1363 pass
1364 elif n in seenbranch:
1364 elif n in seenbranch:
1365 self.ui.debug(_("branch already found\n"))
1365 self.ui.debug(_("branch already found\n"))
1366 continue
1366 continue
1367 elif n[1] and n[1] in m: # do we know the base?
1367 elif n[1] and n[1] in m: # do we know the base?
1368 self.ui.debug(_("found incomplete branch %s:%s\n")
1368 self.ui.debug(_("found incomplete branch %s:%s\n")
1369 % (short(n[0]), short(n[1])))
1369 % (short(n[0]), short(n[1])))
1370 search.append(n) # schedule branch range for scanning
1370 search.append(n) # schedule branch range for scanning
1371 seenbranch[n] = 1
1371 seenbranch[n] = 1
1372 else:
1372 else:
1373 if n[1] not in seen and n[1] not in fetch:
1373 if n[1] not in seen and n[1] not in fetch:
1374 if n[2] in m and n[3] in m:
1374 if n[2] in m and n[3] in m:
1375 self.ui.debug(_("found new changeset %s\n") %
1375 self.ui.debug(_("found new changeset %s\n") %
1376 short(n[1]))
1376 short(n[1]))
1377 fetch[n[1]] = 1 # earliest unknown
1377 fetch[n[1]] = 1 # earliest unknown
1378 for p in n[2:4]:
1378 for p in n[2:4]:
1379 if p in m:
1379 if p in m:
1380 base[p] = 1 # latest known
1380 base[p] = 1 # latest known
1381
1381
1382 for p in n[2:4]:
1382 for p in n[2:4]:
1383 if p not in req and p not in m:
1383 if p not in req and p not in m:
1384 r.append(p)
1384 r.append(p)
1385 req[p] = 1
1385 req[p] = 1
1386 seen[n[0]] = 1
1386 seen[n[0]] = 1
1387
1387
1388 if r:
1388 if r:
1389 reqcnt += 1
1389 reqcnt += 1
1390 self.ui.debug(_("request %d: %s\n") %
1390 self.ui.debug(_("request %d: %s\n") %
1391 (reqcnt, " ".join(map(short, r))))
1391 (reqcnt, " ".join(map(short, r))))
1392 for p in xrange(0, len(r), 10):
1392 for p in xrange(0, len(r), 10):
1393 for b in remote.branches(r[p:p+10]):
1393 for b in remote.branches(r[p:p+10]):
1394 self.ui.debug(_("received %s:%s\n") %
1394 self.ui.debug(_("received %s:%s\n") %
1395 (short(b[0]), short(b[1])))
1395 (short(b[0]), short(b[1])))
1396 unknown.append(b)
1396 unknown.append(b)
1397
1397
1398 # do binary search on the branches we found
1398 # do binary search on the branches we found
1399 while search:
1399 while search:
1400 n = search.pop(0)
1400 n = search.pop(0)
1401 reqcnt += 1
1401 reqcnt += 1
1402 l = remote.between([(n[0], n[1])])[0]
1402 l = remote.between([(n[0], n[1])])[0]
1403 l.append(n[1])
1403 l.append(n[1])
1404 p = n[0]
1404 p = n[0]
1405 f = 1
1405 f = 1
1406 for i in l:
1406 for i in l:
1407 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1407 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1408 if i in m:
1408 if i in m:
1409 if f <= 2:
1409 if f <= 2:
1410 self.ui.debug(_("found new branch changeset %s\n") %
1410 self.ui.debug(_("found new branch changeset %s\n") %
1411 short(p))
1411 short(p))
1412 fetch[p] = 1
1412 fetch[p] = 1
1413 base[i] = 1
1413 base[i] = 1
1414 else:
1414 else:
1415 self.ui.debug(_("narrowed branch search to %s:%s\n")
1415 self.ui.debug(_("narrowed branch search to %s:%s\n")
1416 % (short(p), short(i)))
1416 % (short(p), short(i)))
1417 search.append((p, i))
1417 search.append((p, i))
1418 break
1418 break
1419 p, f = i, f * 2
1419 p, f = i, f * 2
1420
1420
1421 # sanity check our fetch list
1421 # sanity check our fetch list
1422 for f in fetch.keys():
1422 for f in fetch.keys():
1423 if f in m:
1423 if f in m:
1424 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1424 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1425
1425
1426 if base.keys() == [nullid]:
1426 if base.keys() == [nullid]:
1427 if force:
1427 if force:
1428 self.ui.warn(_("warning: repository is unrelated\n"))
1428 self.ui.warn(_("warning: repository is unrelated\n"))
1429 else:
1429 else:
1430 raise util.Abort(_("repository is unrelated"))
1430 raise util.Abort(_("repository is unrelated"))
1431
1431
1432 self.ui.debug(_("found new changesets starting at ") +
1432 self.ui.debug(_("found new changesets starting at ") +
1433 " ".join([short(f) for f in fetch]) + "\n")
1433 " ".join([short(f) for f in fetch]) + "\n")
1434
1434
1435 self.ui.debug(_("%d total queries\n") % reqcnt)
1435 self.ui.debug(_("%d total queries\n") % reqcnt)
1436
1436
1437 return fetch.keys()
1437 return fetch.keys()
1438
1438
1439 def findoutgoing(self, remote, base=None, heads=None, force=False):
1439 def findoutgoing(self, remote, base=None, heads=None, force=False):
1440 """Return list of nodes that are roots of subsets not in remote
1440 """Return list of nodes that are roots of subsets not in remote
1441
1441
1442 If base dict is specified, assume that these nodes and their parents
1442 If base dict is specified, assume that these nodes and their parents
1443 exist on the remote side.
1443 exist on the remote side.
1444 If a list of heads is specified, return only nodes which are heads
1444 If a list of heads is specified, return only nodes which are heads
1445 or ancestors of these heads, and return a second element which
1445 or ancestors of these heads, and return a second element which
1446 contains all remote heads which get new children.
1446 contains all remote heads which get new children.
1447 """
1447 """
1448 if base == None:
1448 if base == None:
1449 base = {}
1449 base = {}
1450 self.findincoming(remote, base, heads, force=force)
1450 self.findincoming(remote, base, heads, force=force)
1451
1451
1452 self.ui.debug(_("common changesets up to ")
1452 self.ui.debug(_("common changesets up to ")
1453 + " ".join(map(short, base.keys())) + "\n")
1453 + " ".join(map(short, base.keys())) + "\n")
1454
1454
1455 remain = dict.fromkeys(self.changelog.nodemap)
1455 remain = dict.fromkeys(self.changelog.nodemap)
1456
1456
1457 # prune everything remote has from the tree
1457 # prune everything remote has from the tree
1458 del remain[nullid]
1458 del remain[nullid]
1459 remove = base.keys()
1459 remove = base.keys()
1460 while remove:
1460 while remove:
1461 n = remove.pop(0)
1461 n = remove.pop(0)
1462 if n in remain:
1462 if n in remain:
1463 del remain[n]
1463 del remain[n]
1464 for p in self.changelog.parents(n):
1464 for p in self.changelog.parents(n):
1465 remove.append(p)
1465 remove.append(p)
1466
1466
1467 # find every node whose parents have been pruned
1467 # find every node whose parents have been pruned
1468 subset = []
1468 subset = []
1469 # find every remote head that will get new children
1469 # find every remote head that will get new children
1470 updated_heads = {}
1470 updated_heads = {}
1471 for n in remain:
1471 for n in remain:
1472 p1, p2 = self.changelog.parents(n)
1472 p1, p2 = self.changelog.parents(n)
1473 if p1 not in remain and p2 not in remain:
1473 if p1 not in remain and p2 not in remain:
1474 subset.append(n)
1474 subset.append(n)
1475 if heads:
1475 if heads:
1476 if p1 in heads:
1476 if p1 in heads:
1477 updated_heads[p1] = True
1477 updated_heads[p1] = True
1478 if p2 in heads:
1478 if p2 in heads:
1479 updated_heads[p2] = True
1479 updated_heads[p2] = True
1480
1480
1481 # this is the set of all roots we have to push
1481 # this is the set of all roots we have to push
1482 if heads:
1482 if heads:
1483 return subset, updated_heads.keys()
1483 return subset, updated_heads.keys()
1484 else:
1484 else:
1485 return subset
1485 return subset
1486
1486
1487 def pull(self, remote, heads=None, force=False):
1487 def pull(self, remote, heads=None, force=False):
1488 lock = self.lock()
1488 lock = self.lock()
1489 try:
1489 try:
1490 fetch = self.findincoming(remote, heads=heads, force=force)
1490 fetch = self.findincoming(remote, heads=heads, force=force)
1491 if fetch == [nullid]:
1491 if fetch == [nullid]:
1492 self.ui.status(_("requesting all changes\n"))
1492 self.ui.status(_("requesting all changes\n"))
1493
1493
1494 if not fetch:
1494 if not fetch:
1495 self.ui.status(_("no changes found\n"))
1495 self.ui.status(_("no changes found\n"))
1496 return 0
1496 return 0
1497
1497
1498 if heads is None:
1498 if heads is None:
1499 cg = remote.changegroup(fetch, 'pull')
1499 cg = remote.changegroup(fetch, 'pull')
1500 else:
1500 else:
1501 if 'changegroupsubset' not in remote.capabilities:
1501 if 'changegroupsubset' not in remote.capabilities:
1502 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1502 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1503 cg = remote.changegroupsubset(fetch, heads, 'pull')
1503 cg = remote.changegroupsubset(fetch, heads, 'pull')
1504 return self.addchangegroup(cg, 'pull', remote.url())
1504 return self.addchangegroup(cg, 'pull', remote.url())
1505 finally:
1505 finally:
1506 del lock
1506 del lock
1507
1507
1508 def push(self, remote, force=False, revs=None):
1508 def push(self, remote, force=False, revs=None):
1509 # there are two ways to push to remote repo:
1509 # there are two ways to push to remote repo:
1510 #
1510 #
1511 # addchangegroup assumes local user can lock remote
1511 # addchangegroup assumes local user can lock remote
1512 # repo (local filesystem, old ssh servers).
1512 # repo (local filesystem, old ssh servers).
1513 #
1513 #
1514 # unbundle assumes local user cannot lock remote repo (new ssh
1514 # unbundle assumes local user cannot lock remote repo (new ssh
1515 # servers, http servers).
1515 # servers, http servers).
1516
1516
1517 if remote.capable('unbundle'):
1517 if remote.capable('unbundle'):
1518 return self.push_unbundle(remote, force, revs)
1518 return self.push_unbundle(remote, force, revs)
1519 return self.push_addchangegroup(remote, force, revs)
1519 return self.push_addchangegroup(remote, force, revs)
1520
1520
1521 def prepush(self, remote, force, revs):
1521 def prepush(self, remote, force, revs):
1522 base = {}
1522 base = {}
1523 remote_heads = remote.heads()
1523 remote_heads = remote.heads()
1524 inc = self.findincoming(remote, base, remote_heads, force=force)
1524 inc = self.findincoming(remote, base, remote_heads, force=force)
1525
1525
1526 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1526 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1527 if revs is not None:
1527 if revs is not None:
1528 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1528 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1529 else:
1529 else:
1530 bases, heads = update, self.changelog.heads()
1530 bases, heads = update, self.changelog.heads()
1531
1531
1532 if not bases:
1532 if not bases:
1533 self.ui.status(_("no changes found\n"))
1533 self.ui.status(_("no changes found\n"))
1534 return None, 1
1534 return None, 1
1535 elif not force:
1535 elif not force:
1536 # check if we're creating new remote heads
1536 # check if we're creating new remote heads
1537 # to be a remote head after push, node must be either
1537 # to be a remote head after push, node must be either
1538 # - unknown locally
1538 # - unknown locally
1539 # - a local outgoing head descended from update
1539 # - a local outgoing head descended from update
1540 # - a remote head that's known locally and not
1540 # - a remote head that's known locally and not
1541 # ancestral to an outgoing head
1541 # ancestral to an outgoing head
1542
1542
1543 warn = 0
1543 warn = 0
1544
1544
1545 if remote_heads == [nullid]:
1545 if remote_heads == [nullid]:
1546 warn = 0
1546 warn = 0
1547 elif not revs and len(heads) > len(remote_heads):
1547 elif not revs and len(heads) > len(remote_heads):
1548 warn = 1
1548 warn = 1
1549 else:
1549 else:
1550 newheads = list(heads)
1550 newheads = list(heads)
1551 for r in remote_heads:
1551 for r in remote_heads:
1552 if r in self.changelog.nodemap:
1552 if r in self.changelog.nodemap:
1553 desc = self.changelog.heads(r, heads)
1553 desc = self.changelog.heads(r, heads)
1554 l = [h for h in heads if h in desc]
1554 l = [h for h in heads if h in desc]
1555 if not l:
1555 if not l:
1556 newheads.append(r)
1556 newheads.append(r)
1557 else:
1557 else:
1558 newheads.append(r)
1558 newheads.append(r)
1559 if len(newheads) > len(remote_heads):
1559 if len(newheads) > len(remote_heads):
1560 warn = 1
1560 warn = 1
1561
1561
1562 if warn:
1562 if warn:
1563 self.ui.warn(_("abort: push creates new remote heads!\n"))
1563 self.ui.warn(_("abort: push creates new remote heads!\n"))
1564 self.ui.status(_("(did you forget to merge?"
1564 self.ui.status(_("(did you forget to merge?"
1565 " use push -f to force)\n"))
1565 " use push -f to force)\n"))
1566 return None, 0
1566 return None, 0
1567 elif inc:
1567 elif inc:
1568 self.ui.warn(_("note: unsynced remote changes!\n"))
1568 self.ui.warn(_("note: unsynced remote changes!\n"))
1569
1569
1570
1570
1571 if revs is None:
1571 if revs is None:
1572 cg = self.changegroup(update, 'push')
1572 cg = self.changegroup(update, 'push')
1573 else:
1573 else:
1574 cg = self.changegroupsubset(update, revs, 'push')
1574 cg = self.changegroupsubset(update, revs, 'push')
1575 return cg, remote_heads
1575 return cg, remote_heads
1576
1576
1577 def push_addchangegroup(self, remote, force, revs):
1577 def push_addchangegroup(self, remote, force, revs):
1578 lock = remote.lock()
1578 lock = remote.lock()
1579 try:
1579 try:
1580 ret = self.prepush(remote, force, revs)
1580 ret = self.prepush(remote, force, revs)
1581 if ret[0] is not None:
1581 if ret[0] is not None:
1582 cg, remote_heads = ret
1582 cg, remote_heads = ret
1583 return remote.addchangegroup(cg, 'push', self.url())
1583 return remote.addchangegroup(cg, 'push', self.url())
1584 return ret[1]
1584 return ret[1]
1585 finally:
1585 finally:
1586 del lock
1586 del lock
1587
1587
1588 def push_unbundle(self, remote, force, revs):
1588 def push_unbundle(self, remote, force, revs):
1589 # local repo finds heads on server, finds out what revs it
1589 # local repo finds heads on server, finds out what revs it
1590 # must push. once revs transferred, if server finds it has
1590 # must push. once revs transferred, if server finds it has
1591 # different heads (someone else won commit/push race), server
1591 # different heads (someone else won commit/push race), server
1592 # aborts.
1592 # aborts.
1593
1593
1594 ret = self.prepush(remote, force, revs)
1594 ret = self.prepush(remote, force, revs)
1595 if ret[0] is not None:
1595 if ret[0] is not None:
1596 cg, remote_heads = ret
1596 cg, remote_heads = ret
1597 if force: remote_heads = ['force']
1597 if force: remote_heads = ['force']
1598 return remote.unbundle(cg, remote_heads, 'push')
1598 return remote.unbundle(cg, remote_heads, 'push')
1599 return ret[1]
1599 return ret[1]
1600
1600
1601 def changegroupinfo(self, nodes, source):
1601 def changegroupinfo(self, nodes, source):
1602 if self.ui.verbose or source == 'bundle':
1602 if self.ui.verbose or source == 'bundle':
1603 self.ui.status(_("%d changesets found\n") % len(nodes))
1603 self.ui.status(_("%d changesets found\n") % len(nodes))
1604 if self.ui.debugflag:
1604 if self.ui.debugflag:
1605 self.ui.debug(_("List of changesets:\n"))
1605 self.ui.debug(_("List of changesets:\n"))
1606 for node in nodes:
1606 for node in nodes:
1607 self.ui.debug("%s\n" % hex(node))
1607 self.ui.debug("%s\n" % hex(node))
1608
1608
    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.

        The result is returned as a util.chunkbuffer wrapping the gengroup()
        generator defined below; 'source' is only used for hooks and
        progress reporting.
        """

        # Fire the pre-generation hook; throw=True aborts on hook failure.
        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            # next_rev is a one-element list so the closure can mutate it
            # (Python 2 has no 'nonlocal').
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    # Key 1 is the manifest (already handled above); skip it.
                    if isinstance(fname, int):
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1905
1905
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        Returns a util.chunkbuffer wrapping the gengroup() generator below;
        'source' is passed to the preoutgoing/outgoing hooks and to
        changegroupinfo for progress reporting.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # Everything descending from basenodes is outgoing.
        nodes = cl.nodesbetween(basenodes, None)[0]
        # Ersatz set of outgoing changelog revision numbers, used by
        # gennodelst to filter manifest/file revisions via their linkrev.
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        # A changenode's linkrev lookup is itself, so identity suffices.
        def identity(x):
            return x

        # Yield, in revision order, the nodes of 'revlog' whose linkrev
        # points at one of the outgoing changesets.
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # Closure factory: records, into changedfileset, every file touched
        # by each outgoing changeset as the changelog group is generated.
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # Closure factory: maps a node of 'revlog' to the changelog node it
        # is linked to (its owning changeset).
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                # Materialize so we can test emptiness before emitting the
                # filename header.
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1975
1975
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        'source' is a stream of changegroup chunks; 'srctype' and 'url'
        describe where it came from and are passed to the hooks.  With
        emptyok=True, an empty changelog group is not treated as an error.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # Progress callback for the changelog: report each changeset and
        # return the rev number it will be assigned.
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        # Map a changelog node to its revision number (linkrev for the
        # manifest/file groups).
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            # A weak proxy lets the revlogs use the transaction without
            # keeping it alive past the 'del tr' below.
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # An empty chunk marks the end of the list of file groups.
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                # pretxnchangegroup may veto the whole transaction
                # (throw=True) before we commit it below.
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            # NOTE(review): dropping the last strong reference here appears
            # to rely on the transaction object cleaning itself up (rolling
            # back if close() was never reached) — confirm against the
            # transaction module.
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # One 'incoming' hook call per added changeset.
            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
2078
2078
2079
2079
2080 def stream_in(self, remote):
2080 def stream_in(self, remote):
2081 fp = remote.stream_out()
2081 fp = remote.stream_out()
2082 l = fp.readline()
2082 l = fp.readline()
2083 try:
2083 try:
2084 resp = int(l)
2084 resp = int(l)
2085 except ValueError:
2085 except ValueError:
2086 raise util.UnexpectedOutput(
2086 raise util.UnexpectedOutput(
2087 _('Unexpected response from remote server:'), l)
2087 _('Unexpected response from remote server:'), l)
2088 if resp == 1:
2088 if resp == 1:
2089 raise util.Abort(_('operation forbidden by server'))
2089 raise util.Abort(_('operation forbidden by server'))
2090 elif resp == 2:
2090 elif resp == 2:
2091 raise util.Abort(_('locking the remote repository failed'))
2091 raise util.Abort(_('locking the remote repository failed'))
2092 elif resp != 0:
2092 elif resp != 0:
2093 raise util.Abort(_('the server sent an unknown error code'))
2093 raise util.Abort(_('the server sent an unknown error code'))
2094 self.ui.status(_('streaming all changes\n'))
2094 self.ui.status(_('streaming all changes\n'))
2095 l = fp.readline()
2095 l = fp.readline()
2096 try:
2096 try:
2097 total_files, total_bytes = map(int, l.split(' ', 1))
2097 total_files, total_bytes = map(int, l.split(' ', 1))
2098 except (ValueError, TypeError):
2098 except (ValueError, TypeError):
2099 raise util.UnexpectedOutput(
2099 raise util.UnexpectedOutput(
2100 _('Unexpected response from remote server:'), l)
2100 _('Unexpected response from remote server:'), l)
2101 self.ui.status(_('%d files to transfer, %s of data\n') %
2101 self.ui.status(_('%d files to transfer, %s of data\n') %
2102 (total_files, util.bytecount(total_bytes)))
2102 (total_files, util.bytecount(total_bytes)))
2103 start = time.time()
2103 start = time.time()
2104 for i in xrange(total_files):
2104 for i in xrange(total_files):
2105 # XXX doesn't support '\n' or '\r' in filenames
2105 # XXX doesn't support '\n' or '\r' in filenames
2106 l = fp.readline()
2106 l = fp.readline()
2107 try:
2107 try:
2108 name, size = l.split('\0', 1)
2108 name, size = l.split('\0', 1)
2109 size = int(size)
2109 size = int(size)
2110 except ValueError, TypeError:
2110 except (ValueError, TypeError):
2111 raise util.UnexpectedOutput(
2111 raise util.UnexpectedOutput(
2112 _('Unexpected response from remote server:'), l)
2112 _('Unexpected response from remote server:'), l)
2113 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2113 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2114 ofp = self.sopener(name, 'w')
2114 ofp = self.sopener(name, 'w')
2115 for chunk in util.filechunkiter(fp, limit=size):
2115 for chunk in util.filechunkiter(fp, limit=size):
2116 ofp.write(chunk)
2116 ofp.write(chunk)
2117 ofp.close()
2117 ofp.close()
2118 elapsed = time.time() - start
2118 elapsed = time.time() - start
2119 if elapsed <= 0:
2119 if elapsed <= 0:
2120 elapsed = 0.001
2120 elapsed = 0.001
2121 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2121 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2122 (util.bytecount(total_bytes), elapsed,
2122 (util.bytecount(total_bytes), elapsed,
2123 util.bytecount(total_bytes / elapsed)))
2123 util.bytecount(total_bytes / elapsed)))
2124 self.invalidate()
2124 self.invalidate()
2125 return len(self.heads()) + 1
2125 return len(self.heads()) + 1
2126
2126
2127 def clone(self, remote, heads=[], stream=False):
2127 def clone(self, remote, heads=[], stream=False):
2128 '''clone remote repository.
2128 '''clone remote repository.
2129
2129
2130 keyword arguments:
2130 keyword arguments:
2131 heads: list of revs to clone (forces use of pull)
2131 heads: list of revs to clone (forces use of pull)
2132 stream: use streaming clone if possible'''
2132 stream: use streaming clone if possible'''
2133
2133
2134 # now, all clients that can request uncompressed clones can
2134 # now, all clients that can request uncompressed clones can
2135 # read repo formats supported by all servers that can serve
2135 # read repo formats supported by all servers that can serve
2136 # them.
2136 # them.
2137
2137
2138 # if revlog format changes, client will have to check version
2138 # if revlog format changes, client will have to check version
2139 # and format flags on "stream" capability, and use
2139 # and format flags on "stream" capability, and use
2140 # uncompressed only if compatible.
2140 # uncompressed only if compatible.
2141
2141
2142 if stream and not heads and remote.capable('stream'):
2142 if stream and not heads and remote.capable('stream'):
2143 return self.stream_in(remote)
2143 return self.stream_in(remote)
2144 return self.pull(remote, heads)
2144 return self.pull(remote, heads)
2145
2145
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (src, dest) pair in *files*.

    The pairs are copied into fresh tuples up front so the returned
    closure keeps no reference back to the caller's objects.
    """
    pending = [tuple(pair) for pair in files]
    def rename_all():
        for source, target in pending:
            util.rename(source, target)
    return rename_all
2153
2153
def instance(ui, path, create):
    """Open (or create, if *create* is true) the local repository at *path*.

    Strips an optional 'file' URL scheme before handing the path to
    localrepository.
    """
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2156
2156
def islocal(path):
    """Report that this repository type is local (always true here)."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now