fix error spotted by pychecker
Benoit Boissinot
changeset r6411:34c51857 (branch: default)

@@ -1,2138 +1,2138 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup
import changelog, dirstate, filelog, manifest, context, weakref
import lock, transaction, stat, errno, ui
import os, revlog, time, util, extensions, hook, inspect

class localrepository(repo.repository):
    capabilities = util.set(('lookup', 'changegroupsubset'))
    supported = ('revlogv1', 'store')

    def __init__(self, parentui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path

        try:
            # files in .hg/ will be created using this mode
            mode = os.stat(self.spath).st_mode
            # avoid some useless chmods
            if (0777 & ~util._umask) == (0777 & mode):
                mode = None
        except OSError:
            mode = None

        self._createmode = mode
        self.opener.createmode = mode
        sopener = util.opener(self.spath)
        sopener.createmode = mode
        self.sopener = util.encodedopener(sopener, self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

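    # The "requires" file written above is plain text, one requirement per
    # line; a repository created with the default configuration would hold
    # (a sketch of the on-disk format, not literal output):
    #
    #   revlogv1
    #   store
    #
    # Any entry not listed in `supported` makes __init__ raise RepoError,
    # so repositories in newer formats fail cleanly instead of being
    # misread by older code.
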
    def __getattr__(self, name):
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            # load the changelog first; it sets sopener.defversion,
            # which the manifest revlog relies on
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name

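    # __getattr__ is only invoked for attributes not yet present, so each
    # of changelog/manifest/dirstate is constructed once on first access
    # and cached by plain assignment. A minimal sketch of the same
    # lazy-attribute idiom (illustrative only; expensive_load is a
    # hypothetical loader, not part of this module):
    #
    #   class Lazy(object):
    #       def __getattr__(self, name):
    #           if name == 'data':
    #               self.data = expensive_load()
    #               return self.data
    #           raise AttributeError, name
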
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, parent=None,
             extra={}):
        use_dirstate = parent is None

        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetags(fp, names, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self._tag(names, node, message, local, user, date)

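    # A minimal usage sketch for tag(), assuming a repository opened
    # through the high-level API (the path and tag name are hypothetical):
    #
    #   from mercurial import hg, ui
    #   repo = hg.repository(ui.ui(), '/path/to/repo')
    #   node = repo.lookup('tip')
    #   repo.tag('v1.0', node, 'Added tag v1.0', False, None, None)
    #
    # With local=False this commits a new changeset touching .hgtags; with
    # local=True it only appends to .hg/localtags.
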
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supersedes us OR
                #  mutual supersedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k, nh in globaltags.items():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache

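    # Sketch of what tags() returns: a dict mapping tag name to binary
    # node, always including 'tip' (the hashes below are hypothetical):
    #
    #   >>> dict((t, hex(n)) for t, n in repo.tags().items())
    #   {'tip': '34c5185674ae...', 'v1.0': '0a9c3f1e52d2...'}
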
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagstypecache.get(tagname)

    def _hgtagsnodes(self):
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchtags(self):
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        self._ubranchcache = partial
        return self.branchcache

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

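    # On-disk layout of .hg/branch.cache as written above: the first line
    # is "<tip hex> <tip rev>", used for the sanity check in
    # _readbranchcache; each following line maps a branch head node to its
    # branch label (hashes below are hypothetical):
    #
    #   f7f2b4a9c3e1... 1042
    #   a1b2c3d4e5f6... default
    #   9f8e7d6c5b4a... stable
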
    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self.changectx(r)
            b = c.branch()
            partial[b] = c.node()

    def lookup(self, key):
        if key == '.':
            key, second = self.dirstate.parents()
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
            if second != nullid:
                self.ui.warn(_("warning: working directory has two parents, "
                               "tag '.' uses the first\n"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)

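    # lookup() tries, in order: the working directory parent ('.'), the
    # null revision, an exact changelog match, tags, branch names, then an
    # unambiguous hex prefix. A sketch (tag and prefix are hypothetical):
    #
    #   repo.lookup('.')        # first parent of the working directory
    #   repo.lookup('0')        # revision number
    #   repo.lookup('v1.0')     # tag
    #   repo.lookup('default')  # branch head
    #   repo.lookup('34c518')   # unambiguous node prefix
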
    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def sjoin(self, f):
        f = self.encodefn(f)
        return os.path.join(self.spath, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid=None):
        return context.changectx(self, changeid)

    def workingctx(self):
        return context.workingctx(self)

    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

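    # A sketch of wiring a custom data filter into _filter(): an extension
    # registers a callable under a name, and an hgrc pattern whose command
    # starts with that name routes matching files through it (the filter
    # name and hgrc entries below are hypothetical):
    #
    #   def upper(s, cmd, **kwargs):
    #       return s.upper()
    #   repo.adddatafilter('upper:', upper)
    #
    #   # hgrc:
    #   # [encode]
    #   # *.txt = upper:
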
    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        self.wopener(filename, 'w').write(data)
        util.set_flags(self.wjoin(filename), flags)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        if self._transref and self._transref():
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self._createmode)
        self._transref = weakref.ref(tr)
        return tr

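    # Typical transaction use, as commit() below does it: open, write
    # through the journaled opener, then close; dropping the last
    # reference without close() rolls back via the journal. A sketch:
    #
    #   tr = repo.transaction()
    #   try:
    #       # ... write revlog data ...
    #       tr.close()
    #   finally:
    #       del tr
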
    def recover(self):
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % util.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        if self._lockref and self._lockref():
            return self._lockref()

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

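    # Lock discipline used throughout this class: take wlock() (working
    # directory) before lock() (store), and release by deleting the last
    # reference, since both are handed out through weakrefs. A sketch,
    # mirroring rollback() above:
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       # ... mutate store and dirstate ...
    #   finally:
    #       del lock, wlock
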
    def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)

    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)

    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        wlock = lock = tr = None
        valid = 0 # don't save the dirstate if this isn't set
        if files:
            files = util.unique(files)
        try:
            wlock = self.wlock()
            lock = self.lock()
            commit = []
            remove = []
            changed = []
            use_dirstate = (p1 is None) # not rawcommit
            extra = extra.copy()

            if use_dirstate:
                if files:
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            commit.append(f)
                        elif s == 'r':
                            remove.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    changes = self.status(match=match)[:5]
                    modified, added, removed, deleted, unknown = changes
                    commit = modified + added
                    remove = removed
            else:
                commit = files

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True

                if (not force and p2 != nullid and
                    (files or match != util.always)):
                    raise util.Abort(_('cannot partially commit a merge '
                                       '(do not specify files or patterns)'))
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)

            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                branchname = self.workingctx().branch()
                try:
                    branchname = branchname.decode('UTF-8').encode('UTF-8')
                except UnicodeDecodeError:
                    raise util.Abort(_('branch name not in UTF-8!'))
            else:
                branchname = ""

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            linkrev = self.changelog.count()
            commit.sort()
            is_exec = util.execfunc(self.root, m1.execf)
            is_link = util.linkfunc(self.root, m1.linkf)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
                    new_exec = is_exec(f)
                    new_link = is_link(f)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        old_exec = m1.execf(f)
                        old_link = m1.linkf(f)
                        if old_exec != new_exec or old_link != new_link:
                            changed.append(f)
                    m1.set(f, new_exec, new_link)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            # update manifest
            m1.update(new)
            remove.sort()
            removed = []

            for f in remove:
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            new = new.keys()
            new.sort()

            user = user or self.ui.username()
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            if branchname:
                extra["branch"] = branchname

            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, date, extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache:
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
            if use_dirstate:
                for f in removed:
                    self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr, lock, wlock

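    # A minimal commit() call, roughly what the command layer does (the
    # file name, message, and user below are hypothetical):
    #
    #   repo.add(['hello.txt'])
    #   n = repo.commit(files=['hello.txt'], text='add hello',
    #                   user='test <test@example.com>')
    #   # n is the new changeset node, or None if nothing changed
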
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

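    # Iterating walk(): each result is (src, filename) with src as
    # documented in the docstring above ('f' from the tree, 'm'
    # dirstate-only, 'b' bad match). A sketch:
    #
    #   for src, fn in repo.walk():
    #       print src, fn
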
    def status(self, node1=None, node2=None, files=[], match=util.always,
               list_ignored=False, list_clean=False, list_unknown=True):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, getnode):
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean,
                                                    list_unknown)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    mexec = lambda f: 'x' in ctx.fileflags(f)
                    mlink = lambda f: 'l' in ctx.fileflags(f)
                    is_exec = util.execfunc(self.root, mexec)
                    is_link = util.linkfunc(self.root, mlink)
                    def flags(f):
                        return is_link(f) and 'l' or is_exec(f) and 'x' or ''
                    for f in lookup:
                        if (f not in ctx or flags(f) != ctx.fileflags(f)
                            or ctx[f].cmp(self.wread(f))):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

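    # A minimal sketch of consuming the seven result lists (hypothetical
    # repo object; the ordering matches the return statement above):
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(list_clean=True)
    #   for f in modified:
    #       print "M", f
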
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            del wlock

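    # Sketch (hypothetical paths): add() records files in the dirstate and
    # reports the ones it refused, so that
    #
    #   bad = repo.add(['README', 'some/fifo'])
    #
    # would leave 'some/fifo' in bad if that path is not a regular file
    # or symlink.
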
    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if dest not in self.dirstate:
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

    def branchheads(self, branch, start=None):
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from start. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

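    # Worked example (hypothetical history, not from the original source):
    # with revisions 0-1-2 on 'default' and rev 3 a child of rev 1 on
    # branch 'stable', the scan over 3, 2, 1, 0 takes rev 3 (the tip of
    # 'stable') as a head, marks rev 1 as an ancestor, then skips 2 (wrong
    # branch) and consumes 1 and 0 as ancestors, so
    # branchheads('stable') returns just [node(3)].
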
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

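    # Illustration (not in the original source): between() samples the
    # first-parent chain from top to bottom at exponentially growing
    # distances, keeping the nodes at steps 1, 2, 4, 8, ... below top.
    # For a ten-deep linear range that yields the nodes at distances
    # 1, 2, 4 and 8, which is roughly what lets findincoming below narrow
    # a branch with O(log n) round trips.
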
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but have no child that exists in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

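    # Usage sketch (hypothetical 'other' repository object): the discovery
    # phase of pull boils down to
    #
    #   base = {}
    #   fetch = repo.findincoming(other, base)
    #   # fetch: roots of what we lack; base: common nodes found on the way
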
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

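    # Sketch (hypothetical 'other'): previewing a push is essentially
    #
    #   out = repo.findoutgoing(other)
    #   # out lists the roots of the changesets 'other' is missing;
    #   # passing heads=other.heads() instead returns a second element
    #   # naming the remote heads that would gain children.
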
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

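    # Sketch: pull() ties discovery and transfer together; with a
    # hypothetical 'other' repository,
    #
    #   repo.pull(other)                      # everything
    #   repo.pull(other, heads=[somenode])    # just ancestors of somenode
    #
    # hands the incoming changegroup to addchangegroup.
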
    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
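        # Return contract (summarizing the code below; this note is not
        # part of the original source):
        #   (None, 0)            -- push refused (would create remote heads)
        #   (None, 1)            -- nothing to push
        #   (changegroup, heads) -- changegroup to send plus remote heads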
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

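        # For illustration (hypothetical values), an extranodes dict could
        # look like:
        #
        #   {1: [(manifestnode, linknode)],
        #    'foo/bar.txt': [(filenode1, linknode1),
        #                    (filenode2, linknode2)]}
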
        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
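        # The resulting stream is, roughly: one group of changelog chunks,
        # then one group of manifest chunks, then for each changed file a
        # chunk carrying the file name followed by that file's filenode
        # group (this summarizes the generator below; it is not part of
        # the original comments).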
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory
            # for them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision number.
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()
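        # A sketch of the stream gengroup() yields (inferred from the code
        # above, not a separate spec): the changelog group, then the
        # manifest group, then for each changed file a chunk header with
        # the filename followed by that file's group, and finally one
        # zero-length chunk from changegroup.closechunk() terminating the
        # changegroup.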

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
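        # A worked example of the encoding above (illustrative only):
        # pulling changesets that grow the repo from 1 head to 3 heads
        # returns 1 + 2 == 3; a pull that merges away two heads (3 -> 1)
        # returns -1 - 2 == -3; a simple fast-forward pull returns 1.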
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will
        # not see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

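        # Illustrative only: with oldheads == 1 and newheads == 3 the
        # result below is 3 - 1 + 1 == 3; with oldheads == 3 and
        # newheads == 1 it is 1 - 3 - 1 == -3.  The +/-1 offset keeps the
        # result nonzero, so callers can tell a successful pull that left
        # the head count unchanged (1) apart from "nothing to pull" (the
        # early return 0 above).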
        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
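        # The rest of the stream, as parsed below (a sketch inferred from
        # this method, not a separate spec): one line with
        # "<total_files> <total_bytes>", then for each file a line
        # "<name>\0<size>" followed by exactly <size> bytes of raw store
        # data written out via self.sopener.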
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''
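        # Illustrative only: clone(remote, stream=True) tries the
        # uncompressed streaming path and quietly falls back to pull()
        # when specific heads were requested or the server lacks the
        # 'stream' capability, as the checks below show.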

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
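# Illustrative only, with hypothetical file names: the caller builds the
# closure before the transaction is torn down, and the returned callable
# holds no reference back to the repo or transaction:
#
#     undo = aftertrans([('journal', 'undo')])
#     undo()   # renames 'journal' to 'undo' via util.rename
#
# Capturing only a fresh list of tuples is what keeps destructors
# working, per the comment above.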

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True