##// END OF EJS Templates
don't use hasattr in repo.invalidate...
Alexis S. L. Carvalho -
r6371:b2f1d97d default
parent child Browse files
Show More
@@ -1,2133 +1,2133 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import lock, transaction, stat, errno, ui
12 import lock, transaction, stat, errno, ui
13 import os, revlog, time, util, extensions, hook, inspect
13 import os, revlog, time, util, extensions, hook, inspect
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = util.set(('lookup', 'changegroupsubset'))
16 capabilities = util.set(('lookup', 'changegroupsubset'))
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __init__(self, parentui, path=None, create=0):
19 def __init__(self, parentui, path=None, create=0):
20 repo.repository.__init__(self)
20 repo.repository.__init__(self)
21 self.root = os.path.realpath(path)
21 self.root = os.path.realpath(path)
22 self.path = os.path.join(self.root, ".hg")
22 self.path = os.path.join(self.root, ".hg")
23 self.origroot = path
23 self.origroot = path
24 self.opener = util.opener(self.path)
24 self.opener = util.opener(self.path)
25 self.wopener = util.opener(self.root)
25 self.wopener = util.opener(self.root)
26
26
27 if not os.path.isdir(self.path):
27 if not os.path.isdir(self.path):
28 if create:
28 if create:
29 if not os.path.exists(path):
29 if not os.path.exists(path):
30 os.mkdir(path)
30 os.mkdir(path)
31 os.mkdir(self.path)
31 os.mkdir(self.path)
32 requirements = ["revlogv1"]
32 requirements = ["revlogv1"]
33 if parentui.configbool('format', 'usestore', True):
33 if parentui.configbool('format', 'usestore', True):
34 os.mkdir(os.path.join(self.path, "store"))
34 os.mkdir(os.path.join(self.path, "store"))
35 requirements.append("store")
35 requirements.append("store")
36 # create an invalid changelog
36 # create an invalid changelog
37 self.opener("00changelog.i", "a").write(
37 self.opener("00changelog.i", "a").write(
38 '\0\0\0\2' # represents revlogv2
38 '\0\0\0\2' # represents revlogv2
39 ' dummy changelog to prevent using the old repo layout'
39 ' dummy changelog to prevent using the old repo layout'
40 )
40 )
41 reqfile = self.opener("requires", "w")
41 reqfile = self.opener("requires", "w")
42 for r in requirements:
42 for r in requirements:
43 reqfile.write("%s\n" % r)
43 reqfile.write("%s\n" % r)
44 reqfile.close()
44 reqfile.close()
45 else:
45 else:
46 raise repo.RepoError(_("repository %s not found") % path)
46 raise repo.RepoError(_("repository %s not found") % path)
47 elif create:
47 elif create:
48 raise repo.RepoError(_("repository %s already exists") % path)
48 raise repo.RepoError(_("repository %s already exists") % path)
49 else:
49 else:
50 # find requirements
50 # find requirements
51 try:
51 try:
52 requirements = self.opener("requires").read().splitlines()
52 requirements = self.opener("requires").read().splitlines()
53 except IOError, inst:
53 except IOError, inst:
54 if inst.errno != errno.ENOENT:
54 if inst.errno != errno.ENOENT:
55 raise
55 raise
56 requirements = []
56 requirements = []
57 # check them
57 # check them
58 for r in requirements:
58 for r in requirements:
59 if r not in self.supported:
59 if r not in self.supported:
60 raise repo.RepoError(_("requirement '%s' not supported") % r)
60 raise repo.RepoError(_("requirement '%s' not supported") % r)
61
61
62 # setup store
62 # setup store
63 if "store" in requirements:
63 if "store" in requirements:
64 self.encodefn = util.encodefilename
64 self.encodefn = util.encodefilename
65 self.decodefn = util.decodefilename
65 self.decodefn = util.decodefilename
66 self.spath = os.path.join(self.path, "store")
66 self.spath = os.path.join(self.path, "store")
67 else:
67 else:
68 self.encodefn = lambda x: x
68 self.encodefn = lambda x: x
69 self.decodefn = lambda x: x
69 self.decodefn = lambda x: x
70 self.spath = self.path
70 self.spath = self.path
71
71
72 try:
72 try:
73 # files in .hg/ will be created using this mode
73 # files in .hg/ will be created using this mode
74 mode = os.stat(self.spath).st_mode
74 mode = os.stat(self.spath).st_mode
75 # avoid some useless chmods
75 # avoid some useless chmods
76 if (0777 & ~util._umask) == (0777 & mode):
76 if (0777 & ~util._umask) == (0777 & mode):
77 mode = None
77 mode = None
78 except OSError:
78 except OSError:
79 mode = None
79 mode = None
80
80
81 self._createmode = mode
81 self._createmode = mode
82 self.opener.createmode = mode
82 self.opener.createmode = mode
83 sopener = util.opener(self.spath)
83 sopener = util.opener(self.spath)
84 sopener.createmode = mode
84 sopener.createmode = mode
85 self.sopener = util.encodedopener(sopener, self.encodefn)
85 self.sopener = util.encodedopener(sopener, self.encodefn)
86
86
87 self.ui = ui.ui(parentui=parentui)
87 self.ui = ui.ui(parentui=parentui)
88 try:
88 try:
89 self.ui.readconfig(self.join("hgrc"), self.root)
89 self.ui.readconfig(self.join("hgrc"), self.root)
90 extensions.loadall(self.ui)
90 extensions.loadall(self.ui)
91 except IOError:
91 except IOError:
92 pass
92 pass
93
93
94 self.tagscache = None
94 self.tagscache = None
95 self._tagstypecache = None
95 self._tagstypecache = None
96 self.branchcache = None
96 self.branchcache = None
97 self._ubranchcache = None # UTF-8 version of branchcache
97 self._ubranchcache = None # UTF-8 version of branchcache
98 self._branchcachetip = None
98 self._branchcachetip = None
99 self.nodetagscache = None
99 self.nodetagscache = None
100 self.filterpats = {}
100 self.filterpats = {}
101 self._datafilters = {}
101 self._datafilters = {}
102 self._transref = self._lockref = self._wlockref = None
102 self._transref = self._lockref = self._wlockref = None
103
103
104 def __getattr__(self, name):
104 def __getattr__(self, name):
105 if name == 'changelog':
105 if name == 'changelog':
106 self.changelog = changelog.changelog(self.sopener)
106 self.changelog = changelog.changelog(self.sopener)
107 self.sopener.defversion = self.changelog.version
107 self.sopener.defversion = self.changelog.version
108 return self.changelog
108 return self.changelog
109 if name == 'manifest':
109 if name == 'manifest':
110 self.changelog
110 self.changelog
111 self.manifest = manifest.manifest(self.sopener)
111 self.manifest = manifest.manifest(self.sopener)
112 return self.manifest
112 return self.manifest
113 if name == 'dirstate':
113 if name == 'dirstate':
114 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
114 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
115 return self.dirstate
115 return self.dirstate
116 else:
116 else:
117 raise AttributeError, name
117 raise AttributeError, name
118
118
119 def url(self):
119 def url(self):
120 return 'file:' + self.root
120 return 'file:' + self.root
121
121
122 def hook(self, name, throw=False, **args):
122 def hook(self, name, throw=False, **args):
123 return hook.hook(self.ui, self, name, throw, **args)
123 return hook.hook(self.ui, self, name, throw, **args)
124
124
125 tag_disallowed = ':\r\n'
125 tag_disallowed = ':\r\n'
126
126
127 def _tag(self, names, node, message, local, user, date, parent=None,
127 def _tag(self, names, node, message, local, user, date, parent=None,
128 extra={}):
128 extra={}):
129 use_dirstate = parent is None
129 use_dirstate = parent is None
130
130
131 if isinstance(names, str):
131 if isinstance(names, str):
132 allchars = names
132 allchars = names
133 names = (names,)
133 names = (names,)
134 else:
134 else:
135 allchars = ''.join(names)
135 allchars = ''.join(names)
136 for c in self.tag_disallowed:
136 for c in self.tag_disallowed:
137 if c in allchars:
137 if c in allchars:
138 raise util.Abort(_('%r cannot be used in a tag name') % c)
138 raise util.Abort(_('%r cannot be used in a tag name') % c)
139
139
140 for name in names:
140 for name in names:
141 self.hook('pretag', throw=True, node=hex(node), tag=name,
141 self.hook('pretag', throw=True, node=hex(node), tag=name,
142 local=local)
142 local=local)
143
143
144 def writetags(fp, names, munge, prevtags):
144 def writetags(fp, names, munge, prevtags):
145 fp.seek(0, 2)
145 fp.seek(0, 2)
146 if prevtags and prevtags[-1] != '\n':
146 if prevtags and prevtags[-1] != '\n':
147 fp.write('\n')
147 fp.write('\n')
148 for name in names:
148 for name in names:
149 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
149 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
150 fp.close()
150 fp.close()
151
151
152 prevtags = ''
152 prevtags = ''
153 if local:
153 if local:
154 try:
154 try:
155 fp = self.opener('localtags', 'r+')
155 fp = self.opener('localtags', 'r+')
156 except IOError, err:
156 except IOError, err:
157 fp = self.opener('localtags', 'a')
157 fp = self.opener('localtags', 'a')
158 else:
158 else:
159 prevtags = fp.read()
159 prevtags = fp.read()
160
160
161 # local tags are stored in the current charset
161 # local tags are stored in the current charset
162 writetags(fp, names, None, prevtags)
162 writetags(fp, names, None, prevtags)
163 for name in names:
163 for name in names:
164 self.hook('tag', node=hex(node), tag=name, local=local)
164 self.hook('tag', node=hex(node), tag=name, local=local)
165 return
165 return
166
166
167 if use_dirstate:
167 if use_dirstate:
168 try:
168 try:
169 fp = self.wfile('.hgtags', 'rb+')
169 fp = self.wfile('.hgtags', 'rb+')
170 except IOError, err:
170 except IOError, err:
171 fp = self.wfile('.hgtags', 'ab')
171 fp = self.wfile('.hgtags', 'ab')
172 else:
172 else:
173 prevtags = fp.read()
173 prevtags = fp.read()
174 else:
174 else:
175 try:
175 try:
176 prevtags = self.filectx('.hgtags', parent).data()
176 prevtags = self.filectx('.hgtags', parent).data()
177 except revlog.LookupError:
177 except revlog.LookupError:
178 pass
178 pass
179 fp = self.wfile('.hgtags', 'wb')
179 fp = self.wfile('.hgtags', 'wb')
180 if prevtags:
180 if prevtags:
181 fp.write(prevtags)
181 fp.write(prevtags)
182
182
183 # committed tags are stored in UTF-8
183 # committed tags are stored in UTF-8
184 writetags(fp, names, util.fromlocal, prevtags)
184 writetags(fp, names, util.fromlocal, prevtags)
185
185
186 if use_dirstate and '.hgtags' not in self.dirstate:
186 if use_dirstate and '.hgtags' not in self.dirstate:
187 self.add(['.hgtags'])
187 self.add(['.hgtags'])
188
188
189 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
189 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
190 extra=extra)
190 extra=extra)
191
191
192 for name in names:
192 for name in names:
193 self.hook('tag', node=hex(node), tag=name, local=local)
193 self.hook('tag', node=hex(node), tag=name, local=local)
194
194
195 return tagnode
195 return tagnode
196
196
197 def tag(self, names, node, message, local, user, date):
197 def tag(self, names, node, message, local, user, date):
198 '''tag a revision with one or more symbolic names.
198 '''tag a revision with one or more symbolic names.
199
199
200 names is a list of strings or, when adding a single tag, names may be a
200 names is a list of strings or, when adding a single tag, names may be a
201 string.
201 string.
202
202
203 if local is True, the tags are stored in a per-repository file.
203 if local is True, the tags are stored in a per-repository file.
204 otherwise, they are stored in the .hgtags file, and a new
204 otherwise, they are stored in the .hgtags file, and a new
205 changeset is committed with the change.
205 changeset is committed with the change.
206
206
207 keyword arguments:
207 keyword arguments:
208
208
209 local: whether to store tags in non-version-controlled file
209 local: whether to store tags in non-version-controlled file
210 (default False)
210 (default False)
211
211
212 message: commit message to use if committing
212 message: commit message to use if committing
213
213
214 user: name of user to use if committing
214 user: name of user to use if committing
215
215
216 date: date tuple to use if committing'''
216 date: date tuple to use if committing'''
217
217
218 for x in self.status()[:5]:
218 for x in self.status()[:5]:
219 if '.hgtags' in x:
219 if '.hgtags' in x:
220 raise util.Abort(_('working copy of .hgtags is changed '
220 raise util.Abort(_('working copy of .hgtags is changed '
221 '(please commit .hgtags manually)'))
221 '(please commit .hgtags manually)'))
222
222
223 self._tag(names, node, message, local, user, date)
223 self._tag(names, node, message, local, user, date)
224
224
225 def tags(self):
225 def tags(self):
226 '''return a mapping of tag to node'''
226 '''return a mapping of tag to node'''
227 if self.tagscache:
227 if self.tagscache:
228 return self.tagscache
228 return self.tagscache
229
229
230 globaltags = {}
230 globaltags = {}
231 tagtypes = {}
231 tagtypes = {}
232
232
233 def readtags(lines, fn, tagtype):
233 def readtags(lines, fn, tagtype):
234 filetags = {}
234 filetags = {}
235 count = 0
235 count = 0
236
236
237 def warn(msg):
237 def warn(msg):
238 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
238 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
239
239
240 for l in lines:
240 for l in lines:
241 count += 1
241 count += 1
242 if not l:
242 if not l:
243 continue
243 continue
244 s = l.split(" ", 1)
244 s = l.split(" ", 1)
245 if len(s) != 2:
245 if len(s) != 2:
246 warn(_("cannot parse entry"))
246 warn(_("cannot parse entry"))
247 continue
247 continue
248 node, key = s
248 node, key = s
249 key = util.tolocal(key.strip()) # stored in UTF-8
249 key = util.tolocal(key.strip()) # stored in UTF-8
250 try:
250 try:
251 bin_n = bin(node)
251 bin_n = bin(node)
252 except TypeError:
252 except TypeError:
253 warn(_("node '%s' is not well formed") % node)
253 warn(_("node '%s' is not well formed") % node)
254 continue
254 continue
255 if bin_n not in self.changelog.nodemap:
255 if bin_n not in self.changelog.nodemap:
256 warn(_("tag '%s' refers to unknown node") % key)
256 warn(_("tag '%s' refers to unknown node") % key)
257 continue
257 continue
258
258
259 h = []
259 h = []
260 if key in filetags:
260 if key in filetags:
261 n, h = filetags[key]
261 n, h = filetags[key]
262 h.append(n)
262 h.append(n)
263 filetags[key] = (bin_n, h)
263 filetags[key] = (bin_n, h)
264
264
265 for k, nh in filetags.items():
265 for k, nh in filetags.items():
266 if k not in globaltags:
266 if k not in globaltags:
267 globaltags[k] = nh
267 globaltags[k] = nh
268 tagtypes[k] = tagtype
268 tagtypes[k] = tagtype
269 continue
269 continue
270
270
271 # we prefer the global tag if:
271 # we prefer the global tag if:
272 # it supercedes us OR
272 # it supercedes us OR
273 # mutual supercedes and it has a higher rank
273 # mutual supercedes and it has a higher rank
274 # otherwise we win because we're tip-most
274 # otherwise we win because we're tip-most
275 an, ah = nh
275 an, ah = nh
276 bn, bh = globaltags[k]
276 bn, bh = globaltags[k]
277 if (bn != an and an in bh and
277 if (bn != an and an in bh and
278 (bn not in ah or len(bh) > len(ah))):
278 (bn not in ah or len(bh) > len(ah))):
279 an = bn
279 an = bn
280 ah.extend([n for n in bh if n not in ah])
280 ah.extend([n for n in bh if n not in ah])
281 globaltags[k] = an, ah
281 globaltags[k] = an, ah
282 tagtypes[k] = tagtype
282 tagtypes[k] = tagtype
283
283
284 # read the tags file from each head, ending with the tip
284 # read the tags file from each head, ending with the tip
285 f = None
285 f = None
286 for rev, node, fnode in self._hgtagsnodes():
286 for rev, node, fnode in self._hgtagsnodes():
287 f = (f and f.filectx(fnode) or
287 f = (f and f.filectx(fnode) or
288 self.filectx('.hgtags', fileid=fnode))
288 self.filectx('.hgtags', fileid=fnode))
289 readtags(f.data().splitlines(), f, "global")
289 readtags(f.data().splitlines(), f, "global")
290
290
291 try:
291 try:
292 data = util.fromlocal(self.opener("localtags").read())
292 data = util.fromlocal(self.opener("localtags").read())
293 # localtags are stored in the local character set
293 # localtags are stored in the local character set
294 # while the internal tag table is stored in UTF-8
294 # while the internal tag table is stored in UTF-8
295 readtags(data.splitlines(), "localtags", "local")
295 readtags(data.splitlines(), "localtags", "local")
296 except IOError:
296 except IOError:
297 pass
297 pass
298
298
299 self.tagscache = {}
299 self.tagscache = {}
300 self._tagstypecache = {}
300 self._tagstypecache = {}
301 for k,nh in globaltags.items():
301 for k,nh in globaltags.items():
302 n = nh[0]
302 n = nh[0]
303 if n != nullid:
303 if n != nullid:
304 self.tagscache[k] = n
304 self.tagscache[k] = n
305 self._tagstypecache[k] = tagtypes[k]
305 self._tagstypecache[k] = tagtypes[k]
306 self.tagscache['tip'] = self.changelog.tip()
306 self.tagscache['tip'] = self.changelog.tip()
307
307
308 return self.tagscache
308 return self.tagscache
309
309
310 def tagtype(self, tagname):
310 def tagtype(self, tagname):
311 '''
311 '''
312 return the type of the given tag. result can be:
312 return the type of the given tag. result can be:
313
313
314 'local' : a local tag
314 'local' : a local tag
315 'global' : a global tag
315 'global' : a global tag
316 None : tag does not exist
316 None : tag does not exist
317 '''
317 '''
318
318
319 self.tags()
319 self.tags()
320
320
321 return self._tagstypecache.get(tagname)
321 return self._tagstypecache.get(tagname)
322
322
323 def _hgtagsnodes(self):
323 def _hgtagsnodes(self):
324 heads = self.heads()
324 heads = self.heads()
325 heads.reverse()
325 heads.reverse()
326 last = {}
326 last = {}
327 ret = []
327 ret = []
328 for node in heads:
328 for node in heads:
329 c = self.changectx(node)
329 c = self.changectx(node)
330 rev = c.rev()
330 rev = c.rev()
331 try:
331 try:
332 fnode = c.filenode('.hgtags')
332 fnode = c.filenode('.hgtags')
333 except revlog.LookupError:
333 except revlog.LookupError:
334 continue
334 continue
335 ret.append((rev, node, fnode))
335 ret.append((rev, node, fnode))
336 if fnode in last:
336 if fnode in last:
337 ret[last[fnode]] = None
337 ret[last[fnode]] = None
338 last[fnode] = len(ret) - 1
338 last[fnode] = len(ret) - 1
339 return [item for item in ret if item]
339 return [item for item in ret if item]
340
340
341 def tagslist(self):
341 def tagslist(self):
342 '''return a list of tags ordered by revision'''
342 '''return a list of tags ordered by revision'''
343 l = []
343 l = []
344 for t, n in self.tags().items():
344 for t, n in self.tags().items():
345 try:
345 try:
346 r = self.changelog.rev(n)
346 r = self.changelog.rev(n)
347 except:
347 except:
348 r = -2 # sort to the beginning of the list if unknown
348 r = -2 # sort to the beginning of the list if unknown
349 l.append((r, t, n))
349 l.append((r, t, n))
350 l.sort()
350 l.sort()
351 return [(t, n) for r, t, n in l]
351 return [(t, n) for r, t, n in l]
352
352
353 def nodetags(self, node):
353 def nodetags(self, node):
354 '''return the tags associated with a node'''
354 '''return the tags associated with a node'''
355 if not self.nodetagscache:
355 if not self.nodetagscache:
356 self.nodetagscache = {}
356 self.nodetagscache = {}
357 for t, n in self.tags().items():
357 for t, n in self.tags().items():
358 self.nodetagscache.setdefault(n, []).append(t)
358 self.nodetagscache.setdefault(n, []).append(t)
359 return self.nodetagscache.get(node, [])
359 return self.nodetagscache.get(node, [])
360
360
361 def _branchtags(self, partial, lrev):
361 def _branchtags(self, partial, lrev):
362 tiprev = self.changelog.count() - 1
362 tiprev = self.changelog.count() - 1
363 if lrev != tiprev:
363 if lrev != tiprev:
364 self._updatebranchcache(partial, lrev+1, tiprev+1)
364 self._updatebranchcache(partial, lrev+1, tiprev+1)
365 self._writebranchcache(partial, self.changelog.tip(), tiprev)
365 self._writebranchcache(partial, self.changelog.tip(), tiprev)
366
366
367 return partial
367 return partial
368
368
369 def branchtags(self):
369 def branchtags(self):
370 tip = self.changelog.tip()
370 tip = self.changelog.tip()
371 if self.branchcache is not None and self._branchcachetip == tip:
371 if self.branchcache is not None and self._branchcachetip == tip:
372 return self.branchcache
372 return self.branchcache
373
373
374 oldtip = self._branchcachetip
374 oldtip = self._branchcachetip
375 self._branchcachetip = tip
375 self._branchcachetip = tip
376 if self.branchcache is None:
376 if self.branchcache is None:
377 self.branchcache = {} # avoid recursion in changectx
377 self.branchcache = {} # avoid recursion in changectx
378 else:
378 else:
379 self.branchcache.clear() # keep using the same dict
379 self.branchcache.clear() # keep using the same dict
380 if oldtip is None or oldtip not in self.changelog.nodemap:
380 if oldtip is None or oldtip not in self.changelog.nodemap:
381 partial, last, lrev = self._readbranchcache()
381 partial, last, lrev = self._readbranchcache()
382 else:
382 else:
383 lrev = self.changelog.rev(oldtip)
383 lrev = self.changelog.rev(oldtip)
384 partial = self._ubranchcache
384 partial = self._ubranchcache
385
385
386 self._branchtags(partial, lrev)
386 self._branchtags(partial, lrev)
387
387
388 # the branch cache is stored on disk as UTF-8, but in the local
388 # the branch cache is stored on disk as UTF-8, but in the local
389 # charset internally
389 # charset internally
390 for k, v in partial.items():
390 for k, v in partial.items():
391 self.branchcache[util.tolocal(k)] = v
391 self.branchcache[util.tolocal(k)] = v
392 self._ubranchcache = partial
392 self._ubranchcache = partial
393 return self.branchcache
393 return self.branchcache
394
394
395 def _readbranchcache(self):
395 def _readbranchcache(self):
396 partial = {}
396 partial = {}
397 try:
397 try:
398 f = self.opener("branch.cache")
398 f = self.opener("branch.cache")
399 lines = f.read().split('\n')
399 lines = f.read().split('\n')
400 f.close()
400 f.close()
401 except (IOError, OSError):
401 except (IOError, OSError):
402 return {}, nullid, nullrev
402 return {}, nullid, nullrev
403
403
404 try:
404 try:
405 last, lrev = lines.pop(0).split(" ", 1)
405 last, lrev = lines.pop(0).split(" ", 1)
406 last, lrev = bin(last), int(lrev)
406 last, lrev = bin(last), int(lrev)
407 if not (lrev < self.changelog.count() and
407 if not (lrev < self.changelog.count() and
408 self.changelog.node(lrev) == last): # sanity check
408 self.changelog.node(lrev) == last): # sanity check
409 # invalidate the cache
409 # invalidate the cache
410 raise ValueError('invalidating branch cache (tip differs)')
410 raise ValueError('invalidating branch cache (tip differs)')
411 for l in lines:
411 for l in lines:
412 if not l: continue
412 if not l: continue
413 node, label = l.split(" ", 1)
413 node, label = l.split(" ", 1)
414 partial[label.strip()] = bin(node)
414 partial[label.strip()] = bin(node)
415 except (KeyboardInterrupt, util.SignalInterrupt):
415 except (KeyboardInterrupt, util.SignalInterrupt):
416 raise
416 raise
417 except Exception, inst:
417 except Exception, inst:
418 if self.ui.debugflag:
418 if self.ui.debugflag:
419 self.ui.warn(str(inst), '\n')
419 self.ui.warn(str(inst), '\n')
420 partial, last, lrev = {}, nullid, nullrev
420 partial, last, lrev = {}, nullid, nullrev
421 return partial, last, lrev
421 return partial, last, lrev
422
422
423 def _writebranchcache(self, branches, tip, tiprev):
423 def _writebranchcache(self, branches, tip, tiprev):
424 try:
424 try:
425 f = self.opener("branch.cache", "w", atomictemp=True)
425 f = self.opener("branch.cache", "w", atomictemp=True)
426 f.write("%s %s\n" % (hex(tip), tiprev))
426 f.write("%s %s\n" % (hex(tip), tiprev))
427 for label, node in branches.iteritems():
427 for label, node in branches.iteritems():
428 f.write("%s %s\n" % (hex(node), label))
428 f.write("%s %s\n" % (hex(node), label))
429 f.rename()
429 f.rename()
430 except (IOError, OSError):
430 except (IOError, OSError):
431 pass
431 pass
432
432
433 def _updatebranchcache(self, partial, start, end):
433 def _updatebranchcache(self, partial, start, end):
434 for r in xrange(start, end):
434 for r in xrange(start, end):
435 c = self.changectx(r)
435 c = self.changectx(r)
436 b = c.branch()
436 b = c.branch()
437 partial[b] = c.node()
437 partial[b] = c.node()
438
438
439 def lookup(self, key):
439 def lookup(self, key):
440 if key == '.':
440 if key == '.':
441 key, second = self.dirstate.parents()
441 key, second = self.dirstate.parents()
442 if key == nullid:
442 if key == nullid:
443 raise repo.RepoError(_("no revision checked out"))
443 raise repo.RepoError(_("no revision checked out"))
444 if second != nullid:
444 if second != nullid:
445 self.ui.warn(_("warning: working directory has two parents, "
445 self.ui.warn(_("warning: working directory has two parents, "
446 "tag '.' uses the first\n"))
446 "tag '.' uses the first\n"))
447 elif key == 'null':
447 elif key == 'null':
448 return nullid
448 return nullid
449 n = self.changelog._match(key)
449 n = self.changelog._match(key)
450 if n:
450 if n:
451 return n
451 return n
452 if key in self.tags():
452 if key in self.tags():
453 return self.tags()[key]
453 return self.tags()[key]
454 if key in self.branchtags():
454 if key in self.branchtags():
455 return self.branchtags()[key]
455 return self.branchtags()[key]
456 n = self.changelog._partialmatch(key)
456 n = self.changelog._partialmatch(key)
457 if n:
457 if n:
458 return n
458 return n
459 try:
459 try:
460 if len(key) == 20:
460 if len(key) == 20:
461 key = hex(key)
461 key = hex(key)
462 except:
462 except:
463 pass
463 pass
464 raise repo.RepoError(_("unknown revision '%s'") % key)
464 raise repo.RepoError(_("unknown revision '%s'") % key)
465
465
466 def local(self):
466 def local(self):
467 return True
467 return True
468
468
469 def join(self, f):
469 def join(self, f):
470 return os.path.join(self.path, f)
470 return os.path.join(self.path, f)
471
471
472 def sjoin(self, f):
472 def sjoin(self, f):
473 f = self.encodefn(f)
473 f = self.encodefn(f)
474 return os.path.join(self.spath, f)
474 return os.path.join(self.spath, f)
475
475
476 def wjoin(self, f):
476 def wjoin(self, f):
477 return os.path.join(self.root, f)
477 return os.path.join(self.root, f)
478
478
479 def file(self, f):
479 def file(self, f):
480 if f[0] == '/':
480 if f[0] == '/':
481 f = f[1:]
481 f = f[1:]
482 return filelog.filelog(self.sopener, f)
482 return filelog.filelog(self.sopener, f)
483
483
484 def changectx(self, changeid=None):
484 def changectx(self, changeid=None):
485 return context.changectx(self, changeid)
485 return context.changectx(self, changeid)
486
486
487 def workingctx(self):
487 def workingctx(self):
488 return context.workingctx(self)
488 return context.workingctx(self)
489
489
490 def parents(self, changeid=None):
490 def parents(self, changeid=None):
491 '''
491 '''
492 get list of changectxs for parents of changeid or working directory
492 get list of changectxs for parents of changeid or working directory
493 '''
493 '''
494 if changeid is None:
494 if changeid is None:
495 pl = self.dirstate.parents()
495 pl = self.dirstate.parents()
496 else:
496 else:
497 n = self.changelog.lookup(changeid)
497 n = self.changelog.lookup(changeid)
498 pl = self.changelog.parents(n)
498 pl = self.changelog.parents(n)
499 if pl[1] == nullid:
499 if pl[1] == nullid:
500 return [self.changectx(pl[0])]
500 return [self.changectx(pl[0])]
501 return [self.changectx(pl[0]), self.changectx(pl[1])]
501 return [self.changectx(pl[0]), self.changectx(pl[1])]
502
502
503 def filectx(self, path, changeid=None, fileid=None):
503 def filectx(self, path, changeid=None, fileid=None):
504 """changeid can be a changeset revision, node, or tag.
504 """changeid can be a changeset revision, node, or tag.
505 fileid can be a file revision or node."""
505 fileid can be a file revision or node."""
506 return context.filectx(self, path, changeid, fileid)
506 return context.filectx(self, path, changeid, fileid)
507
507
508 def getcwd(self):
508 def getcwd(self):
509 return self.dirstate.getcwd()
509 return self.dirstate.getcwd()
510
510
511 def pathto(self, f, cwd=None):
511 def pathto(self, f, cwd=None):
512 return self.dirstate.pathto(f, cwd)
512 return self.dirstate.pathto(f, cwd)
513
513
514 def wfile(self, f, mode='r'):
514 def wfile(self, f, mode='r'):
515 return self.wopener(f, mode)
515 return self.wopener(f, mode)
516
516
517 def _link(self, f):
517 def _link(self, f):
518 return os.path.islink(self.wjoin(f))
518 return os.path.islink(self.wjoin(f))
519
519
520 def _filter(self, filter, filename, data):
520 def _filter(self, filter, filename, data):
521 if filter not in self.filterpats:
521 if filter not in self.filterpats:
522 l = []
522 l = []
523 for pat, cmd in self.ui.configitems(filter):
523 for pat, cmd in self.ui.configitems(filter):
524 mf = util.matcher(self.root, "", [pat], [], [])[1]
524 mf = util.matcher(self.root, "", [pat], [], [])[1]
525 fn = None
525 fn = None
526 params = cmd
526 params = cmd
527 for name, filterfn in self._datafilters.iteritems():
527 for name, filterfn in self._datafilters.iteritems():
528 if cmd.startswith(name):
528 if cmd.startswith(name):
529 fn = filterfn
529 fn = filterfn
530 params = cmd[len(name):].lstrip()
530 params = cmd[len(name):].lstrip()
531 break
531 break
532 if not fn:
532 if not fn:
533 fn = lambda s, c, **kwargs: util.filter(s, c)
533 fn = lambda s, c, **kwargs: util.filter(s, c)
534 # Wrap old filters not supporting keyword arguments
534 # Wrap old filters not supporting keyword arguments
535 if not inspect.getargspec(fn)[2]:
535 if not inspect.getargspec(fn)[2]:
536 oldfn = fn
536 oldfn = fn
537 fn = lambda s, c, **kwargs: oldfn(s, c)
537 fn = lambda s, c, **kwargs: oldfn(s, c)
538 l.append((mf, fn, params))
538 l.append((mf, fn, params))
539 self.filterpats[filter] = l
539 self.filterpats[filter] = l
540
540
541 for mf, fn, cmd in self.filterpats[filter]:
541 for mf, fn, cmd in self.filterpats[filter]:
542 if mf(filename):
542 if mf(filename):
543 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
543 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
544 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
544 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
545 break
545 break
546
546
547 return data
547 return data
548
548
549 def adddatafilter(self, name, filter):
549 def adddatafilter(self, name, filter):
550 self._datafilters[name] = filter
550 self._datafilters[name] = filter
551
551
552 def wread(self, filename):
552 def wread(self, filename):
553 if self._link(filename):
553 if self._link(filename):
554 data = os.readlink(self.wjoin(filename))
554 data = os.readlink(self.wjoin(filename))
555 else:
555 else:
556 data = self.wopener(filename, 'r').read()
556 data = self.wopener(filename, 'r').read()
557 return self._filter("encode", filename, data)
557 return self._filter("encode", filename, data)
558
558
559 def wwrite(self, filename, data, flags):
559 def wwrite(self, filename, data, flags):
560 data = self._filter("decode", filename, data)
560 data = self._filter("decode", filename, data)
561 try:
561 try:
562 os.unlink(self.wjoin(filename))
562 os.unlink(self.wjoin(filename))
563 except OSError:
563 except OSError:
564 pass
564 pass
565 self.wopener(filename, 'w').write(data)
565 self.wopener(filename, 'w').write(data)
566 util.set_flags(self.wjoin(filename), flags)
566 util.set_flags(self.wjoin(filename), flags)
567
567
568 def wwritedata(self, filename, data):
568 def wwritedata(self, filename, data):
569 return self._filter("decode", filename, data)
569 return self._filter("decode", filename, data)
570
570
571 def transaction(self):
571 def transaction(self):
572 if self._transref and self._transref():
572 if self._transref and self._transref():
573 return self._transref().nest()
573 return self._transref().nest()
574
574
575 # abort here if the journal already exists
575 # abort here if the journal already exists
576 if os.path.exists(self.sjoin("journal")):
576 if os.path.exists(self.sjoin("journal")):
577 raise repo.RepoError(_("journal already exists - run hg recover"))
577 raise repo.RepoError(_("journal already exists - run hg recover"))
578
578
579 # save dirstate for rollback
579 # save dirstate for rollback
580 try:
580 try:
581 ds = self.opener("dirstate").read()
581 ds = self.opener("dirstate").read()
582 except IOError:
582 except IOError:
583 ds = ""
583 ds = ""
584 self.opener("journal.dirstate", "w").write(ds)
584 self.opener("journal.dirstate", "w").write(ds)
585 self.opener("journal.branch", "w").write(self.dirstate.branch())
585 self.opener("journal.branch", "w").write(self.dirstate.branch())
586
586
587 renames = [(self.sjoin("journal"), self.sjoin("undo")),
587 renames = [(self.sjoin("journal"), self.sjoin("undo")),
588 (self.join("journal.dirstate"), self.join("undo.dirstate")),
588 (self.join("journal.dirstate"), self.join("undo.dirstate")),
589 (self.join("journal.branch"), self.join("undo.branch"))]
589 (self.join("journal.branch"), self.join("undo.branch"))]
590 tr = transaction.transaction(self.ui.warn, self.sopener,
590 tr = transaction.transaction(self.ui.warn, self.sopener,
591 self.sjoin("journal"),
591 self.sjoin("journal"),
592 aftertrans(renames),
592 aftertrans(renames),
593 self._createmode)
593 self._createmode)
594 self._transref = weakref.ref(tr)
594 self._transref = weakref.ref(tr)
595 return tr
595 return tr
596
596
597 def recover(self):
597 def recover(self):
598 l = self.lock()
598 l = self.lock()
599 try:
599 try:
600 if os.path.exists(self.sjoin("journal")):
600 if os.path.exists(self.sjoin("journal")):
601 self.ui.status(_("rolling back interrupted transaction\n"))
601 self.ui.status(_("rolling back interrupted transaction\n"))
602 transaction.rollback(self.sopener, self.sjoin("journal"))
602 transaction.rollback(self.sopener, self.sjoin("journal"))
603 self.invalidate()
603 self.invalidate()
604 return True
604 return True
605 else:
605 else:
606 self.ui.warn(_("no interrupted transaction available\n"))
606 self.ui.warn(_("no interrupted transaction available\n"))
607 return False
607 return False
608 finally:
608 finally:
609 del l
609 del l
610
610
611 def rollback(self):
611 def rollback(self):
612 wlock = lock = None
612 wlock = lock = None
613 try:
613 try:
614 wlock = self.wlock()
614 wlock = self.wlock()
615 lock = self.lock()
615 lock = self.lock()
616 if os.path.exists(self.sjoin("undo")):
616 if os.path.exists(self.sjoin("undo")):
617 self.ui.status(_("rolling back last transaction\n"))
617 self.ui.status(_("rolling back last transaction\n"))
618 transaction.rollback(self.sopener, self.sjoin("undo"))
618 transaction.rollback(self.sopener, self.sjoin("undo"))
619 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
619 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
620 try:
620 try:
621 branch = self.opener("undo.branch").read()
621 branch = self.opener("undo.branch").read()
622 self.dirstate.setbranch(branch)
622 self.dirstate.setbranch(branch)
623 except IOError:
623 except IOError:
624 self.ui.warn(_("Named branch could not be reset, "
624 self.ui.warn(_("Named branch could not be reset, "
625 "current branch still is: %s\n")
625 "current branch still is: %s\n")
626 % util.tolocal(self.dirstate.branch()))
626 % util.tolocal(self.dirstate.branch()))
627 self.invalidate()
627 self.invalidate()
628 self.dirstate.invalidate()
628 self.dirstate.invalidate()
629 else:
629 else:
630 self.ui.warn(_("no rollback information available\n"))
630 self.ui.warn(_("no rollback information available\n"))
631 finally:
631 finally:
632 del lock, wlock
632 del lock, wlock
633
633
634 def invalidate(self):
634 def invalidate(self):
635 for a in "changelog manifest".split():
635 for a in "changelog manifest".split():
636 if hasattr(self, a):
636 if a in self.__dict__:
637 self.__delattr__(a)
637 delattr(self, a)
638 self.tagscache = None
638 self.tagscache = None
639 self._tagstypecache = None
639 self._tagstypecache = None
640 self.nodetagscache = None
640 self.nodetagscache = None
641 self.branchcache = None
641 self.branchcache = None
642 self._ubranchcache = None
642 self._ubranchcache = None
643 self._branchcachetip = None
643 self._branchcachetip = None
644
644
645 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
645 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
646 try:
646 try:
647 l = lock.lock(lockname, 0, releasefn, desc=desc)
647 l = lock.lock(lockname, 0, releasefn, desc=desc)
648 except lock.LockHeld, inst:
648 except lock.LockHeld, inst:
649 if not wait:
649 if not wait:
650 raise
650 raise
651 self.ui.warn(_("waiting for lock on %s held by %r\n") %
651 self.ui.warn(_("waiting for lock on %s held by %r\n") %
652 (desc, inst.locker))
652 (desc, inst.locker))
653 # default to 600 seconds timeout
653 # default to 600 seconds timeout
654 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
654 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
655 releasefn, desc=desc)
655 releasefn, desc=desc)
656 if acquirefn:
656 if acquirefn:
657 acquirefn()
657 acquirefn()
658 return l
658 return l
659
659
660 def lock(self, wait=True):
660 def lock(self, wait=True):
661 if self._lockref and self._lockref():
661 if self._lockref and self._lockref():
662 return self._lockref()
662 return self._lockref()
663
663
664 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
664 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
665 _('repository %s') % self.origroot)
665 _('repository %s') % self.origroot)
666 self._lockref = weakref.ref(l)
666 self._lockref = weakref.ref(l)
667 return l
667 return l
668
668
669 def wlock(self, wait=True):
669 def wlock(self, wait=True):
670 if self._wlockref and self._wlockref():
670 if self._wlockref and self._wlockref():
671 return self._wlockref()
671 return self._wlockref()
672
672
673 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
673 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
674 self.dirstate.invalidate, _('working directory of %s') %
674 self.dirstate.invalidate, _('working directory of %s') %
675 self.origroot)
675 self.origroot)
676 self._wlockref = weakref.ref(l)
676 self._wlockref = weakref.ref(l)
677 return l
677 return l
678
678
679 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
679 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
680 """
680 """
681 commit an individual file as part of a larger transaction
681 commit an individual file as part of a larger transaction
682 """
682 """
683
683
684 t = self.wread(fn)
684 t = self.wread(fn)
685 fl = self.file(fn)
685 fl = self.file(fn)
686 fp1 = manifest1.get(fn, nullid)
686 fp1 = manifest1.get(fn, nullid)
687 fp2 = manifest2.get(fn, nullid)
687 fp2 = manifest2.get(fn, nullid)
688
688
689 meta = {}
689 meta = {}
690 cp = self.dirstate.copied(fn)
690 cp = self.dirstate.copied(fn)
691 if cp:
691 if cp:
692 # Mark the new revision of this file as a copy of another
692 # Mark the new revision of this file as a copy of another
693 # file. This copy data will effectively act as a parent
693 # file. This copy data will effectively act as a parent
694 # of this new revision. If this is a merge, the first
694 # of this new revision. If this is a merge, the first
695 # parent will be the nullid (meaning "look up the copy data")
695 # parent will be the nullid (meaning "look up the copy data")
696 # and the second one will be the other parent. For example:
696 # and the second one will be the other parent. For example:
697 #
697 #
698 # 0 --- 1 --- 3 rev1 changes file foo
698 # 0 --- 1 --- 3 rev1 changes file foo
699 # \ / rev2 renames foo to bar and changes it
699 # \ / rev2 renames foo to bar and changes it
700 # \- 2 -/ rev3 should have bar with all changes and
700 # \- 2 -/ rev3 should have bar with all changes and
701 # should record that bar descends from
701 # should record that bar descends from
702 # bar in rev2 and foo in rev1
702 # bar in rev2 and foo in rev1
703 #
703 #
704 # this allows this merge to succeed:
704 # this allows this merge to succeed:
705 #
705 #
706 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
706 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
707 # \ / merging rev3 and rev4 should use bar@rev2
707 # \ / merging rev3 and rev4 should use bar@rev2
708 # \- 2 --- 4 as the merge base
708 # \- 2 --- 4 as the merge base
709 #
709 #
710 meta["copy"] = cp
710 meta["copy"] = cp
711 if not manifest2: # not a branch merge
711 if not manifest2: # not a branch merge
712 meta["copyrev"] = hex(manifest1.get(cp, nullid))
712 meta["copyrev"] = hex(manifest1.get(cp, nullid))
713 fp2 = nullid
713 fp2 = nullid
714 elif fp2 != nullid: # copied on remote side
714 elif fp2 != nullid: # copied on remote side
715 meta["copyrev"] = hex(manifest1.get(cp, nullid))
715 meta["copyrev"] = hex(manifest1.get(cp, nullid))
716 elif fp1 != nullid: # copied on local side, reversed
716 elif fp1 != nullid: # copied on local side, reversed
717 meta["copyrev"] = hex(manifest2.get(cp))
717 meta["copyrev"] = hex(manifest2.get(cp))
718 fp2 = fp1
718 fp2 = fp1
719 elif cp in manifest2: # directory rename on local side
719 elif cp in manifest2: # directory rename on local side
720 meta["copyrev"] = hex(manifest2[cp])
720 meta["copyrev"] = hex(manifest2[cp])
721 else: # directory rename on remote side
721 else: # directory rename on remote side
722 meta["copyrev"] = hex(manifest1.get(cp, nullid))
722 meta["copyrev"] = hex(manifest1.get(cp, nullid))
723 self.ui.debug(_(" %s: copy %s:%s\n") %
723 self.ui.debug(_(" %s: copy %s:%s\n") %
724 (fn, cp, meta["copyrev"]))
724 (fn, cp, meta["copyrev"]))
725 fp1 = nullid
725 fp1 = nullid
726 elif fp2 != nullid:
726 elif fp2 != nullid:
727 # is one parent an ancestor of the other?
727 # is one parent an ancestor of the other?
728 fpa = fl.ancestor(fp1, fp2)
728 fpa = fl.ancestor(fp1, fp2)
729 if fpa == fp1:
729 if fpa == fp1:
730 fp1, fp2 = fp2, nullid
730 fp1, fp2 = fp2, nullid
731 elif fpa == fp2:
731 elif fpa == fp2:
732 fp2 = nullid
732 fp2 = nullid
733
733
734 # is the file unmodified from the parent? report existing entry
734 # is the file unmodified from the parent? report existing entry
735 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
735 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
736 return fp1
736 return fp1
737
737
738 changelist.append(fn)
738 changelist.append(fn)
739 return fl.add(t, meta, tr, linkrev, fp1, fp2)
739 return fl.add(t, meta, tr, linkrev, fp1, fp2)
740
740
741 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
741 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
742 if p1 is None:
742 if p1 is None:
743 p1, p2 = self.dirstate.parents()
743 p1, p2 = self.dirstate.parents()
744 return self.commit(files=files, text=text, user=user, date=date,
744 return self.commit(files=files, text=text, user=user, date=date,
745 p1=p1, p2=p2, extra=extra, empty_ok=True)
745 p1=p1, p2=p2, extra=extra, empty_ok=True)
746
746
747 def commit(self, files=None, text="", user=None, date=None,
747 def commit(self, files=None, text="", user=None, date=None,
748 match=util.always, force=False, force_editor=False,
748 match=util.always, force=False, force_editor=False,
749 p1=None, p2=None, extra={}, empty_ok=False):
749 p1=None, p2=None, extra={}, empty_ok=False):
750 wlock = lock = tr = None
750 wlock = lock = tr = None
751 valid = 0 # don't save the dirstate if this isn't set
751 valid = 0 # don't save the dirstate if this isn't set
752 if files:
752 if files:
753 files = util.unique(files)
753 files = util.unique(files)
754 try:
754 try:
755 wlock = self.wlock()
755 wlock = self.wlock()
756 lock = self.lock()
756 lock = self.lock()
757 commit = []
757 commit = []
758 remove = []
758 remove = []
759 changed = []
759 changed = []
760 use_dirstate = (p1 is None) # not rawcommit
760 use_dirstate = (p1 is None) # not rawcommit
761 extra = extra.copy()
761 extra = extra.copy()
762
762
763 if use_dirstate:
763 if use_dirstate:
764 if files:
764 if files:
765 for f in files:
765 for f in files:
766 s = self.dirstate[f]
766 s = self.dirstate[f]
767 if s in 'nma':
767 if s in 'nma':
768 commit.append(f)
768 commit.append(f)
769 elif s == 'r':
769 elif s == 'r':
770 remove.append(f)
770 remove.append(f)
771 else:
771 else:
772 self.ui.warn(_("%s not tracked!\n") % f)
772 self.ui.warn(_("%s not tracked!\n") % f)
773 else:
773 else:
774 changes = self.status(match=match)[:5]
774 changes = self.status(match=match)[:5]
775 modified, added, removed, deleted, unknown = changes
775 modified, added, removed, deleted, unknown = changes
776 commit = modified + added
776 commit = modified + added
777 remove = removed
777 remove = removed
778 else:
778 else:
779 commit = files
779 commit = files
780
780
781 if use_dirstate:
781 if use_dirstate:
782 p1, p2 = self.dirstate.parents()
782 p1, p2 = self.dirstate.parents()
783 update_dirstate = True
783 update_dirstate = True
784 else:
784 else:
785 p1, p2 = p1, p2 or nullid
785 p1, p2 = p1, p2 or nullid
786 update_dirstate = (self.dirstate.parents()[0] == p1)
786 update_dirstate = (self.dirstate.parents()[0] == p1)
787
787
788 c1 = self.changelog.read(p1)
788 c1 = self.changelog.read(p1)
789 c2 = self.changelog.read(p2)
789 c2 = self.changelog.read(p2)
790 m1 = self.manifest.read(c1[0]).copy()
790 m1 = self.manifest.read(c1[0]).copy()
791 m2 = self.manifest.read(c2[0])
791 m2 = self.manifest.read(c2[0])
792
792
793 if use_dirstate:
793 if use_dirstate:
794 branchname = self.workingctx().branch()
794 branchname = self.workingctx().branch()
795 try:
795 try:
796 branchname = branchname.decode('UTF-8').encode('UTF-8')
796 branchname = branchname.decode('UTF-8').encode('UTF-8')
797 except UnicodeDecodeError:
797 except UnicodeDecodeError:
798 raise util.Abort(_('branch name not in UTF-8!'))
798 raise util.Abort(_('branch name not in UTF-8!'))
799 else:
799 else:
800 branchname = ""
800 branchname = ""
801
801
802 if use_dirstate:
802 if use_dirstate:
803 oldname = c1[5].get("branch") # stored in UTF-8
803 oldname = c1[5].get("branch") # stored in UTF-8
804 if (not commit and not remove and not force and p2 == nullid
804 if (not commit and not remove and not force and p2 == nullid
805 and branchname == oldname):
805 and branchname == oldname):
806 self.ui.status(_("nothing changed\n"))
806 self.ui.status(_("nothing changed\n"))
807 return None
807 return None
808
808
809 xp1 = hex(p1)
809 xp1 = hex(p1)
810 if p2 == nullid: xp2 = ''
810 if p2 == nullid: xp2 = ''
811 else: xp2 = hex(p2)
811 else: xp2 = hex(p2)
812
812
813 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
813 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
814
814
815 tr = self.transaction()
815 tr = self.transaction()
816 trp = weakref.proxy(tr)
816 trp = weakref.proxy(tr)
817
817
818 # check in files
818 # check in files
819 new = {}
819 new = {}
820 linkrev = self.changelog.count()
820 linkrev = self.changelog.count()
821 commit.sort()
821 commit.sort()
822 is_exec = util.execfunc(self.root, m1.execf)
822 is_exec = util.execfunc(self.root, m1.execf)
823 is_link = util.linkfunc(self.root, m1.linkf)
823 is_link = util.linkfunc(self.root, m1.linkf)
824 for f in commit:
824 for f in commit:
825 self.ui.note(f + "\n")
825 self.ui.note(f + "\n")
826 try:
826 try:
827 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
827 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
828 new_exec = is_exec(f)
828 new_exec = is_exec(f)
829 new_link = is_link(f)
829 new_link = is_link(f)
830 if ((not changed or changed[-1] != f) and
830 if ((not changed or changed[-1] != f) and
831 m2.get(f) != new[f]):
831 m2.get(f) != new[f]):
832 # mention the file in the changelog if some
832 # mention the file in the changelog if some
833 # flag changed, even if there was no content
833 # flag changed, even if there was no content
834 # change.
834 # change.
835 old_exec = m1.execf(f)
835 old_exec = m1.execf(f)
836 old_link = m1.linkf(f)
836 old_link = m1.linkf(f)
837 if old_exec != new_exec or old_link != new_link:
837 if old_exec != new_exec or old_link != new_link:
838 changed.append(f)
838 changed.append(f)
839 m1.set(f, new_exec, new_link)
839 m1.set(f, new_exec, new_link)
840 if use_dirstate:
840 if use_dirstate:
841 self.dirstate.normal(f)
841 self.dirstate.normal(f)
842
842
843 except (OSError, IOError):
843 except (OSError, IOError):
844 if use_dirstate:
844 if use_dirstate:
845 self.ui.warn(_("trouble committing %s!\n") % f)
845 self.ui.warn(_("trouble committing %s!\n") % f)
846 raise
846 raise
847 else:
847 else:
848 remove.append(f)
848 remove.append(f)
849
849
850 # update manifest
850 # update manifest
851 m1.update(new)
851 m1.update(new)
852 remove.sort()
852 remove.sort()
853 removed = []
853 removed = []
854
854
855 for f in remove:
855 for f in remove:
856 if f in m1:
856 if f in m1:
857 del m1[f]
857 del m1[f]
858 removed.append(f)
858 removed.append(f)
859 elif f in m2:
859 elif f in m2:
860 removed.append(f)
860 removed.append(f)
861 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
861 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
862 (new, removed))
862 (new, removed))
863
863
864 # add changeset
864 # add changeset
865 new = new.keys()
865 new = new.keys()
866 new.sort()
866 new.sort()
867
867
868 user = user or self.ui.username()
868 user = user or self.ui.username()
869 if (not empty_ok and not text) or force_editor:
869 if (not empty_ok and not text) or force_editor:
870 edittext = []
870 edittext = []
871 if text:
871 if text:
872 edittext.append(text)
872 edittext.append(text)
873 edittext.append("")
873 edittext.append("")
874 edittext.append(_("HG: Enter commit message."
874 edittext.append(_("HG: Enter commit message."
875 " Lines beginning with 'HG:' are removed."))
875 " Lines beginning with 'HG:' are removed."))
876 edittext.append("HG: --")
876 edittext.append("HG: --")
877 edittext.append("HG: user: %s" % user)
877 edittext.append("HG: user: %s" % user)
878 if p2 != nullid:
878 if p2 != nullid:
879 edittext.append("HG: branch merge")
879 edittext.append("HG: branch merge")
880 if branchname:
880 if branchname:
881 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
881 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
882 edittext.extend(["HG: changed %s" % f for f in changed])
882 edittext.extend(["HG: changed %s" % f for f in changed])
883 edittext.extend(["HG: removed %s" % f for f in removed])
883 edittext.extend(["HG: removed %s" % f for f in removed])
884 if not changed and not remove:
884 if not changed and not remove:
885 edittext.append("HG: no files changed")
885 edittext.append("HG: no files changed")
886 edittext.append("")
886 edittext.append("")
887 # run editor in the repository root
887 # run editor in the repository root
888 olddir = os.getcwd()
888 olddir = os.getcwd()
889 os.chdir(self.root)
889 os.chdir(self.root)
890 text = self.ui.edit("\n".join(edittext), user)
890 text = self.ui.edit("\n".join(edittext), user)
891 os.chdir(olddir)
891 os.chdir(olddir)
892
892
893 if branchname:
893 if branchname:
894 extra["branch"] = branchname
894 extra["branch"] = branchname
895
895
896 lines = [line.rstrip() for line in text.rstrip().splitlines()]
896 lines = [line.rstrip() for line in text.rstrip().splitlines()]
897 while lines and not lines[0]:
897 while lines and not lines[0]:
898 del lines[0]
898 del lines[0]
899 if not lines and use_dirstate:
899 if not lines and use_dirstate:
900 raise util.Abort(_("empty commit message"))
900 raise util.Abort(_("empty commit message"))
901 text = '\n'.join(lines)
901 text = '\n'.join(lines)
902
902
903 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
903 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
904 user, date, extra)
904 user, date, extra)
905 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
905 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
906 parent2=xp2)
906 parent2=xp2)
907 tr.close()
907 tr.close()
908
908
909 if self.branchcache:
909 if self.branchcache:
910 self.branchtags()
910 self.branchtags()
911
911
912 if use_dirstate or update_dirstate:
912 if use_dirstate or update_dirstate:
913 self.dirstate.setparents(n)
913 self.dirstate.setparents(n)
914 if use_dirstate:
914 if use_dirstate:
915 for f in removed:
915 for f in removed:
916 self.dirstate.forget(f)
916 self.dirstate.forget(f)
917 valid = 1 # our dirstate updates are complete
917 valid = 1 # our dirstate updates are complete
918
918
919 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
919 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
920 return n
920 return n
921 finally:
921 finally:
922 if not valid: # don't save our updated dirstate
922 if not valid: # don't save our updated dirstate
923 self.dirstate.invalidate()
923 self.dirstate.invalidate()
924 del tr, lock, wlock
924 del tr, lock, wlock
925
925
926 def walk(self, node=None, files=[], match=util.always, badmatch=None):
926 def walk(self, node=None, files=[], match=util.always, badmatch=None):
927 '''
927 '''
928 walk recursively through the directory tree or a given
928 walk recursively through the directory tree or a given
929 changeset, finding all files matched by the match
929 changeset, finding all files matched by the match
930 function
930 function
931
931
932 results are yielded in a tuple (src, filename), where src
932 results are yielded in a tuple (src, filename), where src
933 is one of:
933 is one of:
934 'f' the file was found in the directory tree
934 'f' the file was found in the directory tree
935 'm' the file was only in the dirstate and not in the tree
935 'm' the file was only in the dirstate and not in the tree
936 'b' file was not found and matched badmatch
936 'b' file was not found and matched badmatch
937 '''
937 '''
938
938
939 if node:
939 if node:
940 fdict = dict.fromkeys(files)
940 fdict = dict.fromkeys(files)
941 # for dirstate.walk, files=['.'] means "walk the whole tree".
941 # for dirstate.walk, files=['.'] means "walk the whole tree".
942 # follow that here, too
942 # follow that here, too
943 fdict.pop('.', None)
943 fdict.pop('.', None)
944 mdict = self.manifest.read(self.changelog.read(node)[0])
944 mdict = self.manifest.read(self.changelog.read(node)[0])
945 mfiles = mdict.keys()
945 mfiles = mdict.keys()
946 mfiles.sort()
946 mfiles.sort()
947 for fn in mfiles:
947 for fn in mfiles:
948 for ffn in fdict:
948 for ffn in fdict:
949 # match if the file is the exact name or a directory
949 # match if the file is the exact name or a directory
950 if ffn == fn or fn.startswith("%s/" % ffn):
950 if ffn == fn or fn.startswith("%s/" % ffn):
951 del fdict[ffn]
951 del fdict[ffn]
952 break
952 break
953 if match(fn):
953 if match(fn):
954 yield 'm', fn
954 yield 'm', fn
955 ffiles = fdict.keys()
955 ffiles = fdict.keys()
956 ffiles.sort()
956 ffiles.sort()
957 for fn in ffiles:
957 for fn in ffiles:
958 if badmatch and badmatch(fn):
958 if badmatch and badmatch(fn):
959 if match(fn):
959 if match(fn):
960 yield 'b', fn
960 yield 'b', fn
961 else:
961 else:
962 self.ui.warn(_('%s: No such file in rev %s\n')
962 self.ui.warn(_('%s: No such file in rev %s\n')
963 % (self.pathto(fn), short(node)))
963 % (self.pathto(fn), short(node)))
964 else:
964 else:
965 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
965 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
966 yield src, fn
966 yield src, fn
967
967
968 def status(self, node1=None, node2=None, files=[], match=util.always,
968 def status(self, node1=None, node2=None, files=[], match=util.always,
969 list_ignored=False, list_clean=False, list_unknown=True):
969 list_ignored=False, list_clean=False, list_unknown=True):
970 """return status of files between two nodes or node and working directory
970 """return status of files between two nodes or node and working directory
971
971
972 If node1 is None, use the first dirstate parent instead.
972 If node1 is None, use the first dirstate parent instead.
973 If node2 is None, compare node1 with working directory.
973 If node2 is None, compare node1 with working directory.
974 """
974 """
975
975
976 def fcmp(fn, getnode):
976 def fcmp(fn, getnode):
977 t1 = self.wread(fn)
977 t1 = self.wread(fn)
978 return self.file(fn).cmp(getnode(fn), t1)
978 return self.file(fn).cmp(getnode(fn), t1)
979
979
980 def mfmatches(node):
980 def mfmatches(node):
981 change = self.changelog.read(node)
981 change = self.changelog.read(node)
982 mf = self.manifest.read(change[0]).copy()
982 mf = self.manifest.read(change[0]).copy()
983 for fn in mf.keys():
983 for fn in mf.keys():
984 if not match(fn):
984 if not match(fn):
985 del mf[fn]
985 del mf[fn]
986 return mf
986 return mf
987
987
988 modified, added, removed, deleted, unknown = [], [], [], [], []
988 modified, added, removed, deleted, unknown = [], [], [], [], []
989 ignored, clean = [], []
989 ignored, clean = [], []
990
990
991 compareworking = False
991 compareworking = False
992 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
992 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
993 compareworking = True
993 compareworking = True
994
994
995 if not compareworking:
995 if not compareworking:
996 # read the manifest from node1 before the manifest from node2,
996 # read the manifest from node1 before the manifest from node2,
997 # so that we'll hit the manifest cache if we're going through
997 # so that we'll hit the manifest cache if we're going through
998 # all the revisions in parent->child order.
998 # all the revisions in parent->child order.
999 mf1 = mfmatches(node1)
999 mf1 = mfmatches(node1)
1000
1000
1001 # are we comparing the working directory?
1001 # are we comparing the working directory?
1002 if not node2:
1002 if not node2:
1003 (lookup, modified, added, removed, deleted, unknown,
1003 (lookup, modified, added, removed, deleted, unknown,
1004 ignored, clean) = self.dirstate.status(files, match,
1004 ignored, clean) = self.dirstate.status(files, match,
1005 list_ignored, list_clean,
1005 list_ignored, list_clean,
1006 list_unknown)
1006 list_unknown)
1007
1007
1008 # are we comparing working dir against its parent?
1008 # are we comparing working dir against its parent?
1009 if compareworking:
1009 if compareworking:
1010 if lookup:
1010 if lookup:
1011 fixup = []
1011 fixup = []
1012 # do a full compare of any files that might have changed
1012 # do a full compare of any files that might have changed
1013 ctx = self.changectx()
1013 ctx = self.changectx()
1014 mexec = lambda f: 'x' in ctx.fileflags(f)
1014 mexec = lambda f: 'x' in ctx.fileflags(f)
1015 mlink = lambda f: 'l' in ctx.fileflags(f)
1015 mlink = lambda f: 'l' in ctx.fileflags(f)
1016 is_exec = util.execfunc(self.root, mexec)
1016 is_exec = util.execfunc(self.root, mexec)
1017 is_link = util.linkfunc(self.root, mlink)
1017 is_link = util.linkfunc(self.root, mlink)
1018 def flags(f):
1018 def flags(f):
1019 return is_link(f) and 'l' or is_exec(f) and 'x' or ''
1019 return is_link(f) and 'l' or is_exec(f) and 'x' or ''
1020 for f in lookup:
1020 for f in lookup:
1021 if (f not in ctx or flags(f) != ctx.fileflags(f)
1021 if (f not in ctx or flags(f) != ctx.fileflags(f)
1022 or ctx[f].cmp(self.wread(f))):
1022 or ctx[f].cmp(self.wread(f))):
1023 modified.append(f)
1023 modified.append(f)
1024 else:
1024 else:
1025 fixup.append(f)
1025 fixup.append(f)
1026 if list_clean:
1026 if list_clean:
1027 clean.append(f)
1027 clean.append(f)
1028
1028
1029 # update dirstate for files that are actually clean
1029 # update dirstate for files that are actually clean
1030 if fixup:
1030 if fixup:
1031 wlock = None
1031 wlock = None
1032 try:
1032 try:
1033 try:
1033 try:
1034 wlock = self.wlock(False)
1034 wlock = self.wlock(False)
1035 except lock.LockException:
1035 except lock.LockException:
1036 pass
1036 pass
1037 if wlock:
1037 if wlock:
1038 for f in fixup:
1038 for f in fixup:
1039 self.dirstate.normal(f)
1039 self.dirstate.normal(f)
1040 finally:
1040 finally:
1041 del wlock
1041 del wlock
1042 else:
1042 else:
1043 # we are comparing working dir against non-parent
1043 # we are comparing working dir against non-parent
1044 # generate a pseudo-manifest for the working dir
1044 # generate a pseudo-manifest for the working dir
1045 # XXX: create it in dirstate.py ?
1045 # XXX: create it in dirstate.py ?
1046 mf2 = mfmatches(self.dirstate.parents()[0])
1046 mf2 = mfmatches(self.dirstate.parents()[0])
1047 is_exec = util.execfunc(self.root, mf2.execf)
1047 is_exec = util.execfunc(self.root, mf2.execf)
1048 is_link = util.linkfunc(self.root, mf2.linkf)
1048 is_link = util.linkfunc(self.root, mf2.linkf)
1049 for f in lookup + modified + added:
1049 for f in lookup + modified + added:
1050 mf2[f] = ""
1050 mf2[f] = ""
1051 mf2.set(f, is_exec(f), is_link(f))
1051 mf2.set(f, is_exec(f), is_link(f))
1052 for f in removed:
1052 for f in removed:
1053 if f in mf2:
1053 if f in mf2:
1054 del mf2[f]
1054 del mf2[f]
1055
1055
1056 else:
1056 else:
1057 # we are comparing two revisions
1057 # we are comparing two revisions
1058 mf2 = mfmatches(node2)
1058 mf2 = mfmatches(node2)
1059
1059
1060 if not compareworking:
1060 if not compareworking:
1061 # flush lists from dirstate before comparing manifests
1061 # flush lists from dirstate before comparing manifests
1062 modified, added, clean = [], [], []
1062 modified, added, clean = [], [], []
1063
1063
1064 # make sure to sort the files so we talk to the disk in a
1064 # make sure to sort the files so we talk to the disk in a
1065 # reasonable order
1065 # reasonable order
1066 mf2keys = mf2.keys()
1066 mf2keys = mf2.keys()
1067 mf2keys.sort()
1067 mf2keys.sort()
1068 getnode = lambda fn: mf1.get(fn, nullid)
1068 getnode = lambda fn: mf1.get(fn, nullid)
1069 for fn in mf2keys:
1069 for fn in mf2keys:
1070 if fn in mf1:
1070 if fn in mf1:
1071 if (mf1.flags(fn) != mf2.flags(fn) or
1071 if (mf1.flags(fn) != mf2.flags(fn) or
1072 (mf1[fn] != mf2[fn] and
1072 (mf1[fn] != mf2[fn] and
1073 (mf2[fn] != "" or fcmp(fn, getnode)))):
1073 (mf2[fn] != "" or fcmp(fn, getnode)))):
1074 modified.append(fn)
1074 modified.append(fn)
1075 elif list_clean:
1075 elif list_clean:
1076 clean.append(fn)
1076 clean.append(fn)
1077 del mf1[fn]
1077 del mf1[fn]
1078 else:
1078 else:
1079 added.append(fn)
1079 added.append(fn)
1080
1080
1081 removed = mf1.keys()
1081 removed = mf1.keys()
1082
1082
1083 # sort and return results:
1083 # sort and return results:
1084 for l in modified, added, removed, deleted, unknown, ignored, clean:
1084 for l in modified, added, removed, deleted, unknown, ignored, clean:
1085 l.sort()
1085 l.sort()
1086 return (modified, added, removed, deleted, unknown, ignored, clean)
1086 return (modified, added, removed, deleted, unknown, ignored, clean)
1087
1087
1088 def add(self, list):
1088 def add(self, list):
1089 wlock = self.wlock()
1089 wlock = self.wlock()
1090 try:
1090 try:
1091 rejected = []
1091 rejected = []
1092 for f in list:
1092 for f in list:
1093 p = self.wjoin(f)
1093 p = self.wjoin(f)
1094 try:
1094 try:
1095 st = os.lstat(p)
1095 st = os.lstat(p)
1096 except:
1096 except:
1097 self.ui.warn(_("%s does not exist!\n") % f)
1097 self.ui.warn(_("%s does not exist!\n") % f)
1098 rejected.append(f)
1098 rejected.append(f)
1099 continue
1099 continue
1100 if st.st_size > 10000000:
1100 if st.st_size > 10000000:
1101 self.ui.warn(_("%s: files over 10MB may cause memory and"
1101 self.ui.warn(_("%s: files over 10MB may cause memory and"
1102 " performance problems\n"
1102 " performance problems\n"
1103 "(use 'hg revert %s' to unadd the file)\n")
1103 "(use 'hg revert %s' to unadd the file)\n")
1104 % (f, f))
1104 % (f, f))
1105 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1105 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1106 self.ui.warn(_("%s not added: only files and symlinks "
1106 self.ui.warn(_("%s not added: only files and symlinks "
1107 "supported currently\n") % f)
1107 "supported currently\n") % f)
1108 rejected.append(p)
1108 rejected.append(p)
1109 elif self.dirstate[f] in 'amn':
1109 elif self.dirstate[f] in 'amn':
1110 self.ui.warn(_("%s already tracked!\n") % f)
1110 self.ui.warn(_("%s already tracked!\n") % f)
1111 elif self.dirstate[f] == 'r':
1111 elif self.dirstate[f] == 'r':
1112 self.dirstate.normallookup(f)
1112 self.dirstate.normallookup(f)
1113 else:
1113 else:
1114 self.dirstate.add(f)
1114 self.dirstate.add(f)
1115 return rejected
1115 return rejected
1116 finally:
1116 finally:
1117 del wlock
1117 del wlock
1118
1118
1119 def forget(self, list):
1119 def forget(self, list):
1120 wlock = self.wlock()
1120 wlock = self.wlock()
1121 try:
1121 try:
1122 for f in list:
1122 for f in list:
1123 if self.dirstate[f] != 'a':
1123 if self.dirstate[f] != 'a':
1124 self.ui.warn(_("%s not added!\n") % f)
1124 self.ui.warn(_("%s not added!\n") % f)
1125 else:
1125 else:
1126 self.dirstate.forget(f)
1126 self.dirstate.forget(f)
1127 finally:
1127 finally:
1128 del wlock
1128 del wlock
1129
1129
1130 def remove(self, list, unlink=False):
1130 def remove(self, list, unlink=False):
1131 wlock = None
1131 wlock = None
1132 try:
1132 try:
1133 if unlink:
1133 if unlink:
1134 for f in list:
1134 for f in list:
1135 try:
1135 try:
1136 util.unlink(self.wjoin(f))
1136 util.unlink(self.wjoin(f))
1137 except OSError, inst:
1137 except OSError, inst:
1138 if inst.errno != errno.ENOENT:
1138 if inst.errno != errno.ENOENT:
1139 raise
1139 raise
1140 wlock = self.wlock()
1140 wlock = self.wlock()
1141 for f in list:
1141 for f in list:
1142 if unlink and os.path.exists(self.wjoin(f)):
1142 if unlink and os.path.exists(self.wjoin(f)):
1143 self.ui.warn(_("%s still exists!\n") % f)
1143 self.ui.warn(_("%s still exists!\n") % f)
1144 elif self.dirstate[f] == 'a':
1144 elif self.dirstate[f] == 'a':
1145 self.dirstate.forget(f)
1145 self.dirstate.forget(f)
1146 elif f not in self.dirstate:
1146 elif f not in self.dirstate:
1147 self.ui.warn(_("%s not tracked!\n") % f)
1147 self.ui.warn(_("%s not tracked!\n") % f)
1148 else:
1148 else:
1149 self.dirstate.remove(f)
1149 self.dirstate.remove(f)
1150 finally:
1150 finally:
1151 del wlock
1151 del wlock
1152
1152
1153 def undelete(self, list):
1153 def undelete(self, list):
1154 wlock = None
1154 wlock = None
1155 try:
1155 try:
1156 manifests = [self.manifest.read(self.changelog.read(p)[0])
1156 manifests = [self.manifest.read(self.changelog.read(p)[0])
1157 for p in self.dirstate.parents() if p != nullid]
1157 for p in self.dirstate.parents() if p != nullid]
1158 wlock = self.wlock()
1158 wlock = self.wlock()
1159 for f in list:
1159 for f in list:
1160 if self.dirstate[f] != 'r':
1160 if self.dirstate[f] != 'r':
1161 self.ui.warn("%s not removed!\n" % f)
1161 self.ui.warn("%s not removed!\n" % f)
1162 else:
1162 else:
1163 m = f in manifests[0] and manifests[0] or manifests[1]
1163 m = f in manifests[0] and manifests[0] or manifests[1]
1164 t = self.file(f).read(m[f])
1164 t = self.file(f).read(m[f])
1165 self.wwrite(f, t, m.flags(f))
1165 self.wwrite(f, t, m.flags(f))
1166 self.dirstate.normal(f)
1166 self.dirstate.normal(f)
1167 finally:
1167 finally:
1168 del wlock
1168 del wlock
1169
1169
1170 def copy(self, source, dest):
1170 def copy(self, source, dest):
1171 wlock = None
1171 wlock = None
1172 try:
1172 try:
1173 p = self.wjoin(dest)
1173 p = self.wjoin(dest)
1174 if not (os.path.exists(p) or os.path.islink(p)):
1174 if not (os.path.exists(p) or os.path.islink(p)):
1175 self.ui.warn(_("%s does not exist!\n") % dest)
1175 self.ui.warn(_("%s does not exist!\n") % dest)
1176 elif not (os.path.isfile(p) or os.path.islink(p)):
1176 elif not (os.path.isfile(p) or os.path.islink(p)):
1177 self.ui.warn(_("copy failed: %s is not a file or a "
1177 self.ui.warn(_("copy failed: %s is not a file or a "
1178 "symbolic link\n") % dest)
1178 "symbolic link\n") % dest)
1179 else:
1179 else:
1180 wlock = self.wlock()
1180 wlock = self.wlock()
1181 if dest not in self.dirstate:
1181 if dest not in self.dirstate:
1182 self.dirstate.add(dest)
1182 self.dirstate.add(dest)
1183 self.dirstate.copy(source, dest)
1183 self.dirstate.copy(source, dest)
1184 finally:
1184 finally:
1185 del wlock
1185 del wlock
1186
1186
1187 def heads(self, start=None):
1187 def heads(self, start=None):
1188 heads = self.changelog.heads(start)
1188 heads = self.changelog.heads(start)
1189 # sort the output in rev descending order
1189 # sort the output in rev descending order
1190 heads = [(-self.changelog.rev(h), h) for h in heads]
1190 heads = [(-self.changelog.rev(h), h) for h in heads]
1191 heads.sort()
1191 heads.sort()
1192 return [n for (r, n) in heads]
1192 return [n for (r, n) in heads]
1193
1193
1194 def branchheads(self, branch, start=None):
1194 def branchheads(self, branch, start=None):
1195 branches = self.branchtags()
1195 branches = self.branchtags()
1196 if branch not in branches:
1196 if branch not in branches:
1197 return []
1197 return []
1198 # The basic algorithm is this:
1198 # The basic algorithm is this:
1199 #
1199 #
1200 # Start from the branch tip since there are no later revisions that can
1200 # Start from the branch tip since there are no later revisions that can
1201 # possibly be in this branch, and the tip is a guaranteed head.
1201 # possibly be in this branch, and the tip is a guaranteed head.
1202 #
1202 #
1203 # Remember the tip's parents as the first ancestors, since these by
1203 # Remember the tip's parents as the first ancestors, since these by
1204 # definition are not heads.
1204 # definition are not heads.
1205 #
1205 #
1206 # Step backwards from the brach tip through all the revisions. We are
1206 # Step backwards from the brach tip through all the revisions. We are
1207 # guaranteed by the rules of Mercurial that we will now be visiting the
1207 # guaranteed by the rules of Mercurial that we will now be visiting the
1208 # nodes in reverse topological order (children before parents).
1208 # nodes in reverse topological order (children before parents).
1209 #
1209 #
1210 # If a revision is one of the ancestors of a head then we can toss it
1210 # If a revision is one of the ancestors of a head then we can toss it
1211 # out of the ancestors set (we've already found it and won't be
1211 # out of the ancestors set (we've already found it and won't be
1212 # visiting it again) and put its parents in the ancestors set.
1212 # visiting it again) and put its parents in the ancestors set.
1213 #
1213 #
1214 # Otherwise, if a revision is in the branch it's another head, since it
1214 # Otherwise, if a revision is in the branch it's another head, since it
1215 # wasn't in the ancestor list of an existing head. So add it to the
1215 # wasn't in the ancestor list of an existing head. So add it to the
1216 # head list, and add its parents to the ancestor list.
1216 # head list, and add its parents to the ancestor list.
1217 #
1217 #
1218 # If it is not in the branch ignore it.
1218 # If it is not in the branch ignore it.
1219 #
1219 #
1220 # Once we have a list of heads, use nodesbetween to filter out all the
1220 # Once we have a list of heads, use nodesbetween to filter out all the
1221 # heads that cannot be reached from startrev. There may be a more
1221 # heads that cannot be reached from startrev. There may be a more
1222 # efficient way to do this as part of the previous algorithm.
1222 # efficient way to do this as part of the previous algorithm.
1223
1223
1224 set = util.set
1224 set = util.set
1225 heads = [self.changelog.rev(branches[branch])]
1225 heads = [self.changelog.rev(branches[branch])]
1226 # Don't care if ancestors contains nullrev or not.
1226 # Don't care if ancestors contains nullrev or not.
1227 ancestors = set(self.changelog.parentrevs(heads[0]))
1227 ancestors = set(self.changelog.parentrevs(heads[0]))
1228 for rev in xrange(heads[0] - 1, nullrev, -1):
1228 for rev in xrange(heads[0] - 1, nullrev, -1):
1229 if rev in ancestors:
1229 if rev in ancestors:
1230 ancestors.update(self.changelog.parentrevs(rev))
1230 ancestors.update(self.changelog.parentrevs(rev))
1231 ancestors.remove(rev)
1231 ancestors.remove(rev)
1232 elif self.changectx(rev).branch() == branch:
1232 elif self.changectx(rev).branch() == branch:
1233 heads.append(rev)
1233 heads.append(rev)
1234 ancestors.update(self.changelog.parentrevs(rev))
1234 ancestors.update(self.changelog.parentrevs(rev))
1235 heads = [self.changelog.node(rev) for rev in heads]
1235 heads = [self.changelog.node(rev) for rev in heads]
1236 if start is not None:
1236 if start is not None:
1237 heads = self.changelog.nodesbetween([start], heads)[2]
1237 heads = self.changelog.nodesbetween([start], heads)[2]
1238 return heads
1238 return heads
1239
1239
1240 def branches(self, nodes):
1240 def branches(self, nodes):
1241 if not nodes:
1241 if not nodes:
1242 nodes = [self.changelog.tip()]
1242 nodes = [self.changelog.tip()]
1243 b = []
1243 b = []
1244 for n in nodes:
1244 for n in nodes:
1245 t = n
1245 t = n
1246 while 1:
1246 while 1:
1247 p = self.changelog.parents(n)
1247 p = self.changelog.parents(n)
1248 if p[1] != nullid or p[0] == nullid:
1248 if p[1] != nullid or p[0] == nullid:
1249 b.append((t, n, p[0], p[1]))
1249 b.append((t, n, p[0], p[1]))
1250 break
1250 break
1251 n = p[0]
1251 n = p[0]
1252 return b
1252 return b
1253
1253
1254 def between(self, pairs):
1254 def between(self, pairs):
1255 r = []
1255 r = []
1256
1256
1257 for top, bottom in pairs:
1257 for top, bottom in pairs:
1258 n, l, i = top, [], 0
1258 n, l, i = top, [], 0
1259 f = 1
1259 f = 1
1260
1260
1261 while n != bottom:
1261 while n != bottom:
1262 p = self.changelog.parents(n)[0]
1262 p = self.changelog.parents(n)[0]
1263 if i == f:
1263 if i == f:
1264 l.append(n)
1264 l.append(n)
1265 f = f * 2
1265 f = f * 2
1266 n = p
1266 n = p
1267 i += 1
1267 i += 1
1268
1268
1269 r.append(l)
1269 r.append(l)
1270
1270
1271 return r
1271 return r
1272
1272
1273 def findincoming(self, remote, base=None, heads=None, force=False):
1273 def findincoming(self, remote, base=None, heads=None, force=False):
1274 """Return list of roots of the subsets of missing nodes from remote
1274 """Return list of roots of the subsets of missing nodes from remote
1275
1275
1276 If base dict is specified, assume that these nodes and their parents
1276 If base dict is specified, assume that these nodes and their parents
1277 exist on the remote side and that no child of a node of base exists
1277 exist on the remote side and that no child of a node of base exists
1278 in both remote and self.
1278 in both remote and self.
1279 Furthermore base will be updated to include the nodes that exists
1279 Furthermore base will be updated to include the nodes that exists
1280 in self and remote but no children exists in self and remote.
1280 in self and remote but no children exists in self and remote.
1281 If a list of heads is specified, return only nodes which are heads
1281 If a list of heads is specified, return only nodes which are heads
1282 or ancestors of these heads.
1282 or ancestors of these heads.
1283
1283
1284 All the ancestors of base are in self and in remote.
1284 All the ancestors of base are in self and in remote.
1285 All the descendants of the list returned are missing in self.
1285 All the descendants of the list returned are missing in self.
1286 (and so we know that the rest of the nodes are missing in remote, see
1286 (and so we know that the rest of the nodes are missing in remote, see
1287 outgoing)
1287 outgoing)
1288 """
1288 """
1289 m = self.changelog.nodemap
1289 m = self.changelog.nodemap
1290 search = []
1290 search = []
1291 fetch = {}
1291 fetch = {}
1292 seen = {}
1292 seen = {}
1293 seenbranch = {}
1293 seenbranch = {}
1294 if base == None:
1294 if base == None:
1295 base = {}
1295 base = {}
1296
1296
1297 if not heads:
1297 if not heads:
1298 heads = remote.heads()
1298 heads = remote.heads()
1299
1299
1300 if self.changelog.tip() == nullid:
1300 if self.changelog.tip() == nullid:
1301 base[nullid] = 1
1301 base[nullid] = 1
1302 if heads != [nullid]:
1302 if heads != [nullid]:
1303 return [nullid]
1303 return [nullid]
1304 return []
1304 return []
1305
1305
1306 # assume we're closer to the tip than the root
1306 # assume we're closer to the tip than the root
1307 # and start by examining the heads
1307 # and start by examining the heads
1308 self.ui.status(_("searching for changes\n"))
1308 self.ui.status(_("searching for changes\n"))
1309
1309
1310 unknown = []
1310 unknown = []
1311 for h in heads:
1311 for h in heads:
1312 if h not in m:
1312 if h not in m:
1313 unknown.append(h)
1313 unknown.append(h)
1314 else:
1314 else:
1315 base[h] = 1
1315 base[h] = 1
1316
1316
1317 if not unknown:
1317 if not unknown:
1318 return []
1318 return []
1319
1319
1320 req = dict.fromkeys(unknown)
1320 req = dict.fromkeys(unknown)
1321 reqcnt = 0
1321 reqcnt = 0
1322
1322
1323 # search through remote branches
1323 # search through remote branches
1324 # a 'branch' here is a linear segment of history, with four parts:
1324 # a 'branch' here is a linear segment of history, with four parts:
1325 # head, root, first parent, second parent
1325 # head, root, first parent, second parent
1326 # (a branch always has two parents (or none) by definition)
1326 # (a branch always has two parents (or none) by definition)
1327 unknown = remote.branches(unknown)
1327 unknown = remote.branches(unknown)
1328 while unknown:
1328 while unknown:
1329 r = []
1329 r = []
1330 while unknown:
1330 while unknown:
1331 n = unknown.pop(0)
1331 n = unknown.pop(0)
1332 if n[0] in seen:
1332 if n[0] in seen:
1333 continue
1333 continue
1334
1334
1335 self.ui.debug(_("examining %s:%s\n")
1335 self.ui.debug(_("examining %s:%s\n")
1336 % (short(n[0]), short(n[1])))
1336 % (short(n[0]), short(n[1])))
1337 if n[0] == nullid: # found the end of the branch
1337 if n[0] == nullid: # found the end of the branch
1338 pass
1338 pass
1339 elif n in seenbranch:
1339 elif n in seenbranch:
1340 self.ui.debug(_("branch already found\n"))
1340 self.ui.debug(_("branch already found\n"))
1341 continue
1341 continue
1342 elif n[1] and n[1] in m: # do we know the base?
1342 elif n[1] and n[1] in m: # do we know the base?
1343 self.ui.debug(_("found incomplete branch %s:%s\n")
1343 self.ui.debug(_("found incomplete branch %s:%s\n")
1344 % (short(n[0]), short(n[1])))
1344 % (short(n[0]), short(n[1])))
1345 search.append(n) # schedule branch range for scanning
1345 search.append(n) # schedule branch range for scanning
1346 seenbranch[n] = 1
1346 seenbranch[n] = 1
1347 else:
1347 else:
1348 if n[1] not in seen and n[1] not in fetch:
1348 if n[1] not in seen and n[1] not in fetch:
1349 if n[2] in m and n[3] in m:
1349 if n[2] in m and n[3] in m:
1350 self.ui.debug(_("found new changeset %s\n") %
1350 self.ui.debug(_("found new changeset %s\n") %
1351 short(n[1]))
1351 short(n[1]))
1352 fetch[n[1]] = 1 # earliest unknown
1352 fetch[n[1]] = 1 # earliest unknown
1353 for p in n[2:4]:
1353 for p in n[2:4]:
1354 if p in m:
1354 if p in m:
1355 base[p] = 1 # latest known
1355 base[p] = 1 # latest known
1356
1356
1357 for p in n[2:4]:
1357 for p in n[2:4]:
1358 if p not in req and p not in m:
1358 if p not in req and p not in m:
1359 r.append(p)
1359 r.append(p)
1360 req[p] = 1
1360 req[p] = 1
1361 seen[n[0]] = 1
1361 seen[n[0]] = 1
1362
1362
1363 if r:
1363 if r:
1364 reqcnt += 1
1364 reqcnt += 1
1365 self.ui.debug(_("request %d: %s\n") %
1365 self.ui.debug(_("request %d: %s\n") %
1366 (reqcnt, " ".join(map(short, r))))
1366 (reqcnt, " ".join(map(short, r))))
1367 for p in xrange(0, len(r), 10):
1367 for p in xrange(0, len(r), 10):
1368 for b in remote.branches(r[p:p+10]):
1368 for b in remote.branches(r[p:p+10]):
1369 self.ui.debug(_("received %s:%s\n") %
1369 self.ui.debug(_("received %s:%s\n") %
1370 (short(b[0]), short(b[1])))
1370 (short(b[0]), short(b[1])))
1371 unknown.append(b)
1371 unknown.append(b)
1372
1372
1373 # do binary search on the branches we found
1373 # do binary search on the branches we found
1374 while search:
1374 while search:
1375 n = search.pop(0)
1375 n = search.pop(0)
1376 reqcnt += 1
1376 reqcnt += 1
1377 l = remote.between([(n[0], n[1])])[0]
1377 l = remote.between([(n[0], n[1])])[0]
1378 l.append(n[1])
1378 l.append(n[1])
1379 p = n[0]
1379 p = n[0]
1380 f = 1
1380 f = 1
1381 for i in l:
1381 for i in l:
1382 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1382 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1383 if i in m:
1383 if i in m:
1384 if f <= 2:
1384 if f <= 2:
1385 self.ui.debug(_("found new branch changeset %s\n") %
1385 self.ui.debug(_("found new branch changeset %s\n") %
1386 short(p))
1386 short(p))
1387 fetch[p] = 1
1387 fetch[p] = 1
1388 base[i] = 1
1388 base[i] = 1
1389 else:
1389 else:
1390 self.ui.debug(_("narrowed branch search to %s:%s\n")
1390 self.ui.debug(_("narrowed branch search to %s:%s\n")
1391 % (short(p), short(i)))
1391 % (short(p), short(i)))
1392 search.append((p, i))
1392 search.append((p, i))
1393 break
1393 break
1394 p, f = i, f * 2
1394 p, f = i, f * 2
1395
1395
1396 # sanity check our fetch list
1396 # sanity check our fetch list
1397 for f in fetch.keys():
1397 for f in fetch.keys():
1398 if f in m:
1398 if f in m:
1399 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1399 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1400
1400
1401 if base.keys() == [nullid]:
1401 if base.keys() == [nullid]:
1402 if force:
1402 if force:
1403 self.ui.warn(_("warning: repository is unrelated\n"))
1403 self.ui.warn(_("warning: repository is unrelated\n"))
1404 else:
1404 else:
1405 raise util.Abort(_("repository is unrelated"))
1405 raise util.Abort(_("repository is unrelated"))
1406
1406
1407 self.ui.debug(_("found new changesets starting at ") +
1407 self.ui.debug(_("found new changesets starting at ") +
1408 " ".join([short(f) for f in fetch]) + "\n")
1408 " ".join([short(f) for f in fetch]) + "\n")
1409
1409
1410 self.ui.debug(_("%d total queries\n") % reqcnt)
1410 self.ui.debug(_("%d total queries\n") % reqcnt)
1411
1411
1412 return fetch.keys()
1412 return fetch.keys()
1413
1413
1414 def findoutgoing(self, remote, base=None, heads=None, force=False):
1414 def findoutgoing(self, remote, base=None, heads=None, force=False):
1415 """Return list of nodes that are roots of subsets not in remote
1415 """Return list of nodes that are roots of subsets not in remote
1416
1416
1417 If base dict is specified, assume that these nodes and their parents
1417 If base dict is specified, assume that these nodes and their parents
1418 exist on the remote side.
1418 exist on the remote side.
1419 If a list of heads is specified, return only nodes which are heads
1419 If a list of heads is specified, return only nodes which are heads
1420 or ancestors of these heads, and return a second element which
1420 or ancestors of these heads, and return a second element which
1421 contains all remote heads which get new children.
1421 contains all remote heads which get new children.
1422 """
1422 """
1423 if base == None:
1423 if base == None:
1424 base = {}
1424 base = {}
1425 self.findincoming(remote, base, heads, force=force)
1425 self.findincoming(remote, base, heads, force=force)
1426
1426
1427 self.ui.debug(_("common changesets up to ")
1427 self.ui.debug(_("common changesets up to ")
1428 + " ".join(map(short, base.keys())) + "\n")
1428 + " ".join(map(short, base.keys())) + "\n")
1429
1429
1430 remain = dict.fromkeys(self.changelog.nodemap)
1430 remain = dict.fromkeys(self.changelog.nodemap)
1431
1431
1432 # prune everything remote has from the tree
1432 # prune everything remote has from the tree
1433 del remain[nullid]
1433 del remain[nullid]
1434 remove = base.keys()
1434 remove = base.keys()
1435 while remove:
1435 while remove:
1436 n = remove.pop(0)
1436 n = remove.pop(0)
1437 if n in remain:
1437 if n in remain:
1438 del remain[n]
1438 del remain[n]
1439 for p in self.changelog.parents(n):
1439 for p in self.changelog.parents(n):
1440 remove.append(p)
1440 remove.append(p)
1441
1441
1442 # find every node whose parents have been pruned
1442 # find every node whose parents have been pruned
1443 subset = []
1443 subset = []
1444 # find every remote head that will get new children
1444 # find every remote head that will get new children
1445 updated_heads = {}
1445 updated_heads = {}
1446 for n in remain:
1446 for n in remain:
1447 p1, p2 = self.changelog.parents(n)
1447 p1, p2 = self.changelog.parents(n)
1448 if p1 not in remain and p2 not in remain:
1448 if p1 not in remain and p2 not in remain:
1449 subset.append(n)
1449 subset.append(n)
1450 if heads:
1450 if heads:
1451 if p1 in heads:
1451 if p1 in heads:
1452 updated_heads[p1] = True
1452 updated_heads[p1] = True
1453 if p2 in heads:
1453 if p2 in heads:
1454 updated_heads[p2] = True
1454 updated_heads[p2] = True
1455
1455
1456 # this is the set of all roots we have to push
1456 # this is the set of all roots we have to push
1457 if heads:
1457 if heads:
1458 return subset, updated_heads.keys()
1458 return subset, updated_heads.keys()
1459 else:
1459 else:
1460 return subset
1460 return subset
1461
1461
1462 def pull(self, remote, heads=None, force=False):
1462 def pull(self, remote, heads=None, force=False):
1463 lock = self.lock()
1463 lock = self.lock()
1464 try:
1464 try:
1465 fetch = self.findincoming(remote, heads=heads, force=force)
1465 fetch = self.findincoming(remote, heads=heads, force=force)
1466 if fetch == [nullid]:
1466 if fetch == [nullid]:
1467 self.ui.status(_("requesting all changes\n"))
1467 self.ui.status(_("requesting all changes\n"))
1468
1468
1469 if not fetch:
1469 if not fetch:
1470 self.ui.status(_("no changes found\n"))
1470 self.ui.status(_("no changes found\n"))
1471 return 0
1471 return 0
1472
1472
1473 if heads is None:
1473 if heads is None:
1474 cg = remote.changegroup(fetch, 'pull')
1474 cg = remote.changegroup(fetch, 'pull')
1475 else:
1475 else:
1476 if 'changegroupsubset' not in remote.capabilities:
1476 if 'changegroupsubset' not in remote.capabilities:
1477 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1477 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1478 cg = remote.changegroupsubset(fetch, heads, 'pull')
1478 cg = remote.changegroupsubset(fetch, heads, 'pull')
1479 return self.addchangegroup(cg, 'pull', remote.url())
1479 return self.addchangegroup(cg, 'pull', remote.url())
1480 finally:
1480 finally:
1481 del lock
1481 del lock
1482
1482
1483 def push(self, remote, force=False, revs=None):
1483 def push(self, remote, force=False, revs=None):
1484 # there are two ways to push to remote repo:
1484 # there are two ways to push to remote repo:
1485 #
1485 #
1486 # addchangegroup assumes local user can lock remote
1486 # addchangegroup assumes local user can lock remote
1487 # repo (local filesystem, old ssh servers).
1487 # repo (local filesystem, old ssh servers).
1488 #
1488 #
1489 # unbundle assumes local user cannot lock remote repo (new ssh
1489 # unbundle assumes local user cannot lock remote repo (new ssh
1490 # servers, http servers).
1490 # servers, http servers).
1491
1491
1492 if remote.capable('unbundle'):
1492 if remote.capable('unbundle'):
1493 return self.push_unbundle(remote, force, revs)
1493 return self.push_unbundle(remote, force, revs)
1494 return self.push_addchangegroup(remote, force, revs)
1494 return self.push_addchangegroup(remote, force, revs)
1495
1495
1496 def prepush(self, remote, force, revs):
1496 def prepush(self, remote, force, revs):
1497 base = {}
1497 base = {}
1498 remote_heads = remote.heads()
1498 remote_heads = remote.heads()
1499 inc = self.findincoming(remote, base, remote_heads, force=force)
1499 inc = self.findincoming(remote, base, remote_heads, force=force)
1500
1500
1501 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1501 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1502 if revs is not None:
1502 if revs is not None:
1503 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1503 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1504 else:
1504 else:
1505 bases, heads = update, self.changelog.heads()
1505 bases, heads = update, self.changelog.heads()
1506
1506
1507 if not bases:
1507 if not bases:
1508 self.ui.status(_("no changes found\n"))
1508 self.ui.status(_("no changes found\n"))
1509 return None, 1
1509 return None, 1
1510 elif not force:
1510 elif not force:
1511 # check if we're creating new remote heads
1511 # check if we're creating new remote heads
1512 # to be a remote head after push, node must be either
1512 # to be a remote head after push, node must be either
1513 # - unknown locally
1513 # - unknown locally
1514 # - a local outgoing head descended from update
1514 # - a local outgoing head descended from update
1515 # - a remote head that's known locally and not
1515 # - a remote head that's known locally and not
1516 # ancestral to an outgoing head
1516 # ancestral to an outgoing head
1517
1517
1518 warn = 0
1518 warn = 0
1519
1519
1520 if remote_heads == [nullid]:
1520 if remote_heads == [nullid]:
1521 warn = 0
1521 warn = 0
1522 elif not revs and len(heads) > len(remote_heads):
1522 elif not revs and len(heads) > len(remote_heads):
1523 warn = 1
1523 warn = 1
1524 else:
1524 else:
1525 newheads = list(heads)
1525 newheads = list(heads)
1526 for r in remote_heads:
1526 for r in remote_heads:
1527 if r in self.changelog.nodemap:
1527 if r in self.changelog.nodemap:
1528 desc = self.changelog.heads(r, heads)
1528 desc = self.changelog.heads(r, heads)
1529 l = [h for h in heads if h in desc]
1529 l = [h for h in heads if h in desc]
1530 if not l:
1530 if not l:
1531 newheads.append(r)
1531 newheads.append(r)
1532 else:
1532 else:
1533 newheads.append(r)
1533 newheads.append(r)
1534 if len(newheads) > len(remote_heads):
1534 if len(newheads) > len(remote_heads):
1535 warn = 1
1535 warn = 1
1536
1536
1537 if warn:
1537 if warn:
1538 self.ui.warn(_("abort: push creates new remote heads!\n"))
1538 self.ui.warn(_("abort: push creates new remote heads!\n"))
1539 self.ui.status(_("(did you forget to merge?"
1539 self.ui.status(_("(did you forget to merge?"
1540 " use push -f to force)\n"))
1540 " use push -f to force)\n"))
1541 return None, 0
1541 return None, 0
1542 elif inc:
1542 elif inc:
1543 self.ui.warn(_("note: unsynced remote changes!\n"))
1543 self.ui.warn(_("note: unsynced remote changes!\n"))
1544
1544
1545
1545
1546 if revs is None:
1546 if revs is None:
1547 cg = self.changegroup(update, 'push')
1547 cg = self.changegroup(update, 'push')
1548 else:
1548 else:
1549 cg = self.changegroupsubset(update, revs, 'push')
1549 cg = self.changegroupsubset(update, revs, 'push')
1550 return cg, remote_heads
1550 return cg, remote_heads
1551
1551
1552 def push_addchangegroup(self, remote, force, revs):
1552 def push_addchangegroup(self, remote, force, revs):
1553 lock = remote.lock()
1553 lock = remote.lock()
1554 try:
1554 try:
1555 ret = self.prepush(remote, force, revs)
1555 ret = self.prepush(remote, force, revs)
1556 if ret[0] is not None:
1556 if ret[0] is not None:
1557 cg, remote_heads = ret
1557 cg, remote_heads = ret
1558 return remote.addchangegroup(cg, 'push', self.url())
1558 return remote.addchangegroup(cg, 'push', self.url())
1559 return ret[1]
1559 return ret[1]
1560 finally:
1560 finally:
1561 del lock
1561 del lock
1562
1562
1563 def push_unbundle(self, remote, force, revs):
1563 def push_unbundle(self, remote, force, revs):
1564 # local repo finds heads on server, finds out what revs it
1564 # local repo finds heads on server, finds out what revs it
1565 # must push. once revs transferred, if server finds it has
1565 # must push. once revs transferred, if server finds it has
1566 # different heads (someone else won commit/push race), server
1566 # different heads (someone else won commit/push race), server
1567 # aborts.
1567 # aborts.
1568
1568
1569 ret = self.prepush(remote, force, revs)
1569 ret = self.prepush(remote, force, revs)
1570 if ret[0] is not None:
1570 if ret[0] is not None:
1571 cg, remote_heads = ret
1571 cg, remote_heads = ret
1572 if force: remote_heads = ['force']
1572 if force: remote_heads = ['force']
1573 return remote.unbundle(cg, remote_heads, 'push')
1573 return remote.unbundle(cg, remote_heads, 'push')
1574 return ret[1]
1574 return ret[1]
1575
1575
1576 def changegroupinfo(self, nodes, source):
1576 def changegroupinfo(self, nodes, source):
1577 if self.ui.verbose or source == 'bundle':
1577 if self.ui.verbose or source == 'bundle':
1578 self.ui.status(_("%d changesets found\n") % len(nodes))
1578 self.ui.status(_("%d changesets found\n") % len(nodes))
1579 if self.ui.debugflag:
1579 if self.ui.debugflag:
1580 self.ui.debug(_("List of changesets:\n"))
1580 self.ui.debug(_("List of changesets:\n"))
1581 for node in nodes:
1581 for node in nodes:
1582 self.ui.debug("%s\n" % hex(node))
1582 self.ui.debug("%s\n" % hex(node))
1583
1583
    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.

        Returns a util.chunkbuffer wrapping the lazily-generated stream of
        changegroup chunks (changelog group, manifest group, then one group
        per changed file, terminated by a close chunk).
        """

        # Give hooks a chance to veto the outgoing operation before any work.
        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # next_rev is a one-element list so the closure can mutate it
            # (Python 2 has no 'nonlocal').
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            # Key 1 in extranodes is the convention for manifest nodes
            # (see the docstring above).
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    # Skip the manifest key (1); only filenames remain.
                    if isinstance(fname, int):
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1880
1880
1881 def changegroup(self, basenodes, source):
1881 def changegroup(self, basenodes, source):
1882 """Generate a changegroup of all nodes that we have that a recipient
1882 """Generate a changegroup of all nodes that we have that a recipient
1883 doesn't.
1883 doesn't.
1884
1884
1885 This is much easier than the previous function as we can assume that
1885 This is much easier than the previous function as we can assume that
1886 the recipient has any changenode we aren't sending them."""
1886 the recipient has any changenode we aren't sending them."""
1887
1887
1888 self.hook('preoutgoing', throw=True, source=source)
1888 self.hook('preoutgoing', throw=True, source=source)
1889
1889
1890 cl = self.changelog
1890 cl = self.changelog
1891 nodes = cl.nodesbetween(basenodes, None)[0]
1891 nodes = cl.nodesbetween(basenodes, None)[0]
1892 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1892 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1893 self.changegroupinfo(nodes, source)
1893 self.changegroupinfo(nodes, source)
1894
1894
1895 def identity(x):
1895 def identity(x):
1896 return x
1896 return x
1897
1897
1898 def gennodelst(revlog):
1898 def gennodelst(revlog):
1899 for r in xrange(0, revlog.count()):
1899 for r in xrange(0, revlog.count()):
1900 n = revlog.node(r)
1900 n = revlog.node(r)
1901 if revlog.linkrev(n) in revset:
1901 if revlog.linkrev(n) in revset:
1902 yield n
1902 yield n
1903
1903
1904 def changed_file_collector(changedfileset):
1904 def changed_file_collector(changedfileset):
1905 def collect_changed_files(clnode):
1905 def collect_changed_files(clnode):
1906 c = cl.read(clnode)
1906 c = cl.read(clnode)
1907 for fname in c[3]:
1907 for fname in c[3]:
1908 changedfileset[fname] = 1
1908 changedfileset[fname] = 1
1909 return collect_changed_files
1909 return collect_changed_files
1910
1910
1911 def lookuprevlink_func(revlog):
1911 def lookuprevlink_func(revlog):
1912 def lookuprevlink(n):
1912 def lookuprevlink(n):
1913 return cl.node(revlog.linkrev(n))
1913 return cl.node(revlog.linkrev(n))
1914 return lookuprevlink
1914 return lookuprevlink
1915
1915
1916 def gengroup():
1916 def gengroup():
1917 # construct a list of all changed files
1917 # construct a list of all changed files
1918 changedfiles = {}
1918 changedfiles = {}
1919
1919
1920 for chnk in cl.group(nodes, identity,
1920 for chnk in cl.group(nodes, identity,
1921 changed_file_collector(changedfiles)):
1921 changed_file_collector(changedfiles)):
1922 yield chnk
1922 yield chnk
1923 changedfiles = changedfiles.keys()
1923 changedfiles = changedfiles.keys()
1924 changedfiles.sort()
1924 changedfiles.sort()
1925
1925
1926 mnfst = self.manifest
1926 mnfst = self.manifest
1927 nodeiter = gennodelst(mnfst)
1927 nodeiter = gennodelst(mnfst)
1928 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1928 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1929 yield chnk
1929 yield chnk
1930
1930
1931 for fname in changedfiles:
1931 for fname in changedfiles:
1932 filerevlog = self.file(fname)
1932 filerevlog = self.file(fname)
1933 if filerevlog.count() == 0:
1933 if filerevlog.count() == 0:
1934 raise util.Abort(_("empty or missing revlog for %s") % fname)
1934 raise util.Abort(_("empty or missing revlog for %s") % fname)
1935 nodeiter = gennodelst(filerevlog)
1935 nodeiter = gennodelst(filerevlog)
1936 nodeiter = list(nodeiter)
1936 nodeiter = list(nodeiter)
1937 if nodeiter:
1937 if nodeiter:
1938 yield changegroup.chunkheader(len(fname))
1938 yield changegroup.chunkheader(len(fname))
1939 yield fname
1939 yield fname
1940 lookup = lookuprevlink_func(filerevlog)
1940 lookup = lookuprevlink_func(filerevlog)
1941 for chnk in filerevlog.group(nodeiter, lookup):
1941 for chnk in filerevlog.group(nodeiter, lookup):
1942 yield chnk
1942 yield chnk
1943
1943
1944 yield changegroup.closechunk()
1944 yield changegroup.closechunk()
1945
1945
1946 if nodes:
1946 if nodes:
1947 self.hook('outgoing', node=hex(nodes[0]), source=source)
1947 self.hook('outgoing', node=hex(nodes[0]), source=source)
1948
1948
1949 return util.chunkbuffer(gengroup())
1949 return util.chunkbuffer(gengroup())
1950
1950
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        source: file-like object with the changegroup stream
        srctype: string identifying the operation (passed to hooks)
        url: source location (passed to hooks)
        emptyok: if True, don't abort on an empty changelog group

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # Map an added changeset node to its future revision number
        # (the revlog's count at the time it is added) for debug output.
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        # Map a changelog node to its revision number (used as linkrev
        # for manifest/file revisions).
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            # Pass a weak proxy so the revlogs' references don't keep the
            # transaction alive; 'del tr' in the finally clause below drops
            # the only strong reference. NOTE(review): presumably an
            # unclosed transaction aborts (rolls back) when collected --
            # confirm against transaction.py.
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files: a zero-length chunk (falsy f) ends the
            # sequence of per-file groups
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                # cor+1 is the first newly added revision
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            # Drop the last strong reference to the transaction; if
            # tr.close() was not reached, the transaction is released here.
            del tr

        # Post-transaction hooks run only after a successful commit.
        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # one 'incoming' hook invocation per added changeset
            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
2053
2053
2054
2054
2055 def stream_in(self, remote):
2055 def stream_in(self, remote):
2056 fp = remote.stream_out()
2056 fp = remote.stream_out()
2057 l = fp.readline()
2057 l = fp.readline()
2058 try:
2058 try:
2059 resp = int(l)
2059 resp = int(l)
2060 except ValueError:
2060 except ValueError:
2061 raise util.UnexpectedOutput(
2061 raise util.UnexpectedOutput(
2062 _('Unexpected response from remote server:'), l)
2062 _('Unexpected response from remote server:'), l)
2063 if resp == 1:
2063 if resp == 1:
2064 raise util.Abort(_('operation forbidden by server'))
2064 raise util.Abort(_('operation forbidden by server'))
2065 elif resp == 2:
2065 elif resp == 2:
2066 raise util.Abort(_('locking the remote repository failed'))
2066 raise util.Abort(_('locking the remote repository failed'))
2067 elif resp != 0:
2067 elif resp != 0:
2068 raise util.Abort(_('the server sent an unknown error code'))
2068 raise util.Abort(_('the server sent an unknown error code'))
2069 self.ui.status(_('streaming all changes\n'))
2069 self.ui.status(_('streaming all changes\n'))
2070 l = fp.readline()
2070 l = fp.readline()
2071 try:
2071 try:
2072 total_files, total_bytes = map(int, l.split(' ', 1))
2072 total_files, total_bytes = map(int, l.split(' ', 1))
2073 except ValueError, TypeError:
2073 except ValueError, TypeError:
2074 raise util.UnexpectedOutput(
2074 raise util.UnexpectedOutput(
2075 _('Unexpected response from remote server:'), l)
2075 _('Unexpected response from remote server:'), l)
2076 self.ui.status(_('%d files to transfer, %s of data\n') %
2076 self.ui.status(_('%d files to transfer, %s of data\n') %
2077 (total_files, util.bytecount(total_bytes)))
2077 (total_files, util.bytecount(total_bytes)))
2078 start = time.time()
2078 start = time.time()
2079 for i in xrange(total_files):
2079 for i in xrange(total_files):
2080 # XXX doesn't support '\n' or '\r' in filenames
2080 # XXX doesn't support '\n' or '\r' in filenames
2081 l = fp.readline()
2081 l = fp.readline()
2082 try:
2082 try:
2083 name, size = l.split('\0', 1)
2083 name, size = l.split('\0', 1)
2084 size = int(size)
2084 size = int(size)
2085 except ValueError, TypeError:
2085 except ValueError, TypeError:
2086 raise util.UnexpectedOutput(
2086 raise util.UnexpectedOutput(
2087 _('Unexpected response from remote server:'), l)
2087 _('Unexpected response from remote server:'), l)
2088 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2088 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2089 ofp = self.sopener(name, 'w')
2089 ofp = self.sopener(name, 'w')
2090 for chunk in util.filechunkiter(fp, limit=size):
2090 for chunk in util.filechunkiter(fp, limit=size):
2091 ofp.write(chunk)
2091 ofp.write(chunk)
2092 ofp.close()
2092 ofp.close()
2093 elapsed = time.time() - start
2093 elapsed = time.time() - start
2094 if elapsed <= 0:
2094 if elapsed <= 0:
2095 elapsed = 0.001
2095 elapsed = 0.001
2096 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2096 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2097 (util.bytecount(total_bytes), elapsed,
2097 (util.bytecount(total_bytes), elapsed,
2098 util.bytecount(total_bytes / elapsed)))
2098 util.bytecount(total_bytes / elapsed)))
2099 self.invalidate()
2099 self.invalidate()
2100 return len(self.heads()) + 1
2100 return len(self.heads()) + 1
2101
2101
2102 def clone(self, remote, heads=[], stream=False):
2102 def clone(self, remote, heads=[], stream=False):
2103 '''clone remote repository.
2103 '''clone remote repository.
2104
2104
2105 keyword arguments:
2105 keyword arguments:
2106 heads: list of revs to clone (forces use of pull)
2106 heads: list of revs to clone (forces use of pull)
2107 stream: use streaming clone if possible'''
2107 stream: use streaming clone if possible'''
2108
2108
2109 # now, all clients that can request uncompressed clones can
2109 # now, all clients that can request uncompressed clones can
2110 # read repo formats supported by all servers that can serve
2110 # read repo formats supported by all servers that can serve
2111 # them.
2111 # them.
2112
2112
2113 # if revlog format changes, client will have to check version
2113 # if revlog format changes, client will have to check version
2114 # and format flags on "stream" capability, and use
2114 # and format flags on "stream" capability, and use
2115 # uncompressed only if compatible.
2115 # uncompressed only if compatible.
2116
2116
2117 if stream and not heads and remote.capable('stream'):
2117 if stream and not heads and remote.capable('stream'):
2118 return self.stream_in(remote)
2118 return self.stream_in(remote)
2119 return self.pull(remote, heads)
2119 return self.pull(remote, heads)
2120
2120
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (src, dest) pair in files.

    The pairs are copied eagerly, so the returned closure holds no
    reference back to the caller's (possibly mutable) sequence.
    """
    pending = [(src, dest) for src, dest in files]
    def renameall():
        for source, target in pending:
            util.rename(source, target)
    return renameall
2128
2128
def instance(ui, path, create):
    """Repository factory: open (or create) the local repository at path.

    Strips an optional 'file' scheme prefix before handing the path to
    localrepository.
    """
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2131
2131
def islocal(path):
    """Report whether this repository type is local (always True here)."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now