localrepo: use "vfs" instead of "opener" while ensuring repository directory...
FUJIWARA Katsunori
r17160:22b9b1d2 default
@@ -1,2457 +1,2457 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey, obsolete
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding, base85
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

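As background, filecache (scmutil.filecache) invalidates a cached property when the stat of its backing file changes; storecache only redirects the path lookup into the store. A minimal sketch of that idea with illustrative names, not Mercurial's actual implementation:

import os

class statcached(object):
    """Sketch of the filecache idea: recompute a value only when the
    backing file's (size, mtime) fingerprint changes."""
    def __init__(self, fname):
        self.fname = fname
    def join(self, obj, fname):
        # filecache resolves against .hg; storecache overrides this to
        # resolve via obj.sjoin into .hg/store instead
        return os.path.join(obj.path, fname)
    def __call__(self, func):
        key = '_cached_' + func.__name__
        def getter(obj):
            try:
                st = os.stat(self.join(obj, self.fname))
                stamp = (st.st_size, int(st.st_mtime))
            except OSError:
                stamp = None
            hit = getattr(obj, key, None)
            if hit is None or hit[0] != stamp:
                hit = (stamp, func(obj))
                setattr(obj, key, hit)
            return hit[1]
        return property(getter)
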
class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.wopener = scmutil.opener(path, expand=True)
        self.wvfs = self.wopener
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.vfs = self.opener
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup
        self._phasedefaults = []

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(self.root):
                    util.makedirs(self.root)
                util.makedir(self.path, notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
-                self.opener.append(
+                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
-                requirements = scmutil.readrequires(self.opener, self.supported)
+                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

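For context, the .hg/requires file read here holds one requirement name per line, and unknown entries must abort so an older client never misreads a newer layout. A hypothetical stand-in for scmutil.readrequires:

def read_requires(path, supported):
    # One requirement per line, e.g. "revlogv1\nstore\nfncache\n".
    # Unknown entries must abort: a client that cannot honor a
    # requirement must not touch the repository at all.
    with open(path) as fp:
        requirements = set(line.strip() for line in fp if line.strip())
    missing = requirements - supported
    if missing:
        raise ValueError('unknown repository format: %s'
                         % ', '.join(sorted(missing)))
    return requirements
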
        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.svfs = self.sopener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

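The vfs names introduced around this release (vfs, svfs, wvfs) are plain aliases for the corresponding openers, which is what lets this commit migrate call sites one at a time. A toy illustration of the aliasing pattern; the names and in-memory opener are invented for the example:

class DictOpener(object):
    """Toy in-memory opener exposing only the slice of the API used here."""
    def __init__(self):
        self.files = {}
    def append(self, name, data):
        self.files[name] = self.files.get(name, '') + data
    def read(self, name):
        return self.files[name]

class Repo(object):
    def __init__(self, opener):
        self.opener = opener    # legacy name
        self.vfs = self.opener  # preferred name, same object

repo = Repo(DictOpener())
repo.vfs.append('00changelog.i', '\0\0\0\2 dummy')
assert repo.opener.read('00changelog.i') == '\0\0\0\2 dummy'
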
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

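The longest-prefix walk in _checknested can be shown in isolation; this toy version uses a bare substate dict and a placeholder string where the real code recurses into the subrepo:

def check_nested(subpath, substate):
    # walk from the full path down to shorter prefixes, looking for
    # the deepest known subrepo that contains subpath
    parts = subpath.split('/')
    while parts:
        prefix = '/'.join(parts)
        if prefix in substate:
            if prefix == subpath:
                return True  # subpath itself is a subrepo
            # the real code recurses: sub.checknested(rest of path)
            return 'nested under %s' % prefix
        parts.pop()
    return False

assert check_nested('sub', {'sub': ''}) is True
assert check_nested('elsewhere/x', {'sub': ''}) is False
assert check_nested('sub/deep/x', {'sub': ''}) == 'nested under sub'
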
    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

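bookmarkheads treats 'name@suffix' bookmarks as divergent heads of 'name'; a quick self-contained illustration with toy data:

marks = {'work': 'n1', 'work@default': 'n2', 'other': 'n3'}

def bookmark_heads(marks, bookmark):
    name = bookmark.split('@', 1)[0]
    return sorted(n for mark, n in marks.items()
                  if mark.split('@', 1)[0] == name)

assert bookmark_heads(marks, 'work') == ['n1', 'n2']
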
    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

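revset.formatspec quotes and substitutes printf-style placeholders so user-supplied values cannot change the query structure. A much-reduced sketch handling only %d and %s; the real helper supports more placeholder types:

def format_spec(expr, *args):
    # substitute %d (int) and %s (quoted string) left to right
    out, args = [], list(args)
    i = 0
    while i < len(expr):
        if expr[i] == '%':
            i += 1
            a = args.pop(0)
            out.append(str(a) if expr[i] == 'd' else '"%s"' % a)
        else:
            out.append(expr[i])
        i += 1
    return ''.join(out)

assert format_spec('branch(%s) and %d::', 'default', 4) == \
       'branch("default") and 4::'
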
    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

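The records writetags emits are '<40-hex-node> <name>' lines, and a retag appends the tag's previous node first so the history survives in the file. A hypothetical helper showing just that record layout:

def append_tag(lines, node_hex, name, old_hex=None):
    # on a retag, record the previous node first so readers can
    # reconstruct the tag's history from the file alone
    if old_hex is not None:
        lines.append('%s %s' % (old_hex, name))
    lines.append('%s %s' % (node_hex, name))
    return lines

lines = append_tag([], '0' * 40, 'v1.0')
lines = append_tag(lines, '1' * 40, 'v1.0', old_hex='0' * 40)
assert lines[-1] == '1' * 40 + ' v1.0'
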
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        for k, v in self._tagscache.tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {}  # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if not node in self:
                    raise ValueError('invalidating branch cache because node '
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

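The cache/branchheads file parsed above holds a '<tip-hex> <tip-rev>' header followed by one '<node-hex> <branch>' line per head. A standalone parser sketch with simplified validation:

def parse_branchheads(data):
    lines = data.splitlines()
    tiphex, tiprev = lines[0].split(' ', 1)  # header: tip node and rev
    heads = {}
    for line in lines[1:]:
        if not line:
            continue
        node, label = line.split(' ', 1)
        heads.setdefault(label.strip(), []).append(node)
    return tiphex, int(tiprev), heads

tip, rev, heads = parse_branchheads('ef56 2\ncd34 default\nef56 stable\n')
assert rev == 2 and heads['default'] == ['cd34']
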
    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset of
        the missing heads, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial:
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]

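The pruning loop's invariant, in isolation: a revision stops being a branch head once another candidate is its descendant. A toy version over a parent-map DAG; the dict encoding is invented for the example:

def prune_heads(candidates, parents):
    """Keep only revisions that are not ancestors of another candidate.
    'parents' maps rev -> list of parent revs (a toy DAG encoding)."""
    def ancestors(rev):
        seen, stack = set(), [rev]
        while stack:
            for p in parents.get(stack.pop(), []):
                if p not in seen:
                    seen.add(p)
                    stack.append(p)
        return seen
    heads = set(candidates)
    for rev in candidates:
        heads -= ancestors(rev)
    return sorted(heads)

# 1 (branch a) -> 2 (branch b) -> 3 (branch a): rev 1 is no longer a head
assert prune_heads([1, 3], {2: [1], 3: [2]}) == [3]
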
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

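The [encode]/[decode] machinery pairs filename patterns with transforms and applies the first match. A minimal shell-free stand-in using glob patterns and plain callables:

import fnmatch

def run_filters(filterpats, filename, data):
    # first matching pattern wins, as in _filter above
    for pat, fn in filterpats:
        if fnmatch.fnmatch(filename, pat):
            return fn(data)
    return data

filterpats = [('*.txt', lambda d: d.replace('\r\n', '\n'))]
assert run_filters(filterpats, 'a.txt', 'x\r\ny') == 'x\ny'
assert run_filters(filterpats, 'a.bin', 'x\r\ny') == 'x\r\ny'
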
    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

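On a clean transaction close, aftertrans renames each journal file to its undo counterpart so rollback() can restore it; undoname is defined later in this module, outside this hunk. A sketch of what that naming scheme presumably looks like (an assumption, not the module's code):

import os

def undo_name(fn):
    # journal.dirstate -> undo.dirstate, journal.branch -> undo.branch, ...
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

assert os.path.basename(undo_name('.hg/journal.dirstate')) == 'undo.dirstate'
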
800 def _journalfiles(self):
800 def _journalfiles(self):
801 return (self.sjoin('journal'), self.join('journal.dirstate'),
801 return (self.sjoin('journal'), self.join('journal.dirstate'),
802 self.join('journal.branch'), self.join('journal.desc'),
802 self.join('journal.branch'), self.join('journal.desc'),
803 self.join('journal.bookmarks'),
803 self.join('journal.bookmarks'),
804 self.sjoin('journal.phaseroots'))
804 self.sjoin('journal.phaseroots'))
805
805
806 def undofiles(self):
806 def undofiles(self):
807 return [undoname(x) for x in self._journalfiles()]
807 return [undoname(x) for x in self._journalfiles()]
808
808
809 def _writejournal(self, desc):
809 def _writejournal(self, desc):
810 self.opener.write("journal.dirstate",
810 self.opener.write("journal.dirstate",
811 self.opener.tryread("dirstate"))
811 self.opener.tryread("dirstate"))
812 self.opener.write("journal.branch",
812 self.opener.write("journal.branch",
813 encoding.fromlocal(self.dirstate.branch()))
813 encoding.fromlocal(self.dirstate.branch()))
814 self.opener.write("journal.desc",
814 self.opener.write("journal.desc",
815 "%d\n%s\n" % (len(self), desc))
815 "%d\n%s\n" % (len(self), desc))
816 self.opener.write("journal.bookmarks",
816 self.opener.write("journal.bookmarks",
817 self.opener.tryread("bookmarks"))
817 self.opener.tryread("bookmarks"))
818 self.sopener.write("journal.phaseroots",
818 self.sopener.write("journal.phaseroots",
819 self.sopener.tryread("phaseroots"))
819 self.sopener.tryread("phaseroots"))
820
820
821 def recover(self):
821 def recover(self):
822 lock = self.lock()
822 lock = self.lock()
823 try:
823 try:
824 if os.path.exists(self.sjoin("journal")):
824 if os.path.exists(self.sjoin("journal")):
825 self.ui.status(_("rolling back interrupted transaction\n"))
825 self.ui.status(_("rolling back interrupted transaction\n"))
826 transaction.rollback(self.sopener, self.sjoin("journal"),
826 transaction.rollback(self.sopener, self.sjoin("journal"),
827 self.ui.warn)
827 self.ui.warn)
828 self.invalidate()
828 self.invalidate()
829 return True
829 return True
830 else:
830 else:
831 self.ui.warn(_("no interrupted transaction available\n"))
831 self.ui.warn(_("no interrupted transaction available\n"))
832 return False
832 return False
833 finally:
833 finally:
834 lock.release()
834 lock.release()
835
835
836 def rollback(self, dryrun=False, force=False):
836 def rollback(self, dryrun=False, force=False):
837 wlock = lock = None
837 wlock = lock = None
838 try:
838 try:
839 wlock = self.wlock()
839 wlock = self.wlock()
840 lock = self.lock()
840 lock = self.lock()
841 if os.path.exists(self.sjoin("undo")):
841 if os.path.exists(self.sjoin("undo")):
842 return self._rollback(dryrun, force)
842 return self._rollback(dryrun, force)
843 else:
843 else:
844 self.ui.warn(_("no rollback information available\n"))
844 self.ui.warn(_("no rollback information available\n"))
845 return 1
845 return 1
846 finally:
846 finally:
847 release(lock, wlock)
847 release(lock, wlock)
848
848
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

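    # Note on the undo files used above: .hg/store/undo holds the
    # transaction journal replayed by transaction.rollback(), while
    # undo.bookmarks, undo.phaseroots, undo.dirstate and undo.branch carry
    # the non-revlog state. Each is restored only if present, and the
    # dirstate/branch pair only when a working directory parent was
    # actually stripped (the parentgone case).
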
    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        unconditionally reread the dirstate. Use dirstate.invalidate() if
        you want to explicitly reread the dirstate (i.e. restore it to a
        previously known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

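    # A minimal sketch of how an extension might use _afterlock() to defer
    # work until the store lock is released (hypothetical example; "repo"
    # and the message are assumptions):
    #
    #     def notify():
    #         repo.ui.status("lock released, transaction visible\n")
    #     repo._afterlock(notify)
    #
    # As the code above shows, if no lock is currently held the callback
    # runs immediately instead of being queued.
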
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

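    # Typical caller-side pattern (sketch, not original code): take the
    # store lock around any store mutation and always release it:
    #
    #     lock = repo.lock()
    #     try:
    #         tr = repo.transaction("example")  # "example" is illustrative
    #         # ... mutate the store ...
    #     finally:
    #         lock.release()
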
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

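    # Ordering note (observation, not a new rule): code in this class that
    # needs both locks takes wlock() before lock(), as rollback() above
    # does; acquiring them in the opposite order risks deadlocking against
    # a process that follows that order.
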
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

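    # _filecommit() returns either the node of a freshly added filelog
    # revision or, when nothing but flags changed (or nothing changed at
    # all), the unchanged first-parent filenode. Either way the result can
    # be stored directly into the new manifest, which is exactly what
    # commitctx() does below with "new[f] = self._filecommit(...)".
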
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

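    # Sketch of a programmatic commit (hypothetical caller; "repo" and the
    # message/user are assumptions, not part of this module):
    #
    #     node = repo.commit(text="fix frobnication", user="alice")
    #     if node is None:
    #         repo.ui.status("nothing changed\n")
    #
    # A None return mirrors the early "return None" above, taken when
    # there is nothing to commit on the current branch.
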
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchhead cache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

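    # Example (sketch, assuming "repo" is a localrepository instance):
    # walking whatever files the matcher selects in the working context:
    #
    #     m = matchmod.always(repo.root, '')
    #     for f in repo.walk(m):          # node=None -> working context
    #         repo.ui.write(f + "\n")
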
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

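    # The 7-tuple returned above is ordered as:
    #     (modified, added, removed, deleted, unknown, ignored, clean)
    # with each list sorted; the ignored, clean and unknown lists stay
    # empty unless the corresponding keyword argument was passed as True.
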
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

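    # between() samples each top->bottom first-parent chain at
    # exponentially growing distances (i == 1, 2, 4, 8, ... via the
    # "f = f * 2" doubling above), which is what the legacy discovery
    # protocol's binary search over ancestors relies on. Hypothetical
    # shape of the result for one pair:
    #
    #     repo.between([(top, bottom)])
    #     # -> [[node_at_dist_1, node_at_dist_2, node_at_dist_4, ...]]
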
    def pull(self, remote, heads=None, force=False):
        # don't open transaction for nothing or you break future useful
        # rollback call
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            remoteobs = remote.listkeys('obsolete')
            if 'dump' in remoteobs:
                if tr is None:
                    tr = self.transaction(trname)
                data = base85.b85decode(remoteobs['dump'])
                self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

1698 def push(self, remote, force=False, revs=None, newbranch=False):
1698 def push(self, remote, force=False, revs=None, newbranch=False):
1699 '''Push outgoing changesets (limited by revs) from the current
1699 '''Push outgoing changesets (limited by revs) from the current
1700 repository to remote. Return an integer:
1700 repository to remote. Return an integer:
1701 - None means nothing to push
1701 - None means nothing to push
1702 - 0 means HTTP error
1702 - 0 means HTTP error
1703 - 1 means we pushed and remote head count is unchanged *or*
1703 - 1 means we pushed and remote head count is unchanged *or*
1704 we have outgoing changesets but refused to push
1704 we have outgoing changesets but refused to push
1705 - other values as described by addchangegroup()
1705 - other values as described by addchangegroup()
1706 '''
1706 '''
1707 # there are two ways to push to remote repo:
1707 # there are two ways to push to remote repo:
1708 #
1708 #
1709 # addchangegroup assumes local user can lock remote
1709 # addchangegroup assumes local user can lock remote
1710 # repo (local filesystem, old ssh servers).
1710 # repo (local filesystem, old ssh servers).
1711 #
1711 #
1712 # unbundle assumes local user cannot lock remote repo (new ssh
1712 # unbundle assumes local user cannot lock remote repo (new ssh
1713 # servers, http servers).
1713 # servers, http servers).
1714
1714
1715 # get local lock as we might write phase data
1715 # get local lock as we might write phase data
1716 locallock = self.lock()
1716 locallock = self.lock()
1717 try:
1717 try:
1718 self.checkpush(force, revs)
1718 self.checkpush(force, revs)
1719 lock = None
1719 lock = None
1720 unbundle = remote.capable('unbundle')
1720 unbundle = remote.capable('unbundle')
1721 if not unbundle:
1721 if not unbundle:
1722 lock = remote.lock()
1722 lock = remote.lock()
1723 try:
1723 try:
1724 # discovery
1724 # discovery
1725 fci = discovery.findcommonincoming
1725 fci = discovery.findcommonincoming
1726 commoninc = fci(self, remote, force=force)
1726 commoninc = fci(self, remote, force=force)
1727 common, inc, remoteheads = commoninc
1727 common, inc, remoteheads = commoninc
1728 fco = discovery.findcommonoutgoing
1728 fco = discovery.findcommonoutgoing
1729 outgoing = fco(self, remote, onlyheads=revs,
1729 outgoing = fco(self, remote, onlyheads=revs,
1730 commoninc=commoninc, force=force)
1730 commoninc=commoninc, force=force)
1731
1731
1732
1732
1733 if not outgoing.missing:
1733 if not outgoing.missing:
1734 # nothing to push
1734 # nothing to push
1735 scmutil.nochangesfound(self.ui, outgoing.excluded)
1735 scmutil.nochangesfound(self.ui, outgoing.excluded)
1736 ret = None
1736 ret = None
1737 else:
1737 else:
1738 # something to push
1738 # something to push
1739 if not force:
1739 if not force:
1740 discovery.checkheads(self, remote, outgoing,
1740 discovery.checkheads(self, remote, outgoing,
1741 remoteheads, newbranch,
1741 remoteheads, newbranch,
1742 bool(inc))
1742 bool(inc))
1743
1743
1744 # create a changegroup from local
1744 # create a changegroup from local
1745 if revs is None and not outgoing.excluded:
1745 if revs is None and not outgoing.excluded:
1746 # push everything,
1746 # push everything,
1747 # use the fast path, no race possible on push
1747 # use the fast path, no race possible on push
1748 cg = self._changegroup(outgoing.missing, 'push')
1748 cg = self._changegroup(outgoing.missing, 'push')
1749 else:
1749 else:
1750 cg = self.getlocalbundle('push', outgoing)
1750 cg = self.getlocalbundle('push', outgoing)
1751
1751
1752 # apply changegroup to remote
1752 # apply changegroup to remote
1753 if unbundle:
1753 if unbundle:
1754 # local repo finds heads on server, finds out what
1754 # local repo finds heads on server, finds out what
1755 # revs it must push. once revs transferred, if server
1755 # revs it must push. once revs transferred, if server
1756 # finds it has different heads (someone else won
1756 # finds it has different heads (someone else won
1757 # commit/push race), server aborts.
1757 # commit/push race), server aborts.
1758 if force:
1758 if force:
1759 remoteheads = ['force']
1759 remoteheads = ['force']
1760 # ssh: return remote's addchangegroup()
1760 # ssh: return remote's addchangegroup()
1761 # http: return remote's addchangegroup() or 0 for error
1761 # http: return remote's addchangegroup() or 0 for error
1762 ret = remote.unbundle(cg, remoteheads, 'push')
1762 ret = remote.unbundle(cg, remoteheads, 'push')
1763 else:
1763 else:
1764 # we return an integer indicating remote head count
1764 # we return an integer indicating remote head count
1765 # change
1765 # change
1766 ret = remote.addchangegroup(cg, 'push', self.url())
1766 ret = remote.addchangegroup(cg, 'push', self.url())
1767
1767
1768 if ret:
1768 if ret:
1769 # push succeed, synchonize target of the push
1769 # push succeed, synchonize target of the push
1770 cheads = outgoing.missingheads
1770 cheads = outgoing.missingheads
1771 elif revs is None:
1771 elif revs is None:
1772 # All out push fails. synchronize all common
1772 # All out push fails. synchronize all common
1773 cheads = outgoing.commonheads
1773 cheads = outgoing.commonheads
1774 else:
1774 else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads) )
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * the part of missingheads in common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure that it is,
                    # XXX but that is costly.
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                if 'obsolete' in self.listkeys('namespaces') and self.obsstore:
                    data = self.listkeys('obsolete')['dump']
                    r = remote.pushkey('obsolete', 'dump', '', data)
                    if not r:
                        self.ui.warn(_('failed to push obsolete markers!\n'))
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

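    # Illustrative sketch only (not part of the original module): how a
    # caller might interpret push()'s result. 'repo' and 'other' are
    # hypothetical local-repository and remote-peer objects.
    #
    #     ret = repo.push(other, newbranch=True)
    #     if ret is None:
    #         pass                # nothing to push
    #     elif ret == 0:
    #         pass                # remote unbundle() reported an HTTP error
    #     # any other integer encodes the remote head-count change, following
    #     # the convention documented on addchangegroup() below
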
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

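    # A minimal usage sketch (assumed, not from the original source):
    # bundling everything between a known base node and the current heads.
    # 'repo' and 'base' are hypothetical.
    #
    #     cg = repo.changegroupsubset([base], repo.heads(), 'bundle')
    #     chunk = cg.read(4096)   # successive changegroup chunks
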
    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

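    # Hedged example of the defaults described in the docstring: with no
    # arguments beyond source, getbundle() bundles the whole repository,
    # since common defaults to [nullid] and heads to the local heads.
    # 'repo', 'h1' and 'c1' are hypothetical objects/nodes.
    #
    #     cg = repo.getbundle('pull')                           # everything
    #     cg = repo.getbundle('pull', heads=[h1], common=[c1])
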
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

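    # A hedged sketch (assumed usage): the unbundle10 object returned above
    # streams chunks through read(), so a caller could spool a bundle to
    # disk. 'repo', 'nodes' and the output filename are hypothetical.
    #
    #     cg = repo._changegroup(nodes, 'bundle')
    #     fp = open('out.hg', 'wb')
    #     for chunk in iter(lambda: cg.read(4096), ''):
    #         fp.write(chunk)
    #     fp.close()
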
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset already
                # existed locally as secret.
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

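    # Example of decoding the return value documented above ('ret' is a
    # hypothetical result of addchangegroup()):
    #
    #     if ret == 0:
    #         pass                     # nothing changed or no source
    #     elif ret > 1:
    #         addedheads = ret - 1     # head count grew
    #     elif ret < 0:
    #         removedheads = -ret - 1  # head count shrank
    #     else:                        # ret == 1
    #         pass                     # head count unchanged
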
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related
            #                    requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

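    # Sketch of the stream_out wire format as parsed above (reconstructed
    # from the parsing code; assumed, not normative):
    #
    #     <resp>\n                      # 0 ok, 1 forbidden, 2 lock failed
    #     <total_files> <total_bytes>\n
    #     # then, total_files times:
    #     <store path>\0<size>\n
    #     <size raw bytes of file data>
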
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

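    # Hedged usage sketch ('repo' and 'other' are hypothetical peers): a
    # streaming clone is only attempted when no specific heads are requested,
    # so passing heads always falls back to a pull-based clone.
    #
    #     repo.clone(other, stream=True)          # may stream, may pull
    #     repo.clone(other, heads=[somehead])     # always pulls
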
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

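    # Minimal sketch (key names assumed): pushkey/listkeys implement the
    # generic key-value protocol that the bookmark, phase and obsolete
    # exchanges above are built on.
    #
    #     repo.listkeys('namespaces')   # e.g. {'bookmarks': '', 'phases': ''}
    #     repo.pushkey('bookmarks', 'stable', oldhex, newhex)  # hex node ids
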
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

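    # Hedged example: the message is stored under .hg and the returned path
    # is relative to the current directory, typically '.hg/last-message.txt'.
    #
    #     msgpath = repo.savecommitmessage('WIP: draft message\n')
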
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

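# Illustrative only: undoname() maps a journal file to its undo counterpart.
#
#     undoname('.hg/journal.dirstate')   # -> '.hg/undo.dirstate'
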
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True