localrepo: fix unpushable repos when using bookmarks (issue3317)...
Michael Bacarella - r16243:b9c43023 stable
@@ -1,2324 +1,2325 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)
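
# Editor's note (not part of this commit): filecache/storecache-decorated
# methods below behave like cached properties that are recomputed only when
# the backing file's stat data changes; storecache resolves its filename
# inside .hg/store via sjoin() rather than .hg/ via join(). A minimal sketch:
#
#     class somerepo(localrepository):
#         @storecache('00changelog.i')   # watches .hg/store/00changelog.i
#         def changelog(self):
#             return changelog.changelog(self.sopener)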

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()
        self._dirtyphases = False
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        openerreqs = set(('revlogv1', 'generaldelta'))
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    @storecache('phaseroots')
    def _phaseroots(self):
        self._dirtyphases = False
        phaseroots = phases.readroots(self)
        phases.filterunknown(self, phaseroots)
        return phaseroots

    @propertycache
    def _phaserev(self):
        cache = [phases.public] * len(self)
        for phase in phases.trackedphases:
            roots = map(self.changelog.rev, self._phaseroots[phase])
            if roots:
                for rev in roots:
                    cache[rev] = phase
                for rev in self.changelog.descendants(*roots):
                    cache[rev] = phase
        return cache

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
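
    # Editor's example (not part of this commit): revs() and set() run
    # caller-supplied values through revset.formatspec, which escapes each
    # argument before the expression is parsed. A hypothetical use:
    #
    #     for rev in repo.revs('%d::%d', 4, 7):
    #         print rev                  # revisions on paths from 4 to 7
    #     for ctx in repo.set('branch(%s) and head()', 'default'):
    #         print ctx.hex()            # heads of the default branch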

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
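
    # Editor's example (not part of this commit): a hypothetical call that
    # exercises the docstring above, tagging the working directory parent
    # with a single global tag (all values are illustrative only):
    #
    #     node = repo.lookup('.')
    #     repo.tag('v1.0', node, 'Added tag v1.0', False,
    #              'me <me@example.com>', None)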

    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        return self._tagscache.tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self.tags().iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass
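
    # Editor's note (not part of this commit): as the reader/writer above
    # show, .hg/cache/branchheads stores one "tiphex tiprev" header line
    # followed by "nodehex branchname" lines, one per branch head (the
    # hashes below are made up):
    #
    #     f6a1...09ce 42
    #     f6a1...09ce default
    #     83d2...4b1a stable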

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)
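
    # Editor's note (not part of this commit): lookup() resolves names in a
    # fixed order: integer revs, the special names '.', 'null' and 'tip',
    # exact changelog matches, bookmarks, tags, branch names, then node
    # prefixes; e.g. repo.lookup('tip') and repo.lookup(0) each return a
    # 20-byte binary node.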

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or self._phaserev[r] >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
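
    # Editor's example (not part of this commit): adddatafilter() below
    # registers a Python filter that _loadfilter() matches when a configured
    # filter command starts with the registered name, instead of shelling
    # out through util.filter(). Hypothetical sketch:
    #
    #     def upperfilter(s, cmd, **kwargs):
    #         return s.upper()
    #     repo.adddatafilter('upper:', upperfilter)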

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
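
    # Editor's example (not part of this commit): the usual pattern around
    # store writes, consistent with how recover() and rollback() below use
    # the journal. Sketch only:
    #
    #     lock = repo.lock()
    #     try:
    #         tr = repo.transaction('my-operation')
    #         try:
    #             # ... write to the store through repo.sopener ...
    #             tr.close()       # commit
    #         finally:
    #             tr.release()     # aborts if close() was never reached
    #     finally:
    #         lock.release()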

    def _writejournal(self, desc):
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

-        try:
-            bk = self.opener.read("bookmarks")
-        except IOError:
-            bk = ""
-        self.opener.write("journal.bookmarks", bk)
+        bkname = self.join('bookmarks')
+        if os.path.exists(bkname):
+            util.copyfile(bkname, self.join('journal.bookmarks'))
+        else:
+            self.opener.write('journal.bookmarks', '')
+
        phasesname = self.sjoin('phaseroots')
        if os.path.exists(phasesname):
            util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
        else:
            self.sopener.write('journal.phaseroots', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))
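
    # Editor's note (not part of this commit's text): the hunk above is the
    # issue3317 fix named in the commit message. _writejournal() now
    # snapshots .hg/bookmarks directly with util.copyfile, writing an empty
    # journal.bookmarks only when no bookmarks file exists, instead of
    # reading the file through self.opener and writing its contents back;
    # the truncated commit message above carries the rationale.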
790
791
791 def recover(self):
792 def recover(self):
792 lock = self.lock()
793 lock = self.lock()
793 try:
794 try:
794 if os.path.exists(self.sjoin("journal")):
795 if os.path.exists(self.sjoin("journal")):
795 self.ui.status(_("rolling back interrupted transaction\n"))
796 self.ui.status(_("rolling back interrupted transaction\n"))
796 transaction.rollback(self.sopener, self.sjoin("journal"),
797 transaction.rollback(self.sopener, self.sjoin("journal"),
797 self.ui.warn)
798 self.ui.warn)
798 self.invalidate()
799 self.invalidate()
799 return True
800 return True
800 else:
801 else:
801 self.ui.warn(_("no interrupted transaction available\n"))
802 self.ui.warn(_("no interrupted transaction available\n"))
802 return False
803 return False
803 finally:
804 finally:
804 lock.release()
805 lock.release()
805
806
806 def rollback(self, dryrun=False, force=False):
807 def rollback(self, dryrun=False, force=False):
807 wlock = lock = None
808 wlock = lock = None
808 try:
809 try:
809 wlock = self.wlock()
810 wlock = self.wlock()
810 lock = self.lock()
811 lock = self.lock()
811 if os.path.exists(self.sjoin("undo")):
812 if os.path.exists(self.sjoin("undo")):
812 return self._rollback(dryrun, force)
813 return self._rollback(dryrun, force)
813 else:
814 else:
814 self.ui.warn(_("no rollback information available\n"))
815 self.ui.warn(_("no rollback information available\n"))
815 return 1
816 return 1
816 finally:
817 finally:
817 release(lock, wlock)
818 release(lock, wlock)
818
819
819 def _rollback(self, dryrun, force):
820 def _rollback(self, dryrun, force):
820 ui = self.ui
821 ui = self.ui
821 try:
822 try:
822 args = self.opener.read('undo.desc').splitlines()
823 args = self.opener.read('undo.desc').splitlines()
823 (oldlen, desc, detail) = (int(args[0]), args[1], None)
824 (oldlen, desc, detail) = (int(args[0]), args[1], None)
824 if len(args) >= 3:
825 if len(args) >= 3:
825 detail = args[2]
826 detail = args[2]
826 oldtip = oldlen - 1
827 oldtip = oldlen - 1
827
828
828 if detail and ui.verbose:
829 if detail and ui.verbose:
829 msg = (_('repository tip rolled back to revision %s'
830 msg = (_('repository tip rolled back to revision %s'
830 ' (undo %s: %s)\n')
831 ' (undo %s: %s)\n')
831 % (oldtip, desc, detail))
832 % (oldtip, desc, detail))
832 else:
833 else:
833 msg = (_('repository tip rolled back to revision %s'
834 msg = (_('repository tip rolled back to revision %s'
834 ' (undo %s)\n')
835 ' (undo %s)\n')
835 % (oldtip, desc))
836 % (oldtip, desc))
836 except IOError:
837 except IOError:
837 msg = _('rolling back unknown transaction\n')
838 msg = _('rolling back unknown transaction\n')
838 desc = None
839 desc = None
839
840
840 if not force and self['.'] != self['tip'] and desc == 'commit':
841 if not force and self['.'] != self['tip'] and desc == 'commit':
841 raise util.Abort(
842 raise util.Abort(
842 _('rollback of last commit while not checked out '
843 _('rollback of last commit while not checked out '
843 'may lose data'), hint=_('use -f to force'))
844 'may lose data'), hint=_('use -f to force'))
844
845
845 ui.status(msg)
846 ui.status(msg)
846 if dryrun:
847 if dryrun:
847 return 0
848 return 0
848
849
849 parents = self.dirstate.parents()
850 parents = self.dirstate.parents()
850 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
851 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
851 if os.path.exists(self.join('undo.bookmarks')):
852 if os.path.exists(self.join('undo.bookmarks')):
852 util.rename(self.join('undo.bookmarks'),
853 util.rename(self.join('undo.bookmarks'),
853 self.join('bookmarks'))
854 self.join('bookmarks'))
854 if os.path.exists(self.sjoin('undo.phaseroots')):
855 if os.path.exists(self.sjoin('undo.phaseroots')):
855 util.rename(self.sjoin('undo.phaseroots'),
856 util.rename(self.sjoin('undo.phaseroots'),
856 self.sjoin('phaseroots'))
857 self.sjoin('phaseroots'))
857 self.invalidate()
858 self.invalidate()
858
859
859 parentgone = (parents[0] not in self.changelog.nodemap or
860 parentgone = (parents[0] not in self.changelog.nodemap or
860 parents[1] not in self.changelog.nodemap)
861 parents[1] not in self.changelog.nodemap)
861 if parentgone:
862 if parentgone:
862 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
863 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
863 try:
864 try:
864 branch = self.opener.read('undo.branch')
865 branch = self.opener.read('undo.branch')
865 self.dirstate.setbranch(branch)
866 self.dirstate.setbranch(branch)
866 except IOError:
867 except IOError:
867 ui.warn(_('named branch could not be reset: '
868 ui.warn(_('named branch could not be reset: '
868 'current branch is still \'%s\'\n')
869 'current branch is still \'%s\'\n')
869 % self.dirstate.branch())
870 % self.dirstate.branch())
870
871
871 self.dirstate.invalidate()
872 self.dirstate.invalidate()
872 parents = tuple([p.rev() for p in self.parents()])
873 parents = tuple([p.rev() for p in self.parents()])
873 if len(parents) > 1:
874 if len(parents) > 1:
874 ui.status(_('working directory now based on '
875 ui.status(_('working directory now based on '
875 'revisions %d and %d\n') % parents)
876 'revisions %d and %d\n') % parents)
876 else:
877 else:
877 ui.status(_('working directory now based on '
878 ui.status(_('working directory now based on '
878 'revision %d\n') % parents)
879 'revision %d\n') % parents)
879 self.destroyed()
880 self.destroyed()
880 return 0
881 return 0
881
882
882 def invalidatecaches(self):
883 def invalidatecaches(self):
883 def delcache(name):
884 def delcache(name):
884 try:
885 try:
885 delattr(self, name)
886 delattr(self, name)
886 except AttributeError:
887 except AttributeError:
887 pass
888 pass
888
889
889 delcache('_tagscache')
890 delcache('_tagscache')
890 delcache('_phaserev')
891 delcache('_phaserev')
891
892
892 self._branchcache = None # in UTF-8
893 self._branchcache = None # in UTF-8
893 self._branchcachetip = None
894 self._branchcachetip = None
894
895
895 def invalidatedirstate(self):
896 def invalidatedirstate(self):
896 '''Invalidates the dirstate, causing the next call to dirstate
897 '''Invalidates the dirstate, causing the next call to dirstate
897 to check if it was modified since the last time it was read,
898 to check if it was modified since the last time it was read,
898 rereading it if it has.
899 rereading it if it has.
899
900
900 This is different to dirstate.invalidate() that it doesn't always
901 This is different to dirstate.invalidate() that it doesn't always
901 rereads the dirstate. Use dirstate.invalidate() if you want to
902 rereads the dirstate. Use dirstate.invalidate() if you want to
902 explicitly read the dirstate again (i.e. restoring it to a previous
903 explicitly read the dirstate again (i.e. restoring it to a previous
903 known good state).'''
904 known good state).'''
904 if 'dirstate' in self.__dict__:
905 if 'dirstate' in self.__dict__:
905 for k in self.dirstate._filecache:
906 for k in self.dirstate._filecache:
906 try:
907 try:
907 delattr(self.dirstate, k)
908 delattr(self.dirstate, k)
908 except AttributeError:
909 except AttributeError:
909 pass
910 pass
910 delattr(self, 'dirstate')
911 delattr(self, 'dirstate')
911
912
912 def invalidate(self):
913 def invalidate(self):
913 for k in self._filecache:
914 for k in self._filecache:
914 # dirstate is invalidated separately in invalidatedirstate()
915 # dirstate is invalidated separately in invalidatedirstate()
915 if k == 'dirstate':
916 if k == 'dirstate':
916 continue
917 continue
917
918
918 try:
919 try:
919 delattr(self, k)
920 delattr(self, k)
920 except AttributeError:
921 except AttributeError:
921 pass
922 pass
922 self.invalidatecaches()
923 self.invalidatecaches()
923
924
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

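    # Illustrative sketch (comment only, not part of the original module):
    # the timeout above is read from the [ui] section of the configuration,
    # so a slow shared filesystem could be accommodated with an hgrc entry
    # like:
    #
    #   [ui]
    #   timeout = 1200
    #
    # _lock() first tries a non-blocking acquire; only when 'wait' is true
    # does it retry with this timeout.
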
    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)

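    # Hedged usage sketch: an extension that must run code once the store
    # lock is released (the names below are hypothetical) could register:
    #
    #   def callback():
    #       repo.ui.debug('store lock released\n')
    #   repo._afterlock(callback)
    #
    # The callback is appended to the held lock's postrelease list; if no
    # lock is currently held, the callback is silently dropped.
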
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if self._dirtyphases:
                phases.writeroots(self)
                self._dirtyphases = False
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

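    # Typical caller pattern, sketched as a comment (assumes 'repo' is a
    # localrepository instance; the transaction name is illustrative):
    #
    #   lock = repo.lock()
    #   try:
    #       tr = repo.transaction('my-operation')
    #       ...
    #   finally:
    #       lock.release()
    #
    # The lock is only tracked internally through the self._lockref weakref,
    # so callers must keep the returned reference alive until release().
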
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

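    # Comment-only sketch of the conventional lock ordering: when both locks
    # are needed, wlock() is taken before lock(), e.g.
    #
    #   wlock = repo.wlock()
    #   lock = repo.lock()
    #   try:
    #       ...
    #   finally:
    #       release(lock, wlock)   # release() is imported from lock above
    #
    # Acquiring them in the opposite order risks deadlocks between
    # concurrent hg processes.
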
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

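    # For illustration (not executed): when a rename is recorded above, the
    # filelog revision is stored with nullid as its first parent and the
    # copy source in its metadata, roughly:
    #
    #   meta = {'copy': 'foo', 'copyrev': '<40-digit hex of a filenode>'}
    #
    # so a later merge can look up the copy data instead of walking history.
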
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
                if (subs or removedsubs):
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    if '.hgsubstate' not in changes[0]:
                        changes[0].insert(0, '.hgsubstate')
                    if '.hgsubstate' in changes[2]:
                        changes[2].remove('.hgsubstate')
            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0],
                                     hint=_("use --subrepos for recursive commit"))

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

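    # Hedged usage sketch (commands.py is the usual caller; the names here
    # are illustrative only):
    #
    #   node = repo.commit(text='fix issue', user='alice <a@b.c>')
    #   if node is None:
    #       ui.status('nothing changed\n')
    #
    # A None return means there was nothing to commit (see the early return
    # above); otherwise the new changeset's node is returned.
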
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

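    # The 7-tuple returned above is conventionally unpacked in this order
    # (comment-only sketch, assuming 'repo' is a localrepository):
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(unknown=True, ignored=True, clean=True)
    #
    # unknown, ignored and clean stay empty unless explicitly requested.
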
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

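    # Example (comment-only sketch): the heads of the 'default' branch,
    # newest first, excluding closed heads:
    #
    #   heads = repo.branchheads('default')
    #
    # Passing closed=True also keeps heads whose changeset carries 'close'
    # in its extra dict, as filtered above.
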
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

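    # Sketch of the sampling performed by between(): for each (top, bottom)
    # pair, the returned list holds the first-parent ancestors of top at
    # exponentially growing distances 1, 2, 4, 8, ... stopping at bottom.
    # The old wire-protocol discovery uses this to binary-search for the
    # common subset with a remote repository.
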
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible,
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset,
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)
        finally:
            lock.release()

        return result

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

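    # Hedged example of the extension hook above (the class name is
    # hypothetical; mq installs a comparable override to refuse pushing
    # repositories with applied patches):
    #
    #   class myrepo(repo.__class__):
    #       def checkpush(self, force, revs):
    #           super(myrepo, self).checkpush(force, revs)
    #           # ...extra validation, raising util.Abort on failure...
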
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)


                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # entire push failed: synchronize on all common changesets
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changeset filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads)
                    #              )
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly a set of roots; we may want to ensure it
                    # XXX is, but that is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

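    # Comment-only sketch of interpreting the return value documented in
    # the docstring above (the handling shown is illustrative):
    #
    #   ret = repo.push(other)
    #   if ret == 0:
    #       ui.warn('push failed (HTTP error)\n')
    #
    # Note that bookmarks are compared and pushed via pushkey after the
    # locks are released, even when no changesets were transferred, which
    # keeps bookmark-only pushes working.
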
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

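    # Sketch of a pull-style call (assuming `remoteheads` and `commonnodes`
    # were produced by discovery against a peer): only changesets that are
    # ancestors of `remoteheads` but not of `commonnodes` end up in the
    # bundle:
    #
    #     cg = repo.getbundle('pull', heads=remoteheads, common=commonnodes)
    #
    # With both arguments left as None this degenerates to bundling the
    # whole repository: ancestors of the local heads minus ancestors of
    # nullid.
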
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

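    # Shape of the stream assembled by gengroup() above (a sketch of the
    # bundle10 layout as this code emits it, not a full format spec):
    #
    #     <changelog group>
    #     <manifest group>
    #     [<fileheader(fname)> <filelog group>]  # once per changed file
    #     <close chunk>
    #
    # The lookup() callback is the glue: for every node the bundler emits,
    # it returns the changelog node the receiver should use as the linkrev
    # target for that revision.
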
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret.
                #
                # We should not use 'added' here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

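    # Worked example of the return convention documented above: a pull that
    # grows the repo from 1 head to 3 gives dh == 2 and returns 3 ("+2
    # heads"); one that merges 3 heads down to 1 gives dh == -2 and returns
    # -3; and added changesets with an unchanged head count return 1. Zero
    # is reserved for "nothing changed or no source".
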
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new
            # format-related requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

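    # Wire format consumed by stream_in() above, reconstructed from the
    # parsing code (a sketch, not a protocol spec):
    #
    #     <resp>\n                           # 0 ok, 1 forbidden, 2 lock failed
    #     <total_files> <total_bytes>\n
    #     <name>\0<size>\n<size raw bytes>   # repeated total_files times
    #
    # Names arrive store-encoded for backwards compatibility, hence the
    # store.decodedir() call before each file is written out.
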
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

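    # The negotiation above in a nutshell: a server advertising the bare
    # 'stream' capability is known to use plain revlogv1 and can be streamed
    # directly; newer servers advertise something like
    # 'streamreqs=revlogv1,generaldelta' instead, and we stream only if
    # every listed requirement is in self.supportedformats. Everything else
    # falls back to an ordinary pull.
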
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

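    # Usage sketch (bookmarks are one pushkey namespace; `node` below is an
    # assumed hex changeset id):
    #
    #     ok = repo.pushkey('bookmarks', 'mybook', '', node)  # '' = create
    #     marks = repo.listkeys('bookmarks')                  # name -> hex node
    #
    # Because the pre* hooks run with throw=True, a failing prepushkey or
    # prelistkeys hook aborts the operation before anything changes.
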
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
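
# Plausible wiring for aftertrans() and undoname() (a sketch, assuming
# mercurial.transaction's (report, opener, journal, after) signature; the
# actual call sites live elsewhere in this module): journal files are
# paired with their undo names and renamed only once the transaction
# closes cleanly:
#
#     renames = [(f, undoname(f)) for f in journalfiles]
#     tr = transaction.transaction(ui.warn, opener, "journal",
#                                  aftertrans(renames))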