obsolete: move obsolete markers read/write logic to obsstore object...
Pierre-Yves David
r17124:f1b7683f default
@@ -1,2453 +1,2442 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey, obsolete
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding, base85
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

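filecache (and its store-rooted variant above) memoizes a repository property and recomputes it only when the backing file changes on disk. A minimal self-contained sketch of the idea, assuming a (size, mtime) fingerprint is a good enough change detector; the real scmutil.filecache is more careful and also cooperates with repo invalidation:

import os

class fingerprintcache(object):
    """Toy stand-in for scmutil.filecache: re-run the decorated
    function only when the watched file's stat fingerprint changes."""
    def __init__(self, path):
        self.path = path
        self._stamp = object()      # sentinel: never equals a real stat
        self._value = None

    def __call__(self, func):
        def getter(obj):
            try:
                st = os.stat(self.path)
                stamp = (st.st_size, int(st.st_mtime))
            except OSError:
                stamp = None        # a missing file is a state too
            if stamp != self._stamp:
                self._stamp = stamp
                self._value = func(obj)
            return self._value
        return property(getter)

class repodemo(object):
    @fingerprintcache('bookmarks')  # hypothetical path, relative to cwd
    def bookmarks(self):
        # parsed on first access and again whenever the file changes
        try:
            with open('bookmarks') as f:
                return f.read().splitlines()
        except IOError:
            return []
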
class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.opener.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        openerreqs = set(('revlogv1', 'generaldelta'))
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
-        store = obsolete.obsstore()
-        data = self.sopener.tryread('obsstore')
-        if data:
-            store.loadmarkers(data)
-        return store
+        store = obsolete.obsstore(self.sopener)
+        return store

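This hunk is the point of the changeset: instead of the repository reading `.hg/store/obsstore` itself and pushing the bytes into loadmarkers(), the obsstore is handed the store opener and owns its own read/write logic. A rough sketch of the shape this implies; the marker codec here is a made-up line format (the real one in mercurial/obsolete.py is binary), and add() is an assumed name for the write half:

def _readmarkers(data):
    # toy decoder: one whitespace-separated marker per line
    return [tuple(line.split()) for line in data.splitlines() if line]

def _encodemarkers(markers):
    return ''.join(' '.join(marker) + '\n' for marker in markers)

class obsstore(object):
    """Store of obsolescence markers that manages its own I/O through
    the opener it is given, rather than being spoon-fed file contents."""
    def __init__(self, sopener):
        self.sopener = sopener                 # opener rooted at .hg/store
        data = sopener.tryread('obsstore')     # returns '' when absent
        self._all = _readmarkers(data)

    def add(self, markers):
        # append mode preserves markers written by earlier transactions
        f = self.sopener('obsstore', 'ab')
        try:
            f.write(_encodemarkers(markers))
            self._all.extend(markers)
        finally:
            f.close()
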
    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

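revs() and set() are the programmatic face of the revset language; revset.formatspec() quotes each extra argument according to a %-placeholder (%d for an int, %s for a string, %ld for a list of ints, and so on), so callers never build revset strings by hand. A usage sketch, assuming an existing repo object and a revision number rev:

# changectx objects for non-public descendants of rev
for ctx in repo.set('descendants(%d) and not public()', rev):
    print ctx.hex(), ctx.description()

# plain revision numbers for changesets touching one file
revs = repo.revs('file(%s)', 'mercurial/localrepo.py')
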
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        for k, v in self._tagscache.tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {}    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if not node in self:
                    raise ValueError('invalidating branch cache because node '+
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
584 """Given a branchhead cache, partial, that may have extra nodes or be
581 """Given a branchhead cache, partial, that may have extra nodes or be
585 missing heads, and a generator of nodes that are at least a superset of
582 missing heads, and a generator of nodes that are at least a superset of
586 heads missing, this function updates partial to be correct.
583 heads missing, this function updates partial to be correct.
587 """
584 """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that are no longer in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial:
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]

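The pruning loop above is the subtle part: a candidate stops being a branch head as soon as another candidate turns out to be its descendant. A toy rendition of that rule, using a plain parent map in place of the changelog (hypothetical helper, for illustration only):

def prunebranchheads(candidates, parents):
    """candidates: revs believed to be heads of one branch.
    parents: rev -> parent rev mapping (linear toy history).
    Keep only revs that are not ancestors of another candidate."""
    heads = sorted(set(candidates))
    iterrevs = list(heads)
    while iterrevs:
        latest = iterrevs.pop()
        if latest not in heads:
            continue
        ancestors = set()
        p = parents.get(latest)
        while p is not None:
            ancestors.add(p)
            p = parents.get(p)
        heads = [h for h in heads if h not in ancestors]
    return heads

# 1 -> 2 -> 3 on one branch: only the tip survives as a head
assert prunebranchheads([1, 2, 3], {2: 1, 3: 2}) == [3]
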
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

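known() backs the wire-protocol 'known' query used during discovery: for each node a peer asks about, answer True only if the node exists locally and its phase is below secret, which is how secret changesets stay invisible to pulls. The same rule on stand-in data structures:

SECRET = 2  # numeric value of phases.secret

def known(nodemap, phaseof, nodes):
    """nodemap: node -> rev; phaseof: rev -> phase (default public=0)."""
    result = []
    for n in nodes:
        r = nodemap.get(n)
        result.append(not (r is None or phaseof.get(r, 0) >= SECRET))
    return result

nodemap = {'aaa': 0, 'bbb': 1}
phaseof = {1: SECRET}                       # rev 1 is secret
assert known(nodemap, phaseof, ['aaa', 'bbb', 'ccc']) == [True, False, False]
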
    def local(self):
        return self

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

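adddatafilter() is how an extension registers a named filter that [encode]/[decode] hgrc entries can then reference by prefix instead of shelling out to an external command; _loadfilter() matches the configured command against registered names with startswith(). A sketch of wiring one up (the filter name and pattern are invented for illustration; win32text is the in-tree example of this mechanism):

def normalizeeol(data, cmd, ui=None, repo=None, filename=None):
    # run by wread() on data headed for the store
    return data.replace('\r\n', '\n')

# in an extension's reposetup(ui, repo):
#     repo.adddatafilter('normalizeeol:', normalizeeol)
#
# matched by an hgrc entry such as:
#     [encode]
#     **.txt = normalizeeol:
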
    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

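Together these three methods implement write-ahead journaling: _writejournal() snapshots metadata as journal.* files before a transaction mutates anything, and a successful close renames each journal.* file to its undo.* twin so rollback() has something to restore. A sketch of that rename step, assuming undoname() (defined further down in this file) simply swaps the journal prefix for undo:

import os

def undoname(fn):
    # assumed behavior: journal.dirstate -> undo.dirstate, etc.
    base, name = os.path.split(fn)
    return os.path.join(base, name.replace('journal', 'undo', 1))

def aftertrans(renames):
    """Return a callback for the transaction to run after closing."""
    def a():
        for src, dest in renames:
            if os.path.exists(src):
                os.rename(src, dest)
    return a

renames = [(f, undoname(f))
           for f in ('journal.dirstate', 'journal.branch', 'journal.desc')]
# transaction.transaction(..., aftertrans(renames), ...) then runs the
# renames only if the transaction commits cleanly
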
805 def _writejournal(self, desc):
802 def _writejournal(self, desc):
806 self.opener.write("journal.dirstate",
803 self.opener.write("journal.dirstate",
807 self.opener.tryread("dirstate"))
804 self.opener.tryread("dirstate"))
808 self.opener.write("journal.branch",
805 self.opener.write("journal.branch",
809 encoding.fromlocal(self.dirstate.branch()))
806 encoding.fromlocal(self.dirstate.branch()))
810 self.opener.write("journal.desc",
807 self.opener.write("journal.desc",
811 "%d\n%s\n" % (len(self), desc))
808 "%d\n%s\n" % (len(self), desc))
812 self.opener.write("journal.bookmarks",
809 self.opener.write("journal.bookmarks",
813 self.opener.tryread("bookmarks"))
810 self.opener.tryread("bookmarks"))
814 self.sopener.write("journal.phaseroots",
811 self.sopener.write("journal.phaseroots",
815 self.sopener.tryread("phaseroots"))
812 self.sopener.tryread("phaseroots"))
816
813
817 def recover(self):
814 def recover(self):
818 lock = self.lock()
815 lock = self.lock()
819 try:
816 try:
820 if os.path.exists(self.sjoin("journal")):
817 if os.path.exists(self.sjoin("journal")):
821 self.ui.status(_("rolling back interrupted transaction\n"))
818 self.ui.status(_("rolling back interrupted transaction\n"))
822 transaction.rollback(self.sopener, self.sjoin("journal"),
819 transaction.rollback(self.sopener, self.sjoin("journal"),
823 self.ui.warn)
820 self.ui.warn)
824 self.invalidate()
821 self.invalidate()
825 return True
822 return True
826 else:
823 else:
827 self.ui.warn(_("no interrupted transaction available\n"))
824 self.ui.warn(_("no interrupted transaction available\n"))
828 return False
825 return False
829 finally:
826 finally:
830 lock.release()
827 lock.release()
831
828
832 def rollback(self, dryrun=False, force=False):
829 def rollback(self, dryrun=False, force=False):
833 wlock = lock = None
830 wlock = lock = None
834 try:
831 try:
835 wlock = self.wlock()
832 wlock = self.wlock()
836 lock = self.lock()
833 lock = self.lock()
837 if os.path.exists(self.sjoin("undo")):
834 if os.path.exists(self.sjoin("undo")):
838 return self._rollback(dryrun, force)
835 return self._rollback(dryrun, force)
839 else:
836 else:
840 self.ui.warn(_("no rollback information available\n"))
837 self.ui.warn(_("no rollback information available\n"))
841 return 1
838 return 1
842 finally:
839 finally:
843 release(lock, wlock)
840 release(lock, wlock)
844
841
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

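    # Illustrative sketch (not part of this changeset): _rollback parses
    # .hg/undo.desc, which the transaction machinery writes as two or three
    # lines -- the old changelog length, a short transaction description,
    # and an optional detail (for a pull, the source URL, as seen in the
    # "(undo pull: ...)" message above). A hypothetical file:
    #
    #     3
    #     pull
    #     http://example.com/repo
    #
    # would give oldlen=3 (so oldtip=2), desc='pull', and the URL as detail.
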
    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

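    # Note (explanatory, not part of this changeset): the attributes deleted
    # above are filecache-backed properties such as 'changelog'; removing
    # them from the instance __dict__ means the next attribute access
    # re-runs the cached property, re-reading the backing file from disk
    # if its stat data changed.
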
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

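    # Illustrative sketch (not part of this changeset): a typical use of
    # _afterlock is deferring a hook until the lock is dropped, as commit()
    # does further down in this file:
    #
    #     def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
    #         self.hook("commit", node=node, parent1=parent1, parent2=parent2)
    #     self._afterlock(commithook)
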
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
-            if 'obsstore' in vars(self) and self.obsstore._new:
-                # XXX: transaction logic should be used here. But for
-                # now rewriting the whole file is good enough.
-                f = self.sopener('obsstore', 'wb', atomictemp=True)
-                try:
-                    self.obsstore.flushmarkers(f)
-                    f.close()
-                except: # re-raises
-                    f.discard()
-                    raise
+            if 'obsstore' in vars(self):
+                self.obsstore.flushmarkers()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

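    # Illustrative sketch (not part of this changeset): callers pair lock()
    # with a release in try/finally, as pull() and commitctx() do below:
    #
    #     lock = repo.lock()
    #     try:
    #         ...  # modify .hg/store
    #     finally:
    #         lock.release()
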
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

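    # Illustrative sketch (not part of this changeset): for a commit that
    # renames foo to bar, the code above records the copy source in the
    # filelog metadata, roughly:
    #
    #     meta = {'copy': 'foo',
    #             'copyrev': hex(crev)}  # 40-digit hex filenode of foo
    #
    # and sets fparent1 to nullid, which tells readers to look up the copy
    # data instead of following a normal first parent.
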
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

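    # Illustrative sketch (not part of this changeset): a minimal caller,
    # assuming 'repo' is an existing localrepository with pending changes:
    #
    #     node = repo.commit(text='fix parser',
    #                        user='alice <alice@example.com>',
    #                        date='2012-07-06 12:00 +0000')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
    #
    # A None return means there was nothing to commit, per the early
    # return above.
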
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter the parent changeset.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase is 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchhead cache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache, do
        # it. Otherwise, since nodes were destroyed, the cache is stale and
        # this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

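    # Illustrative sketch (not part of this changeset): walking the working
    # directory with an always-matcher, built the same way commit() builds
    # one above:
    #
    #     match = matchmod.always(repo.root, '')
    #     for f in repo.walk(match):   # node=None means the working dir
    #         repo.ui.write(f + '\n')
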
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

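    # Illustrative sketch (not part of this changeset): the seven lists come
    # back in a fixed order, so callers typically unpack them directly:
    #
    #     modified, added, removed, deleted, unknown, ignored, clean = \
    #         repo.status(unknown=True, ignored=True, clean=True)
    #
    # 'unknown', 'ignored' and 'clean' stay empty unless the corresponding
    # flags are passed.
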
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

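    # Illustrative sketch (not part of this changeset): for each (top,
    # bottom) pair the loop above walks first parents from top toward
    # bottom, keeping the nodes at exponentially growing distances from
    # top -- 1, 2, 4, 8, ... -- so for a long linear range it collects
    # roughly:
    #
    #     [top~1, top~2, top~4, top~8, ...]
    #
    # a logarithmic sample that the old discovery wire protocol uses to
    # narrow down where two repositories diverge.
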
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and unpublishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            remoteobs = remote.listkeys('obsolete')
            if 'dump' in remoteobs:
                data = base85.b85decode(remoteobs['dump'])
                self.obsstore.mergemarkers(data)
        finally:
            lock.release()

        return result

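    # Illustrative sketch (not part of this changeset): listkeys('phases')
    # returns a string-to-string map. A publishing server typically answers
    # something like:
    #
    #     {'publishing': 'True'}
    #
    # while a non-publishing one also lists its draft roots by hex node,
    # e.g. {'<40-hex-node>': '1'}. The values are strings because pushkey
    # data travels as text on the wire.
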
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # entire push failed: synchronize on all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))"
                    #              )
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs that are draft on remote but
                    # public here.
                    # XXX Beware that revsets break if droots is not strictly
                    # XXX roots; we may want to ensure it is, but that is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                    if 'obsolete' in self.listkeys('namespaces') and self.obsstore:
                        data = self.obsstore._writemarkers()
1823 r = remote.pushkey('obsolete', 'dump', '',
1812 r = remote.pushkey('obsolete', 'dump', '',
1824 base85.b85encode(data))
1813 base85.b85encode(data))
1825 if not r:
1814 if not r:
1826 self.ui.warn(_('failed to push obsolete markers!\n'))
1815 self.ui.warn(_('failed to push obsolete markers!\n'))
1827 finally:
1816 finally:
1828 if lock is not None:
1817 if lock is not None:
1829 lock.release()
1818 lock.release()
1830 finally:
1819 finally:
1831 locallock.release()
1820 locallock.release()
1832
1821
1833 self.ui.debug("checking for updated bookmarks\n")
1822 self.ui.debug("checking for updated bookmarks\n")
1834 rb = remote.listkeys('bookmarks')
1823 rb = remote.listkeys('bookmarks')
1835 for k in rb.keys():
1824 for k in rb.keys():
1836 if k in self._bookmarks:
1825 if k in self._bookmarks:
1837 nr, nl = rb[k], hex(self._bookmarks[k])
1826 nr, nl = rb[k], hex(self._bookmarks[k])
1838 if nr in self:
1827 if nr in self:
1839 cr = self[nr]
1828 cr = self[nr]
1840 cl = self[nl]
1829 cl = self[nl]
1841 if cl in cr.descendants():
1830 if cl in cr.descendants():
1842 r = remote.pushkey('bookmarks', k, nr, nl)
1831 r = remote.pushkey('bookmarks', k, nr, nl)
1843 if r:
1832 if r:
1844 self.ui.status(_("updating bookmark %s\n") % k)
1833 self.ui.status(_("updating bookmark %s\n") % k)
1845 else:
1834 else:
1846 self.ui.warn(_('updating bookmark %s'
1835 self.ui.warn(_('updating bookmark %s'
1847 ' failed!\n') % k)
1836 ' failed!\n') % k)
1848
1837
1849 return ret
1838 return ret
1850
1839
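    # A sketch (not from this changeset) of the fast-forward test the
    # bookmark loop above relies on: a bookmark is only pushed when the
    # local node descends from the remote one, so the move cannot discard
    # history. 'oldhex' and 'newhex' are hypothetical node hashes.
    #
    # >>> old, new = repo[oldhex], repo[newhex]
    # >>> new in old.descendants()
    # True
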
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

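    # Hypothetical usage sketch for the method above ('basenode' and
    # 'headnode' are placeholder 20-byte node ids): the returned object
    # yields successive changegroup chunks from its read() method.
    #
    # >>> cg = repo.changegroupsubset([basenode], [headnode], 'pull')
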
    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

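    # In revset terms, the bundle computed above roughly covers
    #
    #     ancestors(heads) - ancestors(common)
    #
    # with the defaults making that "everything": heads = all local
    # heads, common = [nullid].
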
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

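    # How the 'bundle.reorder' setting read above maps to behavior;
    # util.parsebool returns None for unrecognized values, and None
    # defers the reordering decision to the revlog being bundled:
    #
    # >>> util.parsebool('true'), util.parsebool('no'), util.parsebool('x')
    # (True, False, None)
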
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret.
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

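    # The head-count encoding returned above, tabulated (dh is the raw
    # head delta computed in the method; values per the docstring):
    #
    #   dh < 0   ->  dh - 1   (-2..-n: heads removed)
    #   dh == 0  ->  1        (head count unchanged)
    #   dh > 0   ->  dh + 1   (2..n: heads added)
    #   no data  ->  0
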
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

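    # Sketch of one per-file header line of the stream format consumed
    # above (name and size are illustrative values only):
    #
    # >>> l = 'data/foo.i\x001234\n'
    # >>> name, size = l.split('\0', 1)
    # >>> name, int(size)
    # ('data/foo.i', 1234)
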
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

@@ -1,279 +1,288
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete markers handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewriting operations, and help
building new tools to reconcile conflicting rewriting actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.


Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.


The header is followed by the markers. Each marker is made of:

- 1 unsigned byte: number of new changesets "N", can be zero.

- 1 unsigned 32-bit integer: metadata size "M" in bytes.

- 1 byte: a bit field. It is reserved for flags used in obsolete
  markers common operations, to avoid repeated decoding of metadata
  entries.

- 20 bytes: obsoleted changeset identifier.

- N*20 bytes: new changesets identifiers.

- M bytes: metadata as a sequence of nul-terminated strings. Each
  string contains a key and a value, separated by a colon ':', without
  additional encoding. Keys cannot contain '\0' or ':' and values
  cannot contain '\0'.
"""
import struct
from mercurial import util, base85
from i18n import _

_pack = struct.pack
_unpack = struct.unpack


# data used for parsing and writing
_fmversion = 0
_fmfixed = '>BIB20s'
_fmnode = '20s'
_fmfsize = struct.calcsize(_fmfixed)
_fnodesize = struct.calcsize(_fmnode)

def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion != _fmversion:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)

    # Loop on markers
    l = len(data)
    while off + _fmfsize <= l:
        # read fixed part
        cur = data[off:off + _fmfsize]
        off += _fmfsize
        nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
        # read replacement
        sucs = ()
        if nbsuc:
            s = (_fnodesize * nbsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fmnode * nbsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        yield (pre, sucs, flags, metadata)

def encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\\0' are forbidden in metadata keys")
        if '\0' in value:
            raise ValueError("'\\0' is forbidden in metadata values")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':', 1)
            d[key] = value
    return d

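# Round trip of the two helpers above (keys are sorted on encode):
#
# >>> blob = encodemeta({'user': 'test', 'date': '0 0'})
# >>> blob
# 'date:0 0\x00user:test'
# >>> decodemeta(blob) == {'date': '0 0', 'user': 'test'}
# True
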
123 class marker(object):
123 class marker(object):
124 """Wrap obsolete marker raw data"""
124 """Wrap obsolete marker raw data"""
125
125
126 def __init__(self, repo, data):
126 def __init__(self, repo, data):
127 # the repo argument will be used to create changectx in later version
127 # the repo argument will be used to create changectx in later version
128 self._repo = repo
128 self._repo = repo
129 self._data = data
129 self._data = data
130 self._decodedmeta = None
130 self._decodedmeta = None
131
131
132 def precnode(self):
132 def precnode(self):
133 """Precursor changeset node identifier"""
133 """Precursor changeset node identifier"""
134 return self._data[0]
134 return self._data[0]
135
135
136 def succnodes(self):
136 def succnodes(self):
137 """List of successor changesets node identifiers"""
137 """List of successor changesets node identifiers"""
138 return self._data[1]
138 return self._data[1]
139
139
140 def metadata(self):
140 def metadata(self):
141 """Decoded metadata dictionary"""
141 """Decoded metadata dictionary"""
142 if self._decodedmeta is None:
142 if self._decodedmeta is None:
143 self._decodedmeta = decodemeta(self._data[3])
143 self._decodedmeta = decodemeta(self._data[3])
144 return self._decodedmeta
144 return self._decodedmeta
145
145
146 def date(self):
146 def date(self):
147 """Creation date as (unixtime, offset)"""
147 """Creation date as (unixtime, offset)"""
148 parts = self.metadata()['date'].split(' ')
148 parts = self.metadata()['date'].split(' ')
149 return (float(parts[0]), int(parts[1]))
149 return (float(parts[0]), int(parts[1]))
150
150
151 class obsstore(object):
151 class obsstore(object):
152 """Store obsolete markers
152 """Store obsolete markers
153
153
154 Markers can be accessed with two mappings:
154 Markers can be accessed with two mappings:
155 - precursors: old -> set(new)
155 - precursors: old -> set(new)
156 - successors: new -> set(old)
156 - successors: new -> set(old)
157 """
157 """
158
158
159 def __init__(self):
159 def __init__(self, sopener):
160 self._all = []
160 self._all = []
161 # new markers to serialize
161 # new markers to serialize
162 self._new = []
162 self._new = []
163 self.precursors = {}
163 self.precursors = {}
164 self.successors = {}
164 self.successors = {}
165 self.sopener = sopener
166 data = sopener.tryread('obsstore')
167 if data:
168 for marker in _readmarkers(data):
169 self._load(marker)
165
170
166 def __iter__(self):
171 def __iter__(self):
167 return iter(self._all)
172 return iter(self._all)
168
173
169 def __nonzero__(self):
174 def __nonzero__(self):
170 return bool(self._all)
175 return bool(self._all)
171
176
172 def create(self, prec, succs=(), flag=0, metadata=None):
177 def create(self, prec, succs=(), flag=0, metadata=None):
173 """obsolete: add a new obsolete marker
178 """obsolete: add a new obsolete marker
174
179
175 * ensuring it is hashable
180 * ensuring it is hashable
176 * check mandatory metadata
181 * check mandatory metadata
177 * encode metadata
182 * encode metadata
178 """
183 """
179 if metadata is None:
184 if metadata is None:
180 metadata = {}
185 metadata = {}
181 if len(prec) != 20:
186 if len(prec) != 20:
182 raise ValueError(prec)
187 raise ValueError(prec)
183 for succ in succs:
188 for succ in succs:
184 if len(succ) != 20:
189 if len(succ) != 20:
185 raise ValueError(succ)
190 raise ValueError(succ)
186 marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
191 marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
187 self.add(marker)
192 self.add(marker)
188
193
189 def add(self, marker):
194 def add(self, marker):
190 """Add a new marker to the store
195 """Add a new marker to the store
191
196
192 This marker still needs to be written to disk"""
197 This marker still needs to be written to disk"""
193 self._new.append(marker)
198 self._new.append(marker)
194 self._load(marker)
199 self._load(marker)
195
200
196 def loadmarkers(self, data):
197 """Load all markers in data, mark them as known."""
198 for marker in _readmarkers(data):
199 self._load(marker)
200
201 def mergemarkers(self, data):
201 def mergemarkers(self, data):
202 other = set(_readmarkers(data))
202 other = set(_readmarkers(data))
203 local = set(self._all)
203 local = set(self._all)
204 new = other - local
204 new = other - local
205 for marker in new:
205 for marker in new:
206 self.add(marker)
206 self.add(marker)
207
207
-    def flushmarkers(self, stream):
-        """Write all markers to a stream
+    def flushmarkers(self):
+        """Write all markers on disk
 
         After this operation, "new" markers are considered "known"."""
-        self._writemarkers(stream)
-        self._new[:] = []
+        if self._new:
+            # XXX: transaction logic should be used here. But for
+            # now rewriting the whole file is good enough.
+            f = self.sopener('obsstore', 'wb', atomictemp=True)
+            try:
+                self._writemarkers(f)
+                f.close()
+                self._new[:] = []
+            except: # re-raises
+                f.discard()
+                raise
 
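The atomictemp=True opener provides crash safety without transaction support: bytes go to a temporary file that is renamed over 'obsstore' only when close() succeeds, and discard() simply deletes the temporary file, leaving the previous store intact. A stand-alone sketch of the same idiom using only the standard library (hypothetical helper, not Mercurial API):

    import os, tempfile

    def atomic_rewrite(path, payload):
        """Replace path with payload via temp file + rename (POSIX-atomic)."""
        fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path) or '.')
        try:
            with os.fdopen(fd, 'wb') as f:
                f.write(payload)
            os.rename(tmp, path)   # old file is never left half-written
        except Exception:
            os.unlink(tmp)         # on failure, keep the previous content
            raise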
     def _load(self, marker):
         self._all.append(marker)
         pre, sucs = marker[:2]
         self.precursors.setdefault(pre, set()).add(marker)
         for suc in sucs:
             self.successors.setdefault(suc, set()).add(marker)
 
     def _writemarkers(self, stream=None):
         # Kept separate from flushmarkers(), it will be reused for
         # markers exchange.
         if stream is None:
             final = []
             w = final.append
         else:
             w = stream.write
         w(_pack('>B', _fmversion))
         for marker in self._all:
             pre, sucs, flags, metadata = marker
             nbsuc = len(sucs)
             format = _fmfixed + (_fmnode * nbsuc)
             data = [nbsuc, len(metadata), flags, pre]
             data.extend(sucs)
             w(_pack(format, *data))
             w(metadata)
         if stream is None:
             return ''.join(final)
 
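The serialized form produced here is a single version byte followed by one variable-length record per marker: a fixed header (number of successors, metadata length, flags, 20-byte precursor node), then the successor nodes, then the raw encoded metadata. The _fm* constants are defined earlier in the module, outside this hunk; the values below are assumptions based on that code:

    import struct

    _fmversion = 0          # assumed: format version, packed as '>B'
    _fmfixed = '>BIB20s'    # assumed: nb-successors, meta length, flags, prec
    _fmnode = '20s'         # assumed: one raw 20-byte node per successor

    def encodeone(marker):
        """Pack one (prec, succs, flags, metadata) tuple -- sketch only."""
        pre, sucs, flags, metadata = marker
        fmt = _fmfixed + (_fmnode * len(sucs))
        return struct.pack(fmt, len(sucs), len(metadata), flags, pre,
                           *sucs) + metadata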
 def listmarkers(repo):
     """List markers over pushkey"""
     if not repo.obsstore:
         return {}
     data = repo.obsstore._writemarkers()
     return {'dump': base85.b85encode(data)}
 
 def pushmarker(repo, key, old, new):
     """Push markers over pushkey"""
     if key != 'dump':
         repo.ui.warn(_('unknown key: %r') % key)
         return 0
     if old:
         repo.ui.warn(_('unexpected old value'))
         return 0
     data = base85.b85decode(new)
     lock = repo.lock()
     try:
         repo.obsstore.mergemarkers(data)
         return 1
     finally:
         lock.release()
 
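Over pushkey, the whole marker set travels as one base85-encoded blob under the single key 'dump'; the receiving side decodes it and merges under the repository lock. A hypothetical round trip between two local repository objects (in real use this crosses the wire protocol):

    payload = listmarkers(src)      # {'dump': <base85 blob>} or {}
    if payload:
        ok = pushmarker(dst, 'dump', '', payload['dump'])
        assert ok == 1              # markers merged under dst's lock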
 def allmarkers(repo):
     """all obsolete markers known in a repository"""
     for markerdata in repo.obsstore:
         yield marker(repo, markerdata)
 
 def precursormarkers(ctx):
     """obsolete markers making this changeset obsolete"""
     for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
         yield marker(ctx._repo, data)
 
 def successormarkers(ctx):
     """obsolete markers marking this changeset as a successor"""
     for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
         yield marker(ctx._repo, data)
 