phases: add a cache allowing to know in which phase a changeset is
Pierre-Yves David
r15420:e80d0d31 default
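
The change below adds a propertycache, _phaserev, that maps each revision
number to its phase: every entry defaults to 0 (public), and for each phase
listed in phases.trackedphases the roots recorded in _phaseroots, plus all
of their descendants, are marked with that phase's number.

A minimal standalone sketch of the same algorithm (not part of the changeset;
nrevs, parents, and phaseroots are simplified stand-ins for the changelog API
the real code uses):

    def phasecache(nrevs, parents, phaseroots, trackedphases=(1,)):
        """Return a list mapping revision number -> phase number."""
        cache = [0] * nrevs  # default: phase 0 (public)
        for phase in trackedphases:
            roots = set(phaseroots.get(phase, ()))
            for rev in range(nrevs):
                # revisions are topologically ordered, so a single forward
                # pass marks every root and every descendant of a root
                if rev in roots or any(cache[p] == phase
                                       for p in parents(rev) if p != -1):
                    cache[rev] = phase
        return cache

    # example: linear history 0-1-2-3 where revision 2 is a phase-1 root;
    # revisions 2 and 3 get phase 1, revisions 0 and 1 stay public
    parentmap = {0: (-1,), 1: (0,), 2: (1,), 3: (2,)}
    assert phasecache(4, lambda r: parentmap[r], {1: set([2])}) == [0, 0, 1, 1]

Being a propertycache, the list is computed on first access and then reused
for the lifetime of the repository object, until the attribute is invalidated.
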
@@ -1,2108 +1,2120 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        openerreqs = set(('revlogv1', 'generaldelta'))
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    @filecache('phaseroots')
    def _phaseroots(self):
        return phases.readroots(self)

+    @propertycache
+    def _phaserev(self):
+        cache = [0] * len(self)
+        for phase in phases.trackedphases:
+            roots = map(self.changelog.rev, self._phaseroots[phase])
+            if roots:
+                for rev in roots:
+                    cache[rev] = phase
+                for rev in self.changelog.descendants(*roots):
+                    cache[rev] = phase
+        return cache
+
    @filecache('00changelog.i', True)
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @filecache('00manifest.i', True)
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        return self._tagscache.tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self.tags().iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        return [(n in nm) for n in nodes]

    def local(self):
        return self

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def _writejournal(self, desc):
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            self.destroyed()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        return 0

840 def invalidatecaches(self):
852 def invalidatecaches(self):
841 try:
853 try:
842 delattr(self, '_tagscache')
854 delattr(self, '_tagscache')
843 except AttributeError:
855 except AttributeError:
844 pass
856 pass
845
857
846 self._branchcache = None # in UTF-8
858 self._branchcache = None # in UTF-8
847 self._branchcachetip = None
859 self._branchcachetip = None
848
860
849 def invalidatedirstate(self):
861 def invalidatedirstate(self):
850 '''Invalidates the dirstate, causing the next call to dirstate
862 '''Invalidates the dirstate, causing the next call to dirstate
851 to check if it was modified since the last time it was read,
863 to check if it was modified since the last time it was read,
852 rereading it if it has.
864 rereading it if it has.
853
865
854 This is different to dirstate.invalidate() that it doesn't always
866 This is different to dirstate.invalidate() that it doesn't always
855 rereads the dirstate. Use dirstate.invalidate() if you want to
867 rereads the dirstate. Use dirstate.invalidate() if you want to
856 explicitly read the dirstate again (i.e. restoring it to a previous
868 explicitly read the dirstate again (i.e. restoring it to a previous
857 known good state).'''
869 known good state).'''
858 try:
870 try:
859 delattr(self, 'dirstate')
871 delattr(self, 'dirstate')
860 except AttributeError:
872 except AttributeError:
861 pass
873 pass
862
874
863 def invalidate(self):
875 def invalidate(self):
864 for k in self._filecache:
876 for k in self._filecache:
865 # dirstate is invalidated separately in invalidatedirstate()
877 # dirstate is invalidated separately in invalidatedirstate()
866 if k == 'dirstate':
878 if k == 'dirstate':
867 continue
879 continue
868
880
869 try:
881 try:
870 delattr(self, k)
882 delattr(self, k)
871 except AttributeError:
883 except AttributeError:
872 pass
884 pass
873 self.invalidatecaches()
885 self.invalidatecaches()
874
886
875 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
887 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
876 try:
888 try:
877 l = lock.lock(lockname, 0, releasefn, desc=desc)
889 l = lock.lock(lockname, 0, releasefn, desc=desc)
878 except error.LockHeld, inst:
890 except error.LockHeld, inst:
879 if not wait:
891 if not wait:
880 raise
892 raise
881 self.ui.warn(_("waiting for lock on %s held by %r\n") %
893 self.ui.warn(_("waiting for lock on %s held by %r\n") %
882 (desc, inst.locker))
894 (desc, inst.locker))
883 # default to 600 seconds timeout
895 # default to 600 seconds timeout
884 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
896 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
885 releasefn, desc=desc)
897 releasefn, desc=desc)
886 if acquirefn:
898 if acquirefn:
887 acquirefn()
899 acquirefn()
888 return l
900 return l
889
901
890 def lock(self, wait=True):
902 def lock(self, wait=True):
891 '''Lock the repository store (.hg/store) and return a weak reference
903 '''Lock the repository store (.hg/store) and return a weak reference
892 to the lock. Use this before modifying the store (e.g. committing or
904 to the lock. Use this before modifying the store (e.g. committing or
893 stripping). If you are opening a transaction, get a lock as well.)'''
905 stripping). If you are opening a transaction, get a lock as well.)'''
894 l = self._lockref and self._lockref()
906 l = self._lockref and self._lockref()
895 if l is not None and l.held:
907 if l is not None and l.held:
896 l.lock()
908 l.lock()
897 return l
909 return l
898
910
899 def unlock():
911 def unlock():
900 self.store.write()
912 self.store.write()
901 for k, ce in self._filecache.items():
913 for k, ce in self._filecache.items():
902 if k == 'dirstate':
914 if k == 'dirstate':
903 continue
915 continue
904 ce.refresh()
916 ce.refresh()
905
917
906 l = self._lock(self.sjoin("lock"), wait, unlock,
918 l = self._lock(self.sjoin("lock"), wait, unlock,
907 self.invalidate, _('repository %s') % self.origroot)
919 self.invalidate, _('repository %s') % self.origroot)
908 self._lockref = weakref.ref(l)
920 self._lockref = weakref.ref(l)
909 return l
921 return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
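
    # Illustrative usage sketch (not part of the original file): callers that
    # need both locks conventionally take wlock() before lock() and release
    # them in reverse order in a finally block, e.g.:
    #
    #   wlock = repo.wlock()
    #   lock = repo.lock()
    #   try:
    #       ...                     # modify .hg and .hg/store
    #   finally:
    #       lock.release()
    #       wlock.release()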

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
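
    # Illustrative note (not part of the original file): for a rename, the
    # metadata written by _filecommit above looks roughly like
    #
    #   meta = {'copy': 'foo', 'copyrev': '<40-digit hex filenode>'}
    #
    # and the filelog entry is stored with fparent1 == nullid, which tells
    # readers to look up the copy source instead of the first parent.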

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
                if (subs or removedsubs):
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    if '.hgsubstate' not in changes[0]:
                        changes[0].insert(0, '.hgsubstate')
                    if '.hgsubstate' in changes[2]:
                        changes[2].remove('.hgsubstate')
            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0],
                                     hint=_("use --subrepos for recursive commit"))

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
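
    # Illustrative usage sketch (not part of the original file): an extension
    # could create a changeset from the working directory roughly like this;
    # the message, user name and file pattern below are only example inputs:
    #
    #   m = matchmod.match(repo.root, '', ['a.txt'])
    #   node = repo.commit(text="fix frobnication", user="alice", match=m)
    #   if node is None:
    #       ui.status("nothing changed\n")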

    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
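
    # Illustrative note (not part of the original file): commitctx writes in
    # a fixed order inside one transaction -- filelogs first (_filecommit),
    # then the manifest, then the changelog entry -- and the 'commit' hook is
    # only fired from commit() after the transaction has closed, so a failing
    # 'pretxncommit' hook rolls the whole changeset back.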

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)
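
    # Illustrative usage sketch (not part of the original file): walking the
    # files of a revision with a pattern matcher might look like
    #
    #   m = matchmod.match(repo.root, '', ['glob:*.py'])
    #   for f in repo.walk(m, node='tip'):
    #       ui.write(f + '\n')
    #
    # where 'tip' and the glob pattern are only example inputs.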

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
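
    # Illustrative usage sketch (not part of the original file): callers
    # unpack the seven lists returned by status() positionally, e.g.
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(unknown=True, clean=True)
    #
    # where the unknown, ignored and clean lists stay empty unless the
    # corresponding keyword argument is set to True.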

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
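
    # Illustrative note (not part of the original file): for each (top,
    # bottom) pair, between() records the nodes at exponentially growing
    # distances from top -- 1, 2, 4, 8, ... -- along the first-parent
    # chain, so a long stretch of history is summarized with O(log n)
    # sample nodes instead of the full list.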

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result
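
    # Illustrative usage sketch (not part of the original file): a caller
    # typically obtains the remote repository object first and then pulls,
    # e.g. (the 'hg' module and the URL are assumptions of this sketch):
    #
    #   other = hg.repository(ui, 'http://example.com/repo')
    #   result = repo.pull(other, heads=None, force=False)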

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
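
    # Illustrative usage sketch (not part of the original file): interpreting
    # push()'s integer result as documented above ('other' is an assumed
    # remote peer object):
    #
    #   ret = repo.push(other)
    #   if ret == 0:
    #       ui.warn("push failed or nothing to push\n")
    #   elif ret > 1:
    #       ui.status("remote gained %d head(s)\n" % (ret - 1))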

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        if not missing:
            return None
        return self._changegroupsubset(common, missing, heads, source)
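
    # Illustrative note (not part of the original file): in set terms the
    # bundle produced here contains ancestors(heads) - ancestors(common),
    # i.e. exactly the changesets the caller is missing; a pull with
    # common=[nullid] therefore degenerates to "everything reachable from
    # heads".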

    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
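
    # Illustrative usage sketch (not part of the original file): the returned
    # object streams chunks on demand via read(), so a caller can spool a
    # changegroup to disk without materializing it in memory, e.g.
    #
    #   cg = repo.changegroupsubset([base], [head], 'bundle')
    #   while True:
    #       chunk = cg.read(4096)
    #       if not chunk:
    #           break
    #       f.write(chunk)
    #
    # where 'base', 'head' and the file object f are assumptions.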

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1804
1816
    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

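    # Illustrative sketch (not part of the original file): decoding the
    # return value documented in addchangegroup()'s docstring.
    #
    #     ret = repo.addchangegroup(source, 'pull', url)
    #     if ret == 0:
    #         pass                      # nothing changed, or no source
    #     elif ret > 1:
    #         added = ret - 1           # that many new heads appeared
    #     elif ret < 0:
    #         removed = -ret - 1        # that many heads went away
    #     # ret == 1: changesets added, head count unchanged
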
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

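    # Illustrative summary (not part of the original file) of the wire
    # format stream_in() parses above, reconstructed from its reads:
    #
    #     <status>\n                        "0" ok, "1"/"2" server errors
    #     <total_files> <total_bytes>\n
    #     then, for each file:
    #     <store path>\0<size>\n            followed by <size> raw bytes
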
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

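    # For illustration (not part of the original file), the fallback order
    # implemented above:
    #   1. stream requested, no heads, remote has 'stream'     -> stream_in
    #   2. same, but remote advertises compatible 'streamreqs' -> stream_in
    #   3. otherwise (heads given, or streaming unsupported)   -> pull
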
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

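    # Illustrative sketch (not part of the original file): pushkey is the
    # generic key/value protocol behind features such as bookmarks; a
    # hypothetical caller might move a bookmark with:
    #
    #     repo.pushkey('bookmarks', 'mybook', hex(oldnode), hex(newnode))
    #
    # A prepushkey hook can veto the update by raising, since throw=True.
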
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

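    # For illustration (not part of the original file):
    #
    #     marks = repo.listkeys('bookmarks')    # {name: hex node, ...}
    #
    # An unregistered namespace yields an empty dict rather than an error.
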
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

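    # For illustration (not part of the original file): unset arguments
    # come back formatted as the string 'None', e.g.
    #
    #     repo.debugwireargs('a', 'b', three='c')  ->  'a b c None None'
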
    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

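    # Illustrative note (not part of the original file): the saved file lets
    # a commit message survive an aborted commit, e.g.
    #
    #     path = repo.savecommitmessage('draft message\n')
    #     # path points at .hg/last-message.txt, relative to the cwd
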
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

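# Illustrative note (not part of the original file): elsewhere in this
# module, aftertrans() is handed the journal -> undo rename pairs and passed
# as the post-close callback of transaction.transaction(), which is what
# gives 'hg rollback' its undo files. A minimal standalone use:
#
#     a = aftertrans([('journal', 'undo')])
#     a()    # renames 'journal' to 'undo' via util.rename
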
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

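# For illustration (not part of the original file):
#
#     undoname('journal')          -> 'undo'
#     undoname('journal.dirstate') -> 'undo.dirstate'
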
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True