phases: add rollback support
Pierre-Yves David
r15455:c6f87bda default
@@ -1,2124 +1,2133 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()
        self._dirtyphases = False

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}
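
# --- Editor's sketch (hedged; not part of localrepo.py). The filecache
# comment above describes properties that are recomputed only when their
# backing file changes on disk. A minimal standalone analogue of that
# idea, assuming (mtime, size) is a good-enough change stamp, roughly the
# heuristic util.filecacheentry relies on:
import os

class statcached(object):
    def __init__(self, path, compute):
        self.path = path
        self.compute = compute
        self.stamp = None
        self.value = None

    def get(self):
        try:
            st = os.stat(self.path)
            stamp = (st.st_mtime, st.st_size)
        except OSError:
            stamp = None                 # treat a missing file as one state
        if self.value is None or stamp != self.stamp:
            self.stamp = stamp
            self.value = self.compute()  # recompute only on stat change
        return self.value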

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        openerreqs = set(('revlogv1', 'generaldelta'))
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    @filecache('phaseroots')
    def _phaseroots(self):
        self._dirtyphases = False
        return phases.readroots(self)

    @propertycache
    def _phaserev(self):
        cache = [0] * len(self)
        for phase in phases.trackedphases:
            roots = map(self.changelog.rev, self._phaseroots[phase])
            if roots:
                for rev in roots:
                    cache[rev] = phase
                for rev in self.changelog.descendants(*roots):
                    cache[rev] = phase
        return cache
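
# --- Editor's sketch (hedged; toy data, not Mercurial APIs). _phaserev
# above defaults every revision to phase 0 (public) and pushes a tracked
# phase onto each phase root and all of its descendants. The same effect
# on a tiny hand-built DAG, walking revisions parents-first:
parents = {0: (), 1: (0,), 2: (1,), 3: (1,)}   # rev -> parent revs
roots = {1: (2,)}                              # phase 1 rooted at rev 2
cache = [0] * len(parents)
for phase, phaseroots in roots.items():
    for rev in sorted(parents):
        if rev in phaseroots or any(cache[p] == phase for p in parents[rev]):
            cache[rev] = phase
assert cache == [0, 0, 1, 0]                   # only rev 2 is non-public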

    @filecache('00changelog.i', True)
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @filecache('00manifest.i', True)
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
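
# --- Editor's sketch (hedged). revs() materializes matching revision
# numbers while set() yields changectx objects lazily for the same
# expression; '%s' and '%d' are substituted via revset.formatspec, so
# arguments are quoted rather than spliced in by hand. `repo` here is an
# assumed, already-constructed localrepository.
def showbranchdescendants(repo, branch, rev):
    for ctx in repo.set('branch(%s) and %d::', branch, rev):
        repo.ui.write('%d %s\n' % (ctx.rev(), ctx.description()))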

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        return self._tagscache.tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {}    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self.tags().iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
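
# --- Editor's sketch (hedged; toy data). The pruning loop above drops a
# candidate head once it is reachable from a newer head on the same
# branch; checking only direct parents would miss heads linked through
# another branch, as in 1 (branch a) -> 2 (branch b) -> 3 (branch a):
ancestors = {1: set(), 2: set([1]), 3: set([1, 2])}  # rev -> ancestor revs
bheads = [1, 3]                                      # candidates on branch a
bheads = [h for h in bheads
          if not any(h in ancestors[o] for o in bheads if o != h)]
assert bheads == [3]                                 # rev 1 was not a head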

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        return [(n in nm) for n in nodes]

    def local(self):
        return self

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def _writejournal(self, desc):
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')
+       phasesname = self.sjoin('phaseroots')
+       if os.path.exists(phasesname):
+           util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
+       else:
+           self.sopener.write('journal.phaseroots', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
-               self.join('journal.bookmarks'))
+               self.join('journal.bookmarks'),
+               self.sjoin('journal.phaseroots'))
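
# --- Editor's sketch (hedged; illustrative paths, not Mercurial APIs).
# The lines added above make _writejournal snapshot phaseroots alongside
# dirstate, branch and bookmarks, so both `hg recover` and `hg rollback`
# can restore phase information. The copy-or-record-absent pattern,
# standalone:
import os
import shutil

def snapshotfile(directory, name):
    src = os.path.join(directory, name)
    dst = os.path.join(directory, 'journal.' + name)
    if os.path.exists(src):
        shutil.copyfile(src, dst)    # keep the pre-transaction contents
    else:
        open(dst, 'w').close()       # record "file was absent" as empty
    return dst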

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
+       if os.path.exists(self.sjoin('undo.phaseroots')):
+           util.rename(self.sjoin('undo.phaseroots'),
+                       self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            self.destroyed()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        return 0
854 def invalidatecaches(self):
863 def invalidatecaches(self):
855 try:
864 try:
856 delattr(self, '_tagscache')
865 delattr(self, '_tagscache')
857 except AttributeError:
866 except AttributeError:
858 pass
867 pass
859
868
860 self._branchcache = None # in UTF-8
869 self._branchcache = None # in UTF-8
861 self._branchcachetip = None
870 self._branchcachetip = None
862
871
863 def invalidatedirstate(self):
872 def invalidatedirstate(self):
864 '''Invalidates the dirstate, causing the next call to dirstate
873 '''Invalidates the dirstate, causing the next call to dirstate
865 to check if it was modified since the last time it was read,
874 to check if it was modified since the last time it was read,
866 rereading it if it has.
875 rereading it if it has.
867
876
868 This is different to dirstate.invalidate() that it doesn't always
877 This is different to dirstate.invalidate() that it doesn't always
869 rereads the dirstate. Use dirstate.invalidate() if you want to
878 rereads the dirstate. Use dirstate.invalidate() if you want to
870 explicitly read the dirstate again (i.e. restoring it to a previous
879 explicitly read the dirstate again (i.e. restoring it to a previous
871 known good state).'''
880 known good state).'''
872 try:
881 try:
873 delattr(self, 'dirstate')
882 delattr(self, 'dirstate')
874 except AttributeError:
883 except AttributeError:
875 pass
884 pass
876
885
877 def invalidate(self):
886 def invalidate(self):
878 for k in self._filecache:
887 for k in self._filecache:
879 # dirstate is invalidated separately in invalidatedirstate()
888 # dirstate is invalidated separately in invalidatedirstate()
880 if k == 'dirstate':
889 if k == 'dirstate':
881 continue
890 continue
882
891
883 try:
892 try:
884 delattr(self, k)
893 delattr(self, k)
885 except AttributeError:
894 except AttributeError:
886 pass
895 pass
887 self.invalidatecaches()
896 self.invalidatecaches()
888
897
889 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
898 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
890 try:
899 try:
891 l = lock.lock(lockname, 0, releasefn, desc=desc)
900 l = lock.lock(lockname, 0, releasefn, desc=desc)
892 except error.LockHeld, inst:
901 except error.LockHeld, inst:
893 if not wait:
902 if not wait:
894 raise
903 raise
895 self.ui.warn(_("waiting for lock on %s held by %r\n") %
904 self.ui.warn(_("waiting for lock on %s held by %r\n") %
896 (desc, inst.locker))
905 (desc, inst.locker))
897 # default to 600 seconds timeout
906 # default to 600 seconds timeout
898 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
907 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
899 releasefn, desc=desc)
908 releasefn, desc=desc)
900 if acquirefn:
909 if acquirefn:
901 acquirefn()
910 acquirefn()
902 return l
911 return l
903
912
904 def lock(self, wait=True):
913 def lock(self, wait=True):
905 '''Lock the repository store (.hg/store) and return a weak reference
914 '''Lock the repository store (.hg/store) and return a weak reference
906 to the lock. Use this before modifying the store (e.g. committing or
915 to the lock. Use this before modifying the store (e.g. committing or
907 stripping). If you are opening a transaction, get a lock as well.)'''
916 stripping). If you are opening a transaction, get a lock as well.)'''
908 l = self._lockref and self._lockref()
917 l = self._lockref and self._lockref()
909 if l is not None and l.held:
918 if l is not None and l.held:
910 l.lock()
919 l.lock()
911 return l
920 return l
912
921
913 def unlock():
922 def unlock():
914 self.store.write()
923 self.store.write()
915 if self._dirtyphases:
924 if self._dirtyphases:
916 phases.writeroots(self)
925 phases.writeroots(self)
917 for k, ce in self._filecache.items():
926 for k, ce in self._filecache.items():
918 if k == 'dirstate':
927 if k == 'dirstate':
919 continue
928 continue
920 ce.refresh()
929 ce.refresh()
921
930
922 l = self._lock(self.sjoin("lock"), wait, unlock,
931 l = self._lock(self.sjoin("lock"), wait, unlock,
923 self.invalidate, _('repository %s') % self.origroot)
932 self.invalidate, _('repository %s') % self.origroot)
924 self._lockref = weakref.ref(l)
933 self._lockref = weakref.ref(l)
925 return l
934 return l
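
# --- Editor's sketch (hedged; `repo` is an assumed localrepository). The
# store lock's unlock() callback above is what flushes dirty phase roots
# via phases.writeroots, so phase changes made under the lock reach disk
# when the last lock reference is released:
def lockedexample(repo):
    l = repo.lock()                  # store lock; pair it with transactions
    try:
        tr = repo.transaction('example')
        try:
            # ... append to changelog/manifest/filelogs here ...
            tr.close()               # commit the journal
        finally:
            tr.release()             # aborts if close() was never reached
    finally:
        l.release()                  # runs unlock(): store + phase flush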
926
935
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
                if (subs or removedsubs):
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    if '.hgsubstate' not in changes[0]:
                        changes[0].insert(0, '.hgsubstate')
                    if '.hgsubstate' in changes[2]:
                        changes[2].remove('.hgsubstate')
                elif '.hgsub' in changes[2]:
                    # clean up .hgsubstate when .hgsub is removed
                    if ('.hgsubstate' in wctx and
                        '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                        changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0],
                                     hint=_("use --subrepos for recursive commit"))

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

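    # Minimal usage sketch (hypothetical message and user, not from this
    # file): commit() returns the new changeset node, or None when there is
    # nothing to commit:
    #
    #     node = repo.commit(text='fix encoding bug',
    #                        user='alice <alice@example.com>')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
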
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

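    # The return value of status() is a 7-tuple of sorted lists; a typical
    # caller (sketch, not from this file) unpacks it positionally:
    #
    #     (modified, added, removed, deleted,
    #      unknown, ignored, clean) = repo.status(unknown=True)
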
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

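    # Note on between(): the i == f / f *= 2 bookkeeping samples the
    # first-parent chain at exponentially growing distances from top, so for
    # each (top, bottom) pair it keeps the nodes 1, 2, 4, 8, ... steps below
    # top. A chain of 100 changesets therefore yields about 7 sample points
    # instead of 100, which keeps old-style discovery traffic small.
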
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

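    # Sketch of an extension-side override (hypothetical names, not shipped
    # code): an extension's reposetup() typically subclasses the repo class
    # in place to add its own pre-push validation:
    #
    #     def reposetup(ui, repo):
    #         class vetorepo(repo.__class__):
    #             def checkpush(self, force, revs):
    #                 if not force and ui.configbool('veto', 'block'):
    #                     raise util.Abort(_('pushes are disabled here'))
    #                 return super(vetorepo, self).checkpush(force, revs)
    #         repo.__class__ = vetorepo
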
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

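    # Worked example of the return codes above (hypothetical push): pushing
    # a changeset that creates one brand-new remote head returns 1 + 1 = 2;
    # pushing a merge that collapses two remote heads into one returns
    # -1 - 1 = -2; a push that leaves the remote head count unchanged
    # returns 1.
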
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        if not missing:
            return None
        return self._changegroupsubset(common, missing, heads, source)

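    # Worked example (hypothetical linear history 0 -> 1 -> 2 -> 3 -> 4):
    # getbundle(source, heads=[node(4)], common=[node(2)]) bundles exactly
    # changesets 3 and 4, i.e. ancestors(heads) minus ancestors(common),
    # with both ancestor sets including their own members.
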
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

1736 def _changegroup(self, nodes, source):
1745 def _changegroup(self, nodes, source):
1737 """Compute the changegroup of all nodes that we have that a recipient
1746 """Compute the changegroup of all nodes that we have that a recipient
1738 doesn't. Return a chunkbuffer object whose read() method will return
1747 doesn't. Return a chunkbuffer object whose read() method will return
1739 successive changegroup chunks.
1748 successive changegroup chunks.
1740
1749
1741 This is much easier than the previous function as we can assume that
1750 This is much easier than the previous function as we can assume that
1742 the recipient has any changenode we aren't sending them.
1751 the recipient has any changenode we aren't sending them.
1743
1752
1744 nodes is the set of nodes to send"""
1753 nodes is the set of nodes to send"""
1745
1754
1746 cl = self.changelog
1755 cl = self.changelog
1747 mf = self.manifest
1756 mf = self.manifest
1748 mfs = {}
1757 mfs = {}
1749 changedfiles = set()
1758 changedfiles = set()
1750 fstate = ['']
1759 fstate = ['']
1751 count = [0]
1760 count = [0]
1752
1761
1753 self.hook('preoutgoing', throw=True, source=source)
1762 self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

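    # Editor's note (not in the original source): the 'bundle.reorder'
    # value read above is an hgrc option; 'auto' leaves the decision to
    # the revlog code, while an explicit boolean forces reordering on or
    # off. A minimal configuration sketch:
    #
    #     [bundle]
    #     reorder = auto
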
    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

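    # Editor's note (not in the original source): the wire format consumed
    # by stream_in above, as implied by the parsing code, is:
    #
    #     <status>\n                  0 = ok, 1 = forbidden, 2 = lock failed
    #     <file count> <byte count>\n
    #     then, for each file:
    #         <store path>\0<size>\n  followed by <size> raw bytes
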
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

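    # Editor's note (not in the original source): stream=True is what a
    # streaming clone request (e.g. 'hg clone --uncompressed') passes in;
    # the capability checks above quietly fall back to pull() whenever the
    # remote cannot serve a stream in a format this client supports.
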
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret
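        # Editor's note (not in the original source): a hypothetical direct
        # call, for a namespace such as 'bookmarks', would look like:
        #
        #     ok = repo.pushkey('bookmarks', 'mybook', '', hex(node))
        #
        # where the truth value from pushkey.push is passed through the
        # 'pushkey' hook above before being returned.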

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

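# Editor's note (not in the original source): a hypothetical caller of
# savecommitmessage() above would do, e.g.:
#
#     msgfile = repo.savecommitmessage(text)
#     # -> repo-relative path to .hg/last-message.txt, usable in hints
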
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
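
# Editor's note (not in the original source): undoname() maps a
# transaction journal file to its rollback counterpart, e.g.:
#
#     undoname('journal.phaseroots')  # -> 'undo.phaseroots'
#
# which is exactly the new file the tests below look for.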

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
@@ -1,111 +1,113 @@
Init repo1:

  $ hg init repo1
  $ cd repo1
  $ echo "some text" > a
  $ hg add
  adding a
  $ hg ci -m first
  $ cat .hg/store/fncache | sort
  data/a.i

Testing a.i/b:

  $ mkdir a.i
  $ echo "some other text" > a.i/b
  $ hg add
  adding a.i/b (glob)
  $ hg ci -m second
  $ cat .hg/store/fncache | sort
  data/a.i
  data/a.i.hg/b.i

Testing a.i.hg/c:

  $ mkdir a.i.hg
  $ echo "yet another text" > a.i.hg/c
  $ hg add
  adding a.i.hg/c (glob)
  $ hg ci -m third
  $ cat .hg/store/fncache | sort
  data/a.i
  data/a.i.hg.hg/c.i
  data/a.i.hg/b.i

Testing verify:

  $ hg verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  3 files, 3 changesets, 3 total revisions

  $ rm .hg/store/fncache

  $ hg verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  data/a.i@0: missing revlog!
  data/a.i.hg/c.i@2: missing revlog!
  data/a.i/b.i@1: missing revlog!
  3 files, 3 changesets, 3 total revisions
  3 integrity errors encountered!
  (first damaged changeset appears to be 0)
  [1]
  $ cd ..

Non store repo:

  $ hg --config format.usestore=False init foo
  $ cd foo
  $ mkdir tst.d
  $ echo foo > tst.d/foo
  $ hg ci -Amfoo
  adding tst.d/foo
  $ find .hg | sort
  .hg
  .hg/00changelog.i
  .hg/00manifest.i
  .hg/data
  .hg/data/tst.d.hg
  .hg/data/tst.d.hg/foo.i
  .hg/dirstate
  .hg/last-message.txt
  .hg/requires
  .hg/undo
  .hg/undo.bookmarks
  .hg/undo.branch
  .hg/undo.desc
  .hg/undo.dirstate
+  .hg/undo.phaseroots
  $ cd ..

Non fncache repo:

  $ hg --config format.usefncache=False init bar
  $ cd bar
  $ mkdir tst.d
  $ echo foo > tst.d/Foo
  $ hg ci -Amfoo
  adding tst.d/Foo
  $ find .hg | sort
  .hg
  .hg/00changelog.i
  .hg/dirstate
  .hg/last-message.txt
  .hg/requires
  .hg/store
  .hg/store/00changelog.i
  .hg/store/00manifest.i
  .hg/store/data
  .hg/store/data/tst.d.hg
  .hg/store/data/tst.d.hg/_foo.i
  .hg/store/undo
+  .hg/store/undo.phaseroots
  .hg/undo.bookmarks
  .hg/undo.branch
  .hg/undo.desc
  .hg/undo.dirstate
  $ cd ..

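(editor's note, not part of the recorded tests: the undo.phaseroots
entries in the listings above are new in this changeset; the phase roots
file is now saved at transaction time so that a later 'hg rollback' can
restore phase information along with the changelog)
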
@@ -1,334 +1,340 @@
  $ "$TESTDIR/hghave" no-windows || exit 80

  $ cat > nlinks.py <<EOF
  > import os, sys
  > for f in sorted(sys.stdin.readlines()):
  >     f = f[:-1]
  >     print os.lstat(f).st_nlink, f
  > EOF

  $ nlinksdir()
  > {
  >     find $1 -type f | python $TESTTMP/nlinks.py
  > }

Some implementations of cp can't create hardlinks (replaces 'cp -al' on Linux):

  $ cat > linkcp.py <<EOF
  > from mercurial import util
  > import sys
  > util.copyfiles(sys.argv[1], sys.argv[2], hardlink=True)
  > EOF

  $ linkcp()
  > {
  >     python $TESTTMP/linkcp.py $1 $2
  > }

Prepare repo r1:

  $ hg init r1
  $ cd r1

  $ echo c1 > f1
  $ hg add f1
  $ hg ci -m0

  $ mkdir d1
  $ cd d1
  $ echo c2 > f2
  $ hg add f2
  $ hg ci -m1
  $ cd ../..

  $ nlinksdir r1/.hg/store
  1 r1/.hg/store/00changelog.i
  1 r1/.hg/store/00manifest.i
  1 r1/.hg/store/data/d1/f2.i
  1 r1/.hg/store/data/f1.i
  1 r1/.hg/store/fncache
  1 r1/.hg/store/undo
+  1 r1/.hg/store/undo.phaseroots


Create hardlinked clone r2:

  $ hg clone -U --debug r1 r2
  linked 7 files

Create non-hardlinked clone r3:

  $ hg clone --pull r1 r3
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 2 changes to 2 files
  updating to branch default
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved


Repos r1 and r2 should now contain hardlinked files:

  $ nlinksdir r1/.hg/store
  2 r1/.hg/store/00changelog.i
  2 r1/.hg/store/00manifest.i
  2 r1/.hg/store/data/d1/f2.i
  2 r1/.hg/store/data/f1.i
  2 r1/.hg/store/fncache
  1 r1/.hg/store/undo
+  1 r1/.hg/store/undo.phaseroots

  $ nlinksdir r2/.hg/store
  2 r2/.hg/store/00changelog.i
  2 r2/.hg/store/00manifest.i
  2 r2/.hg/store/data/d1/f2.i
  2 r2/.hg/store/data/f1.i
  2 r2/.hg/store/fncache

Repo r3 should not be hardlinked:

  $ nlinksdir r3/.hg/store
  1 r3/.hg/store/00changelog.i
  1 r3/.hg/store/00manifest.i
  1 r3/.hg/store/data/d1/f2.i
  1 r3/.hg/store/data/f1.i
  1 r3/.hg/store/fncache
  1 r3/.hg/store/undo
+  1 r3/.hg/store/undo.phaseroots


Create a non-inlined filelog in r3:

  $ cd r3/d1
  $ python -c 'for x in range(10000): print x' >> data1
  $ for j in 0 1 2 3 4 5 6 7 8 9; do
  > cat data1 >> f2
  > hg commit -m$j
  > done
  $ cd ../..

  $ nlinksdir r3/.hg/store
  1 r3/.hg/store/00changelog.i
  1 r3/.hg/store/00manifest.i
  1 r3/.hg/store/data/d1/f2.d
  1 r3/.hg/store/data/d1/f2.i
  1 r3/.hg/store/data/f1.i
  1 r3/.hg/store/fncache
  1 r3/.hg/store/undo
+  1 r3/.hg/store/undo.phaseroots

Push to repo r1 should break up most hardlinks in r2:

  $ hg -R r2 verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  2 files, 2 changesets, 2 total revisions

  $ cd r3
  $ hg push
  pushing to $TESTTMP/r1
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 10 changesets with 10 changes to 1 files

  $ cd ..

  $ nlinksdir r2/.hg/store
  1 r2/.hg/store/00changelog.i
  1 r2/.hg/store/00manifest.i
  1 r2/.hg/store/data/d1/f2.i
  2 r2/.hg/store/data/f1.i
  1 r2/.hg/store/fncache

  $ hg -R r2 verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  2 files, 2 changesets, 2 total revisions


  $ cd r1
  $ hg up
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

Committing a change to f1 in r1 must break up hardlink f1.i in r2:

  $ echo c1c1 >> f1
  $ hg ci -m00
  $ cd ..

  $ nlinksdir r2/.hg/store
  1 r2/.hg/store/00changelog.i
  1 r2/.hg/store/00manifest.i
  1 r2/.hg/store/data/d1/f2.i
  1 r2/.hg/store/data/f1.i
  1 r2/.hg/store/fncache


  $ cd r3
  $ hg tip --template '{rev}:{node|short}\n'
  11:a6451b6bc41f
  $ echo bla > f1
  $ hg ci -m1
  $ cd ..

Create hardlinked copy r4 of r3 (on Linux, we would call 'cp -al'):

  $ linkcp r3 r4

r4 has hardlinks in the working dir (not just inside .hg):

  $ nlinksdir r4
  2 r4/.hg/00changelog.i
  2 r4/.hg/branch
  2 r4/.hg/cache/branchheads
  2 r4/.hg/cache/tags
  2 r4/.hg/dirstate
  2 r4/.hg/hgrc
  2 r4/.hg/last-message.txt
  2 r4/.hg/requires
  2 r4/.hg/store/00changelog.i
  2 r4/.hg/store/00manifest.i
  2 r4/.hg/store/data/d1/f2.d
  2 r4/.hg/store/data/d1/f2.i
  2 r4/.hg/store/data/f1.i
  2 r4/.hg/store/fncache
  2 r4/.hg/store/undo
+  2 r4/.hg/store/undo.phaseroots
  2 r4/.hg/undo.bookmarks
  2 r4/.hg/undo.branch
  2 r4/.hg/undo.desc
  2 r4/.hg/undo.dirstate
  2 r4/d1/data1
  2 r4/d1/f2
  2 r4/f1

Update back to revision 11 in r4 should break hardlink of file f1:

  $ hg -R r4 up 11
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

  $ nlinksdir r4
  2 r4/.hg/00changelog.i
  1 r4/.hg/branch
  2 r4/.hg/cache/branchheads
  2 r4/.hg/cache/tags
  1 r4/.hg/dirstate
  2 r4/.hg/hgrc
  2 r4/.hg/last-message.txt
  2 r4/.hg/requires
  2 r4/.hg/store/00changelog.i
  2 r4/.hg/store/00manifest.i
  2 r4/.hg/store/data/d1/f2.d
  2 r4/.hg/store/data/d1/f2.i
  2 r4/.hg/store/data/f1.i
  2 r4/.hg/store/fncache
  2 r4/.hg/store/undo
+  2 r4/.hg/store/undo.phaseroots
  2 r4/.hg/undo.bookmarks
  2 r4/.hg/undo.branch
  2 r4/.hg/undo.desc
  2 r4/.hg/undo.dirstate
  2 r4/d1/data1
  2 r4/d1/f2
  1 r4/f1


Test hardlinking outside hg:

  $ mkdir x
  $ echo foo > x/a

  $ linkcp x y
  $ echo bar >> y/a

No diff if hardlink:

  $ diff x/a y/a

Test mq hardlinking:

  $ echo "[extensions]" >> $HGRCPATH
  $ echo "mq=" >> $HGRCPATH

  $ hg init a
  $ cd a

  $ hg qimport -n foo - << EOF
  > # HG changeset patch
  > # Date 1 0
  > diff -r 2588a8b53d66 a
  > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
  > +++ b/a Wed Jul 23 15:54:29 2008 +0200
  > @@ -0,0 +1,1 @@
  > +a
  > EOF
  adding foo to series file

  $ hg qpush
  applying foo
  now at: foo

  $ cd ..
  $ linkcp a b
  $ cd b

  $ hg qimport -n bar - << EOF
  > # HG changeset patch
  > # Date 2 0
  > diff -r 2588a8b53d66 a
  > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
  > +++ b/b Wed Jul 23 15:54:29 2008 +0200
  > @@ -0,0 +1,1 @@
  > +b
  > EOF
  adding bar to series file

  $ hg qpush
  applying bar
  now at: bar

  $ cat .hg/patches/status
  430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
  4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c:bar

  $ cat .hg/patches/series
  foo
  bar

  $ cat ../a/.hg/patches/status
  430ed4828a74fa4047bc816a25500f7472ab4bfe:foo

  $ cat ../a/.hg/patches/series
  foo

Test tags hardlinking:

  $ hg qdel -r qbase:qtip
  patch foo finalized without changeset message
  patch bar finalized without changeset message

  $ hg tag -l lfoo
  $ hg tag foo

  $ cd ..
  $ linkcp b c
  $ cd c

  $ hg tag -l -r 0 lbar
  $ hg tag -r 0 bar

  $ cat .hgtags
  4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
  430ed4828a74fa4047bc816a25500f7472ab4bfe bar

  $ cat .hg/localtags
  4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
  430ed4828a74fa4047bc816a25500f7472ab4bfe lbar

  $ cat ../b/.hgtags
  4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo

  $ cat ../b/.hg/localtags
  4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo

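(editor's note, not part of the recorded test: Mercurial breaks a
hardlink before appending to a revlog, so the push into r1 must not leak
writes into r2's copies; that copy-on-write behaviour is what the nlink
counts above verify)
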
@@ -1,20 +1,20 @@
Test hangup signal in the middle of transaction

  $ "$TESTDIR/hghave" serve fifo || exit 80
  $ hg init
  $ mkfifo p
  $ hg serve --stdio < p &
  $ P=$!
  $ (echo lock; echo addchangegroup; sleep 5) > p &
  $ Q=$!
  $ sleep 3
  0
  0
  adding changesets
  $ kill -HUP $P
  $ wait
  transaction abort!
  rollback completed
  killed!
  $ echo .hg/* .hg/store/*
-  .hg/00changelog.i .hg/journal.bookmarks .hg/journal.branch .hg/journal.desc .hg/journal.dirstate .hg/requires .hg/store .hg/store/00changelog.i .hg/store/00changelog.i.a
+  .hg/00changelog.i .hg/journal.bookmarks .hg/journal.branch .hg/journal.desc .hg/journal.dirstate .hg/requires .hg/store .hg/store/00changelog.i .hg/store/00changelog.i.a .hg/store/journal.phaseroots
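
(editor's note, not part of the recorded test: because the server was
killed mid-transaction, the journal.* files, now including
journal.phaseroots, are left on disk for a later 'hg recover' to use
when cleaning up the interrupted transaction)
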
@@ -1,143 +1,145 @@
test that new files created in .hg inherit the permissions from .hg/store


  $ "$TESTDIR/hghave" unix-permissions || exit 80

  $ mkdir dir

just in case somebody has a strange $TMPDIR

  $ chmod g-s dir
  $ cd dir

  $ cat >printmodes.py <<EOF
  > import os, sys
  >
  > allnames = []
  > isdir = {}
  > for root, dirs, files in os.walk(sys.argv[1]):
  >     for d in dirs:
  >         name = os.path.join(root, d)
  >         isdir[name] = 1
  >         allnames.append(name)
  >     for f in files:
  >         name = os.path.join(root, f)
  >         allnames.append(name)
  > allnames.sort()
  > for name in allnames:
  >     suffix = name in isdir and '/' or ''
  >     print '%05o %s%s' % (os.lstat(name).st_mode & 07777, name, suffix)
  > EOF

  $ cat >mode.py <<EOF
  > import sys
  > import os
  > print '%05o' % os.lstat(sys.argv[1]).st_mode
  > EOF

  $ umask 077

  $ hg init repo
  $ cd repo

  $ chmod 0770 .hg/store

before commit
store can be written by the group, other files cannot
store is setgid

  $ python ../printmodes.py .
  00700 ./.hg/
  00600 ./.hg/00changelog.i
  00600 ./.hg/requires
  00770 ./.hg/store/

  $ mkdir dir
  $ touch foo dir/bar
  $ hg ci -qAm 'add files'

after commit
working dir files can only be written by the owner
files created in .hg can be written by the group
(in particular, store/**, dirstate, branch cache file, undo files)
new directories are setgid

  $ python ../printmodes.py .
  00700 ./.hg/
  00600 ./.hg/00changelog.i
  00660 ./.hg/dirstate
  00660 ./.hg/last-message.txt
  00600 ./.hg/requires
  00770 ./.hg/store/
  00660 ./.hg/store/00changelog.i
  00660 ./.hg/store/00manifest.i
  00770 ./.hg/store/data/
  00770 ./.hg/store/data/dir/
  00660 ./.hg/store/data/dir/bar.i
  00660 ./.hg/store/data/foo.i
  00660 ./.hg/store/fncache
  00660 ./.hg/store/undo
+  00660 ./.hg/store/undo.phaseroots
  00660 ./.hg/undo.bookmarks
  00660 ./.hg/undo.branch
  00660 ./.hg/undo.desc
  00660 ./.hg/undo.dirstate
  00700 ./dir/
  00600 ./dir/bar
  00600 ./foo

  $ umask 007
  $ hg init ../push

before push
group can write everything

  $ python ../printmodes.py ../push
  00770 ../push/.hg/
  00660 ../push/.hg/00changelog.i
  00660 ../push/.hg/requires
  00770 ../push/.hg/store/

  $ umask 077
  $ hg -q push ../push

after push
group can still write everything

  $ python ../printmodes.py ../push
  00770 ../push/.hg/
  00660 ../push/.hg/00changelog.i
  00770 ../push/.hg/cache/
  00660 ../push/.hg/cache/branchheads
  00660 ../push/.hg/requires
  00770 ../push/.hg/store/
  00660 ../push/.hg/store/00changelog.i
  00660 ../push/.hg/store/00manifest.i
  00770 ../push/.hg/store/data/
  00770 ../push/.hg/store/data/dir/
  00660 ../push/.hg/store/data/dir/bar.i
  00660 ../push/.hg/store/data/foo.i
  00660 ../push/.hg/store/fncache
  00660 ../push/.hg/store/undo
+  00660 ../push/.hg/store/undo.phaseroots
  00660 ../push/.hg/undo.bookmarks
  00660 ../push/.hg/undo.branch
  00660 ../push/.hg/undo.desc
  00660 ../push/.hg/undo.dirstate


Test that we don't lose the setgid bit when we call chmod.
Not all systems support setgid directories (e.g. HFS+), so
just check that directories have the same mode.

  $ cd ..
  $ hg init setgid
  $ cd setgid
  $ chmod g+rwx .hg/store
  $ chmod g+s .hg/store 2> /dev/null
  $ mkdir dir
  $ touch dir/file
  $ hg ci -qAm 'add dir/file'
  $ storemode=`python ../mode.py .hg/store`
  $ dirmode=`python ../mode.py .hg/store/data/dir`
  $ if [ "$storemode" != "$dirmode" ]; then
  > echo "$storemode != $dirmode"
  $ fi