##// END OF EJS Templates
phases: make secret changeset undiscoverable in all case...
Pierre-Yves David -
r15889:816209ea default
parent child Browse files
Show More
@@ -1,2272 +1,2277 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import scmutil, util, extensions, hook, error, revset
13 import scmutil, util, extensions, hook, error, revset
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 from lock import release
17 from lock import release
18 import weakref, errno, os, time, inspect
18 import weakref, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20 filecache = scmutil.filecache
20 filecache = scmutil.filecache
21
21
class localrepository(repo.repository):
    # Wire-protocol capabilities this repository advertises to peers.
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    # Requirements that affect how revlog data is stored on disk.
    supportedformats = set(('revlogv1', 'generaldelta'))
    # Full set of requirements this code can open, storage formats included.
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
28
28
29 def __init__(self, baseui, path=None, create=False):
29 def __init__(self, baseui, path=None, create=False):
30 repo.repository.__init__(self)
30 repo.repository.__init__(self)
31 self.root = os.path.realpath(util.expandpath(path))
31 self.root = os.path.realpath(util.expandpath(path))
32 self.path = os.path.join(self.root, ".hg")
32 self.path = os.path.join(self.root, ".hg")
33 self.origroot = path
33 self.origroot = path
34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 self.opener = scmutil.opener(self.path)
35 self.opener = scmutil.opener(self.path)
36 self.wopener = scmutil.opener(self.root)
36 self.wopener = scmutil.opener(self.root)
37 self.baseui = baseui
37 self.baseui = baseui
38 self.ui = baseui.copy()
38 self.ui = baseui.copy()
39 self._dirtyphases = False
39 self._dirtyphases = False
40
40
41 try:
41 try:
42 self.ui.readconfig(self.join("hgrc"), self.root)
42 self.ui.readconfig(self.join("hgrc"), self.root)
43 extensions.loadall(self.ui)
43 extensions.loadall(self.ui)
44 except IOError:
44 except IOError:
45 pass
45 pass
46
46
47 if not os.path.isdir(self.path):
47 if not os.path.isdir(self.path):
48 if create:
48 if create:
49 if not os.path.exists(path):
49 if not os.path.exists(path):
50 util.makedirs(path)
50 util.makedirs(path)
51 util.makedir(self.path, notindexed=True)
51 util.makedir(self.path, notindexed=True)
52 requirements = ["revlogv1"]
52 requirements = ["revlogv1"]
53 if self.ui.configbool('format', 'usestore', True):
53 if self.ui.configbool('format', 'usestore', True):
54 os.mkdir(os.path.join(self.path, "store"))
54 os.mkdir(os.path.join(self.path, "store"))
55 requirements.append("store")
55 requirements.append("store")
56 if self.ui.configbool('format', 'usefncache', True):
56 if self.ui.configbool('format', 'usefncache', True):
57 requirements.append("fncache")
57 requirements.append("fncache")
58 if self.ui.configbool('format', 'dotencode', True):
58 if self.ui.configbool('format', 'dotencode', True):
59 requirements.append('dotencode')
59 requirements.append('dotencode')
60 # create an invalid changelog
60 # create an invalid changelog
61 self.opener.append(
61 self.opener.append(
62 "00changelog.i",
62 "00changelog.i",
63 '\0\0\0\2' # represents revlogv2
63 '\0\0\0\2' # represents revlogv2
64 ' dummy changelog to prevent using the old repo layout'
64 ' dummy changelog to prevent using the old repo layout'
65 )
65 )
66 if self.ui.configbool('format', 'generaldelta', False):
66 if self.ui.configbool('format', 'generaldelta', False):
67 requirements.append("generaldelta")
67 requirements.append("generaldelta")
68 requirements = set(requirements)
68 requirements = set(requirements)
69 else:
69 else:
70 raise error.RepoError(_("repository %s not found") % path)
70 raise error.RepoError(_("repository %s not found") % path)
71 elif create:
71 elif create:
72 raise error.RepoError(_("repository %s already exists") % path)
72 raise error.RepoError(_("repository %s already exists") % path)
73 else:
73 else:
74 try:
74 try:
75 requirements = scmutil.readrequires(self.opener, self.supported)
75 requirements = scmutil.readrequires(self.opener, self.supported)
76 except IOError, inst:
76 except IOError, inst:
77 if inst.errno != errno.ENOENT:
77 if inst.errno != errno.ENOENT:
78 raise
78 raise
79 requirements = set()
79 requirements = set()
80
80
81 self.sharedpath = self.path
81 self.sharedpath = self.path
82 try:
82 try:
83 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
83 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
84 if not os.path.exists(s):
84 if not os.path.exists(s):
85 raise error.RepoError(
85 raise error.RepoError(
86 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 _('.hg/sharedpath points to nonexistent directory %s') % s)
87 self.sharedpath = s
87 self.sharedpath = s
88 except IOError, inst:
88 except IOError, inst:
89 if inst.errno != errno.ENOENT:
89 if inst.errno != errno.ENOENT:
90 raise
90 raise
91
91
92 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
92 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
93 self.spath = self.store.path
93 self.spath = self.store.path
94 self.sopener = self.store.opener
94 self.sopener = self.store.opener
95 self.sjoin = self.store.join
95 self.sjoin = self.store.join
96 self.opener.createmode = self.store.createmode
96 self.opener.createmode = self.store.createmode
97 self._applyrequirements(requirements)
97 self._applyrequirements(requirements)
98 if create:
98 if create:
99 self._writerequirements()
99 self._writerequirements()
100
100
101
101
102 self._branchcache = None
102 self._branchcache = None
103 self._branchcachetip = None
103 self._branchcachetip = None
104 self.filterpats = {}
104 self.filterpats = {}
105 self._datafilters = {}
105 self._datafilters = {}
106 self._transref = self._lockref = self._wlockref = None
106 self._transref = self._lockref = self._wlockref = None
107
107
108 # A cache for various files under .hg/ that tracks file changes,
108 # A cache for various files under .hg/ that tracks file changes,
109 # (used by the filecache decorator)
109 # (used by the filecache decorator)
110 #
110 #
111 # Maps a property name to its util.filecacheentry
111 # Maps a property name to its util.filecacheentry
112 self._filecache = {}
112 self._filecache = {}
113
113
114 def _applyrequirements(self, requirements):
114 def _applyrequirements(self, requirements):
115 self.requirements = requirements
115 self.requirements = requirements
116 openerreqs = set(('revlogv1', 'generaldelta'))
116 openerreqs = set(('revlogv1', 'generaldelta'))
117 self.sopener.options = dict((r, 1) for r in requirements
117 self.sopener.options = dict((r, 1) for r in requirements
118 if r in openerreqs)
118 if r in openerreqs)
119
119
120 def _writerequirements(self):
120 def _writerequirements(self):
121 reqfile = self.opener("requires", "w")
121 reqfile = self.opener("requires", "w")
122 for r in self.requirements:
122 for r in self.requirements:
123 reqfile.write("%s\n" % r)
123 reqfile.write("%s\n" % r)
124 reqfile.close()
124 reqfile.close()
125
125
126 def _checknested(self, path):
126 def _checknested(self, path):
127 """Determine if path is a legal nested repository."""
127 """Determine if path is a legal nested repository."""
128 if not path.startswith(self.root):
128 if not path.startswith(self.root):
129 return False
129 return False
130 subpath = path[len(self.root) + 1:]
130 subpath = path[len(self.root) + 1:]
131 normsubpath = util.pconvert(subpath)
131 normsubpath = util.pconvert(subpath)
132
132
133 # XXX: Checking against the current working copy is wrong in
133 # XXX: Checking against the current working copy is wrong in
134 # the sense that it can reject things like
134 # the sense that it can reject things like
135 #
135 #
136 # $ hg cat -r 10 sub/x.txt
136 # $ hg cat -r 10 sub/x.txt
137 #
137 #
138 # if sub/ is no longer a subrepository in the working copy
138 # if sub/ is no longer a subrepository in the working copy
139 # parent revision.
139 # parent revision.
140 #
140 #
141 # However, it can of course also allow things that would have
141 # However, it can of course also allow things that would have
142 # been rejected before, such as the above cat command if sub/
142 # been rejected before, such as the above cat command if sub/
143 # is a subrepository now, but was a normal directory before.
143 # is a subrepository now, but was a normal directory before.
144 # The old path auditor would have rejected by mistake since it
144 # The old path auditor would have rejected by mistake since it
145 # panics when it sees sub/.hg/.
145 # panics when it sees sub/.hg/.
146 #
146 #
147 # All in all, checking against the working copy seems sensible
147 # All in all, checking against the working copy seems sensible
148 # since we want to prevent access to nested repositories on
148 # since we want to prevent access to nested repositories on
149 # the filesystem *now*.
149 # the filesystem *now*.
150 ctx = self[None]
150 ctx = self[None]
151 parts = util.splitpath(subpath)
151 parts = util.splitpath(subpath)
152 while parts:
152 while parts:
153 prefix = '/'.join(parts)
153 prefix = '/'.join(parts)
154 if prefix in ctx.substate:
154 if prefix in ctx.substate:
155 if prefix == normsubpath:
155 if prefix == normsubpath:
156 return True
156 return True
157 else:
157 else:
158 sub = ctx.sub(prefix)
158 sub = ctx.sub(prefix)
159 return sub.checknested(subpath[len(prefix) + 1:])
159 return sub.checknested(subpath[len(prefix) + 1:])
160 else:
160 else:
161 parts.pop()
161 parts.pop()
162 return False
162 return False
163
163
164 @filecache('bookmarks')
164 @filecache('bookmarks')
165 def _bookmarks(self):
165 def _bookmarks(self):
166 return bookmarks.read(self)
166 return bookmarks.read(self)
167
167
168 @filecache('bookmarks.current')
168 @filecache('bookmarks.current')
169 def _bookmarkcurrent(self):
169 def _bookmarkcurrent(self):
170 return bookmarks.readcurrent(self)
170 return bookmarks.readcurrent(self)
171
171
172 def _writebookmarks(self, marks):
172 def _writebookmarks(self, marks):
173 bookmarks.write(self)
173 bookmarks.write(self)
174
174
175 @filecache('phaseroots')
175 @filecache('phaseroots')
176 def _phaseroots(self):
176 def _phaseroots(self):
177 self._dirtyphases = False
177 self._dirtyphases = False
178 phaseroots = phases.readroots(self)
178 phaseroots = phases.readroots(self)
179 phases.filterunknown(self, phaseroots)
179 phases.filterunknown(self, phaseroots)
180 return phaseroots
180 return phaseroots
181
181
182 @propertycache
182 @propertycache
183 def _phaserev(self):
183 def _phaserev(self):
184 cache = [phases.public] * len(self)
184 cache = [phases.public] * len(self)
185 for phase in phases.trackedphases:
185 for phase in phases.trackedphases:
186 roots = map(self.changelog.rev, self._phaseroots[phase])
186 roots = map(self.changelog.rev, self._phaseroots[phase])
187 if roots:
187 if roots:
188 for rev in roots:
188 for rev in roots:
189 cache[rev] = phase
189 cache[rev] = phase
190 for rev in self.changelog.descendants(*roots):
190 for rev in self.changelog.descendants(*roots):
191 cache[rev] = phase
191 cache[rev] = phase
192 return cache
192 return cache
193
193
194 @filecache('00changelog.i', True)
194 @filecache('00changelog.i', True)
195 def changelog(self):
195 def changelog(self):
196 c = changelog.changelog(self.sopener)
196 c = changelog.changelog(self.sopener)
197 if 'HG_PENDING' in os.environ:
197 if 'HG_PENDING' in os.environ:
198 p = os.environ['HG_PENDING']
198 p = os.environ['HG_PENDING']
199 if p.startswith(self.root):
199 if p.startswith(self.root):
200 c.readpending('00changelog.i.a')
200 c.readpending('00changelog.i.a')
201 return c
201 return c
202
202
203 @filecache('00manifest.i', True)
203 @filecache('00manifest.i', True)
204 def manifest(self):
204 def manifest(self):
205 return manifest.manifest(self.sopener)
205 return manifest.manifest(self.sopener)
206
206
207 @filecache('dirstate')
207 @filecache('dirstate')
208 def dirstate(self):
208 def dirstate(self):
209 warned = [0]
209 warned = [0]
210 def validate(node):
210 def validate(node):
211 try:
211 try:
212 self.changelog.rev(node)
212 self.changelog.rev(node)
213 return node
213 return node
214 except error.LookupError:
214 except error.LookupError:
215 if not warned[0]:
215 if not warned[0]:
216 warned[0] = True
216 warned[0] = True
217 self.ui.warn(_("warning: ignoring unknown"
217 self.ui.warn(_("warning: ignoring unknown"
218 " working parent %s!\n") % short(node))
218 " working parent %s!\n") % short(node))
219 return nullid
219 return nullid
220
220
221 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
221 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
222
222
223 def __getitem__(self, changeid):
223 def __getitem__(self, changeid):
224 if changeid is None:
224 if changeid is None:
225 return context.workingctx(self)
225 return context.workingctx(self)
226 return context.changectx(self, changeid)
226 return context.changectx(self, changeid)
227
227
228 def __contains__(self, changeid):
228 def __contains__(self, changeid):
229 try:
229 try:
230 return bool(self.lookup(changeid))
230 return bool(self.lookup(changeid))
231 except error.RepoLookupError:
231 except error.RepoLookupError:
232 return False
232 return False
233
233
234 def __nonzero__(self):
234 def __nonzero__(self):
235 return True
235 return True
236
236
237 def __len__(self):
237 def __len__(self):
238 return len(self.changelog)
238 return len(self.changelog)
239
239
240 def __iter__(self):
240 def __iter__(self):
241 for i in xrange(len(self)):
241 for i in xrange(len(self)):
242 yield i
242 yield i
243
243
244 def revs(self, expr, *args):
244 def revs(self, expr, *args):
245 '''Return a list of revisions matching the given revset'''
245 '''Return a list of revisions matching the given revset'''
246 expr = revset.formatspec(expr, *args)
246 expr = revset.formatspec(expr, *args)
247 m = revset.match(None, expr)
247 m = revset.match(None, expr)
248 return [r for r in m(self, range(len(self)))]
248 return [r for r in m(self, range(len(self)))]
249
249
250 def set(self, expr, *args):
250 def set(self, expr, *args):
251 '''
251 '''
252 Yield a context for each matching revision, after doing arg
252 Yield a context for each matching revision, after doing arg
253 replacement via revset.formatspec
253 replacement via revset.formatspec
254 '''
254 '''
255 for r in self.revs(expr, *args):
255 for r in self.revs(expr, *args):
256 yield self[r]
256 yield self[r]
257
257
258 def url(self):
258 def url(self):
259 return 'file:' + self.root
259 return 'file:' + self.root
260
260
261 def hook(self, name, throw=False, **args):
261 def hook(self, name, throw=False, **args):
262 return hook.hook(self.ui, self, name, throw, **args)
262 return hook.hook(self.ui, self, name, throw, **args)
263
263
264 tag_disallowed = ':\r\n'
264 tag_disallowed = ':\r\n'
265
265
266 def _tag(self, names, node, message, local, user, date, extra={}):
266 def _tag(self, names, node, message, local, user, date, extra={}):
267 if isinstance(names, str):
267 if isinstance(names, str):
268 allchars = names
268 allchars = names
269 names = (names,)
269 names = (names,)
270 else:
270 else:
271 allchars = ''.join(names)
271 allchars = ''.join(names)
272 for c in self.tag_disallowed:
272 for c in self.tag_disallowed:
273 if c in allchars:
273 if c in allchars:
274 raise util.Abort(_('%r cannot be used in a tag name') % c)
274 raise util.Abort(_('%r cannot be used in a tag name') % c)
275
275
276 branches = self.branchmap()
276 branches = self.branchmap()
277 for name in names:
277 for name in names:
278 self.hook('pretag', throw=True, node=hex(node), tag=name,
278 self.hook('pretag', throw=True, node=hex(node), tag=name,
279 local=local)
279 local=local)
280 if name in branches:
280 if name in branches:
281 self.ui.warn(_("warning: tag %s conflicts with existing"
281 self.ui.warn(_("warning: tag %s conflicts with existing"
282 " branch name\n") % name)
282 " branch name\n") % name)
283
283
284 def writetags(fp, names, munge, prevtags):
284 def writetags(fp, names, munge, prevtags):
285 fp.seek(0, 2)
285 fp.seek(0, 2)
286 if prevtags and prevtags[-1] != '\n':
286 if prevtags and prevtags[-1] != '\n':
287 fp.write('\n')
287 fp.write('\n')
288 for name in names:
288 for name in names:
289 m = munge and munge(name) or name
289 m = munge and munge(name) or name
290 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
290 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
291 old = self.tags().get(name, nullid)
291 old = self.tags().get(name, nullid)
292 fp.write('%s %s\n' % (hex(old), m))
292 fp.write('%s %s\n' % (hex(old), m))
293 fp.write('%s %s\n' % (hex(node), m))
293 fp.write('%s %s\n' % (hex(node), m))
294 fp.close()
294 fp.close()
295
295
296 prevtags = ''
296 prevtags = ''
297 if local:
297 if local:
298 try:
298 try:
299 fp = self.opener('localtags', 'r+')
299 fp = self.opener('localtags', 'r+')
300 except IOError:
300 except IOError:
301 fp = self.opener('localtags', 'a')
301 fp = self.opener('localtags', 'a')
302 else:
302 else:
303 prevtags = fp.read()
303 prevtags = fp.read()
304
304
305 # local tags are stored in the current charset
305 # local tags are stored in the current charset
306 writetags(fp, names, None, prevtags)
306 writetags(fp, names, None, prevtags)
307 for name in names:
307 for name in names:
308 self.hook('tag', node=hex(node), tag=name, local=local)
308 self.hook('tag', node=hex(node), tag=name, local=local)
309 return
309 return
310
310
311 try:
311 try:
312 fp = self.wfile('.hgtags', 'rb+')
312 fp = self.wfile('.hgtags', 'rb+')
313 except IOError, e:
313 except IOError, e:
314 if e.errno != errno.ENOENT:
314 if e.errno != errno.ENOENT:
315 raise
315 raise
316 fp = self.wfile('.hgtags', 'ab')
316 fp = self.wfile('.hgtags', 'ab')
317 else:
317 else:
318 prevtags = fp.read()
318 prevtags = fp.read()
319
319
320 # committed tags are stored in UTF-8
320 # committed tags are stored in UTF-8
321 writetags(fp, names, encoding.fromlocal, prevtags)
321 writetags(fp, names, encoding.fromlocal, prevtags)
322
322
323 fp.close()
323 fp.close()
324
324
325 if '.hgtags' not in self.dirstate:
325 if '.hgtags' not in self.dirstate:
326 self[None].add(['.hgtags'])
326 self[None].add(['.hgtags'])
327
327
328 m = matchmod.exact(self.root, '', ['.hgtags'])
328 m = matchmod.exact(self.root, '', ['.hgtags'])
329 tagnode = self.commit(message, user, date, extra=extra, match=m)
329 tagnode = self.commit(message, user, date, extra=extra, match=m)
330
330
331 for name in names:
331 for name in names:
332 self.hook('tag', node=hex(node), tag=name, local=local)
332 self.hook('tag', node=hex(node), tag=name, local=local)
333
333
334 return tagnode
334 return tagnode
335
335
336 def tag(self, names, node, message, local, user, date):
336 def tag(self, names, node, message, local, user, date):
337 '''tag a revision with one or more symbolic names.
337 '''tag a revision with one or more symbolic names.
338
338
339 names is a list of strings or, when adding a single tag, names may be a
339 names is a list of strings or, when adding a single tag, names may be a
340 string.
340 string.
341
341
342 if local is True, the tags are stored in a per-repository file.
342 if local is True, the tags are stored in a per-repository file.
343 otherwise, they are stored in the .hgtags file, and a new
343 otherwise, they are stored in the .hgtags file, and a new
344 changeset is committed with the change.
344 changeset is committed with the change.
345
345
346 keyword arguments:
346 keyword arguments:
347
347
348 local: whether to store tags in non-version-controlled file
348 local: whether to store tags in non-version-controlled file
349 (default False)
349 (default False)
350
350
351 message: commit message to use if committing
351 message: commit message to use if committing
352
352
353 user: name of user to use if committing
353 user: name of user to use if committing
354
354
355 date: date tuple to use if committing'''
355 date: date tuple to use if committing'''
356
356
357 if not local:
357 if not local:
358 for x in self.status()[:5]:
358 for x in self.status()[:5]:
359 if '.hgtags' in x:
359 if '.hgtags' in x:
360 raise util.Abort(_('working copy of .hgtags is changed '
360 raise util.Abort(_('working copy of .hgtags is changed '
361 '(please commit .hgtags manually)'))
361 '(please commit .hgtags manually)'))
362
362
363 self.tags() # instantiate the cache
363 self.tags() # instantiate the cache
364 self._tag(names, node, message, local, user, date)
364 self._tag(names, node, message, local, user, date)
365
365
366 @propertycache
366 @propertycache
367 def _tagscache(self):
367 def _tagscache(self):
368 '''Returns a tagscache object that contains various tags related caches.'''
368 '''Returns a tagscache object that contains various tags related caches.'''
369
369
370 # This simplifies its cache management by having one decorated
370 # This simplifies its cache management by having one decorated
371 # function (this one) and the rest simply fetch things from it.
371 # function (this one) and the rest simply fetch things from it.
372 class tagscache(object):
372 class tagscache(object):
373 def __init__(self):
373 def __init__(self):
374 # These two define the set of tags for this repository. tags
374 # These two define the set of tags for this repository. tags
375 # maps tag name to node; tagtypes maps tag name to 'global' or
375 # maps tag name to node; tagtypes maps tag name to 'global' or
376 # 'local'. (Global tags are defined by .hgtags across all
376 # 'local'. (Global tags are defined by .hgtags across all
377 # heads, and local tags are defined in .hg/localtags.)
377 # heads, and local tags are defined in .hg/localtags.)
378 # They constitute the in-memory cache of tags.
378 # They constitute the in-memory cache of tags.
379 self.tags = self.tagtypes = None
379 self.tags = self.tagtypes = None
380
380
381 self.nodetagscache = self.tagslist = None
381 self.nodetagscache = self.tagslist = None
382
382
383 cache = tagscache()
383 cache = tagscache()
384 cache.tags, cache.tagtypes = self._findtags()
384 cache.tags, cache.tagtypes = self._findtags()
385
385
386 return cache
386 return cache
387
387
388 def tags(self):
388 def tags(self):
389 '''return a mapping of tag to node'''
389 '''return a mapping of tag to node'''
390 return self._tagscache.tags
390 return self._tagscache.tags
391
391
392 def _findtags(self):
392 def _findtags(self):
393 '''Do the hard work of finding tags. Return a pair of dicts
393 '''Do the hard work of finding tags. Return a pair of dicts
394 (tags, tagtypes) where tags maps tag name to node, and tagtypes
394 (tags, tagtypes) where tags maps tag name to node, and tagtypes
395 maps tag name to a string like \'global\' or \'local\'.
395 maps tag name to a string like \'global\' or \'local\'.
396 Subclasses or extensions are free to add their own tags, but
396 Subclasses or extensions are free to add their own tags, but
397 should be aware that the returned dicts will be retained for the
397 should be aware that the returned dicts will be retained for the
398 duration of the localrepo object.'''
398 duration of the localrepo object.'''
399
399
400 # XXX what tagtype should subclasses/extensions use? Currently
400 # XXX what tagtype should subclasses/extensions use? Currently
401 # mq and bookmarks add tags, but do not set the tagtype at all.
401 # mq and bookmarks add tags, but do not set the tagtype at all.
402 # Should each extension invent its own tag type? Should there
402 # Should each extension invent its own tag type? Should there
403 # be one tagtype for all such "virtual" tags? Or is the status
403 # be one tagtype for all such "virtual" tags? Or is the status
404 # quo fine?
404 # quo fine?
405
405
406 alltags = {} # map tag name to (node, hist)
406 alltags = {} # map tag name to (node, hist)
407 tagtypes = {}
407 tagtypes = {}
408
408
409 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
409 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
410 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
410 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
411
411
412 # Build the return dicts. Have to re-encode tag names because
412 # Build the return dicts. Have to re-encode tag names because
413 # the tags module always uses UTF-8 (in order not to lose info
413 # the tags module always uses UTF-8 (in order not to lose info
414 # writing to the cache), but the rest of Mercurial wants them in
414 # writing to the cache), but the rest of Mercurial wants them in
415 # local encoding.
415 # local encoding.
416 tags = {}
416 tags = {}
417 for (name, (node, hist)) in alltags.iteritems():
417 for (name, (node, hist)) in alltags.iteritems():
418 if node != nullid:
418 if node != nullid:
419 try:
419 try:
420 # ignore tags to unknown nodes
420 # ignore tags to unknown nodes
421 self.changelog.lookup(node)
421 self.changelog.lookup(node)
422 tags[encoding.tolocal(name)] = node
422 tags[encoding.tolocal(name)] = node
423 except error.LookupError:
423 except error.LookupError:
424 pass
424 pass
425 tags['tip'] = self.changelog.tip()
425 tags['tip'] = self.changelog.tip()
426 tagtypes = dict([(encoding.tolocal(name), value)
426 tagtypes = dict([(encoding.tolocal(name), value)
427 for (name, value) in tagtypes.iteritems()])
427 for (name, value) in tagtypes.iteritems()])
428 return (tags, tagtypes)
428 return (tags, tagtypes)
429
429
430 def tagtype(self, tagname):
430 def tagtype(self, tagname):
431 '''
431 '''
432 return the type of the given tag. result can be:
432 return the type of the given tag. result can be:
433
433
434 'local' : a local tag
434 'local' : a local tag
435 'global' : a global tag
435 'global' : a global tag
436 None : tag does not exist
436 None : tag does not exist
437 '''
437 '''
438
438
439 return self._tagscache.tagtypes.get(tagname)
439 return self._tagscache.tagtypes.get(tagname)
440
440
441 def tagslist(self):
441 def tagslist(self):
442 '''return a list of tags ordered by revision'''
442 '''return a list of tags ordered by revision'''
443 if not self._tagscache.tagslist:
443 if not self._tagscache.tagslist:
444 l = []
444 l = []
445 for t, n in self.tags().iteritems():
445 for t, n in self.tags().iteritems():
446 r = self.changelog.rev(n)
446 r = self.changelog.rev(n)
447 l.append((r, t, n))
447 l.append((r, t, n))
448 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
448 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
449
449
450 return self._tagscache.tagslist
450 return self._tagscache.tagslist
451
451
452 def nodetags(self, node):
452 def nodetags(self, node):
453 '''return the tags associated with a node'''
453 '''return the tags associated with a node'''
454 if not self._tagscache.nodetagscache:
454 if not self._tagscache.nodetagscache:
455 nodetagscache = {}
455 nodetagscache = {}
456 for t, n in self.tags().iteritems():
456 for t, n in self.tags().iteritems():
457 nodetagscache.setdefault(n, []).append(t)
457 nodetagscache.setdefault(n, []).append(t)
458 for tags in nodetagscache.itervalues():
458 for tags in nodetagscache.itervalues():
459 tags.sort()
459 tags.sort()
460 self._tagscache.nodetagscache = nodetagscache
460 self._tagscache.nodetagscache = nodetagscache
461 return self._tagscache.nodetagscache.get(node, [])
461 return self._tagscache.nodetagscache.get(node, [])
462
462
463 def nodebookmarks(self, node):
463 def nodebookmarks(self, node):
464 marks = []
464 marks = []
465 for bookmark, n in self._bookmarks.iteritems():
465 for bookmark, n in self._bookmarks.iteritems():
466 if n == node:
466 if n == node:
467 marks.append(bookmark)
467 marks.append(bookmark)
468 return sorted(marks)
468 return sorted(marks)
469
469
470 def _branchtags(self, partial, lrev):
470 def _branchtags(self, partial, lrev):
471 # TODO: rename this function?
471 # TODO: rename this function?
472 tiprev = len(self) - 1
472 tiprev = len(self) - 1
473 if lrev != tiprev:
473 if lrev != tiprev:
474 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
474 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
475 self._updatebranchcache(partial, ctxgen)
475 self._updatebranchcache(partial, ctxgen)
476 self._writebranchcache(partial, self.changelog.tip(), tiprev)
476 self._writebranchcache(partial, self.changelog.tip(), tiprev)
477
477
478 return partial
478 return partial
479
479
480 def updatebranchcache(self):
480 def updatebranchcache(self):
481 tip = self.changelog.tip()
481 tip = self.changelog.tip()
482 if self._branchcache is not None and self._branchcachetip == tip:
482 if self._branchcache is not None and self._branchcachetip == tip:
483 return
483 return
484
484
485 oldtip = self._branchcachetip
485 oldtip = self._branchcachetip
486 self._branchcachetip = tip
486 self._branchcachetip = tip
487 if oldtip is None or oldtip not in self.changelog.nodemap:
487 if oldtip is None or oldtip not in self.changelog.nodemap:
488 partial, last, lrev = self._readbranchcache()
488 partial, last, lrev = self._readbranchcache()
489 else:
489 else:
490 lrev = self.changelog.rev(oldtip)
490 lrev = self.changelog.rev(oldtip)
491 partial = self._branchcache
491 partial = self._branchcache
492
492
493 self._branchtags(partial, lrev)
493 self._branchtags(partial, lrev)
494 # this private cache holds all heads (not just tips)
494 # this private cache holds all heads (not just tips)
495 self._branchcache = partial
495 self._branchcache = partial
496
496
497 def branchmap(self):
497 def branchmap(self):
498 '''returns a dictionary {branch: [branchheads]}'''
498 '''returns a dictionary {branch: [branchheads]}'''
499 self.updatebranchcache()
499 self.updatebranchcache()
500 return self._branchcache
500 return self._branchcache
501
501
502 def branchtags(self):
502 def branchtags(self):
503 '''return a dict where branch names map to the tipmost head of
503 '''return a dict where branch names map to the tipmost head of
504 the branch, open heads come before closed'''
504 the branch, open heads come before closed'''
505 bt = {}
505 bt = {}
506 for bn, heads in self.branchmap().iteritems():
506 for bn, heads in self.branchmap().iteritems():
507 tip = heads[-1]
507 tip = heads[-1]
508 for h in reversed(heads):
508 for h in reversed(heads):
509 if 'close' not in self.changelog.read(h)[5]:
509 if 'close' not in self.changelog.read(h)[5]:
510 tip = h
510 tip = h
511 break
511 break
512 bt[bn] = tip
512 bt[bn] = tip
513 return bt
513 return bt
514
514
515 def _readbranchcache(self):
515 def _readbranchcache(self):
516 partial = {}
516 partial = {}
517 try:
517 try:
518 f = self.opener("cache/branchheads")
518 f = self.opener("cache/branchheads")
519 lines = f.read().split('\n')
519 lines = f.read().split('\n')
520 f.close()
520 f.close()
521 except (IOError, OSError):
521 except (IOError, OSError):
522 return {}, nullid, nullrev
522 return {}, nullid, nullrev
523
523
524 try:
524 try:
525 last, lrev = lines.pop(0).split(" ", 1)
525 last, lrev = lines.pop(0).split(" ", 1)
526 last, lrev = bin(last), int(lrev)
526 last, lrev = bin(last), int(lrev)
527 if lrev >= len(self) or self[lrev].node() != last:
527 if lrev >= len(self) or self[lrev].node() != last:
528 # invalidate the cache
528 # invalidate the cache
529 raise ValueError('invalidating branch cache (tip differs)')
529 raise ValueError('invalidating branch cache (tip differs)')
530 for l in lines:
530 for l in lines:
531 if not l:
531 if not l:
532 continue
532 continue
533 node, label = l.split(" ", 1)
533 node, label = l.split(" ", 1)
534 label = encoding.tolocal(label.strip())
534 label = encoding.tolocal(label.strip())
535 partial.setdefault(label, []).append(bin(node))
535 partial.setdefault(label, []).append(bin(node))
536 except KeyboardInterrupt:
536 except KeyboardInterrupt:
537 raise
537 raise
538 except Exception, inst:
538 except Exception, inst:
539 if self.ui.debugflag:
539 if self.ui.debugflag:
540 self.ui.warn(str(inst), '\n')
540 self.ui.warn(str(inst), '\n')
541 partial, last, lrev = {}, nullid, nullrev
541 partial, last, lrev = {}, nullid, nullrev
542 return partial, last, lrev
542 return partial, last, lrev
543
543
544 def _writebranchcache(self, branches, tip, tiprev):
544 def _writebranchcache(self, branches, tip, tiprev):
545 try:
545 try:
546 f = self.opener("cache/branchheads", "w", atomictemp=True)
546 f = self.opener("cache/branchheads", "w", atomictemp=True)
547 f.write("%s %s\n" % (hex(tip), tiprev))
547 f.write("%s %s\n" % (hex(tip), tiprev))
548 for label, nodes in branches.iteritems():
548 for label, nodes in branches.iteritems():
549 for node in nodes:
549 for node in nodes:
550 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
550 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
551 f.close()
551 f.close()
552 except (IOError, OSError):
552 except (IOError, OSError):
553 pass
553 pass
554
554
555 def _updatebranchcache(self, partial, ctxgen):
555 def _updatebranchcache(self, partial, ctxgen):
556 # collect new branch entries
556 # collect new branch entries
557 newbranches = {}
557 newbranches = {}
558 for c in ctxgen:
558 for c in ctxgen:
559 newbranches.setdefault(c.branch(), []).append(c.node())
559 newbranches.setdefault(c.branch(), []).append(c.node())
560 # if older branchheads are reachable from new ones, they aren't
560 # if older branchheads are reachable from new ones, they aren't
561 # really branchheads. Note checking parents is insufficient:
561 # really branchheads. Note checking parents is insufficient:
562 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
562 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
563 for branch, newnodes in newbranches.iteritems():
563 for branch, newnodes in newbranches.iteritems():
564 bheads = partial.setdefault(branch, [])
564 bheads = partial.setdefault(branch, [])
565 bheads.extend(newnodes)
565 bheads.extend(newnodes)
566 if len(bheads) <= 1:
566 if len(bheads) <= 1:
567 continue
567 continue
568 bheads = sorted(bheads, key=lambda x: self[x].rev())
568 bheads = sorted(bheads, key=lambda x: self[x].rev())
569 # starting from tip means fewer passes over reachable
569 # starting from tip means fewer passes over reachable
570 while newnodes:
570 while newnodes:
571 latest = newnodes.pop()
571 latest = newnodes.pop()
572 if latest not in bheads:
572 if latest not in bheads:
573 continue
573 continue
574 minbhrev = self[bheads[0]].node()
574 minbhrev = self[bheads[0]].node()
575 reachable = self.changelog.reachable(latest, minbhrev)
575 reachable = self.changelog.reachable(latest, minbhrev)
576 reachable.remove(latest)
576 reachable.remove(latest)
577 if reachable:
577 if reachable:
578 bheads = [b for b in bheads if b not in reachable]
578 bheads = [b for b in bheads if b not in reachable]
579 partial[branch] = bheads
579 partial[branch] = bheads
580
580
581 def lookup(self, key):
581 def lookup(self, key):
582 if isinstance(key, int):
582 if isinstance(key, int):
583 return self.changelog.node(key)
583 return self.changelog.node(key)
584 elif key == '.':
584 elif key == '.':
585 return self.dirstate.p1()
585 return self.dirstate.p1()
586 elif key == 'null':
586 elif key == 'null':
587 return nullid
587 return nullid
588 elif key == 'tip':
588 elif key == 'tip':
589 return self.changelog.tip()
589 return self.changelog.tip()
590 n = self.changelog._match(key)
590 n = self.changelog._match(key)
591 if n:
591 if n:
592 return n
592 return n
593 if key in self._bookmarks:
593 if key in self._bookmarks:
594 return self._bookmarks[key]
594 return self._bookmarks[key]
595 if key in self.tags():
595 if key in self.tags():
596 return self.tags()[key]
596 return self.tags()[key]
597 if key in self.branchtags():
597 if key in self.branchtags():
598 return self.branchtags()[key]
598 return self.branchtags()[key]
599 n = self.changelog._partialmatch(key)
599 n = self.changelog._partialmatch(key)
600 if n:
600 if n:
601 return n
601 return n
602
602
603 # can't find key, check if it might have come from damaged dirstate
603 # can't find key, check if it might have come from damaged dirstate
604 if key in self.dirstate.parents():
604 if key in self.dirstate.parents():
605 raise error.Abort(_("working directory has unknown parent '%s'!")
605 raise error.Abort(_("working directory has unknown parent '%s'!")
606 % short(key))
606 % short(key))
607 try:
607 try:
608 if len(key) == 20:
608 if len(key) == 20:
609 key = hex(key)
609 key = hex(key)
610 except TypeError:
610 except TypeError:
611 pass
611 pass
612 raise error.RepoLookupError(_("unknown revision '%s'") % key)
612 raise error.RepoLookupError(_("unknown revision '%s'") % key)
613
613
614 def lookupbranch(self, key, remote=None):
614 def lookupbranch(self, key, remote=None):
615 repo = remote or self
615 repo = remote or self
616 if key in repo.branchmap():
616 if key in repo.branchmap():
617 return key
617 return key
618
618
619 repo = (remote and remote.local()) and remote or self
619 repo = (remote and remote.local()) and remote or self
620 return repo[key].branch()
620 return repo[key].branch()
621
621
622 def known(self, nodes):
622 def known(self, nodes):
623 nm = self.changelog.nodemap
623 nm = self.changelog.nodemap
624 return [(n in nm) for n in nodes]
624 result = []
625 for n in nodes:
626 r = nm.get(n)
627 resp = not (r is None or self._phaserev[r] >= phases.secret)
628 result.append(resp)
629 return result
625
630
626 def local(self):
631 def local(self):
627 return self
632 return self
628
633
629 def cancopy(self):
634 def cancopy(self):
630 return (repo.repository.cancopy(self)
635 return (repo.repository.cancopy(self)
631 and not self._phaseroots[phases.secret])
636 and not self._phaseroots[phases.secret])
632
637
633 def join(self, f):
638 def join(self, f):
634 return os.path.join(self.path, f)
639 return os.path.join(self.path, f)
635
640
636 def wjoin(self, f):
641 def wjoin(self, f):
637 return os.path.join(self.root, f)
642 return os.path.join(self.root, f)
638
643
639 def file(self, f):
644 def file(self, f):
640 if f[0] == '/':
645 if f[0] == '/':
641 f = f[1:]
646 f = f[1:]
642 return filelog.filelog(self.sopener, f)
647 return filelog.filelog(self.sopener, f)
643
648
644 def changectx(self, changeid):
649 def changectx(self, changeid):
645 return self[changeid]
650 return self[changeid]
646
651
647 def parents(self, changeid=None):
652 def parents(self, changeid=None):
648 '''get list of changectxs for parents of changeid'''
653 '''get list of changectxs for parents of changeid'''
649 return self[changeid].parents()
654 return self[changeid].parents()
650
655
651 def filectx(self, path, changeid=None, fileid=None):
656 def filectx(self, path, changeid=None, fileid=None):
652 """changeid can be a changeset revision, node, or tag.
657 """changeid can be a changeset revision, node, or tag.
653 fileid can be a file revision or node."""
658 fileid can be a file revision or node."""
654 return context.filectx(self, path, changeid, fileid)
659 return context.filectx(self, path, changeid, fileid)
655
660
656 def getcwd(self):
661 def getcwd(self):
657 return self.dirstate.getcwd()
662 return self.dirstate.getcwd()
658
663
659 def pathto(self, f, cwd=None):
664 def pathto(self, f, cwd=None):
660 return self.dirstate.pathto(f, cwd)
665 return self.dirstate.pathto(f, cwd)
661
666
662 def wfile(self, f, mode='r'):
667 def wfile(self, f, mode='r'):
663 return self.wopener(f, mode)
668 return self.wopener(f, mode)
664
669
665 def _link(self, f):
670 def _link(self, f):
666 return os.path.islink(self.wjoin(f))
671 return os.path.islink(self.wjoin(f))
667
672
668 def _loadfilter(self, filter):
673 def _loadfilter(self, filter):
669 if filter not in self.filterpats:
674 if filter not in self.filterpats:
670 l = []
675 l = []
671 for pat, cmd in self.ui.configitems(filter):
676 for pat, cmd in self.ui.configitems(filter):
672 if cmd == '!':
677 if cmd == '!':
673 continue
678 continue
674 mf = matchmod.match(self.root, '', [pat])
679 mf = matchmod.match(self.root, '', [pat])
675 fn = None
680 fn = None
676 params = cmd
681 params = cmd
677 for name, filterfn in self._datafilters.iteritems():
682 for name, filterfn in self._datafilters.iteritems():
678 if cmd.startswith(name):
683 if cmd.startswith(name):
679 fn = filterfn
684 fn = filterfn
680 params = cmd[len(name):].lstrip()
685 params = cmd[len(name):].lstrip()
681 break
686 break
682 if not fn:
687 if not fn:
683 fn = lambda s, c, **kwargs: util.filter(s, c)
688 fn = lambda s, c, **kwargs: util.filter(s, c)
684 # Wrap old filters not supporting keyword arguments
689 # Wrap old filters not supporting keyword arguments
685 if not inspect.getargspec(fn)[2]:
690 if not inspect.getargspec(fn)[2]:
686 oldfn = fn
691 oldfn = fn
687 fn = lambda s, c, **kwargs: oldfn(s, c)
692 fn = lambda s, c, **kwargs: oldfn(s, c)
688 l.append((mf, fn, params))
693 l.append((mf, fn, params))
689 self.filterpats[filter] = l
694 self.filterpats[filter] = l
690 return self.filterpats[filter]
695 return self.filterpats[filter]
691
696
692 def _filter(self, filterpats, filename, data):
697 def _filter(self, filterpats, filename, data):
693 for mf, fn, cmd in filterpats:
698 for mf, fn, cmd in filterpats:
694 if mf(filename):
699 if mf(filename):
695 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
700 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
696 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
701 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
697 break
702 break
698
703
699 return data
704 return data
700
705
701 @propertycache
706 @propertycache
702 def _encodefilterpats(self):
707 def _encodefilterpats(self):
703 return self._loadfilter('encode')
708 return self._loadfilter('encode')
704
709
705 @propertycache
710 @propertycache
706 def _decodefilterpats(self):
711 def _decodefilterpats(self):
707 return self._loadfilter('decode')
712 return self._loadfilter('decode')
708
713
709 def adddatafilter(self, name, filter):
714 def adddatafilter(self, name, filter):
710 self._datafilters[name] = filter
715 self._datafilters[name] = filter
711
716
712 def wread(self, filename):
717 def wread(self, filename):
713 if self._link(filename):
718 if self._link(filename):
714 data = os.readlink(self.wjoin(filename))
719 data = os.readlink(self.wjoin(filename))
715 else:
720 else:
716 data = self.wopener.read(filename)
721 data = self.wopener.read(filename)
717 return self._filter(self._encodefilterpats, filename, data)
722 return self._filter(self._encodefilterpats, filename, data)
718
723
719 def wwrite(self, filename, data, flags):
724 def wwrite(self, filename, data, flags):
720 data = self._filter(self._decodefilterpats, filename, data)
725 data = self._filter(self._decodefilterpats, filename, data)
721 if 'l' in flags:
726 if 'l' in flags:
722 self.wopener.symlink(data, filename)
727 self.wopener.symlink(data, filename)
723 else:
728 else:
724 self.wopener.write(filename, data)
729 self.wopener.write(filename, data)
725 if 'x' in flags:
730 if 'x' in flags:
726 util.setflags(self.wjoin(filename), False, True)
731 util.setflags(self.wjoin(filename), False, True)
727
732
728 def wwritedata(self, filename, data):
733 def wwritedata(self, filename, data):
729 return self._filter(self._decodefilterpats, filename, data)
734 return self._filter(self._decodefilterpats, filename, data)
730
735
731 def transaction(self, desc):
736 def transaction(self, desc):
732 tr = self._transref and self._transref() or None
737 tr = self._transref and self._transref() or None
733 if tr and tr.running():
738 if tr and tr.running():
734 return tr.nest()
739 return tr.nest()
735
740
736 # abort here if the journal already exists
741 # abort here if the journal already exists
737 if os.path.exists(self.sjoin("journal")):
742 if os.path.exists(self.sjoin("journal")):
738 raise error.RepoError(
743 raise error.RepoError(
739 _("abandoned transaction found - run hg recover"))
744 _("abandoned transaction found - run hg recover"))
740
745
741 journalfiles = self._writejournal(desc)
746 journalfiles = self._writejournal(desc)
742 renames = [(x, undoname(x)) for x in journalfiles]
747 renames = [(x, undoname(x)) for x in journalfiles]
743
748
744 tr = transaction.transaction(self.ui.warn, self.sopener,
749 tr = transaction.transaction(self.ui.warn, self.sopener,
745 self.sjoin("journal"),
750 self.sjoin("journal"),
746 aftertrans(renames),
751 aftertrans(renames),
747 self.store.createmode)
752 self.store.createmode)
748 self._transref = weakref.ref(tr)
753 self._transref = weakref.ref(tr)
749 return tr
754 return tr
750
755
751 def _writejournal(self, desc):
756 def _writejournal(self, desc):
752 # save dirstate for rollback
757 # save dirstate for rollback
753 try:
758 try:
754 ds = self.opener.read("dirstate")
759 ds = self.opener.read("dirstate")
755 except IOError:
760 except IOError:
756 ds = ""
761 ds = ""
757 self.opener.write("journal.dirstate", ds)
762 self.opener.write("journal.dirstate", ds)
758 self.opener.write("journal.branch",
763 self.opener.write("journal.branch",
759 encoding.fromlocal(self.dirstate.branch()))
764 encoding.fromlocal(self.dirstate.branch()))
760 self.opener.write("journal.desc",
765 self.opener.write("journal.desc",
761 "%d\n%s\n" % (len(self), desc))
766 "%d\n%s\n" % (len(self), desc))
762
767
763 bkname = self.join('bookmarks')
768 bkname = self.join('bookmarks')
764 if os.path.exists(bkname):
769 if os.path.exists(bkname):
765 util.copyfile(bkname, self.join('journal.bookmarks'))
770 util.copyfile(bkname, self.join('journal.bookmarks'))
766 else:
771 else:
767 self.opener.write('journal.bookmarks', '')
772 self.opener.write('journal.bookmarks', '')
768 phasesname = self.sjoin('phaseroots')
773 phasesname = self.sjoin('phaseroots')
769 if os.path.exists(phasesname):
774 if os.path.exists(phasesname):
770 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
775 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
771 else:
776 else:
772 self.sopener.write('journal.phaseroots', '')
777 self.sopener.write('journal.phaseroots', '')
773
778
774 return (self.sjoin('journal'), self.join('journal.dirstate'),
779 return (self.sjoin('journal'), self.join('journal.dirstate'),
775 self.join('journal.branch'), self.join('journal.desc'),
780 self.join('journal.branch'), self.join('journal.desc'),
776 self.join('journal.bookmarks'),
781 self.join('journal.bookmarks'),
777 self.sjoin('journal.phaseroots'))
782 self.sjoin('journal.phaseroots'))
778
783
779 def recover(self):
784 def recover(self):
780 lock = self.lock()
785 lock = self.lock()
781 try:
786 try:
782 if os.path.exists(self.sjoin("journal")):
787 if os.path.exists(self.sjoin("journal")):
783 self.ui.status(_("rolling back interrupted transaction\n"))
788 self.ui.status(_("rolling back interrupted transaction\n"))
784 transaction.rollback(self.sopener, self.sjoin("journal"),
789 transaction.rollback(self.sopener, self.sjoin("journal"),
785 self.ui.warn)
790 self.ui.warn)
786 self.invalidate()
791 self.invalidate()
787 return True
792 return True
788 else:
793 else:
789 self.ui.warn(_("no interrupted transaction available\n"))
794 self.ui.warn(_("no interrupted transaction available\n"))
790 return False
795 return False
791 finally:
796 finally:
792 lock.release()
797 lock.release()
793
798
794 def rollback(self, dryrun=False, force=False):
799 def rollback(self, dryrun=False, force=False):
795 wlock = lock = None
800 wlock = lock = None
796 try:
801 try:
797 wlock = self.wlock()
802 wlock = self.wlock()
798 lock = self.lock()
803 lock = self.lock()
799 if os.path.exists(self.sjoin("undo")):
804 if os.path.exists(self.sjoin("undo")):
800 return self._rollback(dryrun, force)
805 return self._rollback(dryrun, force)
801 else:
806 else:
802 self.ui.warn(_("no rollback information available\n"))
807 self.ui.warn(_("no rollback information available\n"))
803 return 1
808 return 1
804 finally:
809 finally:
805 release(lock, wlock)
810 release(lock, wlock)
806
811
807 def _rollback(self, dryrun, force):
812 def _rollback(self, dryrun, force):
808 ui = self.ui
813 ui = self.ui
809 try:
814 try:
810 args = self.opener.read('undo.desc').splitlines()
815 args = self.opener.read('undo.desc').splitlines()
811 (oldlen, desc, detail) = (int(args[0]), args[1], None)
816 (oldlen, desc, detail) = (int(args[0]), args[1], None)
812 if len(args) >= 3:
817 if len(args) >= 3:
813 detail = args[2]
818 detail = args[2]
814 oldtip = oldlen - 1
819 oldtip = oldlen - 1
815
820
816 if detail and ui.verbose:
821 if detail and ui.verbose:
817 msg = (_('repository tip rolled back to revision %s'
822 msg = (_('repository tip rolled back to revision %s'
818 ' (undo %s: %s)\n')
823 ' (undo %s: %s)\n')
819 % (oldtip, desc, detail))
824 % (oldtip, desc, detail))
820 else:
825 else:
821 msg = (_('repository tip rolled back to revision %s'
826 msg = (_('repository tip rolled back to revision %s'
822 ' (undo %s)\n')
827 ' (undo %s)\n')
823 % (oldtip, desc))
828 % (oldtip, desc))
824 except IOError:
829 except IOError:
825 msg = _('rolling back unknown transaction\n')
830 msg = _('rolling back unknown transaction\n')
826 desc = None
831 desc = None
827
832
828 if not force and self['.'] != self['tip'] and desc == 'commit':
833 if not force and self['.'] != self['tip'] and desc == 'commit':
829 raise util.Abort(
834 raise util.Abort(
830 _('rollback of last commit while not checked out '
835 _('rollback of last commit while not checked out '
831 'may lose data'), hint=_('use -f to force'))
836 'may lose data'), hint=_('use -f to force'))
832
837
833 ui.status(msg)
838 ui.status(msg)
834 if dryrun:
839 if dryrun:
835 return 0
840 return 0
836
841
837 parents = self.dirstate.parents()
842 parents = self.dirstate.parents()
838 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
843 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
839 if os.path.exists(self.join('undo.bookmarks')):
844 if os.path.exists(self.join('undo.bookmarks')):
840 util.rename(self.join('undo.bookmarks'),
845 util.rename(self.join('undo.bookmarks'),
841 self.join('bookmarks'))
846 self.join('bookmarks'))
842 if os.path.exists(self.sjoin('undo.phaseroots')):
847 if os.path.exists(self.sjoin('undo.phaseroots')):
843 util.rename(self.sjoin('undo.phaseroots'),
848 util.rename(self.sjoin('undo.phaseroots'),
844 self.sjoin('phaseroots'))
849 self.sjoin('phaseroots'))
845 self.invalidate()
850 self.invalidate()
846
851
847 parentgone = (parents[0] not in self.changelog.nodemap or
852 parentgone = (parents[0] not in self.changelog.nodemap or
848 parents[1] not in self.changelog.nodemap)
853 parents[1] not in self.changelog.nodemap)
849 if parentgone:
854 if parentgone:
850 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
855 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
851 try:
856 try:
852 branch = self.opener.read('undo.branch')
857 branch = self.opener.read('undo.branch')
853 self.dirstate.setbranch(branch)
858 self.dirstate.setbranch(branch)
854 except IOError:
859 except IOError:
855 ui.warn(_('named branch could not be reset: '
860 ui.warn(_('named branch could not be reset: '
856 'current branch is still \'%s\'\n')
861 'current branch is still \'%s\'\n')
857 % self.dirstate.branch())
862 % self.dirstate.branch())
858
863
859 self.dirstate.invalidate()
864 self.dirstate.invalidate()
860 parents = tuple([p.rev() for p in self.parents()])
865 parents = tuple([p.rev() for p in self.parents()])
861 if len(parents) > 1:
866 if len(parents) > 1:
862 ui.status(_('working directory now based on '
867 ui.status(_('working directory now based on '
863 'revisions %d and %d\n') % parents)
868 'revisions %d and %d\n') % parents)
864 else:
869 else:
865 ui.status(_('working directory now based on '
870 ui.status(_('working directory now based on '
866 'revision %d\n') % parents)
871 'revision %d\n') % parents)
867 self.destroyed()
872 self.destroyed()
868 return 0
873 return 0
869
874
870 def invalidatecaches(self):
875 def invalidatecaches(self):
871 try:
876 try:
872 delattr(self, '_tagscache')
877 delattr(self, '_tagscache')
873 except AttributeError:
878 except AttributeError:
874 pass
879 pass
875
880
876 self._branchcache = None # in UTF-8
881 self._branchcache = None # in UTF-8
877 self._branchcachetip = None
882 self._branchcachetip = None
878
883
879 def invalidatedirstate(self):
884 def invalidatedirstate(self):
880 '''Invalidates the dirstate, causing the next call to dirstate
885 '''Invalidates the dirstate, causing the next call to dirstate
881 to check if it was modified since the last time it was read,
886 to check if it was modified since the last time it was read,
882 rereading it if it has.
887 rereading it if it has.
883
888
884 This is different to dirstate.invalidate() that it doesn't always
889 This is different to dirstate.invalidate() that it doesn't always
885 rereads the dirstate. Use dirstate.invalidate() if you want to
890 rereads the dirstate. Use dirstate.invalidate() if you want to
886 explicitly read the dirstate again (i.e. restoring it to a previous
891 explicitly read the dirstate again (i.e. restoring it to a previous
887 known good state).'''
892 known good state).'''
888 try:
893 try:
889 delattr(self, 'dirstate')
894 delattr(self, 'dirstate')
890 except AttributeError:
895 except AttributeError:
891 pass
896 pass
892
897
893 def invalidate(self):
898 def invalidate(self):
894 for k in self._filecache:
899 for k in self._filecache:
895 # dirstate is invalidated separately in invalidatedirstate()
900 # dirstate is invalidated separately in invalidatedirstate()
896 if k == 'dirstate':
901 if k == 'dirstate':
897 continue
902 continue
898
903
899 try:
904 try:
900 delattr(self, k)
905 delattr(self, k)
901 except AttributeError:
906 except AttributeError:
902 pass
907 pass
903 self.invalidatecaches()
908 self.invalidatecaches()
904
909
905 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
910 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
906 try:
911 try:
907 l = lock.lock(lockname, 0, releasefn, desc=desc)
912 l = lock.lock(lockname, 0, releasefn, desc=desc)
908 except error.LockHeld, inst:
913 except error.LockHeld, inst:
909 if not wait:
914 if not wait:
910 raise
915 raise
911 self.ui.warn(_("waiting for lock on %s held by %r\n") %
916 self.ui.warn(_("waiting for lock on %s held by %r\n") %
912 (desc, inst.locker))
917 (desc, inst.locker))
913 # default to 600 seconds timeout
918 # default to 600 seconds timeout
914 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
919 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
915 releasefn, desc=desc)
920 releasefn, desc=desc)
916 if acquirefn:
921 if acquirefn:
917 acquirefn()
922 acquirefn()
918 return l
923 return l
919
924
920 def _afterlock(self, callback):
925 def _afterlock(self, callback):
921 """add a callback to the current repository lock.
926 """add a callback to the current repository lock.
922
927
923 The callback will be executed on lock release."""
928 The callback will be executed on lock release."""
924 l = self._lockref and self._lockref()
929 l = self._lockref and self._lockref()
925 if l:
930 if l:
926 l.postrelease.append(callback)
931 l.postrelease.append(callback)
927
932
928 def lock(self, wait=True):
933 def lock(self, wait=True):
929 '''Lock the repository store (.hg/store) and return a weak reference
934 '''Lock the repository store (.hg/store) and return a weak reference
930 to the lock. Use this before modifying the store (e.g. committing or
935 to the lock. Use this before modifying the store (e.g. committing or
931 stripping). If you are opening a transaction, get a lock as well.)'''
936 stripping). If you are opening a transaction, get a lock as well.)'''
932 l = self._lockref and self._lockref()
937 l = self._lockref and self._lockref()
933 if l is not None and l.held:
938 if l is not None and l.held:
934 l.lock()
939 l.lock()
935 return l
940 return l
936
941
937 def unlock():
942 def unlock():
938 self.store.write()
943 self.store.write()
939 if self._dirtyphases:
944 if self._dirtyphases:
940 phases.writeroots(self)
945 phases.writeroots(self)
941 for k, ce in self._filecache.items():
946 for k, ce in self._filecache.items():
942 if k == 'dirstate':
947 if k == 'dirstate':
943 continue
948 continue
944 ce.refresh()
949 ce.refresh()
945
950
946 l = self._lock(self.sjoin("lock"), wait, unlock,
951 l = self._lock(self.sjoin("lock"), wait, unlock,
947 self.invalidate, _('repository %s') % self.origroot)
952 self.invalidate, _('repository %s') % self.origroot)
948 self._lockref = weakref.ref(l)
953 self._lockref = weakref.ref(l)
949 return l
954 return l
950
955
951 def wlock(self, wait=True):
956 def wlock(self, wait=True):
952 '''Lock the non-store parts of the repository (everything under
957 '''Lock the non-store parts of the repository (everything under
953 .hg except .hg/store) and return a weak reference to the lock.
958 .hg except .hg/store) and return a weak reference to the lock.
954 Use this before modifying files in .hg.'''
959 Use this before modifying files in .hg.'''
955 l = self._wlockref and self._wlockref()
960 l = self._wlockref and self._wlockref()
956 if l is not None and l.held:
961 if l is not None and l.held:
957 l.lock()
962 l.lock()
958 return l
963 return l
959
964
960 def unlock():
965 def unlock():
961 self.dirstate.write()
966 self.dirstate.write()
962 ce = self._filecache.get('dirstate')
967 ce = self._filecache.get('dirstate')
963 if ce:
968 if ce:
964 ce.refresh()
969 ce.refresh()
965
970
966 l = self._lock(self.join("wlock"), wait, unlock,
971 l = self._lock(self.join("wlock"), wait, unlock,
967 self.invalidatedirstate, _('working directory of %s') %
972 self.invalidatedirstate, _('working directory of %s') %
968 self.origroot)
973 self.origroot)
969 self._wlockref = weakref.ref(l)
974 self._wlockref = weakref.ref(l)
970 return l
975 return l
971
976
972 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
977 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
973 """
978 """
974 commit an individual file as part of a larger transaction
979 commit an individual file as part of a larger transaction
975 """
980 """
976
981
977 fname = fctx.path()
982 fname = fctx.path()
978 text = fctx.data()
983 text = fctx.data()
979 flog = self.file(fname)
984 flog = self.file(fname)
980 fparent1 = manifest1.get(fname, nullid)
985 fparent1 = manifest1.get(fname, nullid)
981 fparent2 = fparent2o = manifest2.get(fname, nullid)
986 fparent2 = fparent2o = manifest2.get(fname, nullid)
982
987
983 meta = {}
988 meta = {}
984 copy = fctx.renamed()
989 copy = fctx.renamed()
985 if copy and copy[0] != fname:
990 if copy and copy[0] != fname:
986 # Mark the new revision of this file as a copy of another
991 # Mark the new revision of this file as a copy of another
987 # file. This copy data will effectively act as a parent
992 # file. This copy data will effectively act as a parent
988 # of this new revision. If this is a merge, the first
993 # of this new revision. If this is a merge, the first
989 # parent will be the nullid (meaning "look up the copy data")
994 # parent will be the nullid (meaning "look up the copy data")
990 # and the second one will be the other parent. For example:
995 # and the second one will be the other parent. For example:
991 #
996 #
992 # 0 --- 1 --- 3 rev1 changes file foo
997 # 0 --- 1 --- 3 rev1 changes file foo
993 # \ / rev2 renames foo to bar and changes it
998 # \ / rev2 renames foo to bar and changes it
994 # \- 2 -/ rev3 should have bar with all changes and
999 # \- 2 -/ rev3 should have bar with all changes and
995 # should record that bar descends from
1000 # should record that bar descends from
996 # bar in rev2 and foo in rev1
1001 # bar in rev2 and foo in rev1
997 #
1002 #
998 # this allows this merge to succeed:
1003 # this allows this merge to succeed:
999 #
1004 #
1000 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1005 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1001 # \ / merging rev3 and rev4 should use bar@rev2
1006 # \ / merging rev3 and rev4 should use bar@rev2
1002 # \- 2 --- 4 as the merge base
1007 # \- 2 --- 4 as the merge base
1003 #
1008 #
1004
1009
1005 cfname = copy[0]
1010 cfname = copy[0]
1006 crev = manifest1.get(cfname)
1011 crev = manifest1.get(cfname)
1007 newfparent = fparent2
1012 newfparent = fparent2
1008
1013
1009 if manifest2: # branch merge
1014 if manifest2: # branch merge
1010 if fparent2 == nullid or crev is None: # copied on remote side
1015 if fparent2 == nullid or crev is None: # copied on remote side
1011 if cfname in manifest2:
1016 if cfname in manifest2:
1012 crev = manifest2[cfname]
1017 crev = manifest2[cfname]
1013 newfparent = fparent1
1018 newfparent = fparent1
1014
1019
1015 # find source in nearest ancestor if we've lost track
1020 # find source in nearest ancestor if we've lost track
1016 if not crev:
1021 if not crev:
1017 self.ui.debug(" %s: searching for copy revision for %s\n" %
1022 self.ui.debug(" %s: searching for copy revision for %s\n" %
1018 (fname, cfname))
1023 (fname, cfname))
1019 for ancestor in self[None].ancestors():
1024 for ancestor in self[None].ancestors():
1020 if cfname in ancestor:
1025 if cfname in ancestor:
1021 crev = ancestor[cfname].filenode()
1026 crev = ancestor[cfname].filenode()
1022 break
1027 break
1023
1028
1024 if crev:
1029 if crev:
1025 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1030 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1026 meta["copy"] = cfname
1031 meta["copy"] = cfname
1027 meta["copyrev"] = hex(crev)
1032 meta["copyrev"] = hex(crev)
1028 fparent1, fparent2 = nullid, newfparent
1033 fparent1, fparent2 = nullid, newfparent
1029 else:
1034 else:
1030 self.ui.warn(_("warning: can't find ancestor for '%s' "
1035 self.ui.warn(_("warning: can't find ancestor for '%s' "
1031 "copied from '%s'!\n") % (fname, cfname))
1036 "copied from '%s'!\n") % (fname, cfname))
1032
1037
1033 elif fparent2 != nullid:
1038 elif fparent2 != nullid:
1034 # is one parent an ancestor of the other?
1039 # is one parent an ancestor of the other?
1035 fparentancestor = flog.ancestor(fparent1, fparent2)
1040 fparentancestor = flog.ancestor(fparent1, fparent2)
1036 if fparentancestor == fparent1:
1041 if fparentancestor == fparent1:
1037 fparent1, fparent2 = fparent2, nullid
1042 fparent1, fparent2 = fparent2, nullid
1038 elif fparentancestor == fparent2:
1043 elif fparentancestor == fparent2:
1039 fparent2 = nullid
1044 fparent2 = nullid
1040
1045
1041 # is the file changed?
1046 # is the file changed?
1042 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1047 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1043 changelist.append(fname)
1048 changelist.append(fname)
1044 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1049 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1045
1050
1046 # are just the flags changed during merge?
1051 # are just the flags changed during merge?
1047 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1052 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1048 changelist.append(fname)
1053 changelist.append(fname)
1049
1054
1050 return fparent1
1055 return fparent1
1051
1056
1052 def commit(self, text="", user=None, date=None, match=None, force=False,
1057 def commit(self, text="", user=None, date=None, match=None, force=False,
1053 editor=False, extra={}):
1058 editor=False, extra={}):
1054 """Add a new revision to current repository.
1059 """Add a new revision to current repository.
1055
1060
1056 Revision information is gathered from the working directory,
1061 Revision information is gathered from the working directory,
1057 match can be used to filter the committed files. If editor is
1062 match can be used to filter the committed files. If editor is
1058 supplied, it is called to get a commit message.
1063 supplied, it is called to get a commit message.
1059 """
1064 """
1060
1065
1061 def fail(f, msg):
1066 def fail(f, msg):
1062 raise util.Abort('%s: %s' % (f, msg))
1067 raise util.Abort('%s: %s' % (f, msg))
1063
1068
1064 if not match:
1069 if not match:
1065 match = matchmod.always(self.root, '')
1070 match = matchmod.always(self.root, '')
1066
1071
1067 if not force:
1072 if not force:
1068 vdirs = []
1073 vdirs = []
1069 match.dir = vdirs.append
1074 match.dir = vdirs.append
1070 match.bad = fail
1075 match.bad = fail
1071
1076
1072 wlock = self.wlock()
1077 wlock = self.wlock()
1073 try:
1078 try:
1074 wctx = self[None]
1079 wctx = self[None]
1075 merge = len(wctx.parents()) > 1
1080 merge = len(wctx.parents()) > 1
1076
1081
1077 if (not force and merge and match and
1082 if (not force and merge and match and
1078 (match.files() or match.anypats())):
1083 (match.files() or match.anypats())):
1079 raise util.Abort(_('cannot partially commit a merge '
1084 raise util.Abort(_('cannot partially commit a merge '
1080 '(do not specify files or patterns)'))
1085 '(do not specify files or patterns)'))
1081
1086
1082 changes = self.status(match=match, clean=force)
1087 changes = self.status(match=match, clean=force)
1083 if force:
1088 if force:
1084 changes[0].extend(changes[6]) # mq may commit unchanged files
1089 changes[0].extend(changes[6]) # mq may commit unchanged files
1085
1090
1086 # check subrepos
1091 # check subrepos
1087 subs = []
1092 subs = []
1088 removedsubs = set()
1093 removedsubs = set()
1089 if '.hgsub' in wctx:
1094 if '.hgsub' in wctx:
1090 # only manage subrepos and .hgsubstate if .hgsub is present
1095 # only manage subrepos and .hgsubstate if .hgsub is present
1091 for p in wctx.parents():
1096 for p in wctx.parents():
1092 removedsubs.update(s for s in p.substate if match(s))
1097 removedsubs.update(s for s in p.substate if match(s))
1093 for s in wctx.substate:
1098 for s in wctx.substate:
1094 removedsubs.discard(s)
1099 removedsubs.discard(s)
1095 if match(s) and wctx.sub(s).dirty():
1100 if match(s) and wctx.sub(s).dirty():
1096 subs.append(s)
1101 subs.append(s)
1097 if (subs or removedsubs):
1102 if (subs or removedsubs):
1098 if (not match('.hgsub') and
1103 if (not match('.hgsub') and
1099 '.hgsub' in (wctx.modified() + wctx.added())):
1104 '.hgsub' in (wctx.modified() + wctx.added())):
1100 raise util.Abort(
1105 raise util.Abort(
1101 _("can't commit subrepos without .hgsub"))
1106 _("can't commit subrepos without .hgsub"))
1102 if '.hgsubstate' not in changes[0]:
1107 if '.hgsubstate' not in changes[0]:
1103 changes[0].insert(0, '.hgsubstate')
1108 changes[0].insert(0, '.hgsubstate')
1104 if '.hgsubstate' in changes[2]:
1109 if '.hgsubstate' in changes[2]:
1105 changes[2].remove('.hgsubstate')
1110 changes[2].remove('.hgsubstate')
1106 elif '.hgsub' in changes[2]:
1111 elif '.hgsub' in changes[2]:
1107 # clean up .hgsubstate when .hgsub is removed
1112 # clean up .hgsubstate when .hgsub is removed
1108 if ('.hgsubstate' in wctx and
1113 if ('.hgsubstate' in wctx and
1109 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1114 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1110 changes[2].insert(0, '.hgsubstate')
1115 changes[2].insert(0, '.hgsubstate')
1111
1116
1112 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1117 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1113 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1118 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1114 if changedsubs:
1119 if changedsubs:
1115 raise util.Abort(_("uncommitted changes in subrepo %s")
1120 raise util.Abort(_("uncommitted changes in subrepo %s")
1116 % changedsubs[0],
1121 % changedsubs[0],
1117 hint=_("use --subrepos for recursive commit"))
1122 hint=_("use --subrepos for recursive commit"))
1118
1123
1119 # make sure all explicit patterns are matched
1124 # make sure all explicit patterns are matched
1120 if not force and match.files():
1125 if not force and match.files():
1121 matched = set(changes[0] + changes[1] + changes[2])
1126 matched = set(changes[0] + changes[1] + changes[2])
1122
1127
1123 for f in match.files():
1128 for f in match.files():
1124 if f == '.' or f in matched or f in wctx.substate:
1129 if f == '.' or f in matched or f in wctx.substate:
1125 continue
1130 continue
1126 if f in changes[3]: # missing
1131 if f in changes[3]: # missing
1127 fail(f, _('file not found!'))
1132 fail(f, _('file not found!'))
1128 if f in vdirs: # visited directory
1133 if f in vdirs: # visited directory
1129 d = f + '/'
1134 d = f + '/'
1130 for mf in matched:
1135 for mf in matched:
1131 if mf.startswith(d):
1136 if mf.startswith(d):
1132 break
1137 break
1133 else:
1138 else:
1134 fail(f, _("no match under directory!"))
1139 fail(f, _("no match under directory!"))
1135 elif f not in self.dirstate:
1140 elif f not in self.dirstate:
1136 fail(f, _("file not tracked!"))
1141 fail(f, _("file not tracked!"))
1137
1142
1138 if (not force and not extra.get("close") and not merge
1143 if (not force and not extra.get("close") and not merge
1139 and not (changes[0] or changes[1] or changes[2])
1144 and not (changes[0] or changes[1] or changes[2])
1140 and wctx.branch() == wctx.p1().branch()):
1145 and wctx.branch() == wctx.p1().branch()):
1141 return None
1146 return None
1142
1147
1143 ms = mergemod.mergestate(self)
1148 ms = mergemod.mergestate(self)
1144 for f in changes[0]:
1149 for f in changes[0]:
1145 if f in ms and ms[f] == 'u':
1150 if f in ms and ms[f] == 'u':
1146 raise util.Abort(_("unresolved merge conflicts "
1151 raise util.Abort(_("unresolved merge conflicts "
1147 "(see hg help resolve)"))
1152 "(see hg help resolve)"))
1148
1153
1149 cctx = context.workingctx(self, text, user, date, extra, changes)
1154 cctx = context.workingctx(self, text, user, date, extra, changes)
1150 if editor:
1155 if editor:
1151 cctx._text = editor(self, cctx, subs)
1156 cctx._text = editor(self, cctx, subs)
1152 edited = (text != cctx._text)
1157 edited = (text != cctx._text)
1153
1158
1154 # commit subs
1159 # commit subs
1155 if subs or removedsubs:
1160 if subs or removedsubs:
1156 state = wctx.substate.copy()
1161 state = wctx.substate.copy()
1157 for s in sorted(subs):
1162 for s in sorted(subs):
1158 sub = wctx.sub(s)
1163 sub = wctx.sub(s)
1159 self.ui.status(_('committing subrepository %s\n') %
1164 self.ui.status(_('committing subrepository %s\n') %
1160 subrepo.subrelpath(sub))
1165 subrepo.subrelpath(sub))
1161 sr = sub.commit(cctx._text, user, date)
1166 sr = sub.commit(cctx._text, user, date)
1162 state[s] = (state[s][0], sr)
1167 state[s] = (state[s][0], sr)
1163 subrepo.writestate(self, state)
1168 subrepo.writestate(self, state)
1164
1169
1165 # Save commit message in case this transaction gets rolled back
1170 # Save commit message in case this transaction gets rolled back
1166 # (e.g. by a pretxncommit hook). Leave the content alone on
1171 # (e.g. by a pretxncommit hook). Leave the content alone on
1167 # the assumption that the user will use the same editor again.
1172 # the assumption that the user will use the same editor again.
1168 msgfn = self.savecommitmessage(cctx._text)
1173 msgfn = self.savecommitmessage(cctx._text)
1169
1174
1170 p1, p2 = self.dirstate.parents()
1175 p1, p2 = self.dirstate.parents()
1171 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1176 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1172 try:
1177 try:
1173 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1178 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1174 ret = self.commitctx(cctx, True)
1179 ret = self.commitctx(cctx, True)
1175 except:
1180 except:
1176 if edited:
1181 if edited:
1177 self.ui.write(
1182 self.ui.write(
1178 _('note: commit message saved in %s\n') % msgfn)
1183 _('note: commit message saved in %s\n') % msgfn)
1179 raise
1184 raise
1180
1185
1181 # update bookmarks, dirstate and mergestate
1186 # update bookmarks, dirstate and mergestate
1182 bookmarks.update(self, p1, ret)
1187 bookmarks.update(self, p1, ret)
1183 for f in changes[0] + changes[1]:
1188 for f in changes[0] + changes[1]:
1184 self.dirstate.normal(f)
1189 self.dirstate.normal(f)
1185 for f in changes[2]:
1190 for f in changes[2]:
1186 self.dirstate.drop(f)
1191 self.dirstate.drop(f)
1187 self.dirstate.setparents(ret)
1192 self.dirstate.setparents(ret)
1188 ms.reset()
1193 ms.reset()
1189 finally:
1194 finally:
1190 wlock.release()
1195 wlock.release()
1191
1196
1192 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1197 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1193 return ret
1198 return ret
1194
1199
1195 def commitctx(self, ctx, error=False):
1200 def commitctx(self, ctx, error=False):
1196 """Add a new revision to current repository.
1201 """Add a new revision to current repository.
1197 Revision information is passed via the context argument.
1202 Revision information is passed via the context argument.
1198 """
1203 """
1199
1204
1200 tr = lock = None
1205 tr = lock = None
1201 removed = list(ctx.removed())
1206 removed = list(ctx.removed())
1202 p1, p2 = ctx.p1(), ctx.p2()
1207 p1, p2 = ctx.p1(), ctx.p2()
1203 user = ctx.user()
1208 user = ctx.user()
1204
1209
1205 lock = self.lock()
1210 lock = self.lock()
1206 try:
1211 try:
1207 tr = self.transaction("commit")
1212 tr = self.transaction("commit")
1208 trp = weakref.proxy(tr)
1213 trp = weakref.proxy(tr)
1209
1214
1210 if ctx.files():
1215 if ctx.files():
1211 m1 = p1.manifest().copy()
1216 m1 = p1.manifest().copy()
1212 m2 = p2.manifest()
1217 m2 = p2.manifest()
1213
1218
1214 # check in files
1219 # check in files
1215 new = {}
1220 new = {}
1216 changed = []
1221 changed = []
1217 linkrev = len(self)
1222 linkrev = len(self)
1218 for f in sorted(ctx.modified() + ctx.added()):
1223 for f in sorted(ctx.modified() + ctx.added()):
1219 self.ui.note(f + "\n")
1224 self.ui.note(f + "\n")
1220 try:
1225 try:
1221 fctx = ctx[f]
1226 fctx = ctx[f]
1222 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1227 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1223 changed)
1228 changed)
1224 m1.set(f, fctx.flags())
1229 m1.set(f, fctx.flags())
1225 except OSError, inst:
1230 except OSError, inst:
1226 self.ui.warn(_("trouble committing %s!\n") % f)
1231 self.ui.warn(_("trouble committing %s!\n") % f)
1227 raise
1232 raise
1228 except IOError, inst:
1233 except IOError, inst:
1229 errcode = getattr(inst, 'errno', errno.ENOENT)
1234 errcode = getattr(inst, 'errno', errno.ENOENT)
1230 if error or errcode and errcode != errno.ENOENT:
1235 if error or errcode and errcode != errno.ENOENT:
1231 self.ui.warn(_("trouble committing %s!\n") % f)
1236 self.ui.warn(_("trouble committing %s!\n") % f)
1232 raise
1237 raise
1233 else:
1238 else:
1234 removed.append(f)
1239 removed.append(f)
1235
1240
1236 # update manifest
1241 # update manifest
1237 m1.update(new)
1242 m1.update(new)
1238 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1243 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1239 drop = [f for f in removed if f in m1]
1244 drop = [f for f in removed if f in m1]
1240 for f in drop:
1245 for f in drop:
1241 del m1[f]
1246 del m1[f]
1242 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1247 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1243 p2.manifestnode(), (new, drop))
1248 p2.manifestnode(), (new, drop))
1244 files = changed + removed
1249 files = changed + removed
1245 else:
1250 else:
1246 mn = p1.manifestnode()
1251 mn = p1.manifestnode()
1247 files = []
1252 files = []
1248
1253
1249 # update changelog
1254 # update changelog
1250 self.changelog.delayupdate()
1255 self.changelog.delayupdate()
1251 n = self.changelog.add(mn, files, ctx.description(),
1256 n = self.changelog.add(mn, files, ctx.description(),
1252 trp, p1.node(), p2.node(),
1257 trp, p1.node(), p2.node(),
1253 user, ctx.date(), ctx.extra().copy())
1258 user, ctx.date(), ctx.extra().copy())
1254 p = lambda: self.changelog.writepending() and self.root or ""
1259 p = lambda: self.changelog.writepending() and self.root or ""
1255 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1260 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1256 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1261 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1257 parent2=xp2, pending=p)
1262 parent2=xp2, pending=p)
1258 self.changelog.finalize(trp)
1263 self.changelog.finalize(trp)
1259 # set the new commit is proper phase
1264 # set the new commit is proper phase
1260 targetphase = self.ui.configint('phases', 'new-commit',
1265 targetphase = self.ui.configint('phases', 'new-commit',
1261 phases.draft)
1266 phases.draft)
1262 if targetphase:
1267 if targetphase:
1263 # retract boundary do not alter parent changeset.
1268 # retract boundary do not alter parent changeset.
1264 # if a parent have higher the resulting phase will
1269 # if a parent have higher the resulting phase will
1265 # be compliant anyway
1270 # be compliant anyway
1266 #
1271 #
1267 # if minimal phase was 0 we don't need to retract anything
1272 # if minimal phase was 0 we don't need to retract anything
1268 phases.retractboundary(self, targetphase, [n])
1273 phases.retractboundary(self, targetphase, [n])
1269 tr.close()
1274 tr.close()
1270 self.updatebranchcache()
1275 self.updatebranchcache()
1271 return n
1276 return n
1272 finally:
1277 finally:
1273 if tr:
1278 if tr:
1274 tr.release()
1279 tr.release()
1275 lock.release()
1280 lock.release()
1276
1281
1277 def destroyed(self):
1282 def destroyed(self):
1278 '''Inform the repository that nodes have been destroyed.
1283 '''Inform the repository that nodes have been destroyed.
1279 Intended for use by strip and rollback, so there's a common
1284 Intended for use by strip and rollback, so there's a common
1280 place for anything that has to be done after destroying history.'''
1285 place for anything that has to be done after destroying history.'''
1281 # XXX it might be nice if we could take the list of destroyed
1286 # XXX it might be nice if we could take the list of destroyed
1282 # nodes, but I don't see an easy way for rollback() to do that
1287 # nodes, but I don't see an easy way for rollback() to do that
1283
1288
1284 # Ensure the persistent tag cache is updated. Doing it now
1289 # Ensure the persistent tag cache is updated. Doing it now
1285 # means that the tag cache only has to worry about destroyed
1290 # means that the tag cache only has to worry about destroyed
1286 # heads immediately after a strip/rollback. That in turn
1291 # heads immediately after a strip/rollback. That in turn
1287 # guarantees that "cachetip == currenttip" (comparing both rev
1292 # guarantees that "cachetip == currenttip" (comparing both rev
1288 # and node) always means no nodes have been added or destroyed.
1293 # and node) always means no nodes have been added or destroyed.
1289
1294
1290 # XXX this is suboptimal when qrefresh'ing: we strip the current
1295 # XXX this is suboptimal when qrefresh'ing: we strip the current
1291 # head, refresh the tag cache, then immediately add a new head.
1296 # head, refresh the tag cache, then immediately add a new head.
1292 # But I think doing it this way is necessary for the "instant
1297 # But I think doing it this way is necessary for the "instant
1293 # tag cache retrieval" case to work.
1298 # tag cache retrieval" case to work.
1294 self.invalidatecaches()
1299 self.invalidatecaches()
1295
1300
1296 def walk(self, match, node=None):
1301 def walk(self, match, node=None):
1297 '''
1302 '''
1298 walk recursively through the directory tree or a given
1303 walk recursively through the directory tree or a given
1299 changeset, finding all files matched by the match
1304 changeset, finding all files matched by the match
1300 function
1305 function
1301 '''
1306 '''
1302 return self[node].walk(match)
1307 return self[node].walk(match)
1303
1308
1304 def status(self, node1='.', node2=None, match=None,
1309 def status(self, node1='.', node2=None, match=None,
1305 ignored=False, clean=False, unknown=False,
1310 ignored=False, clean=False, unknown=False,
1306 listsubrepos=False):
1311 listsubrepos=False):
1307 """return status of files between two nodes or node and working directory
1312 """return status of files between two nodes or node and working directory
1308
1313
1309 If node1 is None, use the first dirstate parent instead.
1314 If node1 is None, use the first dirstate parent instead.
1310 If node2 is None, compare node1 with working directory.
1315 If node2 is None, compare node1 with working directory.
1311 """
1316 """
1312
1317
1313 def mfmatches(ctx):
1318 def mfmatches(ctx):
1314 mf = ctx.manifest().copy()
1319 mf = ctx.manifest().copy()
1315 for fn in mf.keys():
1320 for fn in mf.keys():
1316 if not match(fn):
1321 if not match(fn):
1317 del mf[fn]
1322 del mf[fn]
1318 return mf
1323 return mf
1319
1324
1320 if isinstance(node1, context.changectx):
1325 if isinstance(node1, context.changectx):
1321 ctx1 = node1
1326 ctx1 = node1
1322 else:
1327 else:
1323 ctx1 = self[node1]
1328 ctx1 = self[node1]
1324 if isinstance(node2, context.changectx):
1329 if isinstance(node2, context.changectx):
1325 ctx2 = node2
1330 ctx2 = node2
1326 else:
1331 else:
1327 ctx2 = self[node2]
1332 ctx2 = self[node2]
1328
1333
1329 working = ctx2.rev() is None
1334 working = ctx2.rev() is None
1330 parentworking = working and ctx1 == self['.']
1335 parentworking = working and ctx1 == self['.']
1331 match = match or matchmod.always(self.root, self.getcwd())
1336 match = match or matchmod.always(self.root, self.getcwd())
1332 listignored, listclean, listunknown = ignored, clean, unknown
1337 listignored, listclean, listunknown = ignored, clean, unknown
1333
1338
1334 # load earliest manifest first for caching reasons
1339 # load earliest manifest first for caching reasons
1335 if not working and ctx2.rev() < ctx1.rev():
1340 if not working and ctx2.rev() < ctx1.rev():
1336 ctx2.manifest()
1341 ctx2.manifest()
1337
1342
1338 if not parentworking:
1343 if not parentworking:
1339 def bad(f, msg):
1344 def bad(f, msg):
1340 if f not in ctx1:
1345 if f not in ctx1:
1341 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1346 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1342 match.bad = bad
1347 match.bad = bad
1343
1348
1344 if working: # we need to scan the working dir
1349 if working: # we need to scan the working dir
1345 subrepos = []
1350 subrepos = []
1346 if '.hgsub' in self.dirstate:
1351 if '.hgsub' in self.dirstate:
1347 subrepos = ctx2.substate.keys()
1352 subrepos = ctx2.substate.keys()
1348 s = self.dirstate.status(match, subrepos, listignored,
1353 s = self.dirstate.status(match, subrepos, listignored,
1349 listclean, listunknown)
1354 listclean, listunknown)
1350 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1355 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1351
1356
1352 # check for any possibly clean files
1357 # check for any possibly clean files
1353 if parentworking and cmp:
1358 if parentworking and cmp:
1354 fixup = []
1359 fixup = []
1355 # do a full compare of any files that might have changed
1360 # do a full compare of any files that might have changed
1356 for f in sorted(cmp):
1361 for f in sorted(cmp):
1357 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1362 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1358 or ctx1[f].cmp(ctx2[f])):
1363 or ctx1[f].cmp(ctx2[f])):
1359 modified.append(f)
1364 modified.append(f)
1360 else:
1365 else:
1361 fixup.append(f)
1366 fixup.append(f)
1362
1367
1363 # update dirstate for files that are actually clean
1368 # update dirstate for files that are actually clean
1364 if fixup:
1369 if fixup:
1365 if listclean:
1370 if listclean:
1366 clean += fixup
1371 clean += fixup
1367
1372
1368 try:
1373 try:
1369 # updating the dirstate is optional
1374 # updating the dirstate is optional
1370 # so we don't wait on the lock
1375 # so we don't wait on the lock
1371 wlock = self.wlock(False)
1376 wlock = self.wlock(False)
1372 try:
1377 try:
1373 for f in fixup:
1378 for f in fixup:
1374 self.dirstate.normal(f)
1379 self.dirstate.normal(f)
1375 finally:
1380 finally:
1376 wlock.release()
1381 wlock.release()
1377 except error.LockError:
1382 except error.LockError:
1378 pass
1383 pass
1379
1384
1380 if not parentworking:
1385 if not parentworking:
1381 mf1 = mfmatches(ctx1)
1386 mf1 = mfmatches(ctx1)
1382 if working:
1387 if working:
1383 # we are comparing working dir against non-parent
1388 # we are comparing working dir against non-parent
1384 # generate a pseudo-manifest for the working dir
1389 # generate a pseudo-manifest for the working dir
1385 mf2 = mfmatches(self['.'])
1390 mf2 = mfmatches(self['.'])
1386 for f in cmp + modified + added:
1391 for f in cmp + modified + added:
1387 mf2[f] = None
1392 mf2[f] = None
1388 mf2.set(f, ctx2.flags(f))
1393 mf2.set(f, ctx2.flags(f))
1389 for f in removed:
1394 for f in removed:
1390 if f in mf2:
1395 if f in mf2:
1391 del mf2[f]
1396 del mf2[f]
1392 else:
1397 else:
1393 # we are comparing two revisions
1398 # we are comparing two revisions
1394 deleted, unknown, ignored = [], [], []
1399 deleted, unknown, ignored = [], [], []
1395 mf2 = mfmatches(ctx2)
1400 mf2 = mfmatches(ctx2)
1396
1401
1397 modified, added, clean = [], [], []
1402 modified, added, clean = [], [], []
1398 for fn in mf2:
1403 for fn in mf2:
1399 if fn in mf1:
1404 if fn in mf1:
1400 if (fn not in deleted and
1405 if (fn not in deleted and
1401 (mf1.flags(fn) != mf2.flags(fn) or
1406 (mf1.flags(fn) != mf2.flags(fn) or
1402 (mf1[fn] != mf2[fn] and
1407 (mf1[fn] != mf2[fn] and
1403 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1408 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1404 modified.append(fn)
1409 modified.append(fn)
1405 elif listclean:
1410 elif listclean:
1406 clean.append(fn)
1411 clean.append(fn)
1407 del mf1[fn]
1412 del mf1[fn]
1408 elif fn not in deleted:
1413 elif fn not in deleted:
1409 added.append(fn)
1414 added.append(fn)
1410 removed = mf1.keys()
1415 removed = mf1.keys()
1411
1416
1412 if working and modified and not self.dirstate._checklink:
1417 if working and modified and not self.dirstate._checklink:
1413 # Symlink placeholders may get non-symlink-like contents
1418 # Symlink placeholders may get non-symlink-like contents
1414 # via user error or dereferencing by NFS or Samba servers,
1419 # via user error or dereferencing by NFS or Samba servers,
1415 # so we filter out any placeholders that don't look like a
1420 # so we filter out any placeholders that don't look like a
1416 # symlink
1421 # symlink
1417 sane = []
1422 sane = []
1418 for f in modified:
1423 for f in modified:
1419 if ctx2.flags(f) == 'l':
1424 if ctx2.flags(f) == 'l':
1420 d = ctx2[f].data()
1425 d = ctx2[f].data()
1421 if len(d) >= 1024 or '\n' in d or util.binary(d):
1426 if len(d) >= 1024 or '\n' in d or util.binary(d):
1422 self.ui.debug('ignoring suspect symlink placeholder'
1427 self.ui.debug('ignoring suspect symlink placeholder'
1423 ' "%s"\n' % f)
1428 ' "%s"\n' % f)
1424 continue
1429 continue
1425 sane.append(f)
1430 sane.append(f)
1426 modified = sane
1431 modified = sane
1427
1432
1428 r = modified, added, removed, deleted, unknown, ignored, clean
1433 r = modified, added, removed, deleted, unknown, ignored, clean
1429
1434
1430 if listsubrepos:
1435 if listsubrepos:
1431 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1436 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1432 if working:
1437 if working:
1433 rev2 = None
1438 rev2 = None
1434 else:
1439 else:
1435 rev2 = ctx2.substate[subpath][1]
1440 rev2 = ctx2.substate[subpath][1]
1436 try:
1441 try:
1437 submatch = matchmod.narrowmatcher(subpath, match)
1442 submatch = matchmod.narrowmatcher(subpath, match)
1438 s = sub.status(rev2, match=submatch, ignored=listignored,
1443 s = sub.status(rev2, match=submatch, ignored=listignored,
1439 clean=listclean, unknown=listunknown,
1444 clean=listclean, unknown=listunknown,
1440 listsubrepos=True)
1445 listsubrepos=True)
1441 for rfiles, sfiles in zip(r, s):
1446 for rfiles, sfiles in zip(r, s):
1442 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1447 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1443 except error.LookupError:
1448 except error.LookupError:
1444 self.ui.status(_("skipping missing subrepository: %s\n")
1449 self.ui.status(_("skipping missing subrepository: %s\n")
1445 % subpath)
1450 % subpath)
1446
1451
1447 for l in r:
1452 for l in r:
1448 l.sort()
1453 l.sort()
1449 return r
1454 return r
1450
1455
1451 def heads(self, start=None):
1456 def heads(self, start=None):
1452 heads = self.changelog.heads(start)
1457 heads = self.changelog.heads(start)
1453 # sort the output in rev descending order
1458 # sort the output in rev descending order
1454 return sorted(heads, key=self.changelog.rev, reverse=True)
1459 return sorted(heads, key=self.changelog.rev, reverse=True)
1455
1460
1456 def branchheads(self, branch=None, start=None, closed=False):
1461 def branchheads(self, branch=None, start=None, closed=False):
1457 '''return a (possibly filtered) list of heads for the given branch
1462 '''return a (possibly filtered) list of heads for the given branch
1458
1463
1459 Heads are returned in topological order, from newest to oldest.
1464 Heads are returned in topological order, from newest to oldest.
1460 If branch is None, use the dirstate branch.
1465 If branch is None, use the dirstate branch.
1461 If start is not None, return only heads reachable from start.
1466 If start is not None, return only heads reachable from start.
1462 If closed is True, return heads that are marked as closed as well.
1467 If closed is True, return heads that are marked as closed as well.
1463 '''
1468 '''
1464 if branch is None:
1469 if branch is None:
1465 branch = self[None].branch()
1470 branch = self[None].branch()
1466 branches = self.branchmap()
1471 branches = self.branchmap()
1467 if branch not in branches:
1472 if branch not in branches:
1468 return []
1473 return []
1469 # the cache returns heads ordered lowest to highest
1474 # the cache returns heads ordered lowest to highest
1470 bheads = list(reversed(branches[branch]))
1475 bheads = list(reversed(branches[branch]))
1471 if start is not None:
1476 if start is not None:
1472 # filter out the heads that cannot be reached from startrev
1477 # filter out the heads that cannot be reached from startrev
1473 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1478 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1474 bheads = [h for h in bheads if h in fbheads]
1479 bheads = [h for h in bheads if h in fbheads]
1475 if not closed:
1480 if not closed:
1476 bheads = [h for h in bheads if
1481 bheads = [h for h in bheads if
1477 ('close' not in self.changelog.read(h)[5])]
1482 ('close' not in self.changelog.read(h)[5])]
1478 return bheads
1483 return bheads
1479
1484
1480 def branches(self, nodes):
1485 def branches(self, nodes):
1481 if not nodes:
1486 if not nodes:
1482 nodes = [self.changelog.tip()]
1487 nodes = [self.changelog.tip()]
1483 b = []
1488 b = []
1484 for n in nodes:
1489 for n in nodes:
1485 t = n
1490 t = n
1486 while True:
1491 while True:
1487 p = self.changelog.parents(n)
1492 p = self.changelog.parents(n)
1488 if p[1] != nullid or p[0] == nullid:
1493 if p[1] != nullid or p[0] == nullid:
1489 b.append((t, n, p[0], p[1]))
1494 b.append((t, n, p[0], p[1]))
1490 break
1495 break
1491 n = p[0]
1496 n = p[0]
1492 return b
1497 return b
1493
1498
1494 def between(self, pairs):
1499 def between(self, pairs):
1495 r = []
1500 r = []
1496
1501
1497 for top, bottom in pairs:
1502 for top, bottom in pairs:
1498 n, l, i = top, [], 0
1503 n, l, i = top, [], 0
1499 f = 1
1504 f = 1
1500
1505
1501 while n != bottom and n != nullid:
1506 while n != bottom and n != nullid:
1502 p = self.changelog.parents(n)[0]
1507 p = self.changelog.parents(n)[0]
1503 if i == f:
1508 if i == f:
1504 l.append(n)
1509 l.append(n)
1505 f = f * 2
1510 f = f * 2
1506 n = p
1511 n = p
1507 i += 1
1512 i += 1
1508
1513
1509 r.append(l)
1514 r.append(l)
1510
1515
1511 return r
1516 return r
1512
1517
1513 def pull(self, remote, heads=None, force=False):
1518 def pull(self, remote, heads=None, force=False):
1514 lock = self.lock()
1519 lock = self.lock()
1515 try:
1520 try:
1516 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1521 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1517 force=force)
1522 force=force)
1518 common, fetch, rheads = tmp
1523 common, fetch, rheads = tmp
1519 if not fetch:
1524 if not fetch:
1520 self.ui.status(_("no changes found\n"))
1525 self.ui.status(_("no changes found\n"))
1521 added = []
1526 added = []
1522 result = 0
1527 result = 0
1523 else:
1528 else:
1524 if heads is None and list(common) == [nullid]:
1529 if heads is None and list(common) == [nullid]:
1525 self.ui.status(_("requesting all changes\n"))
1530 self.ui.status(_("requesting all changes\n"))
1526 elif heads is None and remote.capable('changegroupsubset'):
1531 elif heads is None and remote.capable('changegroupsubset'):
1527 # issue1320, avoid a race if remote changed after discovery
1532 # issue1320, avoid a race if remote changed after discovery
1528 heads = rheads
1533 heads = rheads
1529
1534
1530 if remote.capable('getbundle'):
1535 if remote.capable('getbundle'):
1531 cg = remote.getbundle('pull', common=common,
1536 cg = remote.getbundle('pull', common=common,
1532 heads=heads or rheads)
1537 heads=heads or rheads)
1533 elif heads is None:
1538 elif heads is None:
1534 cg = remote.changegroup(fetch, 'pull')
1539 cg = remote.changegroup(fetch, 'pull')
1535 elif not remote.capable('changegroupsubset'):
1540 elif not remote.capable('changegroupsubset'):
1536 raise util.Abort(_("partial pull cannot be done because "
1541 raise util.Abort(_("partial pull cannot be done because "
1537 "other repository doesn't support "
1542 "other repository doesn't support "
1538 "changegroupsubset."))
1543 "changegroupsubset."))
1539 else:
1544 else:
1540 cg = remote.changegroupsubset(fetch, heads, 'pull')
1545 cg = remote.changegroupsubset(fetch, heads, 'pull')
1541 clstart = len(self.changelog)
1546 clstart = len(self.changelog)
1542 result = self.addchangegroup(cg, 'pull', remote.url())
1547 result = self.addchangegroup(cg, 'pull', remote.url())
1543 clend = len(self.changelog)
1548 clend = len(self.changelog)
1544 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1549 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1545
1550
1546
1551
1547 # Get remote phases data from remote
1552 # Get remote phases data from remote
1548 remotephases = remote.listkeys('phases')
1553 remotephases = remote.listkeys('phases')
1549 publishing = bool(remotephases.get('publishing', False))
1554 publishing = bool(remotephases.get('publishing', False))
1550 if remotephases and not publishing:
1555 if remotephases and not publishing:
1551 # remote is new and unpublishing
1556 # remote is new and unpublishing
1552 subset = common + added
1557 subset = common + added
1553 rheads, rroots = phases.analyzeremotephases(self, subset,
1558 rheads, rroots = phases.analyzeremotephases(self, subset,
1554 remotephases)
1559 remotephases)
1555 for phase, boundary in enumerate(rheads):
1560 for phase, boundary in enumerate(rheads):
1556 phases.advanceboundary(self, phase, boundary)
1561 phases.advanceboundary(self, phase, boundary)
1557 else:
1562 else:
1558 # Remote is old or publishing all common changesets
1563 # Remote is old or publishing all common changesets
1559 # should be seen as public
1564 # should be seen as public
1560 phases.advanceboundary(self, phases.public, common + added)
1565 phases.advanceboundary(self, phases.public, common + added)
1561 finally:
1566 finally:
1562 lock.release()
1567 lock.release()
1563
1568
1564 return result
1569 return result
1565
1570
1566 def checkpush(self, force, revs):
1571 def checkpush(self, force, revs):
1567 """Extensions can override this function if additional checks have
1572 """Extensions can override this function if additional checks have
1568 to be performed before pushing, or call it if they override push
1573 to be performed before pushing, or call it if they override push
1569 command.
1574 command.
1570 """
1575 """
1571 pass
1576 pass
1572
1577
1573 def push(self, remote, force=False, revs=None, newbranch=False):
1578 def push(self, remote, force=False, revs=None, newbranch=False):
1574 '''Push outgoing changesets (limited by revs) from the current
1579 '''Push outgoing changesets (limited by revs) from the current
1575 repository to remote. Return an integer:
1580 repository to remote. Return an integer:
1576 - 0 means HTTP error *or* nothing to push
1581 - 0 means HTTP error *or* nothing to push
1577 - 1 means we pushed and remote head count is unchanged *or*
1582 - 1 means we pushed and remote head count is unchanged *or*
1578 we have outgoing changesets but refused to push
1583 we have outgoing changesets but refused to push
1579 - other values as described by addchangegroup()
1584 - other values as described by addchangegroup()
1580 '''
1585 '''
1581 # there are two ways to push to remote repo:
1586 # there are two ways to push to remote repo:
1582 #
1587 #
1583 # addchangegroup assumes local user can lock remote
1588 # addchangegroup assumes local user can lock remote
1584 # repo (local filesystem, old ssh servers).
1589 # repo (local filesystem, old ssh servers).
1585 #
1590 #
1586 # unbundle assumes local user cannot lock remote repo (new ssh
1591 # unbundle assumes local user cannot lock remote repo (new ssh
1587 # servers, http servers).
1592 # servers, http servers).
1588
1593
1589 self.checkpush(force, revs)
1594 self.checkpush(force, revs)
1590 lock = None
1595 lock = None
1591 unbundle = remote.capable('unbundle')
1596 unbundle = remote.capable('unbundle')
1592 if not unbundle:
1597 if not unbundle:
1593 lock = remote.lock()
1598 lock = remote.lock()
1594 try:
1599 try:
1595 # get local lock as we might write phase data
1600 # get local lock as we might write phase data
1596 locallock = self.lock()
1601 locallock = self.lock()
1597 try:
1602 try:
1598 cg, remote_heads, fut = discovery.prepush(self, remote, force,
1603 cg, remote_heads, fut = discovery.prepush(self, remote, force,
1599 revs, newbranch)
1604 revs, newbranch)
1600 ret = remote_heads
1605 ret = remote_heads
1601 # create a callback for addchangegroup.
1606 # create a callback for addchangegroup.
1602 # If will be used branch of the conditionnal too.
1607 # If will be used branch of the conditionnal too.
1603 if cg is not None:
1608 if cg is not None:
1604 if unbundle:
1609 if unbundle:
1605 # local repo finds heads on server, finds out what
1610 # local repo finds heads on server, finds out what
1606 # revs it must push. once revs transferred, if server
1611 # revs it must push. once revs transferred, if server
1607 # finds it has different heads (someone else won
1612 # finds it has different heads (someone else won
1608 # commit/push race), server aborts.
1613 # commit/push race), server aborts.
1609 if force:
1614 if force:
1610 remote_heads = ['force']
1615 remote_heads = ['force']
1611 # ssh: return remote's addchangegroup()
1616 # ssh: return remote's addchangegroup()
1612 # http: return remote's addchangegroup() or 0 for error
1617 # http: return remote's addchangegroup() or 0 for error
1613 ret = remote.unbundle(cg, remote_heads, 'push')
1618 ret = remote.unbundle(cg, remote_heads, 'push')
1614 else:
1619 else:
1615 # we return an integer indicating remote head count change
1620 # we return an integer indicating remote head count change
1616 ret = remote.addchangegroup(cg, 'push', self.url())
1621 ret = remote.addchangegroup(cg, 'push', self.url())
1617
1622
1618 # even when we don't push, exchanging phase data is useful
1623 # even when we don't push, exchanging phase data is useful
1619 remotephases = remote.listkeys('phases')
1624 remotephases = remote.listkeys('phases')
1620 if not remotephases: # old server or public only repo
1625 if not remotephases: # old server or public only repo
1621 phases.advanceboundary(self, phases.public, fut)
1626 phases.advanceboundary(self, phases.public, fut)
1622 # don't push any phase data as there is nothing to push
1627 # don't push any phase data as there is nothing to push
1623 else:
1628 else:
1624 ana = phases.analyzeremotephases(self, fut, remotephases)
1629 ana = phases.analyzeremotephases(self, fut, remotephases)
1625 rheads, rroots = ana
1630 rheads, rroots = ana
1626 ### Apply remote phase on local
1631 ### Apply remote phase on local
1627 if remotephases.get('publishing', False):
1632 if remotephases.get('publishing', False):
1628 phases.advanceboundary(self, phases.public, fut)
1633 phases.advanceboundary(self, phases.public, fut)
1629 else: # publish = False
1634 else: # publish = False
1630 for phase, rpheads in enumerate(rheads):
1635 for phase, rpheads in enumerate(rheads):
1631 phases.advanceboundary(self, phase, rpheads)
1636 phases.advanceboundary(self, phase, rpheads)
1632 ### Apply local phase on remote
1637 ### Apply local phase on remote
1633 #
1638 #
1634 # XXX If push failed we should use strict common and not
1639 # XXX If push failed we should use strict common and not
1635 # future to avoir pushing phase data on unknown changeset.
1640 # future to avoir pushing phase data on unknown changeset.
1636 # This is to done later.
1641 # This is to done later.
1637
1642
1638 # element we want to push
1643 # element we want to push
1639 topush = []
1644 topush = []
1640
1645
1641 # store details of known remote phase of several revision
1646 # store details of known remote phase of several revision
1642 # /!\ set of index I holds rev where: I <= rev.phase()
1647 # /!\ set of index I holds rev where: I <= rev.phase()
1643 # /!\ public phase (index 0) is ignored
1648 # /!\ public phase (index 0) is ignored
1644 remdetails = [set() for i in xrange(len(phases.allphases))]
1649 remdetails = [set() for i in xrange(len(phases.allphases))]
1645 _revs = set()
1650 _revs = set()
1646 for relremphase in phases.trackedphases[::-1]:
1651 for relremphase in phases.trackedphases[::-1]:
1647 # we iterate backward because the list alway grows
1652 # we iterate backward because the list alway grows
1648 # when filled in this direction.
1653 # when filled in this direction.
1649 _revs.update(self.revs('%ln::%ln',
1654 _revs.update(self.revs('%ln::%ln',
1650 rroots[relremphase], fut))
1655 rroots[relremphase], fut))
1651 remdetails[relremphase].update(_revs)
1656 remdetails[relremphase].update(_revs)
1652
1657
1653 for phase in phases.allphases[:-1]:
1658 for phase in phases.allphases[:-1]:
1654 # We don't need the last phase as we will never want to
1659 # We don't need the last phase as we will never want to
1655 # move anything to it while moving phase backward.
1660 # move anything to it while moving phase backward.
1656
1661
1657 # Get the list of all revs on remote which are in a
1662 # Get the list of all revs on remote which are in a
1658 # phase higher than currently processed phase.
1663 # phase higher than currently processed phase.
1659 relremrev = remdetails[phase + 1]
1664 relremrev = remdetails[phase + 1]
1660
1665
1661 if not relremrev:
1666 if not relremrev:
1662 # no candidate to remote push anymore
1667 # no candidate to remote push anymore
1663 # break before any expensive revset
1668 # break before any expensive revset
1664 break
1669 break
1665
1670
1666 #dynamical inject appropriate phase symbol
1671 #dynamical inject appropriate phase symbol
1667 phasename = phases.phasenames[phase]
1672 phasename = phases.phasenames[phase]
1668 odrevset = 'heads(%%ld and %s())' % phasename
1673 odrevset = 'heads(%%ld and %s())' % phasename
1669 outdated = self.set(odrevset, relremrev)
1674 outdated = self.set(odrevset, relremrev)
1670 for od in outdated:
1675 for od in outdated:
1671 candstart = len(remdetails) - 1
1676 candstart = len(remdetails) - 1
1672 candstop = phase + 1
1677 candstop = phase + 1
1673 candidateold = xrange(candstart, candstop, -1)
1678 candidateold = xrange(candstart, candstop, -1)
1674 for oldphase in candidateold:
1679 for oldphase in candidateold:
1675 if od.rev() in remdetails[oldphase]:
1680 if od.rev() in remdetails[oldphase]:
1676 break
1681 break
1677 else: # last one: no need to search
1682 else: # last one: no need to search
1678 oldphase = phase + 1
1683 oldphase = phase + 1
1679 topush.append((oldphase, phase, od))
1684 topush.append((oldphase, phase, od))
1680
1685
1681 # push every needed data
1686 # push every needed data
1682 for oldphase, newphase, newremotehead in topush:
1687 for oldphase, newphase, newremotehead in topush:
1683 r = remote.pushkey('phases',
1688 r = remote.pushkey('phases',
1684 newremotehead.hex(),
1689 newremotehead.hex(),
1685 str(oldphase), str(newphase))
1690 str(oldphase), str(newphase))
1686 if not r:
1691 if not r:
1687 self.ui.warn(_('updating phase of %s '
1692 self.ui.warn(_('updating phase of %s '
1688 'to %s from %s failed!\n')
1693 'to %s from %s failed!\n')
1689 % (newremotehead, newphase,
1694 % (newremotehead, newphase,
1690 oldphase))
1695 oldphase))
1691 finally:
1696 finally:
1692 locallock.release()
1697 locallock.release()
1693 finally:
1698 finally:
1694 if lock is not None:
1699 if lock is not None:
1695 lock.release()
1700 lock.release()
1696
1701
1697 self.ui.debug("checking for updated bookmarks\n")
1702 self.ui.debug("checking for updated bookmarks\n")
1698 rb = remote.listkeys('bookmarks')
1703 rb = remote.listkeys('bookmarks')
1699 for k in rb.keys():
1704 for k in rb.keys():
1700 if k in self._bookmarks:
1705 if k in self._bookmarks:
1701 nr, nl = rb[k], hex(self._bookmarks[k])
1706 nr, nl = rb[k], hex(self._bookmarks[k])
1702 if nr in self:
1707 if nr in self:
1703 cr = self[nr]
1708 cr = self[nr]
1704 cl = self[nl]
1709 cl = self[nl]
1705 if cl in cr.descendants():
1710 if cl in cr.descendants():
1706 r = remote.pushkey('bookmarks', k, nr, nl)
1711 r = remote.pushkey('bookmarks', k, nr, nl)
1707 if r:
1712 if r:
1708 self.ui.status(_("updating bookmark %s\n") % k)
1713 self.ui.status(_("updating bookmark %s\n") % k)
1709 else:
1714 else:
1710 self.ui.warn(_('updating bookmark %s'
1715 self.ui.warn(_('updating bookmark %s'
1711 ' failed!\n') % k)
1716 ' failed!\n') % k)
1712
1717
1713 return ret
1718 return ret
1714
1719
1715 def changegroupinfo(self, nodes, source):
1720 def changegroupinfo(self, nodes, source):
1716 if self.ui.verbose or source == 'bundle':
1721 if self.ui.verbose or source == 'bundle':
1717 self.ui.status(_("%d changesets found\n") % len(nodes))
1722 self.ui.status(_("%d changesets found\n") % len(nodes))
1718 if self.ui.debugflag:
1723 if self.ui.debugflag:
1719 self.ui.debug("list of changesets:\n")
1724 self.ui.debug("list of changesets:\n")
1720 for node in nodes:
1725 for node in nodes:
1721 self.ui.debug("%s\n" % hex(node))
1726 self.ui.debug("%s\n" % hex(node))
1722
1727
1723 def changegroupsubset(self, bases, heads, source):
1728 def changegroupsubset(self, bases, heads, source):
1724 """Compute a changegroup consisting of all the nodes that are
1729 """Compute a changegroup consisting of all the nodes that are
1725 descendants of any of the bases and ancestors of any of the heads.
1730 descendants of any of the bases and ancestors of any of the heads.
1726 Return a chunkbuffer object whose read() method will return
1731 Return a chunkbuffer object whose read() method will return
1727 successive changegroup chunks.
1732 successive changegroup chunks.
1728
1733
1729 It is fairly complex as determining which filenodes and which
1734 It is fairly complex as determining which filenodes and which
1730 manifest nodes need to be included for the changeset to be complete
1735 manifest nodes need to be included for the changeset to be complete
1731 is non-trivial.
1736 is non-trivial.
1732
1737
1733 Another wrinkle is doing the reverse, figuring out which changeset in
1738 Another wrinkle is doing the reverse, figuring out which changeset in
1734 the changegroup a particular filenode or manifestnode belongs to.
1739 the changegroup a particular filenode or manifestnode belongs to.
1735 """
1740 """
1736 cl = self.changelog
1741 cl = self.changelog
1737 if not bases:
1742 if not bases:
1738 bases = [nullid]
1743 bases = [nullid]
1739 csets, bases, heads = cl.nodesbetween(bases, heads)
1744 csets, bases, heads = cl.nodesbetween(bases, heads)
1740 # We assume that all ancestors of bases are known
1745 # We assume that all ancestors of bases are known
1741 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1746 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1742 return self._changegroupsubset(common, csets, heads, source)
1747 return self._changegroupsubset(common, csets, heads, source)
1743
1748
1744 def getlocalbundle(self, source, outgoing):
1749 def getlocalbundle(self, source, outgoing):
1745 """Like getbundle, but taking a discovery.outgoing as an argument.
1750 """Like getbundle, but taking a discovery.outgoing as an argument.
1746
1751
1747 This is only implemented for local repos and reuses potentially
1752 This is only implemented for local repos and reuses potentially
1748 precomputed sets in outgoing."""
1753 precomputed sets in outgoing."""
1749 if not outgoing.missing:
1754 if not outgoing.missing:
1750 return None
1755 return None
1751 return self._changegroupsubset(outgoing.common,
1756 return self._changegroupsubset(outgoing.common,
1752 outgoing.missing,
1757 outgoing.missing,
1753 outgoing.missingheads,
1758 outgoing.missingheads,
1754 source)
1759 source)
1755
1760
1756 def getbundle(self, source, heads=None, common=None):
1761 def getbundle(self, source, heads=None, common=None):
1757 """Like changegroupsubset, but returns the set difference between the
1762 """Like changegroupsubset, but returns the set difference between the
1758 ancestors of heads and the ancestors common.
1763 ancestors of heads and the ancestors common.
1759
1764
1760 If heads is None, use the local heads. If common is None, use [nullid].
1765 If heads is None, use the local heads. If common is None, use [nullid].
1761
1766
1762 The nodes in common might not all be known locally due to the way the
1767 The nodes in common might not all be known locally due to the way the
1763 current discovery protocol works.
1768 current discovery protocol works.
1764 """
1769 """
1765 cl = self.changelog
1770 cl = self.changelog
1766 if common:
1771 if common:
1767 nm = cl.nodemap
1772 nm = cl.nodemap
1768 common = [n for n in common if n in nm]
1773 common = [n for n in common if n in nm]
1769 else:
1774 else:
1770 common = [nullid]
1775 common = [nullid]
1771 if not heads:
1776 if not heads:
1772 heads = cl.heads()
1777 heads = cl.heads()
1773 return self.getlocalbundle(source,
1778 return self.getlocalbundle(source,
1774 discovery.outgoing(cl, common, heads))
1779 discovery.outgoing(cl, common, heads))
1775
1780
1776 def _changegroupsubset(self, commonrevs, csets, heads, source):
1781 def _changegroupsubset(self, commonrevs, csets, heads, source):
1777
1782
1778 cl = self.changelog
1783 cl = self.changelog
1779 mf = self.manifest
1784 mf = self.manifest
1780 mfs = {} # needed manifests
1785 mfs = {} # needed manifests
1781 fnodes = {} # needed file nodes
1786 fnodes = {} # needed file nodes
1782 changedfiles = set()
1787 changedfiles = set()
1783 fstate = ['', {}]
1788 fstate = ['', {}]
1784 count = [0]
1789 count = [0]
1785
1790
1786 # can we go through the fast path ?
1791 # can we go through the fast path ?
1787 heads.sort()
1792 heads.sort()
1788 if heads == sorted(self.heads()):
1793 if heads == sorted(self.heads()):
1789 return self._changegroup(csets, source)
1794 return self._changegroup(csets, source)
1790
1795
1791 # slow path
1796 # slow path
1792 self.hook('preoutgoing', throw=True, source=source)
1797 self.hook('preoutgoing', throw=True, source=source)
1793 self.changegroupinfo(csets, source)
1798 self.changegroupinfo(csets, source)
1794
1799
1795 # filter any nodes that claim to be part of the known set
1800 # filter any nodes that claim to be part of the known set
1796 def prune(revlog, missing):
1801 def prune(revlog, missing):
1797 return [n for n in missing
1802 return [n for n in missing
1798 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1803 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1799
1804
1800 def lookup(revlog, x):
1805 def lookup(revlog, x):
1801 if revlog == cl:
1806 if revlog == cl:
1802 c = cl.read(x)
1807 c = cl.read(x)
1803 changedfiles.update(c[3])
1808 changedfiles.update(c[3])
1804 mfs.setdefault(c[0], x)
1809 mfs.setdefault(c[0], x)
1805 count[0] += 1
1810 count[0] += 1
1806 self.ui.progress(_('bundling'), count[0],
1811 self.ui.progress(_('bundling'), count[0],
1807 unit=_('changesets'), total=len(csets))
1812 unit=_('changesets'), total=len(csets))
1808 return x
1813 return x
1809 elif revlog == mf:
1814 elif revlog == mf:
1810 clnode = mfs[x]
1815 clnode = mfs[x]
1811 mdata = mf.readfast(x)
1816 mdata = mf.readfast(x)
1812 for f in changedfiles:
1817 for f in changedfiles:
1813 if f in mdata:
1818 if f in mdata:
1814 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1819 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1815 count[0] += 1
1820 count[0] += 1
1816 self.ui.progress(_('bundling'), count[0],
1821 self.ui.progress(_('bundling'), count[0],
1817 unit=_('manifests'), total=len(mfs))
1822 unit=_('manifests'), total=len(mfs))
1818 return mfs[x]
1823 return mfs[x]
1819 else:
1824 else:
1820 self.ui.progress(
1825 self.ui.progress(
1821 _('bundling'), count[0], item=fstate[0],
1826 _('bundling'), count[0], item=fstate[0],
1822 unit=_('files'), total=len(changedfiles))
1827 unit=_('files'), total=len(changedfiles))
1823 return fstate[1][x]
1828 return fstate[1][x]
1824
1829
1825 bundler = changegroup.bundle10(lookup)
1830 bundler = changegroup.bundle10(lookup)
1826 reorder = self.ui.config('bundle', 'reorder', 'auto')
1831 reorder = self.ui.config('bundle', 'reorder', 'auto')
1827 if reorder == 'auto':
1832 if reorder == 'auto':
1828 reorder = None
1833 reorder = None
1829 else:
1834 else:
1830 reorder = util.parsebool(reorder)
1835 reorder = util.parsebool(reorder)
1831
1836
1832 def gengroup():
1837 def gengroup():
1833 # Create a changenode group generator that will call our functions
1838 # Create a changenode group generator that will call our functions
1834 # back to lookup the owning changenode and collect information.
1839 # back to lookup the owning changenode and collect information.
1835 for chunk in cl.group(csets, bundler, reorder=reorder):
1840 for chunk in cl.group(csets, bundler, reorder=reorder):
1836 yield chunk
1841 yield chunk
1837 self.ui.progress(_('bundling'), None)
1842 self.ui.progress(_('bundling'), None)
1838
1843
1839 # Create a generator for the manifestnodes that calls our lookup
1844 # Create a generator for the manifestnodes that calls our lookup
1840 # and data collection functions back.
1845 # and data collection functions back.
1841 count[0] = 0
1846 count[0] = 0
1842 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1847 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1843 yield chunk
1848 yield chunk
1844 self.ui.progress(_('bundling'), None)
1849 self.ui.progress(_('bundling'), None)
1845
1850
1846 mfs.clear()
1851 mfs.clear()
1847
1852
1848 # Go through all our files in order sorted by name.
1853 # Go through all our files in order sorted by name.
1849 count[0] = 0
1854 count[0] = 0
1850 for fname in sorted(changedfiles):
1855 for fname in sorted(changedfiles):
1851 filerevlog = self.file(fname)
1856 filerevlog = self.file(fname)
1852 if not len(filerevlog):
1857 if not len(filerevlog):
1853 raise util.Abort(_("empty or missing revlog for %s") % fname)
1858 raise util.Abort(_("empty or missing revlog for %s") % fname)
1854 fstate[0] = fname
1859 fstate[0] = fname
1855 fstate[1] = fnodes.pop(fname, {})
1860 fstate[1] = fnodes.pop(fname, {})
1856
1861
1857 nodelist = prune(filerevlog, fstate[1])
1862 nodelist = prune(filerevlog, fstate[1])
1858 if nodelist:
1863 if nodelist:
1859 count[0] += 1
1864 count[0] += 1
1860 yield bundler.fileheader(fname)
1865 yield bundler.fileheader(fname)
1861 for chunk in filerevlog.group(nodelist, bundler, reorder):
1866 for chunk in filerevlog.group(nodelist, bundler, reorder):
1862 yield chunk
1867 yield chunk
1863
1868
1864 # Signal that no more groups are left.
1869 # Signal that no more groups are left.
1865 yield bundler.close()
1870 yield bundler.close()
1866 self.ui.progress(_('bundling'), None)
1871 self.ui.progress(_('bundling'), None)
1867
1872
1868 if csets:
1873 if csets:
1869 self.hook('outgoing', node=hex(csets[0]), source=source)
1874 self.hook('outgoing', node=hex(csets[0]), source=source)
1870
1875
1871 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1876 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1872
1877
1873 def changegroup(self, basenodes, source):
1878 def changegroup(self, basenodes, source):
1874 # to avoid a race we use changegroupsubset() (issue1320)
1879 # to avoid a race we use changegroupsubset() (issue1320)
1875 return self.changegroupsubset(basenodes, self.heads(), source)
1880 return self.changegroupsubset(basenodes, self.heads(), source)
1876
1881
1877 def _changegroup(self, nodes, source):
1882 def _changegroup(self, nodes, source):
1878 """Compute the changegroup of all nodes that we have that a recipient
1883 """Compute the changegroup of all nodes that we have that a recipient
1879 doesn't. Return a chunkbuffer object whose read() method will return
1884 doesn't. Return a chunkbuffer object whose read() method will return
1880 successive changegroup chunks.
1885 successive changegroup chunks.
1881
1886
1882 This is much easier than the previous function as we can assume that
1887 This is much easier than the previous function as we can assume that
1883 the recipient has any changenode we aren't sending them.
1888 the recipient has any changenode we aren't sending them.
1884
1889
1885 nodes is the set of nodes to send"""
1890 nodes is the set of nodes to send"""
1886
1891
1887 cl = self.changelog
1892 cl = self.changelog
1888 mf = self.manifest
1893 mf = self.manifest
1889 mfs = {}
1894 mfs = {}
1890 changedfiles = set()
1895 changedfiles = set()
1891 fstate = ['']
1896 fstate = ['']
1892 count = [0]
1897 count = [0]
1893
1898
1894 self.hook('preoutgoing', throw=True, source=source)
1899 self.hook('preoutgoing', throw=True, source=source)
1895 self.changegroupinfo(nodes, source)
1900 self.changegroupinfo(nodes, source)
1896
1901
1897 revset = set([cl.rev(n) for n in nodes])
1902 revset = set([cl.rev(n) for n in nodes])
1898
1903
1899 def gennodelst(log):
1904 def gennodelst(log):
1900 return [log.node(r) for r in log if log.linkrev(r) in revset]
1905 return [log.node(r) for r in log if log.linkrev(r) in revset]
1901
1906
1902 def lookup(revlog, x):
1907 def lookup(revlog, x):
1903 if revlog == cl:
1908 if revlog == cl:
1904 c = cl.read(x)
1909 c = cl.read(x)
1905 changedfiles.update(c[3])
1910 changedfiles.update(c[3])
1906 mfs.setdefault(c[0], x)
1911 mfs.setdefault(c[0], x)
1907 count[0] += 1
1912 count[0] += 1
1908 self.ui.progress(_('bundling'), count[0],
1913 self.ui.progress(_('bundling'), count[0],
1909 unit=_('changesets'), total=len(nodes))
1914 unit=_('changesets'), total=len(nodes))
1910 return x
1915 return x
1911 elif revlog == mf:
1916 elif revlog == mf:
1912 count[0] += 1
1917 count[0] += 1
1913 self.ui.progress(_('bundling'), count[0],
1918 self.ui.progress(_('bundling'), count[0],
1914 unit=_('manifests'), total=len(mfs))
1919 unit=_('manifests'), total=len(mfs))
1915 return cl.node(revlog.linkrev(revlog.rev(x)))
1920 return cl.node(revlog.linkrev(revlog.rev(x)))
1916 else:
1921 else:
1917 self.ui.progress(
1922 self.ui.progress(
1918 _('bundling'), count[0], item=fstate[0],
1923 _('bundling'), count[0], item=fstate[0],
1919 total=len(changedfiles), unit=_('files'))
1924 total=len(changedfiles), unit=_('files'))
1920 return cl.node(revlog.linkrev(revlog.rev(x)))
1925 return cl.node(revlog.linkrev(revlog.rev(x)))
1921
1926
1922 bundler = changegroup.bundle10(lookup)
1927 bundler = changegroup.bundle10(lookup)
1923 reorder = self.ui.config('bundle', 'reorder', 'auto')
1928 reorder = self.ui.config('bundle', 'reorder', 'auto')
1924 if reorder == 'auto':
1929 if reorder == 'auto':
1925 reorder = None
1930 reorder = None
1926 else:
1931 else:
1927 reorder = util.parsebool(reorder)
1932 reorder = util.parsebool(reorder)
1928
1933
1929 def gengroup():
1934 def gengroup():
1930 '''yield a sequence of changegroup chunks (strings)'''
1935 '''yield a sequence of changegroup chunks (strings)'''
1931 # construct a list of all changed files
1936 # construct a list of all changed files
1932
1937
1933 for chunk in cl.group(nodes, bundler, reorder=reorder):
1938 for chunk in cl.group(nodes, bundler, reorder=reorder):
1934 yield chunk
1939 yield chunk
1935 self.ui.progress(_('bundling'), None)
1940 self.ui.progress(_('bundling'), None)
1936
1941
1937 count[0] = 0
1942 count[0] = 0
1938 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1943 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1939 yield chunk
1944 yield chunk
1940 self.ui.progress(_('bundling'), None)
1945 self.ui.progress(_('bundling'), None)
1941
1946
1942 count[0] = 0
1947 count[0] = 0
1943 for fname in sorted(changedfiles):
1948 for fname in sorted(changedfiles):
1944 filerevlog = self.file(fname)
1949 filerevlog = self.file(fname)
1945 if not len(filerevlog):
1950 if not len(filerevlog):
1946 raise util.Abort(_("empty or missing revlog for %s") % fname)
1951 raise util.Abort(_("empty or missing revlog for %s") % fname)
1947 fstate[0] = fname
1952 fstate[0] = fname
1948 nodelist = gennodelst(filerevlog)
1953 nodelist = gennodelst(filerevlog)
1949 if nodelist:
1954 if nodelist:
1950 count[0] += 1
1955 count[0] += 1
1951 yield bundler.fileheader(fname)
1956 yield bundler.fileheader(fname)
1952 for chunk in filerevlog.group(nodelist, bundler, reorder):
1957 for chunk in filerevlog.group(nodelist, bundler, reorder):
1953 yield chunk
1958 yield chunk
1954 yield bundler.close()
1959 yield bundler.close()
1955 self.ui.progress(_('bundling'), None)
1960 self.ui.progress(_('bundling'), None)
1956
1961
1957 if nodes:
1962 if nodes:
1958 self.hook('outgoing', node=hex(nodes[0]), source=source)
1963 self.hook('outgoing', node=hex(nodes[0]), source=source)
1959
1964
1960 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1965 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1961
1966
1962 def addchangegroup(self, source, srctype, url, emptyok=False):
1967 def addchangegroup(self, source, srctype, url, emptyok=False):
1963 """Add the changegroup returned by source.read() to this repo.
1968 """Add the changegroup returned by source.read() to this repo.
1964 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1969 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1965 the URL of the repo where this changegroup is coming from.
1970 the URL of the repo where this changegroup is coming from.
1966
1971
1967 Return an integer summarizing the change to this repo:
1972 Return an integer summarizing the change to this repo:
1968 - nothing changed or no source: 0
1973 - nothing changed or no source: 0
1969 - more heads than before: 1+added heads (2..n)
1974 - more heads than before: 1+added heads (2..n)
1970 - fewer heads than before: -1-removed heads (-2..-n)
1975 - fewer heads than before: -1-removed heads (-2..-n)
1971 - number of heads stays the same: 1
1976 - number of heads stays the same: 1
1972 """
1977 """
1973 def csmap(x):
1978 def csmap(x):
1974 self.ui.debug("add changeset %s\n" % short(x))
1979 self.ui.debug("add changeset %s\n" % short(x))
1975 return len(cl)
1980 return len(cl)
1976
1981
1977 def revmap(x):
1982 def revmap(x):
1978 return cl.rev(x)
1983 return cl.rev(x)
1979
1984
1980 if not source:
1985 if not source:
1981 return 0
1986 return 0
1982
1987
1983 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1988 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1984
1989
1985 changesets = files = revisions = 0
1990 changesets = files = revisions = 0
1986 efiles = set()
1991 efiles = set()
1987
1992
1988 # write changelog data to temp files so concurrent readers will not see
1993 # write changelog data to temp files so concurrent readers will not see
1989 # inconsistent view
1994 # inconsistent view
1990 cl = self.changelog
1995 cl = self.changelog
1991 cl.delayupdate()
1996 cl.delayupdate()
1992 oldheads = cl.heads()
1997 oldheads = cl.heads()
1993
1998
1994 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1999 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1995 try:
2000 try:
1996 trp = weakref.proxy(tr)
2001 trp = weakref.proxy(tr)
1997 # pull off the changeset group
2002 # pull off the changeset group
1998 self.ui.status(_("adding changesets\n"))
2003 self.ui.status(_("adding changesets\n"))
1999 clstart = len(cl)
2004 clstart = len(cl)
2000 class prog(object):
2005 class prog(object):
2001 step = _('changesets')
2006 step = _('changesets')
2002 count = 1
2007 count = 1
2003 ui = self.ui
2008 ui = self.ui
2004 total = None
2009 total = None
2005 def __call__(self):
2010 def __call__(self):
2006 self.ui.progress(self.step, self.count, unit=_('chunks'),
2011 self.ui.progress(self.step, self.count, unit=_('chunks'),
2007 total=self.total)
2012 total=self.total)
2008 self.count += 1
2013 self.count += 1
2009 pr = prog()
2014 pr = prog()
2010 source.callback = pr
2015 source.callback = pr
2011
2016
2012 source.changelogheader()
2017 source.changelogheader()
2013 if (cl.addgroup(source, csmap, trp) is None
2018 if (cl.addgroup(source, csmap, trp) is None
2014 and not emptyok):
2019 and not emptyok):
2015 raise util.Abort(_("received changelog group is empty"))
2020 raise util.Abort(_("received changelog group is empty"))
2016 clend = len(cl)
2021 clend = len(cl)
2017 changesets = clend - clstart
2022 changesets = clend - clstart
2018 for c in xrange(clstart, clend):
2023 for c in xrange(clstart, clend):
2019 efiles.update(self[c].files())
2024 efiles.update(self[c].files())
2020 efiles = len(efiles)
2025 efiles = len(efiles)
2021 self.ui.progress(_('changesets'), None)
2026 self.ui.progress(_('changesets'), None)
2022
2027
2023 # pull off the manifest group
2028 # pull off the manifest group
2024 self.ui.status(_("adding manifests\n"))
2029 self.ui.status(_("adding manifests\n"))
2025 pr.step = _('manifests')
2030 pr.step = _('manifests')
2026 pr.count = 1
2031 pr.count = 1
2027 pr.total = changesets # manifests <= changesets
2032 pr.total = changesets # manifests <= changesets
2028 # no need to check for empty manifest group here:
2033 # no need to check for empty manifest group here:
2029 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2034 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2030 # no new manifest will be created and the manifest group will
2035 # no new manifest will be created and the manifest group will
2031 # be empty during the pull
2036 # be empty during the pull
2032 source.manifestheader()
2037 source.manifestheader()
2033 self.manifest.addgroup(source, revmap, trp)
2038 self.manifest.addgroup(source, revmap, trp)
2034 self.ui.progress(_('manifests'), None)
2039 self.ui.progress(_('manifests'), None)
2035
2040
2036 needfiles = {}
2041 needfiles = {}
2037 if self.ui.configbool('server', 'validate', default=False):
2042 if self.ui.configbool('server', 'validate', default=False):
2038 # validate incoming csets have their manifests
2043 # validate incoming csets have their manifests
2039 for cset in xrange(clstart, clend):
2044 for cset in xrange(clstart, clend):
2040 mfest = self.changelog.read(self.changelog.node(cset))[0]
2045 mfest = self.changelog.read(self.changelog.node(cset))[0]
2041 mfest = self.manifest.readdelta(mfest)
2046 mfest = self.manifest.readdelta(mfest)
2042 # store file nodes we must see
2047 # store file nodes we must see
2043 for f, n in mfest.iteritems():
2048 for f, n in mfest.iteritems():
2044 needfiles.setdefault(f, set()).add(n)
2049 needfiles.setdefault(f, set()).add(n)
2045
2050
2046 # process the files
2051 # process the files
2047 self.ui.status(_("adding file changes\n"))
2052 self.ui.status(_("adding file changes\n"))
2048 pr.step = _('files')
2053 pr.step = _('files')
2049 pr.count = 1
2054 pr.count = 1
2050 pr.total = efiles
2055 pr.total = efiles
2051 source.callback = None
2056 source.callback = None
2052
2057
2053 while True:
2058 while True:
2054 chunkdata = source.filelogheader()
2059 chunkdata = source.filelogheader()
2055 if not chunkdata:
2060 if not chunkdata:
2056 break
2061 break
2057 f = chunkdata["filename"]
2062 f = chunkdata["filename"]
2058 self.ui.debug("adding %s revisions\n" % f)
2063 self.ui.debug("adding %s revisions\n" % f)
2059 pr()
2064 pr()
2060 fl = self.file(f)
2065 fl = self.file(f)
2061 o = len(fl)
2066 o = len(fl)
2062 if fl.addgroup(source, revmap, trp) is None:
2067 if fl.addgroup(source, revmap, trp) is None:
2063 raise util.Abort(_("received file revlog group is empty"))
2068 raise util.Abort(_("received file revlog group is empty"))
2064 revisions += len(fl) - o
2069 revisions += len(fl) - o
2065 files += 1
2070 files += 1
2066 if f in needfiles:
2071 if f in needfiles:
2067 needs = needfiles[f]
2072 needs = needfiles[f]
2068 for new in xrange(o, len(fl)):
2073 for new in xrange(o, len(fl)):
2069 n = fl.node(new)
2074 n = fl.node(new)
2070 if n in needs:
2075 if n in needs:
2071 needs.remove(n)
2076 needs.remove(n)
2072 if not needs:
2077 if not needs:
2073 del needfiles[f]
2078 del needfiles[f]
2074 self.ui.progress(_('files'), None)
2079 self.ui.progress(_('files'), None)
2075
2080
2076 for f, needs in needfiles.iteritems():
2081 for f, needs in needfiles.iteritems():
2077 fl = self.file(f)
2082 fl = self.file(f)
2078 for n in needs:
2083 for n in needs:
2079 try:
2084 try:
2080 fl.rev(n)
2085 fl.rev(n)
2081 except error.LookupError:
2086 except error.LookupError:
2082 raise util.Abort(
2087 raise util.Abort(
2083 _('missing file data for %s:%s - run hg verify') %
2088 _('missing file data for %s:%s - run hg verify') %
2084 (f, hex(n)))
2089 (f, hex(n)))
2085
2090
2086 dh = 0
2091 dh = 0
2087 if oldheads:
2092 if oldheads:
2088 heads = cl.heads()
2093 heads = cl.heads()
2089 dh = len(heads) - len(oldheads)
2094 dh = len(heads) - len(oldheads)
2090 for h in heads:
2095 for h in heads:
2091 if h not in oldheads and 'close' in self[h].extra():
2096 if h not in oldheads and 'close' in self[h].extra():
2092 dh -= 1
2097 dh -= 1
2093 htext = ""
2098 htext = ""
2094 if dh:
2099 if dh:
2095 htext = _(" (%+d heads)") % dh
2100 htext = _(" (%+d heads)") % dh
2096
2101
2097 self.ui.status(_("added %d changesets"
2102 self.ui.status(_("added %d changesets"
2098 " with %d changes to %d files%s\n")
2103 " with %d changes to %d files%s\n")
2099 % (changesets, revisions, files, htext))
2104 % (changesets, revisions, files, htext))
2100
2105
2101 if changesets > 0:
2106 if changesets > 0:
2102 p = lambda: cl.writepending() and self.root or ""
2107 p = lambda: cl.writepending() and self.root or ""
2103 self.hook('pretxnchangegroup', throw=True,
2108 self.hook('pretxnchangegroup', throw=True,
2104 node=hex(cl.node(clstart)), source=srctype,
2109 node=hex(cl.node(clstart)), source=srctype,
2105 url=url, pending=p)
2110 url=url, pending=p)
2106
2111
2107 added = [cl.node(r) for r in xrange(clstart, clend)]
2112 added = [cl.node(r) for r in xrange(clstart, clend)]
2108 publishing = self.ui.configbool('phases', 'publish', True)
2113 publishing = self.ui.configbool('phases', 'publish', True)
2109 if publishing and srctype == 'push':
2114 if publishing and srctype == 'push':
2110 # Old server can not push the boundary themself.
2115 # Old server can not push the boundary themself.
2111 # This clause ensure pushed changeset are alway marked as public
2116 # This clause ensure pushed changeset are alway marked as public
2112 phases.advanceboundary(self, phases.public, added)
2117 phases.advanceboundary(self, phases.public, added)
2113 elif srctype != 'strip': # strip should not touch boundary at all
2118 elif srctype != 'strip': # strip should not touch boundary at all
2114 phases.retractboundary(self, phases.draft, added)
2119 phases.retractboundary(self, phases.draft, added)
2115
2120
2116 # make changelog see real files again
2121 # make changelog see real files again
2117 cl.finalize(trp)
2122 cl.finalize(trp)
2118
2123
2119 tr.close()
2124 tr.close()
2120
2125
2121 if changesets > 0:
2126 if changesets > 0:
2122 def runhooks():
2127 def runhooks():
2123 # forcefully update the on-disk branch cache
2128 # forcefully update the on-disk branch cache
2124 self.ui.debug("updating the branch cache\n")
2129 self.ui.debug("updating the branch cache\n")
2125 self.updatebranchcache()
2130 self.updatebranchcache()
2126 self.hook("changegroup", node=hex(cl.node(clstart)),
2131 self.hook("changegroup", node=hex(cl.node(clstart)),
2127 source=srctype, url=url)
2132 source=srctype, url=url)
2128
2133
2129 for n in added:
2134 for n in added:
2130 self.hook("incoming", node=hex(n), source=srctype,
2135 self.hook("incoming", node=hex(n), source=srctype,
2131 url=url)
2136 url=url)
2132 self._afterlock(runhooks)
2137 self._afterlock(runhooks)
2133
2138
2134 finally:
2139 finally:
2135 tr.release()
2140 tr.release()
2136 # never return 0 here:
2141 # never return 0 here:
2137 if dh < 0:
2142 if dh < 0:
2138 return dh - 1
2143 return dh - 1
2139 else:
2144 else:
2140 return dh + 1
2145 return dh + 1
2141
2146
2142 def stream_in(self, remote, requirements):
2147 def stream_in(self, remote, requirements):
2143 lock = self.lock()
2148 lock = self.lock()
2144 try:
2149 try:
2145 fp = remote.stream_out()
2150 fp = remote.stream_out()
2146 l = fp.readline()
2151 l = fp.readline()
2147 try:
2152 try:
2148 resp = int(l)
2153 resp = int(l)
2149 except ValueError:
2154 except ValueError:
2150 raise error.ResponseError(
2155 raise error.ResponseError(
2151 _('Unexpected response from remote server:'), l)
2156 _('Unexpected response from remote server:'), l)
2152 if resp == 1:
2157 if resp == 1:
2153 raise util.Abort(_('operation forbidden by server'))
2158 raise util.Abort(_('operation forbidden by server'))
2154 elif resp == 2:
2159 elif resp == 2:
2155 raise util.Abort(_('locking the remote repository failed'))
2160 raise util.Abort(_('locking the remote repository failed'))
2156 elif resp != 0:
2161 elif resp != 0:
2157 raise util.Abort(_('the server sent an unknown error code'))
2162 raise util.Abort(_('the server sent an unknown error code'))
2158 self.ui.status(_('streaming all changes\n'))
2163 self.ui.status(_('streaming all changes\n'))
2159 l = fp.readline()
2164 l = fp.readline()
2160 try:
2165 try:
2161 total_files, total_bytes = map(int, l.split(' ', 1))
2166 total_files, total_bytes = map(int, l.split(' ', 1))
2162 except (ValueError, TypeError):
2167 except (ValueError, TypeError):
2163 raise error.ResponseError(
2168 raise error.ResponseError(
2164 _('Unexpected response from remote server:'), l)
2169 _('Unexpected response from remote server:'), l)
2165 self.ui.status(_('%d files to transfer, %s of data\n') %
2170 self.ui.status(_('%d files to transfer, %s of data\n') %
2166 (total_files, util.bytecount(total_bytes)))
2171 (total_files, util.bytecount(total_bytes)))
2167 start = time.time()
2172 start = time.time()
2168 for i in xrange(total_files):
2173 for i in xrange(total_files):
2169 # XXX doesn't support '\n' or '\r' in filenames
2174 # XXX doesn't support '\n' or '\r' in filenames
2170 l = fp.readline()
2175 l = fp.readline()
2171 try:
2176 try:
2172 name, size = l.split('\0', 1)
2177 name, size = l.split('\0', 1)
2173 size = int(size)
2178 size = int(size)
2174 except (ValueError, TypeError):
2179 except (ValueError, TypeError):
2175 raise error.ResponseError(
2180 raise error.ResponseError(
2176 _('Unexpected response from remote server:'), l)
2181 _('Unexpected response from remote server:'), l)
2177 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2182 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2178 # for backwards compat, name was partially encoded
2183 # for backwards compat, name was partially encoded
2179 ofp = self.sopener(store.decodedir(name), 'w')
2184 ofp = self.sopener(store.decodedir(name), 'w')
2180 for chunk in util.filechunkiter(fp, limit=size):
2185 for chunk in util.filechunkiter(fp, limit=size):
2181 ofp.write(chunk)
2186 ofp.write(chunk)
2182 ofp.close()
2187 ofp.close()
2183 elapsed = time.time() - start
2188 elapsed = time.time() - start
2184 if elapsed <= 0:
2189 if elapsed <= 0:
2185 elapsed = 0.001
2190 elapsed = 0.001
2186 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2191 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2187 (util.bytecount(total_bytes), elapsed,
2192 (util.bytecount(total_bytes), elapsed,
2188 util.bytecount(total_bytes / elapsed)))
2193 util.bytecount(total_bytes / elapsed)))
2189
2194
2190 # new requirements = old non-format requirements + new format-related
2195 # new requirements = old non-format requirements + new format-related
2191 # requirements from the streamed-in repository
2196 # requirements from the streamed-in repository
2192 requirements.update(set(self.requirements) - self.supportedformats)
2197 requirements.update(set(self.requirements) - self.supportedformats)
2193 self._applyrequirements(requirements)
2198 self._applyrequirements(requirements)
2194 self._writerequirements()
2199 self._writerequirements()
2195
2200
2196 self.invalidate()
2201 self.invalidate()
2197 return len(self.heads()) + 1
2202 return len(self.heads()) + 1
2198 finally:
2203 finally:
2199 lock.release()
2204 lock.release()
2200
2205
2201 def clone(self, remote, heads=[], stream=False):
2206 def clone(self, remote, heads=[], stream=False):
2202 '''clone remote repository.
2207 '''clone remote repository.
2203
2208
2204 keyword arguments:
2209 keyword arguments:
2205 heads: list of revs to clone (forces use of pull)
2210 heads: list of revs to clone (forces use of pull)
2206 stream: use streaming clone if possible'''
2211 stream: use streaming clone if possible'''
2207
2212
2208 # now, all clients that can request uncompressed clones can
2213 # now, all clients that can request uncompressed clones can
2209 # read repo formats supported by all servers that can serve
2214 # read repo formats supported by all servers that can serve
2210 # them.
2215 # them.
2211
2216
2212 # if revlog format changes, client will have to check version
2217 # if revlog format changes, client will have to check version
2213 # and format flags on "stream" capability, and use
2218 # and format flags on "stream" capability, and use
2214 # uncompressed only if compatible.
2219 # uncompressed only if compatible.
2215
2220
2216 if stream and not heads:
2221 if stream and not heads:
2217 # 'stream' means remote revlog format is revlogv1 only
2222 # 'stream' means remote revlog format is revlogv1 only
2218 if remote.capable('stream'):
2223 if remote.capable('stream'):
2219 return self.stream_in(remote, set(('revlogv1',)))
2224 return self.stream_in(remote, set(('revlogv1',)))
2220 # otherwise, 'streamreqs' contains the remote revlog format
2225 # otherwise, 'streamreqs' contains the remote revlog format
2221 streamreqs = remote.capable('streamreqs')
2226 streamreqs = remote.capable('streamreqs')
2222 if streamreqs:
2227 if streamreqs:
2223 streamreqs = set(streamreqs.split(','))
2228 streamreqs = set(streamreqs.split(','))
2224 # if we support it, stream in and adjust our requirements
2229 # if we support it, stream in and adjust our requirements
2225 if not streamreqs - self.supportedformats:
2230 if not streamreqs - self.supportedformats:
2226 return self.stream_in(remote, streamreqs)
2231 return self.stream_in(remote, streamreqs)
2227 return self.pull(remote, heads)
2232 return self.pull(remote, heads)
2228
2233
2229 def pushkey(self, namespace, key, old, new):
2234 def pushkey(self, namespace, key, old, new):
2230 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2235 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2231 old=old, new=new)
2236 old=old, new=new)
2232 ret = pushkey.push(self, namespace, key, old, new)
2237 ret = pushkey.push(self, namespace, key, old, new)
2233 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2238 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2234 ret=ret)
2239 ret=ret)
2235 return ret
2240 return ret
2236
2241
2237 def listkeys(self, namespace):
2242 def listkeys(self, namespace):
2238 self.hook('prelistkeys', throw=True, namespace=namespace)
2243 self.hook('prelistkeys', throw=True, namespace=namespace)
2239 values = pushkey.list(self, namespace)
2244 values = pushkey.list(self, namespace)
2240 self.hook('listkeys', namespace=namespace, values=values)
2245 self.hook('listkeys', namespace=namespace, values=values)
2241 return values
2246 return values
2242
2247
2243 def debugwireargs(self, one, two, three=None, four=None, five=None):
2248 def debugwireargs(self, one, two, three=None, four=None, five=None):
2244 '''used to test argument passing over the wire'''
2249 '''used to test argument passing over the wire'''
2245 return "%s %s %s %s %s" % (one, two, three, four, five)
2250 return "%s %s %s %s %s" % (one, two, three, four, five)
2246
2251
2247 def savecommitmessage(self, text):
2252 def savecommitmessage(self, text):
2248 fp = self.opener('last-message.txt', 'wb')
2253 fp = self.opener('last-message.txt', 'wb')
2249 try:
2254 try:
2250 fp.write(text)
2255 fp.write(text)
2251 finally:
2256 finally:
2252 fp.close()
2257 fp.close()
2253 return self.pathto(fp.name[len(self.root)+1:])
2258 return self.pathto(fp.name[len(self.root)+1:])
2254
2259
2255 # used to avoid circular references so destructors work
2260 # used to avoid circular references so destructors work
2256 def aftertrans(files):
2261 def aftertrans(files):
2257 renamefiles = [tuple(t) for t in files]
2262 renamefiles = [tuple(t) for t in files]
2258 def a():
2263 def a():
2259 for src, dest in renamefiles:
2264 for src, dest in renamefiles:
2260 util.rename(src, dest)
2265 util.rename(src, dest)
2261 return a
2266 return a
2262
2267
2263 def undoname(fn):
2268 def undoname(fn):
2264 base, name = os.path.split(fn)
2269 base, name = os.path.split(fn)
2265 assert name.startswith('journal')
2270 assert name.startswith('journal')
2266 return os.path.join(base, name.replace('journal', 'undo', 1))
2271 return os.path.join(base, name.replace('journal', 'undo', 1))
2267
2272
2268 def instance(ui, path, create):
2273 def instance(ui, path, create):
2269 return localrepository(ui, util.urllocalpath(path), create)
2274 return localrepository(ui, util.urllocalpath(path), create)
2270
2275
2271 def islocal(path):
2276 def islocal(path):
2272 return True
2277 return True
@@ -1,860 +1,982 b''
1 $ cat >> $HGRCPATH <<EOF
1 $ cat >> $HGRCPATH <<EOF
2 > [extensions]
2 > [extensions]
3 > graphlog=
3 > graphlog=
4 > EOF
4 > EOF
5 $ alias hgph='hg log -G --template "{rev} {phase} {desc} - {node|short}\n"'
5 $ alias hgph='hg log -G --template "{rev} {phase} {desc} - {node|short}\n"'
6
6
7 $ mkcommit() {
7 $ mkcommit() {
8 > echo "$1" > "$1"
8 > echo "$1" > "$1"
9 > hg add "$1"
9 > hg add "$1"
10 > message="$1"
10 > message="$1"
11 > shift
11 > shift
12 > hg ci -m "$message" $*
12 > hg ci -m "$message" $*
13 > }
13 > }
14
14
15 $ hg init alpha
15 $ hg init alpha
16 $ cd alpha
16 $ cd alpha
17 $ mkcommit a-A
17 $ mkcommit a-A
18 $ mkcommit a-B
18 $ mkcommit a-B
19 $ mkcommit a-C
19 $ mkcommit a-C
20 $ mkcommit a-D
20 $ mkcommit a-D
21 $ hgph
21 $ hgph
22 @ 3 draft a-D - b555f63b6063
22 @ 3 draft a-D - b555f63b6063
23 |
23 |
24 o 2 draft a-C - 54acac6f23ab
24 o 2 draft a-C - 54acac6f23ab
25 |
25 |
26 o 1 draft a-B - 548a3d25dbf0
26 o 1 draft a-B - 548a3d25dbf0
27 |
27 |
28 o 0 draft a-A - 054250a37db4
28 o 0 draft a-A - 054250a37db4
29
29
30
30
31 $ hg init ../beta
31 $ hg init ../beta
32 $ hg push -r 1 ../beta
32 $ hg push -r 1 ../beta
33 pushing to ../beta
33 pushing to ../beta
34 searching for changes
34 searching for changes
35 adding changesets
35 adding changesets
36 adding manifests
36 adding manifests
37 adding file changes
37 adding file changes
38 added 2 changesets with 2 changes to 2 files
38 added 2 changesets with 2 changes to 2 files
39 $ hgph
39 $ hgph
40 @ 3 draft a-D - b555f63b6063
40 @ 3 draft a-D - b555f63b6063
41 |
41 |
42 o 2 draft a-C - 54acac6f23ab
42 o 2 draft a-C - 54acac6f23ab
43 |
43 |
44 o 1 public a-B - 548a3d25dbf0
44 o 1 public a-B - 548a3d25dbf0
45 |
45 |
46 o 0 public a-A - 054250a37db4
46 o 0 public a-A - 054250a37db4
47
47
48
48
49 $ cd ../beta
49 $ cd ../beta
50 $ hgph
50 $ hgph
51 o 1 public a-B - 548a3d25dbf0
51 o 1 public a-B - 548a3d25dbf0
52 |
52 |
53 o 0 public a-A - 054250a37db4
53 o 0 public a-A - 054250a37db4
54
54
55 $ hg up -q
55 $ hg up -q
56 $ mkcommit b-A
56 $ mkcommit b-A
57 $ hgph
57 $ hgph
58 @ 2 draft b-A - f54f1bb90ff3
58 @ 2 draft b-A - f54f1bb90ff3
59 |
59 |
60 o 1 public a-B - 548a3d25dbf0
60 o 1 public a-B - 548a3d25dbf0
61 |
61 |
62 o 0 public a-A - 054250a37db4
62 o 0 public a-A - 054250a37db4
63
63
64 $ hg pull ../alpha
64 $ hg pull ../alpha
65 pulling from ../alpha
65 pulling from ../alpha
66 searching for changes
66 searching for changes
67 adding changesets
67 adding changesets
68 adding manifests
68 adding manifests
69 adding file changes
69 adding file changes
70 added 2 changesets with 2 changes to 2 files (+1 heads)
70 added 2 changesets with 2 changes to 2 files (+1 heads)
71 (run 'hg heads' to see heads, 'hg merge' to merge)
71 (run 'hg heads' to see heads, 'hg merge' to merge)
72 $ hgph
72 $ hgph
73 o 4 public a-D - b555f63b6063
73 o 4 public a-D - b555f63b6063
74 |
74 |
75 o 3 public a-C - 54acac6f23ab
75 o 3 public a-C - 54acac6f23ab
76 |
76 |
77 | @ 2 draft b-A - f54f1bb90ff3
77 | @ 2 draft b-A - f54f1bb90ff3
78 |/
78 |/
79 o 1 public a-B - 548a3d25dbf0
79 o 1 public a-B - 548a3d25dbf0
80 |
80 |
81 o 0 public a-A - 054250a37db4
81 o 0 public a-A - 054250a37db4
82
82
83
83
84 pull did not updated ../alpha state.
84 pull did not updated ../alpha state.
85 push from alpha to beta should update phase even if nothing is transfered
85 push from alpha to beta should update phase even if nothing is transfered
86
86
87 $ cd ../alpha
87 $ cd ../alpha
88 $ hgph # not updated by remote pull
88 $ hgph # not updated by remote pull
89 @ 3 draft a-D - b555f63b6063
89 @ 3 draft a-D - b555f63b6063
90 |
90 |
91 o 2 draft a-C - 54acac6f23ab
91 o 2 draft a-C - 54acac6f23ab
92 |
92 |
93 o 1 public a-B - 548a3d25dbf0
93 o 1 public a-B - 548a3d25dbf0
94 |
94 |
95 o 0 public a-A - 054250a37db4
95 o 0 public a-A - 054250a37db4
96
96
97 $ hg push ../beta
97 $ hg push ../beta
98 pushing to ../beta
98 pushing to ../beta
99 searching for changes
99 searching for changes
100 no changes found
100 no changes found
101 $ hgph
101 $ hgph
102 @ 3 public a-D - b555f63b6063
102 @ 3 public a-D - b555f63b6063
103 |
103 |
104 o 2 public a-C - 54acac6f23ab
104 o 2 public a-C - 54acac6f23ab
105 |
105 |
106 o 1 public a-B - 548a3d25dbf0
106 o 1 public a-B - 548a3d25dbf0
107 |
107 |
108 o 0 public a-A - 054250a37db4
108 o 0 public a-A - 054250a37db4
109
109
110
110
111 update must update phase of common changeset too
111 update must update phase of common changeset too
112
112
113 $ hg pull ../beta # getting b-A
113 $ hg pull ../beta # getting b-A
114 pulling from ../beta
114 pulling from ../beta
115 searching for changes
115 searching for changes
116 adding changesets
116 adding changesets
117 adding manifests
117 adding manifests
118 adding file changes
118 adding file changes
119 added 1 changesets with 1 changes to 1 files (+1 heads)
119 added 1 changesets with 1 changes to 1 files (+1 heads)
120 (run 'hg heads' to see heads, 'hg merge' to merge)
120 (run 'hg heads' to see heads, 'hg merge' to merge)
121
121
122 $ cd ../beta
122 $ cd ../beta
123 $ hgph # not updated by remote pull
123 $ hgph # not updated by remote pull
124 o 4 public a-D - b555f63b6063
124 o 4 public a-D - b555f63b6063
125 |
125 |
126 o 3 public a-C - 54acac6f23ab
126 o 3 public a-C - 54acac6f23ab
127 |
127 |
128 | @ 2 draft b-A - f54f1bb90ff3
128 | @ 2 draft b-A - f54f1bb90ff3
129 |/
129 |/
130 o 1 public a-B - 548a3d25dbf0
130 o 1 public a-B - 548a3d25dbf0
131 |
131 |
132 o 0 public a-A - 054250a37db4
132 o 0 public a-A - 054250a37db4
133
133
134 $ hg pull ../alpha
134 $ hg pull ../alpha
135 pulling from ../alpha
135 pulling from ../alpha
136 searching for changes
136 searching for changes
137 no changes found
137 no changes found
138 $ hgph
138 $ hgph
139 o 4 public a-D - b555f63b6063
139 o 4 public a-D - b555f63b6063
140 |
140 |
141 o 3 public a-C - 54acac6f23ab
141 o 3 public a-C - 54acac6f23ab
142 |
142 |
143 | @ 2 public b-A - f54f1bb90ff3
143 | @ 2 public b-A - f54f1bb90ff3
144 |/
144 |/
145 o 1 public a-B - 548a3d25dbf0
145 o 1 public a-B - 548a3d25dbf0
146 |
146 |
147 o 0 public a-A - 054250a37db4
147 o 0 public a-A - 054250a37db4
148
148
149
149
150 Publish configuration option
150 Publish configuration option
151 ----------------------------
151 ----------------------------
152
152
153 Pull
153 Pull
154 ````
154 ````
155
155
156 changegroup are added without phase movement
156 changegroup are added without phase movement
157
157
158 $ hg bundle -a ../base.bundle
158 $ hg bundle -a ../base.bundle
159 5 changesets found
159 5 changesets found
160 $ cd ..
160 $ cd ..
161 $ hg init mu
161 $ hg init mu
162 $ cd mu
162 $ cd mu
163 $ cat > .hg/hgrc << EOF
163 $ cat > .hg/hgrc << EOF
164 > [phases]
164 > [phases]
165 > publish=0
165 > publish=0
166 > EOF
166 > EOF
167 $ hg unbundle ../base.bundle
167 $ hg unbundle ../base.bundle
168 adding changesets
168 adding changesets
169 adding manifests
169 adding manifests
170 adding file changes
170 adding file changes
171 added 5 changesets with 5 changes to 5 files (+1 heads)
171 added 5 changesets with 5 changes to 5 files (+1 heads)
172 (run 'hg heads' to see heads, 'hg merge' to merge)
172 (run 'hg heads' to see heads, 'hg merge' to merge)
173 $ hgph
173 $ hgph
174 o 4 draft a-D - b555f63b6063
174 o 4 draft a-D - b555f63b6063
175 |
175 |
176 o 3 draft a-C - 54acac6f23ab
176 o 3 draft a-C - 54acac6f23ab
177 |
177 |
178 | o 2 draft b-A - f54f1bb90ff3
178 | o 2 draft b-A - f54f1bb90ff3
179 |/
179 |/
180 o 1 draft a-B - 548a3d25dbf0
180 o 1 draft a-B - 548a3d25dbf0
181 |
181 |
182 o 0 draft a-A - 054250a37db4
182 o 0 draft a-A - 054250a37db4
183
183
184 $ cd ..
184 $ cd ..
185
185
186 Pulling from publish=False to publish=False does not move boundary.
186 Pulling from publish=False to publish=False does not move boundary.
187
187
188 $ hg init nu
188 $ hg init nu
189 $ cd nu
189 $ cd nu
190 $ cat > .hg/hgrc << EOF
190 $ cat > .hg/hgrc << EOF
191 > [phases]
191 > [phases]
192 > publish=0
192 > publish=0
193 > EOF
193 > EOF
194 $ hg pull ../mu -r 54acac6f23ab
194 $ hg pull ../mu -r 54acac6f23ab
195 pulling from ../mu
195 pulling from ../mu
196 adding changesets
196 adding changesets
197 adding manifests
197 adding manifests
198 adding file changes
198 adding file changes
199 added 3 changesets with 3 changes to 3 files
199 added 3 changesets with 3 changes to 3 files
200 (run 'hg update' to get a working copy)
200 (run 'hg update' to get a working copy)
201 $ hgph
201 $ hgph
202 o 2 draft a-C - 54acac6f23ab
202 o 2 draft a-C - 54acac6f23ab
203 |
203 |
204 o 1 draft a-B - 548a3d25dbf0
204 o 1 draft a-B - 548a3d25dbf0
205 |
205 |
206 o 0 draft a-A - 054250a37db4
206 o 0 draft a-A - 054250a37db4
207
207
208
208
209 Even for common
209 Even for common
210
210
211 $ hg pull ../mu -r f54f1bb90ff3
211 $ hg pull ../mu -r f54f1bb90ff3
212 pulling from ../mu
212 pulling from ../mu
213 searching for changes
213 searching for changes
214 adding changesets
214 adding changesets
215 adding manifests
215 adding manifests
216 adding file changes
216 adding file changes
217 added 1 changesets with 1 changes to 1 files (+1 heads)
217 added 1 changesets with 1 changes to 1 files (+1 heads)
218 (run 'hg heads' to see heads, 'hg merge' to merge)
218 (run 'hg heads' to see heads, 'hg merge' to merge)
219 $ hgph
219 $ hgph
220 o 3 draft b-A - f54f1bb90ff3
220 o 3 draft b-A - f54f1bb90ff3
221 |
221 |
222 | o 2 draft a-C - 54acac6f23ab
222 | o 2 draft a-C - 54acac6f23ab
223 |/
223 |/
224 o 1 draft a-B - 548a3d25dbf0
224 o 1 draft a-B - 548a3d25dbf0
225 |
225 |
226 o 0 draft a-A - 054250a37db4
226 o 0 draft a-A - 054250a37db4
227
227
228
228
229
229
230 Pulling from Publish=True to Publish=False move boundary in common set.
230 Pulling from Publish=True to Publish=False move boundary in common set.
231 we are in nu
231 we are in nu
232
232
233 $ hg pull ../alpha -r b555f63b6063
233 $ hg pull ../alpha -r b555f63b6063
234 pulling from ../alpha
234 pulling from ../alpha
235 searching for changes
235 searching for changes
236 adding changesets
236 adding changesets
237 adding manifests
237 adding manifests
238 adding file changes
238 adding file changes
239 added 1 changesets with 1 changes to 1 files
239 added 1 changesets with 1 changes to 1 files
240 (run 'hg update' to get a working copy)
240 (run 'hg update' to get a working copy)
241 $ hgph
241 $ hgph
242 o 4 public a-D - b555f63b6063
242 o 4 public a-D - b555f63b6063
243 |
243 |
244 | o 3 public b-A - f54f1bb90ff3
244 | o 3 public b-A - f54f1bb90ff3
245 | |
245 | |
246 o | 2 public a-C - 54acac6f23ab
246 o | 2 public a-C - 54acac6f23ab
247 |/
247 |/
248 o 1 public a-B - 548a3d25dbf0
248 o 1 public a-B - 548a3d25dbf0
249 |
249 |
250 o 0 public a-A - 054250a37db4
250 o 0 public a-A - 054250a37db4
251
251
252
252
253 pulling from Publish=False to publish=False with some public
253 pulling from Publish=False to publish=False with some public
254
254
255 $ hg up -q f54f1bb90ff3
255 $ hg up -q f54f1bb90ff3
256 $ mkcommit n-A
256 $ mkcommit n-A
257 $ mkcommit n-B
257 $ mkcommit n-B
258 $ hgph
258 $ hgph
259 @ 6 draft n-B - 145e75495359
259 @ 6 draft n-B - 145e75495359
260 |
260 |
261 o 5 draft n-A - d6bcb4f74035
261 o 5 draft n-A - d6bcb4f74035
262 |
262 |
263 | o 4 public a-D - b555f63b6063
263 | o 4 public a-D - b555f63b6063
264 | |
264 | |
265 o | 3 public b-A - f54f1bb90ff3
265 o | 3 public b-A - f54f1bb90ff3
266 | |
266 | |
267 | o 2 public a-C - 54acac6f23ab
267 | o 2 public a-C - 54acac6f23ab
268 |/
268 |/
269 o 1 public a-B - 548a3d25dbf0
269 o 1 public a-B - 548a3d25dbf0
270 |
270 |
271 o 0 public a-A - 054250a37db4
271 o 0 public a-A - 054250a37db4
272
272
273 $ cd ../mu
273 $ cd ../mu
274 $ hg pull ../nu
274 $ hg pull ../nu
275 pulling from ../nu
275 pulling from ../nu
276 searching for changes
276 searching for changes
277 adding changesets
277 adding changesets
278 adding manifests
278 adding manifests
279 adding file changes
279 adding file changes
280 added 2 changesets with 2 changes to 2 files
280 added 2 changesets with 2 changes to 2 files
281 (run 'hg update' to get a working copy)
281 (run 'hg update' to get a working copy)
282 $ hgph
282 $ hgph
283 o 6 draft n-B - 145e75495359
283 o 6 draft n-B - 145e75495359
284 |
284 |
285 o 5 draft n-A - d6bcb4f74035
285 o 5 draft n-A - d6bcb4f74035
286 |
286 |
287 | o 4 public a-D - b555f63b6063
287 | o 4 public a-D - b555f63b6063
288 | |
288 | |
289 | o 3 public a-C - 54acac6f23ab
289 | o 3 public a-C - 54acac6f23ab
290 | |
290 | |
291 o | 2 public b-A - f54f1bb90ff3
291 o | 2 public b-A - f54f1bb90ff3
292 |/
292 |/
293 o 1 public a-B - 548a3d25dbf0
293 o 1 public a-B - 548a3d25dbf0
294 |
294 |
295 o 0 public a-A - 054250a37db4
295 o 0 public a-A - 054250a37db4
296
296
297 $ cd ..
297 $ cd ..
298
298
299 pulling into publish=True
299 pulling into publish=True
300
300
301 $ cd alpha
301 $ cd alpha
302 $ hgph
302 $ hgph
303 o 4 public b-A - f54f1bb90ff3
303 o 4 public b-A - f54f1bb90ff3
304 |
304 |
305 | @ 3 public a-D - b555f63b6063
305 | @ 3 public a-D - b555f63b6063
306 | |
306 | |
307 | o 2 public a-C - 54acac6f23ab
307 | o 2 public a-C - 54acac6f23ab
308 |/
308 |/
309 o 1 public a-B - 548a3d25dbf0
309 o 1 public a-B - 548a3d25dbf0
310 |
310 |
311 o 0 public a-A - 054250a37db4
311 o 0 public a-A - 054250a37db4
312
312
313 $ hg pull ../mu
313 $ hg pull ../mu
314 pulling from ../mu
314 pulling from ../mu
315 searching for changes
315 searching for changes
316 adding changesets
316 adding changesets
317 adding manifests
317 adding manifests
318 adding file changes
318 adding file changes
319 added 2 changesets with 2 changes to 2 files
319 added 2 changesets with 2 changes to 2 files
320 (run 'hg update' to get a working copy)
320 (run 'hg update' to get a working copy)
321 $ hgph
321 $ hgph
322 o 6 draft n-B - 145e75495359
322 o 6 draft n-B - 145e75495359
323 |
323 |
324 o 5 draft n-A - d6bcb4f74035
324 o 5 draft n-A - d6bcb4f74035
325 |
325 |
326 o 4 public b-A - f54f1bb90ff3
326 o 4 public b-A - f54f1bb90ff3
327 |
327 |
328 | @ 3 public a-D - b555f63b6063
328 | @ 3 public a-D - b555f63b6063
329 | |
329 | |
330 | o 2 public a-C - 54acac6f23ab
330 | o 2 public a-C - 54acac6f23ab
331 |/
331 |/
332 o 1 public a-B - 548a3d25dbf0
332 o 1 public a-B - 548a3d25dbf0
333 |
333 |
334 o 0 public a-A - 054250a37db4
334 o 0 public a-A - 054250a37db4
335
335
336 $ cd ..
336 $ cd ..
337
337
338 pulling back into original repo
338 pulling back into original repo
339
339
340 $ cd nu
340 $ cd nu
341 $ hg pull ../alpha
341 $ hg pull ../alpha
342 pulling from ../alpha
342 pulling from ../alpha
343 searching for changes
343 searching for changes
344 no changes found
344 no changes found
345 $ hgph
345 $ hgph
346 @ 6 public n-B - 145e75495359
346 @ 6 public n-B - 145e75495359
347 |
347 |
348 o 5 public n-A - d6bcb4f74035
348 o 5 public n-A - d6bcb4f74035
349 |
349 |
350 | o 4 public a-D - b555f63b6063
350 | o 4 public a-D - b555f63b6063
351 | |
351 | |
352 o | 3 public b-A - f54f1bb90ff3
352 o | 3 public b-A - f54f1bb90ff3
353 | |
353 | |
354 | o 2 public a-C - 54acac6f23ab
354 | o 2 public a-C - 54acac6f23ab
355 |/
355 |/
356 o 1 public a-B - 548a3d25dbf0
356 o 1 public a-B - 548a3d25dbf0
357 |
357 |
358 o 0 public a-A - 054250a37db4
358 o 0 public a-A - 054250a37db4
359
359
360
360
361 Push
361 Push
362 ````
362 ````
363
363
364 (inserted)
364 (inserted)
365
365
366 Test that phase are pushed even when they are nothing to pus
366 Test that phase are pushed even when they are nothing to pus
367 (this might be tested later bu are very convenient to not alter too much test)
367 (this might be tested later bu are very convenient to not alter too much test)
368
368
369 Push back to alpha
369 Push back to alpha
370
370
371 $ hg push ../alpha # from nu
371 $ hg push ../alpha # from nu
372 pushing to ../alpha
372 pushing to ../alpha
373 searching for changes
373 searching for changes
374 no changes found
374 no changes found
375 $ cd ..
375 $ cd ..
376 $ cd alpha
376 $ cd alpha
377 $ hgph
377 $ hgph
378 o 6 public n-B - 145e75495359
378 o 6 public n-B - 145e75495359
379 |
379 |
380 o 5 public n-A - d6bcb4f74035
380 o 5 public n-A - d6bcb4f74035
381 |
381 |
382 o 4 public b-A - f54f1bb90ff3
382 o 4 public b-A - f54f1bb90ff3
383 |
383 |
384 | @ 3 public a-D - b555f63b6063
384 | @ 3 public a-D - b555f63b6063
385 | |
385 | |
386 | o 2 public a-C - 54acac6f23ab
386 | o 2 public a-C - 54acac6f23ab
387 |/
387 |/
388 o 1 public a-B - 548a3d25dbf0
388 o 1 public a-B - 548a3d25dbf0
389 |
389 |
390 o 0 public a-A - 054250a37db4
390 o 0 public a-A - 054250a37db4
391
391
392
392
393 (end insertion)
393 (end insertion)
394
394
395
395
396 initial setup
396 initial setup
397
397
398 $ hg glog # of alpha
398 $ hg glog # of alpha
399 o changeset: 6:145e75495359
399 o changeset: 6:145e75495359
400 | tag: tip
400 | tag: tip
401 | user: test
401 | user: test
402 | date: Thu Jan 01 00:00:00 1970 +0000
402 | date: Thu Jan 01 00:00:00 1970 +0000
403 | summary: n-B
403 | summary: n-B
404 |
404 |
405 o changeset: 5:d6bcb4f74035
405 o changeset: 5:d6bcb4f74035
406 | user: test
406 | user: test
407 | date: Thu Jan 01 00:00:00 1970 +0000
407 | date: Thu Jan 01 00:00:00 1970 +0000
408 | summary: n-A
408 | summary: n-A
409 |
409 |
410 o changeset: 4:f54f1bb90ff3
410 o changeset: 4:f54f1bb90ff3
411 | parent: 1:548a3d25dbf0
411 | parent: 1:548a3d25dbf0
412 | user: test
412 | user: test
413 | date: Thu Jan 01 00:00:00 1970 +0000
413 | date: Thu Jan 01 00:00:00 1970 +0000
414 | summary: b-A
414 | summary: b-A
415 |
415 |
416 | @ changeset: 3:b555f63b6063
416 | @ changeset: 3:b555f63b6063
417 | | user: test
417 | | user: test
418 | | date: Thu Jan 01 00:00:00 1970 +0000
418 | | date: Thu Jan 01 00:00:00 1970 +0000
419 | | summary: a-D
419 | | summary: a-D
420 | |
420 | |
421 | o changeset: 2:54acac6f23ab
421 | o changeset: 2:54acac6f23ab
422 |/ user: test
422 |/ user: test
423 | date: Thu Jan 01 00:00:00 1970 +0000
423 | date: Thu Jan 01 00:00:00 1970 +0000
424 | summary: a-C
424 | summary: a-C
425 |
425 |
426 o changeset: 1:548a3d25dbf0
426 o changeset: 1:548a3d25dbf0
427 | user: test
427 | user: test
428 | date: Thu Jan 01 00:00:00 1970 +0000
428 | date: Thu Jan 01 00:00:00 1970 +0000
429 | summary: a-B
429 | summary: a-B
430 |
430 |
431 o changeset: 0:054250a37db4
431 o changeset: 0:054250a37db4
432 user: test
432 user: test
433 date: Thu Jan 01 00:00:00 1970 +0000
433 date: Thu Jan 01 00:00:00 1970 +0000
434 summary: a-A
434 summary: a-A
435
435
436 $ mkcommit a-E
436 $ mkcommit a-E
437 $ mkcommit a-F
437 $ mkcommit a-F
438 $ mkcommit a-G
438 $ mkcommit a-G
439 $ hg up d6bcb4f74035 -q
439 $ hg up d6bcb4f74035 -q
440 $ mkcommit a-H
440 $ mkcommit a-H
441 created new head
441 created new head
442 $ hgph
442 $ hgph
443 @ 10 draft a-H - 967b449fbc94
443 @ 10 draft a-H - 967b449fbc94
444 |
444 |
445 | o 9 draft a-G - 3e27b6f1eee1
445 | o 9 draft a-G - 3e27b6f1eee1
446 | |
446 | |
447 | o 8 draft a-F - b740e3e5c05d
447 | o 8 draft a-F - b740e3e5c05d
448 | |
448 | |
449 | o 7 draft a-E - e9f537e46dea
449 | o 7 draft a-E - e9f537e46dea
450 | |
450 | |
451 +---o 6 public n-B - 145e75495359
451 +---o 6 public n-B - 145e75495359
452 | |
452 | |
453 o | 5 public n-A - d6bcb4f74035
453 o | 5 public n-A - d6bcb4f74035
454 | |
454 | |
455 o | 4 public b-A - f54f1bb90ff3
455 o | 4 public b-A - f54f1bb90ff3
456 | |
456 | |
457 | o 3 public a-D - b555f63b6063
457 | o 3 public a-D - b555f63b6063
458 | |
458 | |
459 | o 2 public a-C - 54acac6f23ab
459 | o 2 public a-C - 54acac6f23ab
460 |/
460 |/
461 o 1 public a-B - 548a3d25dbf0
461 o 1 public a-B - 548a3d25dbf0
462 |
462 |
463 o 0 public a-A - 054250a37db4
463 o 0 public a-A - 054250a37db4
464
464
465
465
466 Pushing to Publish=False (unknown changeset)
466 Pushing to Publish=False (unknown changeset)
467
467
468 $ hg push ../mu -r b740e3e5c05d # a-F
468 $ hg push ../mu -r b740e3e5c05d # a-F
469 pushing to ../mu
469 pushing to ../mu
470 searching for changes
470 searching for changes
471 adding changesets
471 adding changesets
472 adding manifests
472 adding manifests
473 adding file changes
473 adding file changes
474 added 2 changesets with 2 changes to 2 files
474 added 2 changesets with 2 changes to 2 files
475 $ hgph
475 $ hgph
476 @ 10 draft a-H - 967b449fbc94
476 @ 10 draft a-H - 967b449fbc94
477 |
477 |
478 | o 9 draft a-G - 3e27b6f1eee1
478 | o 9 draft a-G - 3e27b6f1eee1
479 | |
479 | |
480 | o 8 draft a-F - b740e3e5c05d
480 | o 8 draft a-F - b740e3e5c05d
481 | |
481 | |
482 | o 7 draft a-E - e9f537e46dea
482 | o 7 draft a-E - e9f537e46dea
483 | |
483 | |
484 +---o 6 public n-B - 145e75495359
484 +---o 6 public n-B - 145e75495359
485 | |
485 | |
486 o | 5 public n-A - d6bcb4f74035
486 o | 5 public n-A - d6bcb4f74035
487 | |
487 | |
488 o | 4 public b-A - f54f1bb90ff3
488 o | 4 public b-A - f54f1bb90ff3
489 | |
489 | |
490 | o 3 public a-D - b555f63b6063
490 | o 3 public a-D - b555f63b6063
491 | |
491 | |
492 | o 2 public a-C - 54acac6f23ab
492 | o 2 public a-C - 54acac6f23ab
493 |/
493 |/
494 o 1 public a-B - 548a3d25dbf0
494 o 1 public a-B - 548a3d25dbf0
495 |
495 |
496 o 0 public a-A - 054250a37db4
496 o 0 public a-A - 054250a37db4
497
497
498
498
499 $ cd ../mu
499 $ cd ../mu
500 $ hgph # d6bcb4f74035 and 145e75495359 changed because common is too smart
500 $ hgph # d6bcb4f74035 and 145e75495359 changed because common is too smart
501 o 8 draft a-F - b740e3e5c05d
501 o 8 draft a-F - b740e3e5c05d
502 |
502 |
503 o 7 draft a-E - e9f537e46dea
503 o 7 draft a-E - e9f537e46dea
504 |
504 |
505 | o 6 public n-B - 145e75495359
505 | o 6 public n-B - 145e75495359
506 | |
506 | |
507 | o 5 public n-A - d6bcb4f74035
507 | o 5 public n-A - d6bcb4f74035
508 | |
508 | |
509 o | 4 public a-D - b555f63b6063
509 o | 4 public a-D - b555f63b6063
510 | |
510 | |
511 o | 3 public a-C - 54acac6f23ab
511 o | 3 public a-C - 54acac6f23ab
512 | |
512 | |
513 | o 2 public b-A - f54f1bb90ff3
513 | o 2 public b-A - f54f1bb90ff3
514 |/
514 |/
515 o 1 public a-B - 548a3d25dbf0
515 o 1 public a-B - 548a3d25dbf0
516 |
516 |
517 o 0 public a-A - 054250a37db4
517 o 0 public a-A - 054250a37db4
518
518
519
519
520 Pushing to Publish=True (unknown changeset)
520 Pushing to Publish=True (unknown changeset)
521
521
522 $ hg push ../beta -r b740e3e5c05d
522 $ hg push ../beta -r b740e3e5c05d
523 pushing to ../beta
523 pushing to ../beta
524 searching for changes
524 searching for changes
525 adding changesets
525 adding changesets
526 adding manifests
526 adding manifests
527 adding file changes
527 adding file changes
528 added 2 changesets with 2 changes to 2 files
528 added 2 changesets with 2 changes to 2 files
529 $ hgph # again d6bcb4f74035 and 145e75495359 changed because common is too smart
529 $ hgph # again d6bcb4f74035 and 145e75495359 changed because common is too smart
530 o 8 public a-F - b740e3e5c05d
530 o 8 public a-F - b740e3e5c05d
531 |
531 |
532 o 7 public a-E - e9f537e46dea
532 o 7 public a-E - e9f537e46dea
533 |
533 |
534 | o 6 public n-B - 145e75495359
534 | o 6 public n-B - 145e75495359
535 | |
535 | |
536 | o 5 public n-A - d6bcb4f74035
536 | o 5 public n-A - d6bcb4f74035
537 | |
537 | |
538 o | 4 public a-D - b555f63b6063
538 o | 4 public a-D - b555f63b6063
539 | |
539 | |
540 o | 3 public a-C - 54acac6f23ab
540 o | 3 public a-C - 54acac6f23ab
541 | |
541 | |
542 | o 2 public b-A - f54f1bb90ff3
542 | o 2 public b-A - f54f1bb90ff3
543 |/
543 |/
544 o 1 public a-B - 548a3d25dbf0
544 o 1 public a-B - 548a3d25dbf0
545 |
545 |
546 o 0 public a-A - 054250a37db4
546 o 0 public a-A - 054250a37db4
547
547
548
548
549 Pushing to Publish=True (common changeset)
549 Pushing to Publish=True (common changeset)
550
550
551 $ cd ../beta
551 $ cd ../beta
552 $ hg push ../alpha
552 $ hg push ../alpha
553 pushing to ../alpha
553 pushing to ../alpha
554 searching for changes
554 searching for changes
555 no changes found
555 no changes found
556 $ hgph
556 $ hgph
557 o 6 public a-F - b740e3e5c05d
557 o 6 public a-F - b740e3e5c05d
558 |
558 |
559 o 5 public a-E - e9f537e46dea
559 o 5 public a-E - e9f537e46dea
560 |
560 |
561 o 4 public a-D - b555f63b6063
561 o 4 public a-D - b555f63b6063
562 |
562 |
563 o 3 public a-C - 54acac6f23ab
563 o 3 public a-C - 54acac6f23ab
564 |
564 |
565 | @ 2 public b-A - f54f1bb90ff3
565 | @ 2 public b-A - f54f1bb90ff3
566 |/
566 |/
567 o 1 public a-B - 548a3d25dbf0
567 o 1 public a-B - 548a3d25dbf0
568 |
568 |
569 o 0 public a-A - 054250a37db4
569 o 0 public a-A - 054250a37db4
570
570
571 $ cd ../alpha
571 $ cd ../alpha
572 $ hgph # e9f537e46dea and b740e3e5c05d should have been sync to 0
572 $ hgph # e9f537e46dea and b740e3e5c05d should have been sync to 0
573 @ 10 draft a-H - 967b449fbc94
573 @ 10 draft a-H - 967b449fbc94
574 |
574 |
575 | o 9 draft a-G - 3e27b6f1eee1
575 | o 9 draft a-G - 3e27b6f1eee1
576 | |
576 | |
577 | o 8 public a-F - b740e3e5c05d
577 | o 8 public a-F - b740e3e5c05d
578 | |
578 | |
579 | o 7 public a-E - e9f537e46dea
579 | o 7 public a-E - e9f537e46dea
580 | |
580 | |
581 +---o 6 public n-B - 145e75495359
581 +---o 6 public n-B - 145e75495359
582 | |
582 | |
583 o | 5 public n-A - d6bcb4f74035
583 o | 5 public n-A - d6bcb4f74035
584 | |
584 | |
585 o | 4 public b-A - f54f1bb90ff3
585 o | 4 public b-A - f54f1bb90ff3
586 | |
586 | |
587 | o 3 public a-D - b555f63b6063
587 | o 3 public a-D - b555f63b6063
588 | |
588 | |
589 | o 2 public a-C - 54acac6f23ab
589 | o 2 public a-C - 54acac6f23ab
590 |/
590 |/
591 o 1 public a-B - 548a3d25dbf0
591 o 1 public a-B - 548a3d25dbf0
592 |
592 |
593 o 0 public a-A - 054250a37db4
593 o 0 public a-A - 054250a37db4
594
594
595
595
596 Pushing to Publish=False (common changeset that change phase + unknown one)
596 Pushing to Publish=False (common changeset that change phase + unknown one)
597
597
598 $ hg push ../mu -r 967b449fbc94 -f
598 $ hg push ../mu -r 967b449fbc94 -f
599 pushing to ../mu
599 pushing to ../mu
600 searching for changes
600 searching for changes
601 adding changesets
601 adding changesets
602 adding manifests
602 adding manifests
603 adding file changes
603 adding file changes
604 added 1 changesets with 1 changes to 1 files (+1 heads)
604 added 1 changesets with 1 changes to 1 files (+1 heads)
605 $ hgph
605 $ hgph
606 @ 10 draft a-H - 967b449fbc94
606 @ 10 draft a-H - 967b449fbc94
607 |
607 |
608 | o 9 draft a-G - 3e27b6f1eee1
608 | o 9 draft a-G - 3e27b6f1eee1
609 | |
609 | |
610 | o 8 public a-F - b740e3e5c05d
610 | o 8 public a-F - b740e3e5c05d
611 | |
611 | |
612 | o 7 public a-E - e9f537e46dea
612 | o 7 public a-E - e9f537e46dea
613 | |
613 | |
614 +---o 6 public n-B - 145e75495359
614 +---o 6 public n-B - 145e75495359
615 | |
615 | |
616 o | 5 public n-A - d6bcb4f74035
616 o | 5 public n-A - d6bcb4f74035
617 | |
617 | |
618 o | 4 public b-A - f54f1bb90ff3
618 o | 4 public b-A - f54f1bb90ff3
619 | |
619 | |
620 | o 3 public a-D - b555f63b6063
620 | o 3 public a-D - b555f63b6063
621 | |
621 | |
622 | o 2 public a-C - 54acac6f23ab
622 | o 2 public a-C - 54acac6f23ab
623 |/
623 |/
624 o 1 public a-B - 548a3d25dbf0
624 o 1 public a-B - 548a3d25dbf0
625 |
625 |
626 o 0 public a-A - 054250a37db4
626 o 0 public a-A - 054250a37db4
627
627
628 $ cd ../mu
628 $ cd ../mu
629 $ hgph # d6bcb4f74035 should have changed phase
629 $ hgph # d6bcb4f74035 should have changed phase
630 > # again d6bcb4f74035 and 145e75495359 changed because common was too smart
630 > # again d6bcb4f74035 and 145e75495359 changed because common was too smart
631 o 9 draft a-H - 967b449fbc94
631 o 9 draft a-H - 967b449fbc94
632 |
632 |
633 | o 8 public a-F - b740e3e5c05d
633 | o 8 public a-F - b740e3e5c05d
634 | |
634 | |
635 | o 7 public a-E - e9f537e46dea
635 | o 7 public a-E - e9f537e46dea
636 | |
636 | |
637 +---o 6 public n-B - 145e75495359
637 +---o 6 public n-B - 145e75495359
638 | |
638 | |
639 o | 5 public n-A - d6bcb4f74035
639 o | 5 public n-A - d6bcb4f74035
640 | |
640 | |
641 | o 4 public a-D - b555f63b6063
641 | o 4 public a-D - b555f63b6063
642 | |
642 | |
643 | o 3 public a-C - 54acac6f23ab
643 | o 3 public a-C - 54acac6f23ab
644 | |
644 | |
645 o | 2 public b-A - f54f1bb90ff3
645 o | 2 public b-A - f54f1bb90ff3
646 |/
646 |/
647 o 1 public a-B - 548a3d25dbf0
647 o 1 public a-B - 548a3d25dbf0
648 |
648 |
649 o 0 public a-A - 054250a37db4
649 o 0 public a-A - 054250a37db4
650
650
651
651
652
652
653 Pushing to Publish=True (common changeset from publish=False)
653 Pushing to Publish=True (common changeset from publish=False)
654
654
655 (in mu)
655 (in mu)
656 $ hg push ../alpha
656 $ hg push ../alpha
657 pushing to ../alpha
657 pushing to ../alpha
658 searching for changes
658 searching for changes
659 no changes found
659 no changes found
660 $ hgph
660 $ hgph
661 o 9 public a-H - 967b449fbc94
661 o 9 public a-H - 967b449fbc94
662 |
662 |
663 | o 8 public a-F - b740e3e5c05d
663 | o 8 public a-F - b740e3e5c05d
664 | |
664 | |
665 | o 7 public a-E - e9f537e46dea
665 | o 7 public a-E - e9f537e46dea
666 | |
666 | |
667 +---o 6 public n-B - 145e75495359
667 +---o 6 public n-B - 145e75495359
668 | |
668 | |
669 o | 5 public n-A - d6bcb4f74035
669 o | 5 public n-A - d6bcb4f74035
670 | |
670 | |
671 | o 4 public a-D - b555f63b6063
671 | o 4 public a-D - b555f63b6063
672 | |
672 | |
673 | o 3 public a-C - 54acac6f23ab
673 | o 3 public a-C - 54acac6f23ab
674 | |
674 | |
675 o | 2 public b-A - f54f1bb90ff3
675 o | 2 public b-A - f54f1bb90ff3
676 |/
676 |/
677 o 1 public a-B - 548a3d25dbf0
677 o 1 public a-B - 548a3d25dbf0
678 |
678 |
679 o 0 public a-A - 054250a37db4
679 o 0 public a-A - 054250a37db4
680
680
681 $ hgph -R ../alpha # a-H should have been synced to 0
681 $ hgph -R ../alpha # a-H should have been synced to 0
682 @ 10 public a-H - 967b449fbc94
682 @ 10 public a-H - 967b449fbc94
683 |
683 |
684 | o 9 draft a-G - 3e27b6f1eee1
684 | o 9 draft a-G - 3e27b6f1eee1
685 | |
685 | |
686 | o 8 public a-F - b740e3e5c05d
686 | o 8 public a-F - b740e3e5c05d
687 | |
687 | |
688 | o 7 public a-E - e9f537e46dea
688 | o 7 public a-E - e9f537e46dea
689 | |
689 | |
690 +---o 6 public n-B - 145e75495359
690 +---o 6 public n-B - 145e75495359
691 | |
691 | |
692 o | 5 public n-A - d6bcb4f74035
692 o | 5 public n-A - d6bcb4f74035
693 | |
693 | |
694 o | 4 public b-A - f54f1bb90ff3
694 o | 4 public b-A - f54f1bb90ff3
695 | |
695 | |
696 | o 3 public a-D - b555f63b6063
696 | o 3 public a-D - b555f63b6063
697 | |
697 | |
698 | o 2 public a-C - 54acac6f23ab
698 | o 2 public a-C - 54acac6f23ab
699 |/
699 |/
700 o 1 public a-B - 548a3d25dbf0
700 o 1 public a-B - 548a3d25dbf0
701 |
701 |
702 o 0 public a-A - 054250a37db4
702 o 0 public a-A - 054250a37db4
703
703
704
704
705
705
706 Discovery locally secret changeset on a remote repository:
706 Discovery locally secret changeset on a remote repository:
707
707
708 - should make it non-secret
708 - should make it non-secret
709
709
710 $ cd ../alpha
710 $ cd ../alpha
711 $ mkcommit A-secret --config phases.new-commit=2
711 $ mkcommit A-secret --config phases.new-commit=2
712 $ hgph
712 $ hgph
713 @ 11 secret A-secret - 435b5d83910c
713 @ 11 secret A-secret - 435b5d83910c
714 |
714 |
715 o 10 public a-H - 967b449fbc94
715 o 10 public a-H - 967b449fbc94
716 |
716 |
717 | o 9 draft a-G - 3e27b6f1eee1
717 | o 9 draft a-G - 3e27b6f1eee1
718 | |
718 | |
719 | o 8 public a-F - b740e3e5c05d
719 | o 8 public a-F - b740e3e5c05d
720 | |
720 | |
721 | o 7 public a-E - e9f537e46dea
721 | o 7 public a-E - e9f537e46dea
722 | |
722 | |
723 +---o 6 public n-B - 145e75495359
723 +---o 6 public n-B - 145e75495359
724 | |
724 | |
725 o | 5 public n-A - d6bcb4f74035
725 o | 5 public n-A - d6bcb4f74035
726 | |
726 | |
727 o | 4 public b-A - f54f1bb90ff3
727 o | 4 public b-A - f54f1bb90ff3
728 | |
728 | |
729 | o 3 public a-D - b555f63b6063
729 | o 3 public a-D - b555f63b6063
730 | |
730 | |
731 | o 2 public a-C - 54acac6f23ab
731 | o 2 public a-C - 54acac6f23ab
732 |/
732 |/
733 o 1 public a-B - 548a3d25dbf0
733 o 1 public a-B - 548a3d25dbf0
734 |
734 |
735 o 0 public a-A - 054250a37db4
735 o 0 public a-A - 054250a37db4
736
736
737 $ hg bundle --base 'parents(.)' -r . ../secret-bundle.hg
737 $ hg bundle --base 'parents(.)' -r . ../secret-bundle.hg
738 1 changesets found
738 1 changesets found
739 $ hg -R ../mu unbundle ../secret-bundle.hg
739 $ hg -R ../mu unbundle ../secret-bundle.hg
740 adding changesets
740 adding changesets
741 adding manifests
741 adding manifests
742 adding file changes
742 adding file changes
743 added 1 changesets with 1 changes to 1 files
743 added 1 changesets with 1 changes to 1 files
744 (run 'hg update' to get a working copy)
744 (run 'hg update' to get a working copy)
745 $ hgph -R ../mu
745 $ hgph -R ../mu
746 o 10 draft A-secret - 435b5d83910c
746 o 10 draft A-secret - 435b5d83910c
747 |
747 |
748 o 9 public a-H - 967b449fbc94
748 o 9 public a-H - 967b449fbc94
749 |
749 |
750 | o 8 public a-F - b740e3e5c05d
750 | o 8 public a-F - b740e3e5c05d
751 | |
751 | |
752 | o 7 public a-E - e9f537e46dea
752 | o 7 public a-E - e9f537e46dea
753 | |
753 | |
754 +---o 6 public n-B - 145e75495359
754 +---o 6 public n-B - 145e75495359
755 | |
755 | |
756 o | 5 public n-A - d6bcb4f74035
756 o | 5 public n-A - d6bcb4f74035
757 | |
757 | |
758 | o 4 public a-D - b555f63b6063
758 | o 4 public a-D - b555f63b6063
759 | |
759 | |
760 | o 3 public a-C - 54acac6f23ab
760 | o 3 public a-C - 54acac6f23ab
761 | |
761 | |
762 o | 2 public b-A - f54f1bb90ff3
762 o | 2 public b-A - f54f1bb90ff3
763 |/
763 |/
764 o 1 public a-B - 548a3d25dbf0
764 o 1 public a-B - 548a3d25dbf0
765 |
765 |
766 o 0 public a-A - 054250a37db4
766 o 0 public a-A - 054250a37db4
767
767
768 $ hg pull ../mu
768 $ hg pull ../mu
769 pulling from ../mu
769 pulling from ../mu
770 searching for changes
770 searching for changes
771 no changes found
771 no changes found
772 $ hgph
772 $ hgph
773 @ 11 draft A-secret - 435b5d83910c
773 @ 11 draft A-secret - 435b5d83910c
774 |
774 |
775 o 10 public a-H - 967b449fbc94
775 o 10 public a-H - 967b449fbc94
776 |
776 |
777 | o 9 draft a-G - 3e27b6f1eee1
777 | o 9 draft a-G - 3e27b6f1eee1
778 | |
778 | |
779 | o 8 public a-F - b740e3e5c05d
779 | o 8 public a-F - b740e3e5c05d
780 | |
780 | |
781 | o 7 public a-E - e9f537e46dea
781 | o 7 public a-E - e9f537e46dea
782 | |
782 | |
783 +---o 6 public n-B - 145e75495359
783 +---o 6 public n-B - 145e75495359
784 | |
784 | |
785 o | 5 public n-A - d6bcb4f74035
785 o | 5 public n-A - d6bcb4f74035
786 | |
786 | |
787 o | 4 public b-A - f54f1bb90ff3
787 o | 4 public b-A - f54f1bb90ff3
788 | |
788 | |
789 | o 3 public a-D - b555f63b6063
789 | o 3 public a-D - b555f63b6063
790 | |
790 | |
791 | o 2 public a-C - 54acac6f23ab
791 | o 2 public a-C - 54acac6f23ab
792 |/
792 |/
793 o 1 public a-B - 548a3d25dbf0
793 o 1 public a-B - 548a3d25dbf0
794 |
794 |
795 o 0 public a-A - 054250a37db4
795 o 0 public a-A - 054250a37db4
796
796
797
798 pull new changeset with common draft locally
799
800 $ hg up -q 967b449fbc94 # create a new root for draft
801 $ mkcommit 'alpha-more'
802 created new head
803 $ hg push -fr . ../mu
804 pushing to ../mu
805 searching for changes
806 adding changesets
807 adding manifests
808 adding file changes
809 added 1 changesets with 1 changes to 1 files (+1 heads)
810 $ cd ../mu
811 $ hg phase --secret --force 1c5cfd894796
812 $ hg up -q 435b5d83910c
813 $ mkcommit 'mu-more'
814 $ cd ../alpha
815 $ hg pull ../mu
816 pulling from ../mu
817 searching for changes
818 adding changesets
819 adding manifests
820 adding file changes
821 added 1 changesets with 1 changes to 1 files
822 (run 'hg update' to get a working copy)
823 $ hgph
824 o 13 draft mu-more - 5237fb433fc8
825 |
826 | @ 12 draft alpha-more - 1c5cfd894796
827 | |
828 o | 11 draft A-secret - 435b5d83910c
829 |/
830 o 10 public a-H - 967b449fbc94
831 |
832 | o 9 draft a-G - 3e27b6f1eee1
833 | |
834 | o 8 public a-F - b740e3e5c05d
835 | |
836 | o 7 public a-E - e9f537e46dea
837 | |
838 +---o 6 public n-B - 145e75495359
839 | |
840 o | 5 public n-A - d6bcb4f74035
841 | |
842 o | 4 public b-A - f54f1bb90ff3
843 | |
844 | o 3 public a-D - b555f63b6063
845 | |
846 | o 2 public a-C - 54acac6f23ab
847 |/
848 o 1 public a-B - 548a3d25dbf0
849 |
850 o 0 public a-A - 054250a37db4
851
852
853 Test that test are properly ignored on remote event when existing locally
854
797 $ cd ..
855 $ cd ..
856 $ hg clone -qU -r b555f63b6063 -r f54f1bb90ff3 beta gamma
857
858 # pathological case are
859 #
860 # * secret remotely
861 # * known locally
862 # * repo have uncommon changeset
863
864 $ hg -R beta phase --secret --force f54f1bb90ff3
865 $ hg -R gamma phase --draft --force f54f1bb90ff3
866
867 $ cd gamma
868 $ hg pull ../beta
869 pulling from ../beta
870 searching for changes
871 adding changesets
872 adding manifests
873 adding file changes
874 added 2 changesets with 2 changes to 2 files
875 (run 'hg update' to get a working copy)
876 $ hg phase f54f1bb90ff3
877 2: draft
878
879 same over the wire
880
881 $ cd ../beta
882 $ hg serve -p $HGPORT -d --pid-file=../beta.pid -E ../beta-error.log
883 $ cat ../beta.pid >> $DAEMON_PIDS
884 $ cd ../gamma
885
886 $ hg pull http://localhost:$HGPORT/
887 pulling from http://localhost:$HGPORT/
888 searching for changes
889 no changes found
890 $ hg phase f54f1bb90ff3
891 2: draft
892
893 check that secret local on both side are not synced to public
894
895 $ hg push -r b555f63b6063 http://localhost:$HGPORT/
896 pushing to http://localhost:$HGPORT/
897 searching for changes
898 no changes found
899 $ hg phase f54f1bb90ff3
900 2: draft
901
902 put the changeset in the draft state again
903 (first test after this one expect to be able to copy)
904
905 $ cd ..
906
907
798 Test Clone behavior
908 Test Clone behavior
799
909
800 A. Clone without secret changeset
910 A. Clone without secret changeset
801
911
802 1. cloning non-publishing repository
912 1. cloning non-publishing repository
913 (Phase should be preserved)
803
914
804 (Phase should be preservedΒ°
915 # make sure there is no secret so we can use a copy clone
916
917 $ hg -R mu phase --draft 'secret()'
805
918
806 $ hg clone -U mu Tau
919 $ hg clone -U mu Tau
807 $ hgph -R Tau
920 $ hgph -R Tau
808 o 10 draft A-secret - 435b5d83910c
921 o 12 draft mu-more - 5237fb433fc8
809 |
922 |
923 | o 11 draft alpha-more - 1c5cfd894796
924 | |
925 o | 10 draft A-secret - 435b5d83910c
926 |/
810 o 9 public a-H - 967b449fbc94
927 o 9 public a-H - 967b449fbc94
811 |
928 |
812 | o 8 public a-F - b740e3e5c05d
929 | o 8 public a-F - b740e3e5c05d
813 | |
930 | |
814 | o 7 public a-E - e9f537e46dea
931 | o 7 public a-E - e9f537e46dea
815 | |
932 | |
816 +---o 6 public n-B - 145e75495359
933 +---o 6 public n-B - 145e75495359
817 | |
934 | |
818 o | 5 public n-A - d6bcb4f74035
935 o | 5 public n-A - d6bcb4f74035
819 | |
936 | |
820 | o 4 public a-D - b555f63b6063
937 | o 4 public a-D - b555f63b6063
821 | |
938 | |
822 | o 3 public a-C - 54acac6f23ab
939 | o 3 public a-C - 54acac6f23ab
823 | |
940 | |
824 o | 2 public b-A - f54f1bb90ff3
941 o | 2 public b-A - f54f1bb90ff3
825 |/
942 |/
826 o 1 public a-B - 548a3d25dbf0
943 o 1 public a-B - 548a3d25dbf0
827 |
944 |
828 o 0 public a-A - 054250a37db4
945 o 0 public a-A - 054250a37db4
829
946
830
947
831 2. cloning publishing repository
948 2. cloning publishing repository
832
949
833 (everything should be public)
950 (everything should be public)
834
951
835 $ hg clone -U alpha Upsilon
952 $ hg clone -U alpha Upsilon
836 $ hgph -R Upsilon
953 $ hgph -R Upsilon
837 o 11 public A-secret - 435b5d83910c
954 o 13 public mu-more - 5237fb433fc8
838 |
955 |
956 | o 12 public alpha-more - 1c5cfd894796
957 | |
958 o | 11 public A-secret - 435b5d83910c
959 |/
839 o 10 public a-H - 967b449fbc94
960 o 10 public a-H - 967b449fbc94
840 |
961 |
841 | o 9 public a-G - 3e27b6f1eee1
962 | o 9 public a-G - 3e27b6f1eee1
842 | |
963 | |
843 | o 8 public a-F - b740e3e5c05d
964 | o 8 public a-F - b740e3e5c05d
844 | |
965 | |
845 | o 7 public a-E - e9f537e46dea
966 | o 7 public a-E - e9f537e46dea
846 | |
967 | |
847 +---o 6 public n-B - 145e75495359
968 +---o 6 public n-B - 145e75495359
848 | |
969 | |
849 o | 5 public n-A - d6bcb4f74035
970 o | 5 public n-A - d6bcb4f74035
850 | |
971 | |
851 o | 4 public b-A - f54f1bb90ff3
972 o | 4 public b-A - f54f1bb90ff3
852 | |
973 | |
853 | o 3 public a-D - b555f63b6063
974 | o 3 public a-D - b555f63b6063
854 | |
975 | |
855 | o 2 public a-C - 54acac6f23ab
976 | o 2 public a-C - 54acac6f23ab
856 |/
977 |/
857 o 1 public a-B - 548a3d25dbf0
978 o 1 public a-B - 548a3d25dbf0
858 |
979 |
859 o 0 public a-A - 054250a37db4
980 o 0 public a-A - 054250a37db4
860
981
982
General Comments 0
You need to be logged in to leave comments. Login now