obsolete: introduction of obsolete markers...
Pierre-Yves David <pierre-yves.david@ens-lyon.org>
r17070:ad0d6c2b default
@@ -0,0 +1,175 @@
1 # obsolete.py - obsolete markers handling
2 #
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Logilab SA <contact@logilab.fr>
5 #
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
8
9 """Obsolete markers handling
10
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
15
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewriting operations, and to help
18 build new tools to reconcile conflicting rewriting actions. To
19 facilitate conflict resolution, markers include various annotations
20 besides the old and new changeset identifiers, such as creation date or
21 author name.
22
23
24 Format
25 ------
26
27 Markers are stored in an append-only file located at
28 '.hg/store/obsstore'.
29
30 The file starts with a version header:
31
32 - 1 unsigned byte: version number, starting at zero.
33
34
35 The header is followed by the markers. Each marker is made of:
36
37 - 1 unsigned byte: number of new changesets "N", which can be zero.
38
39 - 1 unsigned 32-bit integer: metadata size "M" in bytes.
40
41 - 1 byte: a bit field. It is reserved for flags used in common
42 obsolete marker operations, to avoid repeated decoding of metadata
43 entries.
44
45 - 20 bytes: obsoleted changeset identifier.
46
47 - N*20 bytes: new changeset identifiers.
48
49 - M bytes: metadata as a sequence of nul-terminated strings. Each
50 string contains a key and a value, separated by a colon ':', without
51 additional encoding. Keys cannot contain '\0' or ':' and values
52 cannot contain '\0'.
53 """
54 import struct
55 from mercurial import util
56 from i18n import _
57
58 _pack = struct.pack
59 _unpack = struct.unpack
60
61
62
63 # data used for parsing and writing
64 _fmversion = 0
65 _fmfixed = '>BIB20s' # number of successors (B), metadata size (I), flags (B), precursor node (20s)
66 _fmnode = '20s' # one 20-byte node identifier
67 _fmfsize = struct.calcsize(_fmfixed)
68 _fnodesize = struct.calcsize(_fmnode)
69
70 def _readmarkers(data):
71 """Read and enumerate markers from raw data"""
72 off = 0
73 diskversion = _unpack('>B', data[off:off + 1])[0]
74 off += 1
75 if diskversion != _fmversion:
76 raise util.Abort(_('parsing obsolete marker: unknown version %r')
77 % diskversion)
78
79 # Loop on markers
80 l = len(data)
81 while off + _fmfsize <= l:
82 # read fixed part
83 cur = data[off:off + _fmfsize]
84 off += _fmfsize
85 nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
86 # read replacement
87 sucs = ()
88 if nbsuc:
89 s = (_fnodesize * nbsuc)
90 cur = data[off:off + s]
91 sucs = _unpack(_fmnode * nbsuc, cur)
92 off += s
93 # read metadata
94 # (metadata will be decoded on demand)
95 metadata = data[off:off + mdsize]
96 if len(metadata) != mdsize:
97 raise util.Abort(_('parsing obsolete marker: metadata is too '
98 'short, %d bytes expected, got %d')
99 % (mdsize, len(metadata)))
100 off += mdsize
101 yield (pre, sucs, flags, metadata)
102
103 def encodemeta(meta):
104 """Return an encoded version of a string-to-string metadata mapping.
105
106 Assumes no ':' in keys and no '\0' in keys or values."""
107 for key, value in meta.iteritems():
108 if ':' in key or '\0' in key:
109 raise ValueError("':' and '\0' are forbidden in metadata keys")
110 if '\0' in value:
111 raise ValueError("'\0' is forbidden in metadata values")
112 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
113
114 def decodemeta(data):
115 """Return string to string dictionary from encoded version."""
116 d = {}
117 for l in data.split('\0'):
118 if l:
119 key, value = l.split(':')
120 d[key] = value
121 return d
122
123 class obsstore(object):
124 """Store obsolete markers
125
126 Markers can be accessed with two mappings:
127 - precursors: old node -> set(markers)
128 - successors: new node -> set(markers)
129 """
130
131 def __init__(self):
132 self._all = []
133 # new markers to serialize
134 self._new = []
135 self.precursors = {}
136 self.successors = {}
137
138 def add(self, marker):
139 """Add a new marker to the store
140
141 This marker still needs to be written to disk"""
142 self._new.append(marker)
143 self._load(marker)
144
145 def loadmarkers(self, data):
146 """Load all markers in data, mark them as known."""
147 for marker in _readmarkers(data):
148 self._load(marker)
149
150 def flushmarkers(self, stream):
151 """Write all markers to a stream
152
153 After this operation, "new" markers are considered "known"."""
154 self._writemarkers(stream)
155 self._new[:] = []
156
157 def _load(self, marker):
158 self._all.append(marker)
159 pre, sucs = marker[:2]
160 self.precursors.setdefault(pre, set()).add(marker)
161 for suc in sucs:
162 self.successors.setdefault(suc, set()).add(marker)
163
164 def _writemarkers(self, stream):
165 # Kept separate from flushmarkers(), it will be reused for
166 # marker exchange.
167 stream.write(_pack('>B', _fmversion))
168 for marker in self._all:
169 pre, sucs, flags, metadata = marker
170 nbsuc = len(sucs)
171 format = _fmfixed + (_fmnode * nbsuc)
172 data = [nbsuc, len(metadata), flags, pre]
173 data.extend(sucs)
174 stream.write(_pack(format, *data))
175 stream.write(metadata)
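
The hunk above defines a small, self-contained API, so a usage sketch may help. The following is illustrative only and not part of the changeset; it assumes the new module is importable as mercurial.obsolete and fabricates two 20-byte node identifiers.

    # Hypothetical round-trip through the obsstore API introduced above.
    import cStringIO
    from mercurial import obsolete

    old = '\x11' * 20    # fabricated id of the obsoleted changeset
    new = '\x22' * 20    # fabricated id of its successor
    meta = obsolete.encodemeta({'date': '0 0', 'user': 'alice'})

    store = obsolete.obsstore()
    # a marker is the tuple (precursor, successors, flags, metadata)
    store.add((old, (new,), 0, meta))

    # serialize as the '.hg/store/obsstore' file would be written...
    stream = cStringIO.StringIO()
    store.flushmarkers(stream)

    # ...then parse the raw bytes back into a fresh store
    other = obsolete.obsstore()
    other.loadmarkers(stream.getvalue())
    marker = other.precursors[old].pop()
    print obsolete.decodemeta(marker[3])   # {'date': '0 0', 'user': 'alice'}

The fixed-width prefix of every marker is 26 bytes, which follows directly from the _fmfixed format string:

    >>> import struct
    >>> struct.calcsize('>BIB20s')  # N (B) + metadata size (I) + flags (B) + precursor (20s)
    26

The bytes written by flushmarkers() here are exactly what the new obsstore property added to localrepo.py below reads back via self.sopener.tryread('obsstore').
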
@@ -1,2425 +1,2443 @@
1 # localrepo.py - read/write repository class for mercurial
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey, obsolete
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock, transaction, store, encoding
13 import scmutil, util, extensions, hook, error, revset
14 import match as matchmod
15 import merge as mergemod
16 import tags as tagsmod
17 from lock import release
18 import weakref, errno, os, time, inspect
19 propertycache = util.propertycache
20 filecache = scmutil.filecache
21
22 class storecache(filecache):
23 """filecache for files in the store"""
24 def join(self, obj, fname):
25 return obj.sjoin(fname)
26
27 class localrepository(repo.repository):
28 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
29 'known', 'getbundle'))
30 supportedformats = set(('revlogv1', 'generaldelta'))
31 supported = supportedformats | set(('store', 'fncache', 'shared',
32 'dotencode'))
33
34 def __init__(self, baseui, path=None, create=False):
35 repo.repository.__init__(self)
36 self.root = os.path.realpath(util.expandpath(path))
37 self.path = os.path.join(self.root, ".hg")
38 self.origroot = path
39 self.auditor = scmutil.pathauditor(self.root, self._checknested)
40 self.opener = scmutil.opener(self.path)
41 self.wopener = scmutil.opener(self.root)
42 self.baseui = baseui
43 self.ui = baseui.copy()
44 # A list of callbacks to shape the phase if no data were found.
45 # Callbacks are in the form: func(repo, roots) --> processed root.
46 # This list is to be filled by extensions during repo setup.
47 self._phasedefaults = []
48
49 try:
50 self.ui.readconfig(self.join("hgrc"), self.root)
51 extensions.loadall(self.ui)
52 except IOError:
53 pass
54
55 if not os.path.isdir(self.path):
56 if create:
57 if not os.path.exists(path):
58 util.makedirs(path)
59 util.makedir(self.path, notindexed=True)
60 requirements = ["revlogv1"]
61 if self.ui.configbool('format', 'usestore', True):
62 os.mkdir(os.path.join(self.path, "store"))
63 requirements.append("store")
64 if self.ui.configbool('format', 'usefncache', True):
65 requirements.append("fncache")
66 if self.ui.configbool('format', 'dotencode', True):
67 requirements.append('dotencode')
68 # create an invalid changelog
69 self.opener.append(
70 "00changelog.i",
71 '\0\0\0\2' # represents revlogv2
72 ' dummy changelog to prevent using the old repo layout'
73 )
74 if self.ui.configbool('format', 'generaldelta', False):
75 requirements.append("generaldelta")
76 requirements = set(requirements)
77 else:
78 raise error.RepoError(_("repository %s not found") % path)
79 elif create:
80 raise error.RepoError(_("repository %s already exists") % path)
81 else:
82 try:
83 requirements = scmutil.readrequires(self.opener, self.supported)
84 except IOError, inst:
85 if inst.errno != errno.ENOENT:
86 raise
87 requirements = set()
88
89 self.sharedpath = self.path
90 try:
91 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
92 if not os.path.exists(s):
93 raise error.RepoError(
94 _('.hg/sharedpath points to nonexistent directory %s') % s)
95 self.sharedpath = s
96 except IOError, inst:
97 if inst.errno != errno.ENOENT:
98 raise
99
100 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
101 self.spath = self.store.path
102 self.sopener = self.store.opener
103 self.sjoin = self.store.join
104 self.opener.createmode = self.store.createmode
105 self._applyrequirements(requirements)
106 if create:
107 self._writerequirements()
108
109
110 self._branchcache = None
111 self._branchcachetip = None
112 self.filterpats = {}
113 self._datafilters = {}
114 self._transref = self._lockref = self._wlockref = None
115
116 # A cache for various files under .hg/ that tracks file changes,
117 # (used by the filecache decorator)
118 #
119 # Maps a property name to its util.filecacheentry
120 self._filecache = {}
121
122 def _applyrequirements(self, requirements):
123 self.requirements = requirements
124 openerreqs = set(('revlogv1', 'generaldelta'))
125 self.sopener.options = dict((r, 1) for r in requirements
126 if r in openerreqs)
127
128 def _writerequirements(self):
129 reqfile = self.opener("requires", "w")
130 for r in self.requirements:
131 reqfile.write("%s\n" % r)
132 reqfile.close()
133
134 def _checknested(self, path):
135 """Determine if path is a legal nested repository."""
136 if not path.startswith(self.root):
137 return False
138 subpath = path[len(self.root) + 1:]
139 normsubpath = util.pconvert(subpath)
140
141 # XXX: Checking against the current working copy is wrong in
142 # the sense that it can reject things like
143 #
144 # $ hg cat -r 10 sub/x.txt
145 #
146 # if sub/ is no longer a subrepository in the working copy
147 # parent revision.
148 #
149 # However, it can of course also allow things that would have
150 # been rejected before, such as the above cat command if sub/
151 # is a subrepository now, but was a normal directory before.
152 # The old path auditor would have rejected by mistake since it
153 # panics when it sees sub/.hg/.
154 #
155 # All in all, checking against the working copy seems sensible
156 # since we want to prevent access to nested repositories on
157 # the filesystem *now*.
158 ctx = self[None]
159 parts = util.splitpath(subpath)
160 while parts:
161 prefix = '/'.join(parts)
162 if prefix in ctx.substate:
163 if prefix == normsubpath:
164 return True
165 else:
166 sub = ctx.sub(prefix)
167 return sub.checknested(subpath[len(prefix) + 1:])
168 else:
169 parts.pop()
170 return False
171
172 @filecache('bookmarks')
173 def _bookmarks(self):
174 return bookmarks.read(self)
175
176 @filecache('bookmarks.current')
177 def _bookmarkcurrent(self):
178 return bookmarks.readcurrent(self)
179
180 def _writebookmarks(self, marks):
181 bookmarks.write(self)
182
183 def bookmarkheads(self, bookmark):
184 name = bookmark.split('@', 1)[0]
185 heads = []
186 for mark, n in self._bookmarks.iteritems():
187 if mark.split('@', 1)[0] == name:
188 heads.append(n)
189 return heads
190
191 @storecache('phaseroots')
192 def _phasecache(self):
193 return phases.phasecache(self, self._phasedefaults)
194
195 @storecache('obsstore')
196 def obsstore(self):
197 store = obsolete.obsstore()
198 data = self.sopener.tryread('obsstore')
199 if data:
200 store.loadmarkers(data)
201 return store
202
203 @storecache('00changelog.i')
204 def changelog(self):
205 c = changelog.changelog(self.sopener)
206 if 'HG_PENDING' in os.environ:
207 p = os.environ['HG_PENDING']
208 if p.startswith(self.root):
209 c.readpending('00changelog.i.a')
210 return c
211
212 @storecache('00manifest.i')
213 def manifest(self):
214 return manifest.manifest(self.sopener)
215
216 @filecache('dirstate')
217 def dirstate(self):
218 warned = [0]
219 def validate(node):
220 try:
221 self.changelog.rev(node)
222 return node
223 except error.LookupError:
224 if not warned[0]:
225 warned[0] = True
226 self.ui.warn(_("warning: ignoring unknown"
227 " working parent %s!\n") % short(node))
228 return nullid
229
230 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
231
232 def __getitem__(self, changeid):
233 if changeid is None:
234 return context.workingctx(self)
235 return context.changectx(self, changeid)
236
237 def __contains__(self, changeid):
238 try:
239 return bool(self.lookup(changeid))
240 except error.RepoLookupError:
241 return False
242
243 def __nonzero__(self):
244 return True
245
246 def __len__(self):
247 return len(self.changelog)
248
249 def __iter__(self):
250 for i in xrange(len(self)):
251 yield i
252
253 def revs(self, expr, *args):
254 '''Return a list of revisions matching the given revset'''
255 expr = revset.formatspec(expr, *args)
256 m = revset.match(None, expr)
257 return [r for r in m(self, range(len(self)))]
258
259 def set(self, expr, *args):
260 '''
261 Yield a context for each matching revision, after doing arg
262 replacement via revset.formatspec
263 '''
264 for r in self.revs(expr, *args):
265 yield self[r]
266
267 def url(self):
268 return 'file:' + self.root
269
270 def hook(self, name, throw=False, **args):
271 return hook.hook(self.ui, self, name, throw, **args)
272
273 tag_disallowed = ':\r\n'
274
275 def _tag(self, names, node, message, local, user, date, extra={}):
276 if isinstance(names, str):
277 allchars = names
278 names = (names,)
279 else:
280 allchars = ''.join(names)
281 for c in self.tag_disallowed:
282 if c in allchars:
283 raise util.Abort(_('%r cannot be used in a tag name') % c)
284
285 branches = self.branchmap()
286 for name in names:
287 self.hook('pretag', throw=True, node=hex(node), tag=name,
288 local=local)
289 if name in branches:
290 self.ui.warn(_("warning: tag %s conflicts with existing"
291 " branch name\n") % name)
292
293 def writetags(fp, names, munge, prevtags):
294 fp.seek(0, 2)
295 if prevtags and prevtags[-1] != '\n':
296 fp.write('\n')
297 for name in names:
298 m = munge and munge(name) or name
299 if (self._tagscache.tagtypes and
300 name in self._tagscache.tagtypes):
301 old = self.tags().get(name, nullid)
302 fp.write('%s %s\n' % (hex(old), m))
303 fp.write('%s %s\n' % (hex(node), m))
304 fp.close()
305
306 prevtags = ''
307 if local:
308 try:
309 fp = self.opener('localtags', 'r+')
310 except IOError:
311 fp = self.opener('localtags', 'a')
312 else:
313 prevtags = fp.read()
314
315 # local tags are stored in the current charset
316 writetags(fp, names, None, prevtags)
317 for name in names:
318 self.hook('tag', node=hex(node), tag=name, local=local)
319 return
320
321 try:
322 fp = self.wfile('.hgtags', 'rb+')
323 except IOError, e:
324 if e.errno != errno.ENOENT:
325 raise
326 fp = self.wfile('.hgtags', 'ab')
327 else:
328 prevtags = fp.read()
329
330 # committed tags are stored in UTF-8
331 writetags(fp, names, encoding.fromlocal, prevtags)
332
333 fp.close()
334
335 self.invalidatecaches()
336
337 if '.hgtags' not in self.dirstate:
338 self[None].add(['.hgtags'])
339
340 m = matchmod.exact(self.root, '', ['.hgtags'])
341 tagnode = self.commit(message, user, date, extra=extra, match=m)
342
343 for name in names:
344 self.hook('tag', node=hex(node), tag=name, local=local)
345
346 return tagnode
347
348 def tag(self, names, node, message, local, user, date):
349 '''tag a revision with one or more symbolic names.
350
351 names is a list of strings or, when adding a single tag, names may be a
352 string.
353
354 if local is True, the tags are stored in a per-repository file.
355 otherwise, they are stored in the .hgtags file, and a new
356 changeset is committed with the change.
357
358 keyword arguments:
359
360 local: whether to store tags in non-version-controlled file
361 (default False)
362
363 message: commit message to use if committing
364
365 user: name of user to use if committing
366
367 date: date tuple to use if committing'''
368
369 if not local:
370 for x in self.status()[:5]:
371 if '.hgtags' in x:
372 raise util.Abort(_('working copy of .hgtags is changed '
373 '(please commit .hgtags manually)'))
374
375 self.tags() # instantiate the cache
376 self._tag(names, node, message, local, user, date)
377
378 @propertycache
379 def _tagscache(self):
380 '''Returns a tagscache object that contains various tags related
381 caches.'''
382
383 # This simplifies its cache management by having one decorated
384 # function (this one) and the rest simply fetch things from it.
385 class tagscache(object):
386 def __init__(self):
387 # These two define the set of tags for this repository. tags
388 # maps tag name to node; tagtypes maps tag name to 'global' or
389 # 'local'. (Global tags are defined by .hgtags across all
390 # heads, and local tags are defined in .hg/localtags.)
391 # They constitute the in-memory cache of tags.
392 self.tags = self.tagtypes = None
393
394 self.nodetagscache = self.tagslist = None
395
396 cache = tagscache()
397 cache.tags, cache.tagtypes = self._findtags()
398
399 return cache
400
401 def tags(self):
402 '''return a mapping of tag to node'''
403 t = {}
404 for k, v in self._tagscache.tags.iteritems():
405 try:
406 # ignore tags to unknown nodes
407 self.changelog.rev(v)
408 t[k] = v
409 except (error.LookupError, ValueError):
410 pass
411 return t
412
413 def _findtags(self):
414 '''Do the hard work of finding tags. Return a pair of dicts
415 (tags, tagtypes) where tags maps tag name to node, and tagtypes
416 maps tag name to a string like \'global\' or \'local\'.
417 Subclasses or extensions are free to add their own tags, but
418 should be aware that the returned dicts will be retained for the
419 duration of the localrepo object.'''
420
421 # XXX what tagtype should subclasses/extensions use? Currently
422 # mq and bookmarks add tags, but do not set the tagtype at all.
423 # Should each extension invent its own tag type? Should there
424 # be one tagtype for all such "virtual" tags? Or is the status
425 # quo fine?
426
427 alltags = {} # map tag name to (node, hist)
428 tagtypes = {}
429
430 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
431 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
432
433 # Build the return dicts. Have to re-encode tag names because
434 # the tags module always uses UTF-8 (in order not to lose info
435 # writing to the cache), but the rest of Mercurial wants them in
436 # local encoding.
437 tags = {}
438 for (name, (node, hist)) in alltags.iteritems():
439 if node != nullid:
440 tags[encoding.tolocal(name)] = node
441 tags['tip'] = self.changelog.tip()
442 tagtypes = dict([(encoding.tolocal(name), value)
443 for (name, value) in tagtypes.iteritems()])
444 return (tags, tagtypes)
445
446 def tagtype(self, tagname):
447 '''
448 return the type of the given tag. result can be:
449
450 'local' : a local tag
451 'global' : a global tag
452 None : tag does not exist
453 '''
454
455 return self._tagscache.tagtypes.get(tagname)
456
457 def tagslist(self):
458 '''return a list of tags ordered by revision'''
459 if not self._tagscache.tagslist:
460 l = []
461 for t, n in self.tags().iteritems():
462 r = self.changelog.rev(n)
463 l.append((r, t, n))
464 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
465
466 return self._tagscache.tagslist
467
468 def nodetags(self, node):
469 '''return the tags associated with a node'''
470 if not self._tagscache.nodetagscache:
471 nodetagscache = {}
472 for t, n in self._tagscache.tags.iteritems():
473 nodetagscache.setdefault(n, []).append(t)
474 for tags in nodetagscache.itervalues():
475 tags.sort()
476 self._tagscache.nodetagscache = nodetagscache
477 return self._tagscache.nodetagscache.get(node, [])
478
479 def nodebookmarks(self, node):
480 marks = []
481 for bookmark, n in self._bookmarks.iteritems():
482 if n == node:
483 marks.append(bookmark)
484 return sorted(marks)
485
486 def _branchtags(self, partial, lrev):
487 # TODO: rename this function?
488 tiprev = len(self) - 1
489 if lrev != tiprev:
490 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
491 self._updatebranchcache(partial, ctxgen)
492 self._writebranchcache(partial, self.changelog.tip(), tiprev)
493
494 return partial
495
496 def updatebranchcache(self):
497 tip = self.changelog.tip()
498 if self._branchcache is not None and self._branchcachetip == tip:
499 return
500
501 oldtip = self._branchcachetip
502 self._branchcachetip = tip
503 if oldtip is None or oldtip not in self.changelog.nodemap:
504 partial, last, lrev = self._readbranchcache()
505 else:
506 lrev = self.changelog.rev(oldtip)
507 partial = self._branchcache
508
509 self._branchtags(partial, lrev)
510 # this private cache holds all heads (not just the branch tips)
511 self._branchcache = partial
512
513 def branchmap(self):
514 '''returns a dictionary {branch: [branchheads]}'''
515 self.updatebranchcache()
516 return self._branchcache
517
518 def _branchtip(self, heads):
519 '''return the tipmost branch head in heads'''
520 tip = heads[-1]
521 for h in reversed(heads):
522 if not self[h].closesbranch():
523 tip = h
524 break
525 return tip
526
527 def branchtip(self, branch):
528 '''return the tip node for a given branch'''
529 if branch not in self.branchmap():
530 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
531 return self._branchtip(self.branchmap()[branch])
532
533 def branchtags(self):
534 '''return a dict where branch names map to the tipmost head of
535 the branch, open heads come before closed'''
536 bt = {}
537 for bn, heads in self.branchmap().iteritems():
538 bt[bn] = self._branchtip(heads)
539 return bt
540
541 def _readbranchcache(self):
542 partial = {}
543 try:
544 f = self.opener("cache/branchheads")
545 lines = f.read().split('\n')
546 f.close()
547 except (IOError, OSError):
548 return {}, nullid, nullrev
549
550 try:
551 last, lrev = lines.pop(0).split(" ", 1)
552 last, lrev = bin(last), int(lrev)
553 if lrev >= len(self) or self[lrev].node() != last:
554 # invalidate the cache
555 raise ValueError('invalidating branch cache (tip differs)')
556 for l in lines:
557 if not l:
558 continue
559 node, label = l.split(" ", 1)
560 label = encoding.tolocal(label.strip())
561 if not node in self:
562 raise ValueError('invalidating branch cache because node '+
563 '%s does not exist' % node)
564 partial.setdefault(label, []).append(bin(node))
565 except KeyboardInterrupt:
566 raise
567 except Exception, inst:
568 if self.ui.debugflag:
569 self.ui.warn(str(inst), '\n')
570 partial, last, lrev = {}, nullid, nullrev
571 return partial, last, lrev
572
573 def _writebranchcache(self, branches, tip, tiprev):
574 try:
575 f = self.opener("cache/branchheads", "w", atomictemp=True)
576 f.write("%s %s\n" % (hex(tip), tiprev))
577 for label, nodes in branches.iteritems():
578 for node in nodes:
579 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
580 f.close()
581 except (IOError, OSError):
582 pass
583
584 def _updatebranchcache(self, partial, ctxgen):
585 """Given a branchhead cache, partial, that may have extra nodes or be
586 missing heads, and a generator of nodes that are at least a superset of
587 the missing heads, this function updates partial to be correct.
588 """
589 # collect new branch entries
590 newbranches = {}
591 for c in ctxgen:
592 newbranches.setdefault(c.branch(), []).append(c.node())
593 # if older branchheads are reachable from new ones, they aren't
594 # really branchheads. Note checking parents is insufficient:
595 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
596 for branch, newnodes in newbranches.iteritems():
597 bheads = partial.setdefault(branch, [])
598 # Remove candidate heads that no longer are in the repo (e.g., as
599 # the result of a strip that just happened). Avoid using 'node in
600 # self' here because that dives down into branchcache code somewhat
601 # recursively.
602 bheadrevs = [self.changelog.rev(node) for node in bheads
603 if self.changelog.hasnode(node)]
604 newheadrevs = [self.changelog.rev(node) for node in newnodes
605 if self.changelog.hasnode(node)]
606 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
607 # Remove duplicates - nodes that are in newheadrevs and are already
608 # in bheadrevs. This can happen if you strip a node whose parent
609 # was already a head (because they're on different branches).
610 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
611
612 # Starting from tip means fewer passes over reachable. If we know
613 # the new candidates are not ancestors of existing heads, we don't
614 # have to examine ancestors of existing heads
615 if ctxisnew:
616 iterrevs = sorted(newheadrevs)
617 else:
618 iterrevs = list(bheadrevs)
619
620 # This loop prunes out two kinds of heads - heads that are
621 # superseded by a head in newheadrevs, and newheadrevs that are not
614 # heads because an existing head is their descendant.
622 # heads because an existing head is their descendant.
615 while iterrevs:
623 while iterrevs:
616 latest = iterrevs.pop()
624 latest = iterrevs.pop()
617 if latest not in bheadrevs:
625 if latest not in bheadrevs:
618 continue
626 continue
619 ancestors = set(self.changelog.ancestors([latest],
627 ancestors = set(self.changelog.ancestors([latest],
620 bheadrevs[0]))
628 bheadrevs[0]))
621 if ancestors:
629 if ancestors:
622 bheadrevs = [b for b in bheadrevs if b not in ancestors]
630 bheadrevs = [b for b in bheadrevs if b not in ancestors]
623 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
631 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
624
632
625 # There may be branches that cease to exist when the last commit in the
633 # There may be branches that cease to exist when the last commit in the
626 # branch was stripped. This code filters them out. Note that the
634 # branch was stripped. This code filters them out. Note that the
627 # branch that ceased to exist may not be in newbranches because
635 # branch that ceased to exist may not be in newbranches because
628 # newbranches is the set of candidate heads, which when you strip the
636 # newbranches is the set of candidate heads, which when you strip the
629 # last commit in a branch will be the parent branch.
637 # last commit in a branch will be the parent branch.
630 for branch in partial:
638 for branch in partial:
631 nodes = [head for head in partial[branch]
639 nodes = [head for head in partial[branch]
632 if self.changelog.hasnode(head)]
640 if self.changelog.hasnode(head)]
633 if not nodes:
641 if not nodes:
634 del partial[branch]
642 del partial[branch]
635
643
636 def lookup(self, key):
644 def lookup(self, key):
637 return self[key].node()
645 return self[key].node()
638
646
639 def lookupbranch(self, key, remote=None):
647 def lookupbranch(self, key, remote=None):
640 repo = remote or self
648 repo = remote or self
641 if key in repo.branchmap():
649 if key in repo.branchmap():
642 return key
650 return key
643
651
644 repo = (remote and remote.local()) and remote or self
652 repo = (remote and remote.local()) and remote or self
645 return repo[key].branch()
653 return repo[key].branch()
646
654
647 def known(self, nodes):
655 def known(self, nodes):
648 nm = self.changelog.nodemap
656 nm = self.changelog.nodemap
649 pc = self._phasecache
657 pc = self._phasecache
650 result = []
658 result = []
651 for n in nodes:
659 for n in nodes:
652 r = nm.get(n)
660 r = nm.get(n)
653 resp = not (r is None or pc.phase(self, r) >= phases.secret)
661 resp = not (r is None or pc.phase(self, r) >= phases.secret)
654 result.append(resp)
662 result.append(resp)
655 return result
663 return result
656
664
657 def local(self):
665 def local(self):
658 return self
666 return self
659
667
660 def join(self, f):
668 def join(self, f):
661 return os.path.join(self.path, f)
669 return os.path.join(self.path, f)
662
670
663 def wjoin(self, f):
671 def wjoin(self, f):
664 return os.path.join(self.root, f)
672 return os.path.join(self.root, f)
665
673
666 def file(self, f):
674 def file(self, f):
667 if f[0] == '/':
675 if f[0] == '/':
668 f = f[1:]
676 f = f[1:]
669 return filelog.filelog(self.sopener, f)
677 return filelog.filelog(self.sopener, f)
670
678
671 def changectx(self, changeid):
679 def changectx(self, changeid):
672 return self[changeid]
680 return self[changeid]
673
681
674 def parents(self, changeid=None):
682 def parents(self, changeid=None):
675 '''get list of changectxs for parents of changeid'''
683 '''get list of changectxs for parents of changeid'''
676 return self[changeid].parents()
684 return self[changeid].parents()
677
685
678 def setparents(self, p1, p2=nullid):
686 def setparents(self, p1, p2=nullid):
679 copies = self.dirstate.setparents(p1, p2)
687 copies = self.dirstate.setparents(p1, p2)
680 if copies:
688 if copies:
681 # Adjust copy records, the dirstate cannot do it, it
689 # Adjust copy records, the dirstate cannot do it, it
682 # requires access to parents manifests. Preserve them
690 # requires access to parents manifests. Preserve them
683 # only for entries added to first parent.
691 # only for entries added to first parent.
684 pctx = self[p1]
692 pctx = self[p1]
685 for f in copies:
693 for f in copies:
686 if f not in pctx and copies[f] in pctx:
694 if f not in pctx and copies[f] in pctx:
687 self.dirstate.copy(copies[f], f)
695 self.dirstate.copy(copies[f], f)
688
696
689 def filectx(self, path, changeid=None, fileid=None):
697 def filectx(self, path, changeid=None, fileid=None):
690 """changeid can be a changeset revision, node, or tag.
698 """changeid can be a changeset revision, node, or tag.
691 fileid can be a file revision or node."""
699 fileid can be a file revision or node."""
692 return context.filectx(self, path, changeid, fileid)
700 return context.filectx(self, path, changeid, fileid)
693
701
694 def getcwd(self):
702 def getcwd(self):
695 return self.dirstate.getcwd()
703 return self.dirstate.getcwd()
696
704
697 def pathto(self, f, cwd=None):
705 def pathto(self, f, cwd=None):
698 return self.dirstate.pathto(f, cwd)
706 return self.dirstate.pathto(f, cwd)
699
707
700 def wfile(self, f, mode='r'):
708 def wfile(self, f, mode='r'):
701 return self.wopener(f, mode)
709 return self.wopener(f, mode)
702
710
703 def _link(self, f):
711 def _link(self, f):
704 return os.path.islink(self.wjoin(f))
712 return os.path.islink(self.wjoin(f))
705
713
706 def _loadfilter(self, filter):
714 def _loadfilter(self, filter):
707 if filter not in self.filterpats:
715 if filter not in self.filterpats:
708 l = []
716 l = []
709 for pat, cmd in self.ui.configitems(filter):
717 for pat, cmd in self.ui.configitems(filter):
710 if cmd == '!':
718 if cmd == '!':
711 continue
719 continue
712 mf = matchmod.match(self.root, '', [pat])
720 mf = matchmod.match(self.root, '', [pat])
713 fn = None
721 fn = None
714 params = cmd
722 params = cmd
715 for name, filterfn in self._datafilters.iteritems():
723 for name, filterfn in self._datafilters.iteritems():
716 if cmd.startswith(name):
724 if cmd.startswith(name):
717 fn = filterfn
725 fn = filterfn
718 params = cmd[len(name):].lstrip()
726 params = cmd[len(name):].lstrip()
719 break
727 break
720 if not fn:
728 if not fn:
721 fn = lambda s, c, **kwargs: util.filter(s, c)
729 fn = lambda s, c, **kwargs: util.filter(s, c)
722 # Wrap old filters not supporting keyword arguments
730 # Wrap old filters not supporting keyword arguments
723 if not inspect.getargspec(fn)[2]:
731 if not inspect.getargspec(fn)[2]:
724 oldfn = fn
732 oldfn = fn
725 fn = lambda s, c, **kwargs: oldfn(s, c)
733 fn = lambda s, c, **kwargs: oldfn(s, c)
726 l.append((mf, fn, params))
734 l.append((mf, fn, params))
727 self.filterpats[filter] = l
735 self.filterpats[filter] = l
728 return self.filterpats[filter]
736 return self.filterpats[filter]
729
737
730 def _filter(self, filterpats, filename, data):
738 def _filter(self, filterpats, filename, data):
731 for mf, fn, cmd in filterpats:
739 for mf, fn, cmd in filterpats:
732 if mf(filename):
740 if mf(filename):
733 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
741 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
734 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
742 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
735 break
743 break
736
744
737 return data
745 return data
738
746
739 @propertycache
747 @propertycache
740 def _encodefilterpats(self):
748 def _encodefilterpats(self):
741 return self._loadfilter('encode')
749 return self._loadfilter('encode')
742
750
743 @propertycache
751 @propertycache
744 def _decodefilterpats(self):
752 def _decodefilterpats(self):
745 return self._loadfilter('decode')
753 return self._loadfilter('decode')
746
754
747 def adddatafilter(self, name, filter):
755 def adddatafilter(self, name, filter):
748 self._datafilters[name] = filter
756 self._datafilters[name] = filter
749
757
750 def wread(self, filename):
758 def wread(self, filename):
751 if self._link(filename):
759 if self._link(filename):
752 data = os.readlink(self.wjoin(filename))
760 data = os.readlink(self.wjoin(filename))
753 else:
761 else:
754 data = self.wopener.read(filename)
762 data = self.wopener.read(filename)
755 return self._filter(self._encodefilterpats, filename, data)
763 return self._filter(self._encodefilterpats, filename, data)
756
764
757 def wwrite(self, filename, data, flags):
765 def wwrite(self, filename, data, flags):
758 data = self._filter(self._decodefilterpats, filename, data)
766 data = self._filter(self._decodefilterpats, filename, data)
759 if 'l' in flags:
767 if 'l' in flags:
760 self.wopener.symlink(data, filename)
768 self.wopener.symlink(data, filename)
761 else:
769 else:
762 self.wopener.write(filename, data)
770 self.wopener.write(filename, data)
763 if 'x' in flags:
771 if 'x' in flags:
764 util.setflags(self.wjoin(filename), False, True)
772 util.setflags(self.wjoin(filename), False, True)
765
773
766 def wwritedata(self, filename, data):
774 def wwritedata(self, filename, data):
767 return self._filter(self._decodefilterpats, filename, data)
775 return self._filter(self._decodefilterpats, filename, data)
768
776
769 def transaction(self, desc):
777 def transaction(self, desc):
770 tr = self._transref and self._transref() or None
778 tr = self._transref and self._transref() or None
771 if tr and tr.running():
779 if tr and tr.running():
772 return tr.nest()
780 return tr.nest()
773
781
774 # abort here if the journal already exists
782 # abort here if the journal already exists
775 if os.path.exists(self.sjoin("journal")):
783 if os.path.exists(self.sjoin("journal")):
776 raise error.RepoError(
784 raise error.RepoError(
777 _("abandoned transaction found - run hg recover"))
785 _("abandoned transaction found - run hg recover"))
778
786
779 self._writejournal(desc)
787 self._writejournal(desc)
780 renames = [(x, undoname(x)) for x in self._journalfiles()]
788 renames = [(x, undoname(x)) for x in self._journalfiles()]
781
789
782 tr = transaction.transaction(self.ui.warn, self.sopener,
790 tr = transaction.transaction(self.ui.warn, self.sopener,
783 self.sjoin("journal"),
791 self.sjoin("journal"),
784 aftertrans(renames),
792 aftertrans(renames),
785 self.store.createmode)
793 self.store.createmode)
786 self._transref = weakref.ref(tr)
794 self._transref = weakref.ref(tr)
787 return tr
795 return tr
788
796
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

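    # For illustration: in a repository with 42 changesets, starting a
    # 'commit' transaction writes "42\ncommit\n" to journal.desc (renamed
    # to undo.desc on close); _rollback() below parses exactly this format.
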
    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

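    # Reading aid for the two flags above: dryrun reports what would be
    # undone and returns before touching anything, while force skips the
    # "may lose data" abort that fires when rolling back a commit without
    # having its revision checked out.
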
    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want the
        dirstate to be read again explicitly (i.e. restoring it to a
        previously known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            if 'obsstore' in vars(self) and self.obsstore._new:
                # XXX: transaction logic should be used here. But for
                # now rewriting the whole file is good enough.
                f = self.sopener('obsstore', 'wb', atomictemp=True)
                try:
                    self.obsstore.flushmarkers(f)
                    f.close()
                except: # re-raises
                    f.discard()
                    raise
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

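    # Usage sketch (illustrative): store writers take this lock and release
    # it when done, which runs unlock() above and flushes phase data and any
    # new obsolete markers:
    #
    #     l = repo.lock()
    #     try:
    #         ...modify .hg/store...
    #     finally:
    #         l.release()
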
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

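    # Ordering note, derived from the callers in this file (e.g. rollback()
    # and commit()): when both locks are needed, wlock() is acquired before
    # lock() and released after it.
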
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

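    # For illustration: when a rename is recorded above, the new filelog
    # revision carries metadata of the form
    #
    #     meta = {'copy': 'foo', 'copyrev': '<40-hex filelog node>'}
    #
    # with fparent1 set to nullid, which tells readers to look up the copy
    # data instead of following the usual parent.
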
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

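    # Minimal usage sketch (illustrative): with pending working directory
    # changes,
    #
    #     node = repo.commit(text='fix frobnication',
    #                        user='me <me@example.com>')
    #
    # returns the node of the new changeset, or None when there is nothing
    # to commit.
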
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in the proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchhead cache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache, do
        # it. Otherwise, since nodes were destroyed, the cache is stale and
        # this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

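    # For illustration: status() returns seven sorted lists,
    #
    #     modified, added, removed, deleted, unknown, ignored, clean = \
    #         repo.status(unknown=True, clean=True)
    #
    # where the ignored, clean and unknown lists stay empty unless requested
    # through the corresponding flags.
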
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

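    # Reading aid: between() samples first parents at exponentially growing
    # distances from 'top' (a node is kept when i == f, then f doubles), so
    # on a linear history it collects the ancestors 1, 2, 4, 8, ... steps
    # away until 'bottom' is reached, keeping each list short on long spans.
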
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and unpublishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)
        finally:
            lock.release()

        return result

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

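    # Illustrative sketch (an assumption about typical use, not code from
    # this file): an extension could override checkpush() to veto pushes,
    # e.g.
    #
    #     def checkpush(self, force, revs):
    #         if not force and self.ui.configbool('myext', 'frozen'):
    #             raise util.Abort(_('repository is frozen'))
    #
    # where 'myext.frozen' is a hypothetical configuration knob.
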
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # the entire push failed; synchronize on all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

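    # Illustrative sketch (not part of this changeset): interpreting push()'s
    # return value at a hypothetical call site; 'other' is a remote peer.
    #
    #   ret = repo.push(other)
    #   if ret is None:
    #       repo.ui.status('nothing to push\n')
    #   elif ret == 0:
    #       repo.ui.warn('push failed over HTTP\n')
    #   else:
    #       repo.ui.status('pushed (return code %d)\n' % ret)
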
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

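    # Illustrative sketch (not part of this changeset): streaming the
    # changegroup between two known nodes to a file. 'repo', 'base', 'head'
    # and 'out' are hypothetical; base/head are binary changelog node ids.
    #
    #   cg = repo.changegroupsubset([base], [head], 'bundle')
    #   while True:
    #       chunk = cg.read(4096)
    #       if not chunk:
    #           break
    #       out.write(chunk)
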
    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

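    # Illustrative sketch (not part of this changeset): a pull-style request,
    # where 'commonnodes' (hypothetical) holds nodes both sides already have.
    # Nodes in common that are unknown locally are filtered out above.
    #
    #   cg = repo.getbundle('pull', heads=None, common=commonnodes)
    #   if cg is None:
    #       repo.ui.status('remote already has everything\n')
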
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

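    # Illustrative sketch (not part of this changeset): how the
    # 'bundle.reorder' setting read above maps onto the group() calls.
    # util.parsebool returns True/False for recognized strings, else None,
    # and None lets each revlog pick its own ordering.
    #
    #   assert util.parsebool('true') is True
    #   assert util.parsebool('off') is False
    #   assert util.parsebool('bogus') is None  # behaves like 'auto'
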
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if a changeset already
                # existed locally as secret.
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

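    # Illustrative sketch (not part of this changeset): decoding the head
    # count delta encoded in addchangegroup()'s return value. 'cg' and 'url'
    # are hypothetical here.
    #
    #   ret = repo.addchangegroup(cg, 'pull', url)
    #   if ret == 0:
    #       pass                        # nothing changed
    #   elif ret > 1:
    #       headsadded = ret - 1        # e.g. 3 means two new heads
    #   elif ret < 0:
    #       headsremoved = -ret - 1     # e.g. -2 means one head removed
    #   # ret == 1: changes added, head count unchanged
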
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

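    # Illustrative sketch (not part of this changeset) of the wire framing
    # parsed above, with made-up names and sizes:
    #
    #   "0\n"                       # status line: 0 ok, 1 forbidden, 2 lock
    #   "2 1729\n"                  # total_files, total_bytes
    #   "data/foo.i\x00812\n"       # <name>NUL<size>, then 812 raw bytes
    #   "00changelog.i\x00917\n"    # repeated once per file
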
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

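    # Illustrative sketch (not part of this changeset): the two clone paths.
    # 'repo', 'other' (a remote peer) and 'somenode' are hypothetical.
    #
    #   repo.clone(other, stream=True)       # streaming when formats allow
    #   repo.clone(other, heads=[somenode])  # partial clone, always pulls
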
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

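    # Illustrative sketch (not part of this changeset): pushkey acts as a
    # compare-and-set on (namespace, key), which is how the bookmark sync in
    # push() above uses it. 'oldhex' and 'newhex' are hypothetical hex nodes.
    #
    #   if repo.pushkey('bookmarks', 'foo', oldhex, newhex):
    #       repo.ui.status('bookmark foo updated\n')
    #   marks = repo.listkeys('bookmarks')  # {name: hex node id}
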
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

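    # Illustrative sketch (not part of this changeset): callers can stash a
    # message before an operation that may abort, then point users at it.
    #
    #   msgfn = repo.savecommitmessage('WIP: refactor dirstate')
    #   repo.ui.warn('commit message saved in %s\n' % msgfn)
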
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

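# Illustrative sketch (not part of this changeset): a transaction receives
# this callback so that, when it closes, journal files become undo files
# for later rollback. The paths below are made up:
#
#   onclose = aftertrans([('.hg/store/journal', '.hg/store/undo')])
#   onclose()  # performs the queued renames, ignoring missing journal files
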
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True