convert: use None value for missing files instead of overloading IOError...
Mads Kiilerich
r22296:650b5b6e default

The requested changes are too big and content was truncated.
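
The change below reworks the converter_source.getfile() contract across the
convert sources: a missing or deleted file is now reported by returning
(None, None) instead of raising IOError, leaving IOError free to signal real
I/O failures. A minimal sketch of a call site under the new contract
(apply_file, sink, and its helpers are illustrative names, not part of this
changeset):

    def apply_file(sink, source, name, rev):
        # New contract: data is None when 'name' is missing or was
        # deleted in 'rev'; callers previously caught IOError here.
        data, mode = source.getfile(name, rev)
        if data is None:
            sink.removefile(name)             # hypothetical sink helper
        else:
            sink.writefile(name, data, mode)  # hypothetical sink helper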

@@ -1,285 +1,284 b''
1 1 # bzr.py - bzr support for the convert extension
2 2 #
3 3 # Copyright 2008, 2009 Marek Kubica <marek@xivilization.net> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 # This module is for handling 'bzr', which was formerly known as Bazaar-NG;
9 9 # it cannot access 'baz' repositories, but they were never used very much
10 10
11 11 import os
12 12 from mercurial import demandimport
13 13 # these do not work with demandimport, blacklist
14 14 demandimport.ignore.extend([
15 15 'bzrlib.transactions',
16 16 'bzrlib.urlutils',
17 17 'ElementPath',
18 18 ])
19 19
20 20 from mercurial.i18n import _
21 21 from mercurial import util
22 22 from common import NoRepo, commit, converter_source
23 23
24 24 try:
25 25 # bazaar imports
26 26 from bzrlib import bzrdir, revision, errors
27 27 from bzrlib.revisionspec import RevisionSpec
28 28 except ImportError:
29 29 pass
30 30
31 31 supportedkinds = ('file', 'symlink')
32 32
33 33 class bzr_source(converter_source):
34 34 """Reads Bazaar repositories by using the Bazaar Python libraries"""
35 35
36 36 def __init__(self, ui, path, rev=None):
37 37 super(bzr_source, self).__init__(ui, path, rev=rev)
38 38
39 39 if not os.path.exists(os.path.join(path, '.bzr')):
40 40 raise NoRepo(_('%s does not look like a Bazaar repository')
41 41 % path)
42 42
43 43 try:
44 44 # access bzrlib stuff
45 45 bzrdir
46 46 except NameError:
47 47 raise NoRepo(_('Bazaar modules could not be loaded'))
48 48
49 49 path = os.path.abspath(path)
50 50 self._checkrepotype(path)
51 51 try:
52 52 self.sourcerepo = bzrdir.BzrDir.open(path).open_repository()
53 53 except errors.NoRepositoryPresent:
54 54 raise NoRepo(_('%s does not look like a Bazaar repository')
55 55 % path)
56 56 self._parentids = {}
57 57
58 58 def _checkrepotype(self, path):
59 59 # Lightweight checkout detection is informational but probably
60 60 # fragile at the API level. It should not terminate the conversion.
61 61 try:
62 62 from bzrlib import bzrdir
63 63 dir = bzrdir.BzrDir.open_containing(path)[0]
64 64 try:
65 65 tree = dir.open_workingtree(recommend_upgrade=False)
66 66 branch = tree.branch
67 67 except (errors.NoWorkingTree, errors.NotLocalUrl):
68 68 tree = None
69 69 branch = dir.open_branch()
70 70 if (tree is not None and tree.bzrdir.root_transport.base !=
71 71 branch.bzrdir.root_transport.base):
72 72 self.ui.warn(_('warning: lightweight checkouts may cause '
73 73 'conversion failures, try with a regular '
74 74 'branch instead.\n'))
75 75 except Exception:
76 76 self.ui.note(_('bzr source type could not be determined\n'))
77 77
78 78 def before(self):
79 79 """Before the conversion begins, acquire a read lock
80 80 for all the operations that might need it. Fortunately
81 81 read locks don't block other reads or writes to the
82 82 repository, so this shouldn't have any impact on the usage of
83 83 the source repository.
84 84
85 85 The alternative would be locking on every operation that
86 86 needs locks (there are currently two: getting the file and
87 87 getting the parent map) and releasing immediately after,
88 88 but this approach can take up to 40% longer."""
89 89 self.sourcerepo.lock_read()
90 90
91 91 def after(self):
92 92 self.sourcerepo.unlock()
93 93
94 94 def _bzrbranches(self):
95 95 return self.sourcerepo.find_branches(using=True)
96 96
97 97 def getheads(self):
98 98 if not self.rev:
99 99 # Set using=True to avoid nested repositories (see issue3254)
100 100 heads = sorted([b.last_revision() for b in self._bzrbranches()])
101 101 else:
102 102 revid = None
103 103 for branch in self._bzrbranches():
104 104 try:
105 105 r = RevisionSpec.from_string(self.rev)
106 106 info = r.in_history(branch)
107 107 except errors.BzrError:
108 108 pass
109 109 revid = info.rev_id
110 110 if revid is None:
111 111 raise util.Abort(_('%s is not a valid revision') % self.rev)
112 112 heads = [revid]
113 113 # Empty repositories return 'null:', which cannot be retrieved
114 114 heads = [h for h in heads if h != 'null:']
115 115 return heads
116 116
117 117 def getfile(self, name, rev):
118 118 revtree = self.sourcerepo.revision_tree(rev)
119 119 fileid = revtree.path2id(name.decode(self.encoding or 'utf-8'))
120 120 kind = None
121 121 if fileid is not None:
122 122 kind = revtree.kind(fileid)
123 123 if kind not in supportedkinds:
124 124 # the file is not available anymore - was deleted
125 raise IOError(_('%s is not available in %s anymore') %
126 (name, rev))
125 return None, None
127 126 mode = self._modecache[(name, rev)]
128 127 if kind == 'symlink':
129 128 target = revtree.get_symlink_target(fileid)
130 129 if target is None:
131 130 raise util.Abort(_('%s.%s symlink has no target')
132 131 % (name, rev))
133 132 return target, mode
134 133 else:
135 134 sio = revtree.get_file(fileid)
136 135 return sio.read(), mode
137 136
138 137 def getchanges(self, version):
139 138 # set up caches: modecache and revtree
140 139 self._modecache = {}
141 140 self._revtree = self.sourcerepo.revision_tree(version)
142 141 # get the parentids from the cache
143 142 parentids = self._parentids.pop(version)
144 143 # only diff against first parent id
145 144 prevtree = self.sourcerepo.revision_tree(parentids[0])
146 145 return self._gettreechanges(self._revtree, prevtree)
147 146
148 147 def getcommit(self, version):
149 148 rev = self.sourcerepo.get_revision(version)
150 149 # populate parent id cache
151 150 if not rev.parent_ids:
152 151 parents = []
153 152 self._parentids[version] = (revision.NULL_REVISION,)
154 153 else:
155 154 parents = self._filterghosts(rev.parent_ids)
156 155 self._parentids[version] = parents
157 156
158 157 branch = self.recode(rev.properties.get('branch-nick', u'default'))
159 158 if branch == 'trunk':
160 159 branch = 'default'
161 160 return commit(parents=parents,
162 161 date='%d %d' % (rev.timestamp, -rev.timezone),
163 162 author=self.recode(rev.committer),
164 163 desc=self.recode(rev.message),
165 164 branch=branch,
166 165 rev=version)
167 166
168 167 def gettags(self):
169 168 bytetags = {}
170 169 for branch in self._bzrbranches():
171 170 if not branch.supports_tags():
172 171 return {}
173 172 tagdict = branch.tags.get_tag_dict()
174 173 for name, rev in tagdict.iteritems():
175 174 bytetags[self.recode(name)] = rev
176 175 return bytetags
177 176
178 177 def getchangedfiles(self, rev, i):
179 178 self._modecache = {}
180 179 curtree = self.sourcerepo.revision_tree(rev)
181 180 if i is not None:
182 181 parentid = self._parentids[rev][i]
183 182 else:
184 183 # no parent id, get the empty revision
185 184 parentid = revision.NULL_REVISION
186 185
187 186 prevtree = self.sourcerepo.revision_tree(parentid)
188 187 changes = [e[0] for e in self._gettreechanges(curtree, prevtree)[0]]
189 188 return changes
190 189
191 190 def _gettreechanges(self, current, origin):
192 191 revid = current._revision_id
193 192 changes = []
194 193 renames = {}
195 194 seen = set()
196 195 # Process the entries by reverse lexicographic name order to
197 196 # handle nested renames correctly, most specific first.
198 197 curchanges = sorted(current.iter_changes(origin),
199 198 key=lambda c: c[1][0] or c[1][1],
200 199 reverse=True)
201 200 for (fileid, paths, changed_content, versioned, parent, name,
202 201 kind, executable) in curchanges:
203 202
204 203 if paths[0] == u'' or paths[1] == u'':
205 204 # ignore changes to tree root
206 205 continue
207 206
208 207 # bazaar tracks directories, mercurial does not, so
209 208 # we have to rename the directory contents
210 209 if kind[1] == 'directory':
211 210 if kind[0] not in (None, 'directory'):
212 211 # Replacing 'something' with a directory, record it
213 212 # so it can be removed.
214 213 changes.append((self.recode(paths[0]), revid))
215 214
216 215 if kind[0] == 'directory' and None not in paths:
217 216 renaming = paths[0] != paths[1]
218 217 # neither an add nor a delete - a move
219 218 # rename all directory contents manually
220 219 subdir = origin.inventory.path2id(paths[0])
221 220 # get all child-entries of the directory
222 221 for name, entry in origin.inventory.iter_entries(subdir):
223 222 # hg does not track directory renames
224 223 if entry.kind == 'directory':
225 224 continue
226 225 frompath = self.recode(paths[0] + '/' + name)
227 226 if frompath in seen:
228 227 # Already handled by a more specific change entry
229 228 # This is important when you have:
230 229 # a => b
231 230 # a/c => a/c
232 231 # Here a/c must not be renamed into b/c
233 232 continue
234 233 seen.add(frompath)
235 234 if not renaming:
236 235 continue
237 236 topath = self.recode(paths[1] + '/' + name)
238 237 # register the files as changed
239 238 changes.append((frompath, revid))
240 239 changes.append((topath, revid))
241 240 # add to mode cache
242 241 mode = ((entry.executable and 'x')
243 242 or (entry.kind == 'symlink' and 's')
244 243 or '')
245 244 self._modecache[(topath, revid)] = mode
246 245 # register the change as move
247 246 renames[topath] = frompath
248 247
249 248 # no further changes, go to the next change
250 249 continue
251 250
252 251 # we got unicode paths, need to convert them
253 252 path, topath = paths
254 253 if path is not None:
255 254 path = self.recode(path)
256 255 if topath is not None:
257 256 topath = self.recode(topath)
258 257 seen.add(path or topath)
259 258
260 259 if topath is None:
261 260 # file deleted
262 261 changes.append((path, revid))
263 262 continue
264 263
265 264 # renamed
266 265 if path and path != topath:
267 266 renames[topath] = path
268 267 changes.append((path, revid))
269 268
270 269 # populate the mode cache
271 270 kind, executable = [e[1] for e in (kind, executable)]
272 271 mode = ((executable and 'x') or (kind == 'symlink' and 'l')
273 272 or '')
274 273 self._modecache[(topath, revid)] = mode
275 274 changes.append((topath, revid))
276 275
277 276 return changes, renames
278 277
279 278 def _filterghosts(self, ids):
280 279 """Filters out ghost revisions which hg does not support, see
281 280 <http://bazaar-vcs.org/GhostRevision>
282 281 """
283 282 parentmap = self.sourcerepo.get_parent_map(ids)
284 283 parents = tuple([parent for parent in ids if parent in parentmap])
285 284 return parents
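
Since Mercurial does not track directories, _gettreechanges() above expands
a bzr directory rename into per-file renames, skipping files already covered
by a more specific entry (the 'a => b' while 'a/c => a/c' case in the
comments). The same expansion as a standalone sketch, assuming plain lists
and sets in place of bzr inventories:

    def expand_dir_rename(src, dst, names, seen):
        """Map each file under src/ to dst/, honoring specific entries."""
        renames = {}
        for name in names:                    # e.g. ['x', 'c']
            frompath = src + '/' + name
            if frompath in seen:              # handled by its own entry,
                continue                      # e.g. 'a/c => a/c' stays put
            renames[dst + '/' + name] = frompath  # topath -> frompath
        return renames

    # 'a => b' while 'a/c' is pinned in place: only a/x follows the rename.
    print expand_dir_rename('a', 'b', ['x', 'c'], set(['a/c']))
    # {'b/x': 'a/x'}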
@@ -1,450 +1,450 b''
1 1 # common.py - common code for the convert extension
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import base64, errno, subprocess, os, datetime, re
9 9 import cPickle as pickle
10 10 from mercurial import util
11 11 from mercurial.i18n import _
12 12
13 13 propertycache = util.propertycache
14 14
15 15 def encodeargs(args):
16 16 def encodearg(s):
17 17 lines = base64.encodestring(s)
18 18 lines = [l.splitlines()[0] for l in lines]
19 19 return ''.join(lines)
20 20
21 21 s = pickle.dumps(args)
22 22 return encodearg(s)
23 23
24 24 def decodeargs(s):
25 25 s = base64.decodestring(s)
26 26 return pickle.loads(s)
27 27
28 28 class MissingTool(Exception):
29 29 pass
30 30
31 31 def checktool(exe, name=None, abort=True):
32 32 name = name or exe
33 33 if not util.findexe(exe):
34 34 exc = abort and util.Abort or MissingTool
35 35 raise exc(_('cannot find required "%s" tool') % name)
36 36
37 37 class NoRepo(Exception):
38 38 pass
39 39
40 40 SKIPREV = 'SKIP'
41 41
42 42 class commit(object):
43 43 def __init__(self, author, date, desc, parents, branch=None, rev=None,
44 44 extra={}, sortkey=None):
45 45 self.author = author or 'unknown'
46 46 self.date = date or '0 0'
47 47 self.desc = desc
48 48 self.parents = parents
49 49 self.branch = branch
50 50 self.rev = rev
51 51 self.extra = extra
52 52 self.sortkey = sortkey
53 53
54 54 class converter_source(object):
55 55 """Conversion source interface"""
56 56
57 57 def __init__(self, ui, path=None, rev=None):
58 58 """Initialize conversion source (or raise NoRepo("message")
59 59 exception if path is not a valid repository)"""
60 60 self.ui = ui
61 61 self.path = path
62 62 self.rev = rev
63 63
64 64 self.encoding = 'utf-8'
65 65
66 66 def checkhexformat(self, revstr, mapname='splicemap'):
67 67 """ fails if revstr is not a 40 byte hex. mercurial and git both uses
68 68 such format for their revision numbering
69 69 """
70 70 if not re.match(r'[0-9a-fA-F]{40,40}$', revstr):
71 71 raise util.Abort(_('%s entry %s is not a valid revision'
72 72 ' identifier') % (mapname, revstr))
73 73
74 74 def before(self):
75 75 pass
76 76
77 77 def after(self):
78 78 pass
79 79
80 80 def setrevmap(self, revmap):
81 81 """set the map of already-converted revisions"""
82 82 pass
83 83
84 84 def getheads(self):
85 85 """Return a list of this repository's heads"""
86 86 raise NotImplementedError
87 87
88 88 def getfile(self, name, rev):
89 89 """Return a pair (data, mode) where data is the file content
90 90 as a string and mode is one of '', 'x' or 'l'. rev is the
91 identifier returned by a previous call to getchanges(). Raise
92 IOError to indicate that name was deleted in rev.
91 identifier returned by a previous call to getchanges().
92 Data is None if the file is missing or was deleted in rev.
93 93 """
94 94 raise NotImplementedError
95 95
96 96 def getchanges(self, version):
97 97 """Returns a tuple of (files, copies).
98 98
99 99 files is a sorted list of (filename, id) tuples for all files
100 100 changed between version and its first parent returned by
101 101 getcommit(). id is the source revision id of the file.
102 102
103 103 copies is a dictionary of dest: source
104 104 """
105 105 raise NotImplementedError
106 106
107 107 def getcommit(self, version):
108 108 """Return the commit object for version"""
109 109 raise NotImplementedError
110 110
111 111 def gettags(self):
112 112 """Return the tags as a dictionary of name: revision
113 113
114 114 Tag names must be UTF-8 strings.
115 115 """
116 116 raise NotImplementedError
117 117
118 118 def recode(self, s, encoding=None):
119 119 if not encoding:
120 120 encoding = self.encoding or 'utf-8'
121 121
122 122 if isinstance(s, unicode):
123 123 return s.encode("utf-8")
124 124 try:
125 125 return s.decode(encoding).encode("utf-8")
126 126 except UnicodeError:
127 127 try:
128 128 return s.decode("latin-1").encode("utf-8")
129 129 except UnicodeError:
130 130 return s.decode(encoding, "replace").encode("utf-8")
131 131
132 132 def getchangedfiles(self, rev, i):
133 133 """Return the files changed by rev compared to parent[i].
134 134
135 135 i is an index selecting one of the parents of rev. The return
136 136 value should be the list of files that are different in rev and
137 137 this parent.
138 138
139 139 If rev has no parents, i is None.
140 140
141 141 This function is only needed to support --filemap
142 142 """
143 143 raise NotImplementedError
144 144
145 145 def converted(self, rev, sinkrev):
146 146 '''Notify the source that a revision has been converted.'''
147 147 pass
148 148
149 149 def hasnativeorder(self):
150 150 """Return true if this source has a meaningful, native revision
151 151 order. For instance, Mercurial revisions are stored sequentially
152 152 while there is no such global ordering with Darcs.
153 153 """
154 154 return False
155 155
156 156 def hasnativeclose(self):
157 157 """Return true if this source has ability to close branch.
158 158 """
159 159 return False
160 160
161 161 def lookuprev(self, rev):
162 162 """If rev is a meaningful revision reference in source, return
163 163 the referenced identifier in the same format used by getcommit().
164 164 return None otherwise.
165 165 """
166 166 return None
167 167
168 168 def getbookmarks(self):
169 169 """Return the bookmarks as a dictionary of name: revision
170 170
171 171 Bookmark names are to be UTF-8 strings.
172 172 """
173 173 return {}
174 174
175 175 def checkrevformat(self, revstr, mapname='splicemap'):
176 176 """revstr is a string that describes a revision in the given
177 177 source control system. Return true if revstr has correct
178 178 format.
179 179 """
180 180 return True
181 181
182 182 class converter_sink(object):
183 183 """Conversion sink (target) interface"""
184 184
185 185 def __init__(self, ui, path):
186 186 """Initialize conversion sink (or raise NoRepo("message")
187 187 exception if path is not a valid repository)
188 188
189 189 created is a list of paths to remove if a fatal error occurs
190 190 later"""
191 191 self.ui = ui
192 192 self.path = path
193 193 self.created = []
194 194
195 195 def revmapfile(self):
196 196 """Path to a file that will contain lines
197 197 source_rev_id sink_rev_id
198 198 mapping equivalent revision identifiers for each system."""
199 199 raise NotImplementedError
200 200
201 201 def authorfile(self):
202 202 """Path to a file that will contain lines
203 203 srcauthor=dstauthor
204 204 mapping equivalent author identifiers for each system."""
205 205 return None
206 206
207 207 def putcommit(self, files, copies, parents, commit, source, revmap):
208 208 """Create a revision with all changed files listed in 'files'
209 209 and having listed parents. 'commit' is a commit object
210 210 containing at a minimum the author, date, and message for this
211 211 changeset. 'files' is a list of (path, version) tuples,
212 212 'copies' is a dictionary mapping destinations to sources,
213 213 'source' is the source repository, and 'revmap' is a mapfile
214 214 of source revisions to converted revisions. Only getfile() and
215 215 lookuprev() should be called on 'source'.
216 216
217 217 Note that the sink repository is not told to update itself to
218 218 a particular revision (or even what that revision would be)
219 219 before it receives the file data.
220 220 """
221 221 raise NotImplementedError
222 222
223 223 def puttags(self, tags):
224 224 """Put tags into sink.
225 225
226 226 tags: {tagname: sink_rev_id, ...} where tagname is a UTF-8 string.
227 227 Return a pair (tag_revision, tag_parent_revision), or (None, None)
228 228 if nothing was changed.
229 229 """
230 230 raise NotImplementedError
231 231
232 232 def setbranch(self, branch, pbranches):
233 233 """Set the current branch name. Called before the first putcommit
234 234 on the branch.
235 235 branch: branch name for subsequent commits
236 236 pbranches: (converted parent revision, parent branch) tuples"""
237 237 pass
238 238
239 239 def setfilemapmode(self, active):
240 240 """Tell the destination that we're using a filemap
241 241
242 242 Some converter_sources (svn in particular) can claim that a file
243 243 was changed in a revision, even if there was no change. This method
244 244 tells the destination that we're using a filemap and that it should
245 245 filter empty revisions.
246 246 """
247 247 pass
248 248
249 249 def before(self):
250 250 pass
251 251
252 252 def after(self):
253 253 pass
254 254
255 255 def putbookmarks(self, bookmarks):
256 256 """Put bookmarks into sink.
257 257
258 258 bookmarks: {bookmarkname: sink_rev_id, ...}
259 259 where bookmarkname is a UTF-8 string.
260 260 """
261 261 pass
262 262
263 263 def hascommitfrommap(self, rev):
264 264 """Return False if a rev mentioned in a filemap is known to not be
265 265 present."""
266 266 raise NotImplementedError
267 267
268 268 def hascommitforsplicemap(self, rev):
269 269 """This method is for the special needs for splicemap handling and not
270 270 for general use. Returns True if the sink contains rev, aborts on some
271 271 special cases."""
272 272 raise NotImplementedError
273 273
274 274 class commandline(object):
275 275 def __init__(self, ui, command):
276 276 self.ui = ui
277 277 self.command = command
278 278
279 279 def prerun(self):
280 280 pass
281 281
282 282 def postrun(self):
283 283 pass
284 284
285 285 def _cmdline(self, cmd, *args, **kwargs):
286 286 cmdline = [self.command, cmd] + list(args)
287 287 for k, v in kwargs.iteritems():
288 288 if len(k) == 1:
289 289 cmdline.append('-' + k)
290 290 else:
291 291 cmdline.append('--' + k.replace('_', '-'))
292 292 try:
293 293 if len(k) == 1:
294 294 cmdline.append('' + v)
295 295 else:
296 296 cmdline[-1] += '=' + v
297 297 except TypeError:
298 298 pass
299 299 cmdline = [util.shellquote(arg) for arg in cmdline]
300 300 if not self.ui.debugflag:
301 301 cmdline += ['2>', os.devnull]
302 302 cmdline = ' '.join(cmdline)
303 303 return cmdline
304 304
305 305 def _run(self, cmd, *args, **kwargs):
306 306 def popen(cmdline):
307 307 p = subprocess.Popen(cmdline, shell=True, bufsize=-1,
308 308 close_fds=util.closefds,
309 309 stdout=subprocess.PIPE)
310 310 return p
311 311 return self._dorun(popen, cmd, *args, **kwargs)
312 312
313 313 def _run2(self, cmd, *args, **kwargs):
314 314 return self._dorun(util.popen2, cmd, *args, **kwargs)
315 315
316 316 def _dorun(self, openfunc, cmd, *args, **kwargs):
317 317 cmdline = self._cmdline(cmd, *args, **kwargs)
318 318 self.ui.debug('running: %s\n' % (cmdline,))
319 319 self.prerun()
320 320 try:
321 321 return openfunc(cmdline)
322 322 finally:
323 323 self.postrun()
324 324
325 325 def run(self, cmd, *args, **kwargs):
326 326 p = self._run(cmd, *args, **kwargs)
327 327 output = p.communicate()[0]
328 328 self.ui.debug(output)
329 329 return output, p.returncode
330 330
331 331 def runlines(self, cmd, *args, **kwargs):
332 332 p = self._run(cmd, *args, **kwargs)
333 333 output = p.stdout.readlines()
334 334 p.wait()
335 335 self.ui.debug(''.join(output))
336 336 return output, p.returncode
337 337
338 338 def checkexit(self, status, output=''):
339 339 if status:
340 340 if output:
341 341 self.ui.warn(_('%s error:\n') % self.command)
342 342 self.ui.warn(output)
343 343 msg = util.explainexit(status)[0]
344 344 raise util.Abort('%s %s' % (self.command, msg))
345 345
346 346 def run0(self, cmd, *args, **kwargs):
347 347 output, status = self.run(cmd, *args, **kwargs)
348 348 self.checkexit(status, output)
349 349 return output
350 350
351 351 def runlines0(self, cmd, *args, **kwargs):
352 352 output, status = self.runlines(cmd, *args, **kwargs)
353 353 self.checkexit(status, ''.join(output))
354 354 return output
355 355
356 356 @propertycache
357 357 def argmax(self):
358 358 # POSIX requires at least 4096 bytes for ARG_MAX
359 359 argmax = 4096
360 360 try:
361 361 argmax = os.sysconf("SC_ARG_MAX")
362 362 except (AttributeError, ValueError):
363 363 pass
364 364
365 365 # Windows shells impose their own limits on command line length,
366 366 # down to 2047 bytes for cmd.exe under Windows NT/2k and 2500 bytes
367 367 # for older 4nt.exe. See http://support.microsoft.com/kb/830473 for
368 368 # details about cmd.exe limitations.
369 369
370 370 # Since ARG_MAX is for command line _and_ environment, lower our limit
371 371 # (and make happy Windows shells while doing this).
372 372 return argmax // 2 - 1
373 373
374 374 def _limit_arglist(self, arglist, cmd, *args, **kwargs):
375 375 cmdlen = len(self._cmdline(cmd, *args, **kwargs))
376 376 limit = self.argmax - cmdlen
377 377 bytes = 0
378 378 fl = []
379 379 for fn in arglist:
380 380 b = len(fn) + 3
381 381 if bytes + b < limit or len(fl) == 0:
382 382 fl.append(fn)
383 383 bytes += b
384 384 else:
385 385 yield fl
386 386 fl = [fn]
387 387 bytes = b
388 388 if fl:
389 389 yield fl
390 390
391 391 def xargs(self, arglist, cmd, *args, **kwargs):
392 392 for l in self._limit_arglist(arglist, cmd, *args, **kwargs):
393 393 self.run0(cmd, *(list(args) + l), **kwargs)
394 394
395 395 class mapfile(dict):
396 396 def __init__(self, ui, path):
397 397 super(mapfile, self).__init__()
398 398 self.ui = ui
399 399 self.path = path
400 400 self.fp = None
401 401 self.order = []
402 402 self._read()
403 403
404 404 def _read(self):
405 405 if not self.path:
406 406 return
407 407 try:
408 408 fp = open(self.path, 'r')
409 409 except IOError, err:
410 410 if err.errno != errno.ENOENT:
411 411 raise
412 412 return
413 413 for i, line in enumerate(fp):
414 414 line = line.splitlines()[0].rstrip()
415 415 if not line:
416 416 # Ignore blank lines
417 417 continue
418 418 try:
419 419 key, value = line.rsplit(' ', 1)
420 420 except ValueError:
421 421 raise util.Abort(
422 422 _('syntax error in %s(%d): key/value pair expected')
423 423 % (self.path, i + 1))
424 424 if key not in self:
425 425 self.order.append(key)
426 426 super(mapfile, self).__setitem__(key, value)
427 427 fp.close()
428 428
429 429 def __setitem__(self, key, value):
430 430 if self.fp is None:
431 431 try:
432 432 self.fp = open(self.path, 'a')
433 433 except IOError, err:
434 434 raise util.Abort(_('could not open map file %r: %s') %
435 435 (self.path, err.strerror))
436 436 self.fp.write('%s %s\n' % (key, value))
437 437 self.fp.flush()
438 438 super(mapfile, self).__setitem__(key, value)
439 439
440 440 def close(self):
441 441 if self.fp:
442 442 self.fp.close()
443 443 self.fp = None
444 444
445 445 def makedatetimestamp(t):
446 446 """Like util.makedate() but for time t instead of current time"""
447 447 delta = (datetime.datetime.utcfromtimestamp(t) -
448 448 datetime.datetime.fromtimestamp(t))
449 449 tz = delta.days * 86400 + delta.seconds
450 450 return t, tz
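
The converter_source interface above now documents the None-for-missing
convention directly in the getfile() docstring. A toy source honoring it,
assuming an in-memory dict of {(name, rev): (data, mode)} snapshots
(dict_source and its backing dict are illustrative, not part of the
extension):

    class dict_source(converter_source):
        """Toy converter_source backed by a dict; absence means deleted."""
        def __init__(self, ui, snapshots):
            converter_source.__init__(self, ui)
            self._snap = snapshots

        def getfile(self, name, rev):
            # (None, None) when the file is missing or deleted in rev,
            # per the updated docstring above.
            return self._snap.get((name, rev), (None, None))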
@@ -1,275 +1,275 b''
1 1 # cvs.py: CVS conversion code inspired by hg-cvs-import and git-cvsimport
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import os, re, socket, errno
9 9 from cStringIO import StringIO
10 10 from mercurial import encoding, util
11 11 from mercurial.i18n import _
12 12
13 13 from common import NoRepo, commit, converter_source, checktool
14 14 from common import makedatetimestamp
15 15 import cvsps
16 16
17 17 class convert_cvs(converter_source):
18 18 def __init__(self, ui, path, rev=None):
19 19 super(convert_cvs, self).__init__(ui, path, rev=rev)
20 20
21 21 cvs = os.path.join(path, "CVS")
22 22 if not os.path.exists(cvs):
23 23 raise NoRepo(_("%s does not look like a CVS checkout") % path)
24 24
25 25 checktool('cvs')
26 26
27 27 self.changeset = None
28 28 self.files = {}
29 29 self.tags = {}
30 30 self.lastbranch = {}
31 31 self.socket = None
32 32 self.cvsroot = open(os.path.join(cvs, "Root")).read()[:-1]
33 33 self.cvsrepo = open(os.path.join(cvs, "Repository")).read()[:-1]
34 34 self.encoding = encoding.encoding
35 35
36 36 self._connect()
37 37
38 38 def _parse(self):
39 39 if self.changeset is not None:
40 40 return
41 41 self.changeset = {}
42 42
43 43 maxrev = 0
44 44 if self.rev:
45 45 # TODO: handle tags
46 46 try:
47 47 # patchset number?
48 48 maxrev = int(self.rev)
49 49 except ValueError:
50 50 raise util.Abort(_('revision %s is not a patchset number')
51 51 % self.rev)
52 52
53 53 d = os.getcwd()
54 54 try:
55 55 os.chdir(self.path)
56 56 id = None
57 57
58 58 cache = 'update'
59 59 if not self.ui.configbool('convert', 'cvsps.cache', True):
60 60 cache = None
61 61 db = cvsps.createlog(self.ui, cache=cache)
62 62 db = cvsps.createchangeset(self.ui, db,
63 63 fuzz=int(self.ui.config('convert', 'cvsps.fuzz', 60)),
64 64 mergeto=self.ui.config('convert', 'cvsps.mergeto', None),
65 65 mergefrom=self.ui.config('convert', 'cvsps.mergefrom', None))
66 66
67 67 for cs in db:
68 68 if maxrev and cs.id > maxrev:
69 69 break
70 70 id = str(cs.id)
71 71 cs.author = self.recode(cs.author)
72 72 self.lastbranch[cs.branch] = id
73 73 cs.comment = self.recode(cs.comment)
74 74 if self.ui.configbool('convert', 'localtimezone'):
75 75 cs.date = makedatetimestamp(cs.date[0])
76 76 date = util.datestr(cs.date, '%Y-%m-%d %H:%M:%S %1%2')
77 77 self.tags.update(dict.fromkeys(cs.tags, id))
78 78
79 79 files = {}
80 80 for f in cs.entries:
81 81 files[f.file] = "%s%s" % ('.'.join([str(x)
82 82 for x in f.revision]),
83 83 ['', '(DEAD)'][f.dead])
84 84
85 85 # add current commit to set
86 86 c = commit(author=cs.author, date=date,
87 87 parents=[str(p.id) for p in cs.parents],
88 88 desc=cs.comment, branch=cs.branch or '')
89 89 self.changeset[id] = c
90 90 self.files[id] = files
91 91
92 92 self.heads = self.lastbranch.values()
93 93 finally:
94 94 os.chdir(d)
95 95
96 96 def _connect(self):
97 97 root = self.cvsroot
98 98 conntype = None
99 99 user, host = None, None
100 100 cmd = ['cvs', 'server']
101 101
102 102 self.ui.status(_("connecting to %s\n") % root)
103 103
104 104 if root.startswith(":pserver:"):
105 105 root = root[9:]
106 106 m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:\/]*)(?::(\d*))?(.*)',
107 107 root)
108 108 if m:
109 109 conntype = "pserver"
110 110 user, passw, serv, port, root = m.groups()
111 111 if not user:
112 112 user = "anonymous"
113 113 if not port:
114 114 port = 2401
115 115 else:
116 116 port = int(port)
117 117 format0 = ":pserver:%s@%s:%s" % (user, serv, root)
118 118 format1 = ":pserver:%s@%s:%d%s" % (user, serv, port, root)
119 119
120 120 if not passw:
121 121 passw = "A"
122 122 cvspass = os.path.expanduser("~/.cvspass")
123 123 try:
124 124 pf = open(cvspass)
125 125 for line in pf.read().splitlines():
126 126 part1, part2 = line.split(' ', 1)
127 127 # /1 :pserver:user@example.com:2401/cvsroot/foo
128 128 # Ah<Z
129 129 if part1 == '/1':
130 130 part1, part2 = part2.split(' ', 1)
131 131 format = format1
132 132 # :pserver:user@example.com:/cvsroot/foo Ah<Z
133 133 else:
134 134 format = format0
135 135 if part1 == format:
136 136 passw = part2
137 137 break
138 138 pf.close()
139 139 except IOError, inst:
140 140 if inst.errno != errno.ENOENT:
141 141 if not getattr(inst, 'filename', None):
142 142 inst.filename = cvspass
143 143 raise
144 144
145 145 sck = socket.socket()
146 146 sck.connect((serv, port))
147 147 sck.send("\n".join(["BEGIN AUTH REQUEST", root, user, passw,
148 148 "END AUTH REQUEST", ""]))
149 149 if sck.recv(128) != "I LOVE YOU\n":
150 150 raise util.Abort(_("CVS pserver authentication failed"))
151 151
152 152 self.writep = self.readp = sck.makefile('r+')
153 153
154 154 if not conntype and root.startswith(":local:"):
155 155 conntype = "local"
156 156 root = root[7:]
157 157
158 158 if not conntype:
159 159 # :ext:user@host/home/user/path/to/cvsroot
160 160 if root.startswith(":ext:"):
161 161 root = root[5:]
162 162 m = re.match(r'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
163 163 # Do not take a Windows path "c:\foo\bar" for a connection string
164 164 if os.path.isdir(root) or not m:
165 165 conntype = "local"
166 166 else:
167 167 conntype = "rsh"
168 168 user, host, root = m.group(1), m.group(2), m.group(3)
169 169
170 170 if conntype != "pserver":
171 171 if conntype == "rsh":
172 172 rsh = os.environ.get("CVS_RSH") or "ssh"
173 173 if user:
174 174 cmd = [rsh, '-l', user, host] + cmd
175 175 else:
176 176 cmd = [rsh, host] + cmd
177 177
178 178 # popen2 does not support argument lists under Windows
179 179 cmd = [util.shellquote(arg) for arg in cmd]
180 180 cmd = util.quotecommand(' '.join(cmd))
181 181 self.writep, self.readp = util.popen2(cmd)
182 182
183 183 self.realroot = root
184 184
185 185 self.writep.write("Root %s\n" % root)
186 186 self.writep.write("Valid-responses ok error Valid-requests Mode"
187 187 " M Mbinary E Checked-in Created Updated"
188 188 " Merged Removed\n")
189 189 self.writep.write("valid-requests\n")
190 190 self.writep.flush()
191 191 r = self.readp.readline()
192 192 if not r.startswith("Valid-requests"):
193 193 raise util.Abort(_('unexpected response from CVS server '
194 194 '(expected "Valid-requests", but got %r)')
195 195 % r)
196 196 if "UseUnchanged" in r:
197 197 self.writep.write("UseUnchanged\n")
198 198 self.writep.flush()
199 199 r = self.readp.readline()
200 200
201 201 def getheads(self):
202 202 self._parse()
203 203 return self.heads
204 204
205 205 def getfile(self, name, rev):
206 206
207 207 def chunkedread(fp, count):
208 208 # file-objects returned by socket.makefile() do not handle
209 209 # large read() requests very well.
210 210 chunksize = 65536
211 211 output = StringIO()
212 212 while count > 0:
213 213 data = fp.read(min(count, chunksize))
214 214 if not data:
215 215 raise util.Abort(_("%d bytes missing from remote file")
216 216 % count)
217 217 count -= len(data)
218 218 output.write(data)
219 219 return output.getvalue()
220 220
221 221 self._parse()
222 222 if rev.endswith("(DEAD)"):
223 raise IOError
223 return None, None
224 224
225 225 args = ("-N -P -kk -r %s --" % rev).split()
226 226 args.append(self.cvsrepo + '/' + name)
227 227 for x in args:
228 228 self.writep.write("Argument %s\n" % x)
229 229 self.writep.write("Directory .\n%s\nco\n" % self.realroot)
230 230 self.writep.flush()
231 231
232 232 data = ""
233 233 mode = None
234 234 while True:
235 235 line = self.readp.readline()
236 236 if line.startswith("Created ") or line.startswith("Updated "):
237 237 self.readp.readline() # path
238 238 self.readp.readline() # entries
239 239 mode = self.readp.readline()[:-1]
240 240 count = int(self.readp.readline()[:-1])
241 241 data = chunkedread(self.readp, count)
242 242 elif line.startswith(" "):
243 243 data += line[1:]
244 244 elif line.startswith("M "):
245 245 pass
246 246 elif line.startswith("Mbinary "):
247 247 count = int(self.readp.readline()[:-1])
248 248 data = chunkedread(self.readp, count)
249 249 else:
250 250 if line == "ok\n":
251 251 if mode is None:
252 252 raise util.Abort(_('malformed response from CVS'))
253 253 return (data, "x" in mode and "x" or "")
254 254 elif line.startswith("E "):
255 255 self.ui.warn(_("cvs server: %s\n") % line[2:])
256 256 elif line.startswith("Remove"):
257 257 self.readp.readline()
258 258 else:
259 259 raise util.Abort(_("unknown CVS response: %s") % line)
260 260
261 261 def getchanges(self, rev):
262 262 self._parse()
263 263 return sorted(self.files[rev].iteritems()), {}
264 264
265 265 def getcommit(self, rev):
266 266 self._parse()
267 267 return self.changeset[rev]
268 268
269 269 def gettags(self):
270 270 self._parse()
271 271 return self.tags
272 272
273 273 def getchangedfiles(self, rev, i):
274 274 self._parse()
275 275 return sorted(self.files[rev])
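
chunkedread() above works around file objects returned by socket.makefile()
handling very large read() calls poorly: it pulls exactly 'count' bytes in
bounded chunks and fails loudly on a short read. The same loop against any
file-like object, as a quick standalone check (EOFError stands in for the
util.Abort used in the extension):

    from cStringIO import StringIO

    def chunkedread(fp, count, chunksize=65536):
        output = StringIO()
        while count > 0:
            data = fp.read(min(count, chunksize))
            if not data:
                raise EOFError('%d bytes missing' % count)
            count -= len(data)
            output.write(data)
        return output.getvalue()

    assert len(chunkedread(StringIO('x' * 100000), 70000)) == 70000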
@@ -1,201 +1,206 b''
1 1 # darcs.py - darcs support for the convert extension
2 2 #
3 3 # Copyright 2007-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from common import NoRepo, checktool, commandline, commit, converter_source
9 9 from mercurial.i18n import _
10 10 from mercurial import util
11 import os, shutil, tempfile, re
11 import os, shutil, tempfile, re, errno
12 12
13 13 # The naming drift of ElementTree is fun!
14 14
15 15 try:
16 16 from xml.etree.cElementTree import ElementTree, XMLParser
17 17 except ImportError:
18 18 try:
19 19 from xml.etree.ElementTree import ElementTree, XMLParser
20 20 except ImportError:
21 21 try:
22 22 from elementtree.cElementTree import ElementTree, XMLParser
23 23 except ImportError:
24 24 try:
25 25 from elementtree.ElementTree import ElementTree, XMLParser
26 26 except ImportError:
27 27 pass
28 28
29 29 class darcs_source(converter_source, commandline):
30 30 def __init__(self, ui, path, rev=None):
31 31 converter_source.__init__(self, ui, path, rev=rev)
32 32 commandline.__init__(self, ui, 'darcs')
33 33
34 34 # check for _darcs, ElementTree so that we can easily skip
35 35 # test-convert-darcs if ElementTree is not around
36 36 if not os.path.exists(os.path.join(path, '_darcs')):
37 37 raise NoRepo(_("%s does not look like a darcs repository") % path)
38 38
39 39 checktool('darcs')
40 40 version = self.run0('--version').splitlines()[0].strip()
41 41 if version < '2.1':
42 42 raise util.Abort(_('darcs version 2.1 or newer needed (found %r)') %
43 43 version)
44 44
45 45 if "ElementTree" not in globals():
46 46 raise util.Abort(_("Python ElementTree module is not available"))
47 47
48 48 self.path = os.path.realpath(path)
49 49
50 50 self.lastrev = None
51 51 self.changes = {}
52 52 self.parents = {}
53 53 self.tags = {}
54 54
55 55 # Check darcs repository format
56 56 format = self.format()
57 57 if format:
58 58 if format in ('darcs-1.0', 'hashed'):
59 59 raise NoRepo(_("%s repository format is unsupported, "
60 60 "please upgrade") % format)
61 61 else:
62 62 self.ui.warn(_('failed to detect repository format!'))
63 63
64 64 def before(self):
65 65 self.tmppath = tempfile.mkdtemp(
66 66 prefix='convert-' + os.path.basename(self.path) + '-')
67 67 output, status = self.run('init', repodir=self.tmppath)
68 68 self.checkexit(status)
69 69
70 70 tree = self.xml('changes', xml_output=True, summary=True,
71 71 repodir=self.path)
72 72 tagname = None
73 73 child = None
74 74 for elt in tree.findall('patch'):
75 75 node = elt.get('hash')
76 76 name = elt.findtext('name', '')
77 77 if name.startswith('TAG '):
78 78 tagname = name[4:].strip()
79 79 elif tagname is not None:
80 80 self.tags[tagname] = node
81 81 tagname = None
82 82 self.changes[node] = elt
83 83 self.parents[child] = [node]
84 84 child = node
85 85 self.parents[child] = []
86 86
87 87 def after(self):
88 88 self.ui.debug('cleaning up %s\n' % self.tmppath)
89 89 shutil.rmtree(self.tmppath, ignore_errors=True)
90 90
91 91 def recode(self, s, encoding=None):
92 92 if isinstance(s, unicode):
93 93 # XMLParser returns unicode objects for anything it can't
94 94 # encode into ASCII. We convert them back to str to get
95 95 # recode's normal conversion behavior.
96 96 s = s.encode('latin-1')
97 97 return super(darcs_source, self).recode(s, encoding)
98 98
99 99 def xml(self, cmd, **kwargs):
100 100 # NOTE: darcs is currently encoding agnostic and will print
101 101 # patch metadata byte-for-byte, even in the XML changelog.
102 102 etree = ElementTree()
103 103 # While we are decoding the XML as latin-1 to be as liberal as
104 104 # possible, etree will still raise an exception if any
105 105 # non-printable characters are in the XML changelog.
106 106 parser = XMLParser(encoding='latin-1')
107 107 p = self._run(cmd, **kwargs)
108 108 etree.parse(p.stdout, parser=parser)
109 109 p.wait()
110 110 self.checkexit(p.returncode)
111 111 return etree.getroot()
112 112
113 113 def format(self):
114 114 output, status = self.run('show', 'repo', no_files=True,
115 115 repodir=self.path)
116 116 self.checkexit(status)
117 117 m = re.search(r'^\s*Format:\s*(.*)$', output, re.MULTILINE)
118 118 if not m:
119 119 return None
120 120 return ','.join(sorted(f.strip() for f in m.group(1).split(',')))
121 121
122 122 def manifest(self):
123 123 man = []
124 124 output, status = self.run('show', 'files', no_directories=True,
125 125 repodir=self.tmppath)
126 126 self.checkexit(status)
127 127 for line in output.split('\n'):
128 128 path = line[2:]
129 129 if path:
130 130 man.append(path)
131 131 return man
132 132
133 133 def getheads(self):
134 134 return self.parents[None]
135 135
136 136 def getcommit(self, rev):
137 137 elt = self.changes[rev]
138 138 date = util.strdate(elt.get('local_date'), '%a %b %d %H:%M:%S %Z %Y')
139 139 desc = elt.findtext('name') + '\n' + elt.findtext('comment', '')
140 140 # etree can return unicode objects for name, comment, and author,
141 141 # so recode() is used to ensure str objects are emitted.
142 142 return commit(author=self.recode(elt.get('author')),
143 143 date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
144 144 desc=self.recode(desc).strip(),
145 145 parents=self.parents[rev])
146 146
147 147 def pull(self, rev):
148 148 output, status = self.run('pull', self.path, all=True,
149 149 match='hash %s' % rev,
150 150 no_test=True, no_posthook=True,
151 151 external_merge='/bin/false',
152 152 repodir=self.tmppath)
153 153 if status:
154 154 if output.find('We have conflicts in') == -1:
155 155 self.checkexit(status, output)
156 156 output, status = self.run('revert', all=True, repodir=self.tmppath)
157 157 self.checkexit(status, output)
158 158
159 159 def getchanges(self, rev):
160 160 copies = {}
161 161 changes = []
162 162 man = None
163 163 for elt in self.changes[rev].find('summary').getchildren():
164 164 if elt.tag in ('add_directory', 'remove_directory'):
165 165 continue
166 166 if elt.tag == 'move':
167 167 if man is None:
168 168 man = self.manifest()
169 169 source, dest = elt.get('from'), elt.get('to')
170 170 if source in man:
171 171 # File move
172 172 changes.append((source, rev))
173 173 changes.append((dest, rev))
174 174 copies[dest] = source
175 175 else:
176 176 # Directory move, deduce file moves from manifest
177 177 source = source + '/'
178 178 for f in man:
179 179 if not f.startswith(source):
180 180 continue
181 181 fdest = dest + '/' + f[len(source):]
182 182 changes.append((f, rev))
183 183 changes.append((fdest, rev))
184 184 copies[fdest] = f
185 185 else:
186 186 changes.append((elt.text.strip(), rev))
187 187 self.pull(rev)
188 188 self.lastrev = rev
189 189 return sorted(changes), copies
190 190
191 191 def getfile(self, name, rev):
192 192 if rev != self.lastrev:
193 193 raise util.Abort(_('internal calling inconsistency'))
194 194 path = os.path.join(self.tmppath, name)
195 data = util.readfile(path)
196 mode = os.lstat(path).st_mode
195 try:
196 data = util.readfile(path)
197 mode = os.lstat(path).st_mode
198 except IOError, inst:
199 if inst.errno == errno.ENOENT:
200 return None, None
201 raise
197 202 mode = (mode & 0111) and 'x' or ''
198 203 return data, mode
199 204
200 205 def gettags(self):
201 206 return self.tags
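
The darcs getfile() fix above swallows only ENOENT, mapping "file does not
exist" to the new (None, None) deletion marker and re-raising every other
IOError. The same pattern as a standalone helper (Python 2 syntax, matching
the diff; read_or_none is an illustrative name):

    import errno, os
    from mercurial import util

    def read_or_none(path):
        try:
            data = util.readfile(path)
            mode = os.lstat(path).st_mode
        except IOError, inst:
            if inst.errno == errno.ENOENT:
                return None, None          # missing/deleted file
            raise                          # real I/O error: propagate
        return data, (mode & 0111) and 'x' or ''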
@@ -1,343 +1,343 b''
1 1 # git.py - git support for the convert extension
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import os
9 9 import subprocess
10 10 from mercurial import util, config
11 11 from mercurial.node import hex, nullid
12 12 from mercurial.i18n import _
13 13
14 14 from common import NoRepo, commit, converter_source, checktool
15 15
16 16 class submodule(object):
17 17 def __init__(self, path, node, url):
18 18 self.path = path
19 19 self.node = node
20 20 self.url = url
21 21
22 22 def hgsub(self):
23 23 return "%s = [git]%s" % (self.path, self.url)
24 24
25 25 def hgsubstate(self):
26 26 return "%s %s" % (self.node, self.path)
27 27
28 28 class convert_git(converter_source):
29 29 # Windows does not support the GIT_DIR= construct while other systems
30 30 # cannot remove an environment variable. Just assume none have
31 31 # both issues.
32 32 if util.safehasattr(os, 'unsetenv'):
33 33 def gitopen(self, s, err=None):
34 34 prevgitdir = os.environ.get('GIT_DIR')
35 35 os.environ['GIT_DIR'] = self.path
36 36 try:
37 37 if err == subprocess.PIPE:
38 38 (stdin, stdout, stderr) = util.popen3(s)
39 39 return stdout
40 40 elif err == subprocess.STDOUT:
41 41 return self.popen_with_stderr(s)
42 42 else:
43 43 return util.popen(s, 'rb')
44 44 finally:
45 45 if prevgitdir is None:
46 46 del os.environ['GIT_DIR']
47 47 else:
48 48 os.environ['GIT_DIR'] = prevgitdir
49 49
50 50 def gitpipe(self, s):
51 51 prevgitdir = os.environ.get('GIT_DIR')
52 52 os.environ['GIT_DIR'] = self.path
53 53 try:
54 54 return util.popen3(s)
55 55 finally:
56 56 if prevgitdir is None:
57 57 del os.environ['GIT_DIR']
58 58 else:
59 59 os.environ['GIT_DIR'] = prevgitdir
60 60
61 61 else:
62 62 def gitopen(self, s, err=None):
63 63 if err == subprocess.PIPE:
64 64 (sin, so, se) = util.popen3('GIT_DIR=%s %s' % (self.path, s))
65 65 return so
66 66 elif err == subprocess.STDOUT:
67 67 return self.popen_with_stderr(s)
68 68 else:
69 69 return util.popen('GIT_DIR=%s %s' % (self.path, s), 'rb')
70 70
71 71 def gitpipe(self, s):
72 72 return util.popen3('GIT_DIR=%s %s' % (self.path, s))
73 73
74 74 def popen_with_stderr(self, s):
75 75 p = subprocess.Popen(s, shell=True, bufsize=-1,
76 76 close_fds=util.closefds,
77 77 stdin=subprocess.PIPE,
78 78 stdout=subprocess.PIPE,
79 79 stderr=subprocess.STDOUT,
80 80 universal_newlines=False,
81 81 env=None)
82 82 return p.stdout
83 83
84 84 def gitread(self, s):
85 85 fh = self.gitopen(s)
86 86 data = fh.read()
87 87 return data, fh.close()
88 88
89 89 def __init__(self, ui, path, rev=None):
90 90 super(convert_git, self).__init__(ui, path, rev=rev)
91 91
92 92 if os.path.isdir(path + "/.git"):
93 93 path += "/.git"
94 94 if not os.path.exists(path + "/objects"):
95 95 raise NoRepo(_("%s does not look like a Git repository") % path)
96 96
97 97 checktool('git', 'git')
98 98
99 99 self.path = path
100 100 self.submodules = []
101 101
102 102 self.catfilepipe = self.gitpipe('git cat-file --batch')
103 103
104 104 def after(self):
105 105 for f in self.catfilepipe:
106 106 f.close()
107 107
108 108 def getheads(self):
109 109 if not self.rev:
110 110 heads, ret = self.gitread('git rev-parse --branches --remotes')
111 111 heads = heads.splitlines()
112 112 else:
113 113 heads, ret = self.gitread("git rev-parse --verify %s" % self.rev)
114 114 heads = [heads[:-1]]
115 115 if ret:
116 116 raise util.Abort(_('cannot retrieve git heads'))
117 117 return heads
118 118
119 119 def catfile(self, rev, type):
120 120 if rev == hex(nullid):
121 121 raise IOError
122 122 self.catfilepipe[0].write(rev+'\n')
123 123 self.catfilepipe[0].flush()
124 124 info = self.catfilepipe[1].readline().split()
125 125 if info[1] != type:
126 126 raise util.Abort(_('cannot read %r object at %s') % (type, rev))
127 127 size = int(info[2])
128 128 data = self.catfilepipe[1].read(size)
129 129 if len(data) < size:
130 130 raise util.Abort(_('cannot read %r object at %s: unexpected size')
131 131 % (type, rev))
132 132 # read the trailing newline
133 133 self.catfilepipe[1].read(1)
134 134 return data
135 135
136 136 def getfile(self, name, rev):
137 137 if rev == hex(nullid):
138 raise IOError
138 return None, None
139 139 if name == '.hgsub':
140 140 data = '\n'.join([m.hgsub() for m in self.submoditer()])
141 141 mode = ''
142 142 elif name == '.hgsubstate':
143 143 data = '\n'.join([m.hgsubstate() for m in self.submoditer()])
144 144 mode = ''
145 145 else:
146 146 data = self.catfile(rev, "blob")
147 147 mode = self.modecache[(name, rev)]
148 148 return data, mode
149 149
150 150 def submoditer(self):
151 151 null = hex(nullid)
152 152 for m in sorted(self.submodules, key=lambda p: p.path):
153 153 if m.node != null:
154 154 yield m
155 155
156 156 def parsegitmodules(self, content):
157 157 """Parse the formatted .gitmodules file, example file format:
158 158 [submodule "sub"]\n
159 159 \tpath = sub\n
160 160 \turl = git://giturl\n
161 161 """
162 162 self.submodules = []
163 163 c = config.config()
164 164 # Each item in .gitmodules starts with a \t that can't be parsed
165 165 c.parse('.gitmodules', content.replace('\t',''))
166 166 for sec in c.sections():
167 167 s = c[sec]
168 168 if 'url' in s and 'path' in s:
169 169 self.submodules.append(submodule(s['path'], '', s['url']))
170 170
171 171 def retrievegitmodules(self, version):
172 172 modules, ret = self.gitread("git show %s:%s" % (version, '.gitmodules'))
173 173 if ret:
174 174 raise util.Abort(_('cannot read submodules config file in %s') %
175 175 version)
176 176 self.parsegitmodules(modules)
177 177 for m in self.submodules:
178 178 node, ret = self.gitread("git rev-parse %s:%s" % (version, m.path))
179 179 if ret:
180 180 continue
181 181 m.node = node.strip()
182 182
183 183 def getchanges(self, version):
184 184 self.modecache = {}
185 185 fh = self.gitopen("git diff-tree -z --root -m -r %s" % version)
186 186 changes = []
187 187 seen = set()
188 188 entry = None
189 189 subexists = False
190 190 subdeleted = False
191 191 for l in fh.read().split('\x00'):
192 192 if not entry:
193 193 if not l.startswith(':'):
194 194 continue
195 195 entry = l
196 196 continue
197 197 f = l
198 198 if f not in seen:
199 199 seen.add(f)
200 200 entry = entry.split()
201 201 h = entry[3]
202 202 p = (entry[1] == "100755")
203 203 s = (entry[1] == "120000")
204 204
205 205 if f == '.gitmodules':
206 206 subexists = True
207 207 if entry[4] == 'D':
208 208 subdeleted = True
209 209 changes.append(('.hgsub', hex(nullid)))
210 210 else:
211 211 changes.append(('.hgsub', ''))
212 212 elif entry[1] == '160000' or entry[0] == ':160000':
213 213 subexists = True
214 214 else:
215 215 self.modecache[(f, h)] = (p and "x") or (s and "l") or ""
216 216 changes.append((f, h))
217 217 entry = None
218 218 if fh.close():
219 219 raise util.Abort(_('cannot read changes in %s') % version)
220 220
221 221 if subexists:
222 222 if subdeleted:
223 223 changes.append(('.hgsubstate', hex(nullid)))
224 224 else:
225 225 self.retrievegitmodules(version)
226 226 changes.append(('.hgsubstate', ''))
227 227 return (changes, {})
228 228
229 229 def getcommit(self, version):
230 230 c = self.catfile(version, "commit") # read the commit hash
231 231 end = c.find("\n\n")
232 232 message = c[end + 2:]
233 233 message = self.recode(message)
234 234 l = c[:end].splitlines()
235 235 parents = []
236 236 author = committer = None
237 237 for e in l[1:]:
238 238 n, v = e.split(" ", 1)
239 239 if n == "author":
240 240 p = v.split()
241 241 tm, tz = p[-2:]
242 242 author = " ".join(p[:-2])
243 243 if author[0] == "<": author = author[1:-1]
244 244 author = self.recode(author)
245 245 if n == "committer":
246 246 p = v.split()
247 247 tm, tz = p[-2:]
248 248 committer = " ".join(p[:-2])
249 249 if committer[0] == "<": committer = committer[1:-1]
250 250 committer = self.recode(committer)
251 251 if n == "parent":
252 252 parents.append(v)
253 253
254 254 if committer and committer != author:
255 255 message += "\ncommitter: %s\n" % committer
256 256 tzs, tzh, tzm = tz[-5:-4] + "1", tz[-4:-2], tz[-2:]
257 257 tz = -int(tzs) * (int(tzh) * 3600 + int(tzm))
258 258 date = tm + " " + str(tz)
259 259
260 260 c = commit(parents=parents, date=date, author=author, desc=message,
261 261 rev=version)
262 262 return c
263 263
264 264 def gettags(self):
265 265 tags = {}
266 266 alltags = {}
267 267 fh = self.gitopen('git ls-remote --tags "%s"' % self.path,
268 268 err=subprocess.STDOUT)
269 269 prefix = 'refs/tags/'
270 270
271 271 # Build complete list of tags, both annotated and bare ones
272 272 for line in fh:
273 273 line = line.strip()
274 274 if line.startswith("error:") or line.startswith("fatal:"):
275 275 raise util.Abort(_('cannot read tags from %s') % self.path)
276 276 node, tag = line.split(None, 1)
277 277 if not tag.startswith(prefix):
278 278 continue
279 279 alltags[tag[len(prefix):]] = node
280 280 if fh.close():
281 281 raise util.Abort(_('cannot read tags from %s') % self.path)
282 282
283 283 # Filter out tag objects for annotated tag refs
284 284 for tag in alltags:
285 285 if tag.endswith('^{}'):
286 286 tags[tag[:-3]] = alltags[tag]
287 287 else:
288 288 if tag + '^{}' in alltags:
289 289 continue
290 290 else:
291 291 tags[tag] = alltags[tag]
292 292
293 293 return tags
294 294
295 295 def getchangedfiles(self, version, i):
296 296 changes = []
297 297 if i is None:
298 298 fh = self.gitopen("git diff-tree --root -m -r %s" % version)
299 299 for l in fh:
300 300 if "\t" not in l:
301 301 continue
302 302 m, f = l[:-1].split("\t")
303 303 changes.append(f)
304 304 else:
305 305 fh = self.gitopen('git diff-tree --name-only --root -r %s '
306 306 '"%s^%s" --' % (version, version, i + 1))
307 307 changes = [f.rstrip('\n') for f in fh]
308 308 if fh.close():
309 309 raise util.Abort(_('cannot read changes in %s') % version)
310 310
311 311 return changes
312 312
313 313 def getbookmarks(self):
314 314 bookmarks = {}
315 315
316 316 # Interesting references in git are prefixed
317 317 prefix = 'refs/heads/'
318 318 prefixlen = len(prefix)
319 319
320 320 # factor two commands
321 321 gitcmd = { 'remote/': 'git ls-remote --heads origin',
322 322 '': 'git show-ref'}
323 323
324 324 # Origin heads
325 325 for reftype in gitcmd:
326 326 try:
327 327 fh = self.gitopen(gitcmd[reftype], err=subprocess.PIPE)
328 328 for line in fh:
329 329 line = line.strip()
330 330 rev, name = line.split(None, 1)
331 331 if not name.startswith(prefix):
332 332 continue
333 333 name = '%s%s' % (reftype, name[prefixlen:])
334 334 bookmarks[name] = rev
335 335 except Exception:
336 336 pass
337 337
338 338 return bookmarks
339 339
340 340 def checkrevformat(self, revstr, mapname='splicemap'):
341 341 """ git revision string is a 40 byte hex """
342 342 self.checkhexformat(revstr, mapname)
343 343
@@ -1,338 +1,337 b''
1 1 # gnuarch.py - GNU Arch support for the convert extension
2 2 #
3 3 # Copyright 2008, 2009 Aleix Conchillo Flaque <aleix@member.fsf.org>
4 4 # and others
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from common import NoRepo, commandline, commit, converter_source
10 10 from mercurial.i18n import _
11 11 from mercurial import encoding, util
12 12 import os, shutil, tempfile, stat
13 13 from email.Parser import Parser
14 14
15 15 class gnuarch_source(converter_source, commandline):
16 16
17 17 class gnuarch_rev(object):
18 18 def __init__(self, rev):
19 19 self.rev = rev
20 20 self.summary = ''
21 21 self.date = None
22 22 self.author = ''
23 23 self.continuationof = None
24 24 self.add_files = []
25 25 self.mod_files = []
26 26 self.del_files = []
27 27 self.ren_files = {}
28 28 self.ren_dirs = {}
29 29
30 30 def __init__(self, ui, path, rev=None):
31 31 super(gnuarch_source, self).__init__(ui, path, rev=rev)
32 32
33 33 if not os.path.exists(os.path.join(path, '{arch}')):
34 34 raise NoRepo(_("%s does not look like a GNU Arch repository")
35 35 % path)
36 36
37 37 # Could use checktool, but we want to check for baz or tla.
38 38 self.execmd = None
39 39 if util.findexe('baz'):
40 40 self.execmd = 'baz'
41 41 else:
42 42 if util.findexe('tla'):
43 43 self.execmd = 'tla'
44 44 else:
45 45 raise util.Abort(_('cannot find a GNU Arch tool'))
46 46
47 47 commandline.__init__(self, ui, self.execmd)
48 48
49 49 self.path = os.path.realpath(path)
50 50 self.tmppath = None
51 51
52 52 self.treeversion = None
53 53 self.lastrev = None
54 54 self.changes = {}
55 55 self.parents = {}
56 56 self.tags = {}
57 57 self.catlogparser = Parser()
58 58 self.encoding = encoding.encoding
59 59 self.archives = []
60 60
61 61 def before(self):
62 62 # Get registered archives
63 63 self.archives = [i.rstrip('\n')
64 64 for i in self.runlines0('archives', '-n')]
65 65
66 66 if self.execmd == 'tla':
67 67 output = self.run0('tree-version', self.path)
68 68 else:
69 69 output = self.run0('tree-version', '-d', self.path)
70 70 self.treeversion = output.strip()
71 71
72 72 # Get name of temporary directory
73 73 version = self.treeversion.split('/')
74 74 self.tmppath = os.path.join(tempfile.gettempdir(),
75 75 'hg-%s' % version[1])
76 76
77 77 # Generate parents dictionary
78 78 self.parents[None] = []
79 79 treeversion = self.treeversion
80 80 child = None
81 81 while treeversion:
82 82 self.ui.status(_('analyzing tree version %s...\n') % treeversion)
83 83
84 84 archive = treeversion.split('/')[0]
85 85 if archive not in self.archives:
86 86 self.ui.status(_('tree analysis stopped because it points to '
87 87 'an unregistered archive %s...\n') % archive)
88 88 break
89 89
90 90 # Get the complete list of revisions for that tree version
91 91 output, status = self.runlines('revisions', '-r', '-f', treeversion)
92 92 self.checkexit(status, 'failed retrieving revisions for %s'
93 93 % treeversion)
94 94
95 95 # No new iteration unless a revision has a continuation-of header
96 96 treeversion = None
97 97
98 98 for l in output:
99 99 rev = l.strip()
100 100 self.changes[rev] = self.gnuarch_rev(rev)
101 101 self.parents[rev] = []
102 102
103 103 # Read author, date and summary
104 104 catlog, status = self.run('cat-log', '-d', self.path, rev)
105 105 if status:
106 106 catlog = self.run0('cat-archive-log', rev)
107 107 self._parsecatlog(catlog, rev)
108 108
109 109 # Populate the parents map
110 110 self.parents[child].append(rev)
111 111
112 112 # Keep track of the current revision as the child of the next
113 113 # revision scanned
114 114 child = rev
115 115
116 116 # Check if we have to follow the usual incremental history
117 117 # or if we have to 'jump' to a different treeversion given
118 118 # by the continuation-of header.
119 119 if self.changes[rev].continuationof:
120 120 treeversion = '--'.join(
121 121 self.changes[rev].continuationof.split('--')[:-1])
122 122 break
123 123
124 124 # If we reached a base-0 revision w/o any continuation-of
125 125 # header, it means the tree history ends here.
126 126 if rev[-6:] == 'base-0':
127 127 break
128 128
129 129 def after(self):
130 130 self.ui.debug('cleaning up %s\n' % self.tmppath)
131 131 shutil.rmtree(self.tmppath, ignore_errors=True)
132 132
133 133 def getheads(self):
134 134 return self.parents[None]
135 135
136 136 def getfile(self, name, rev):
137 137 if rev != self.lastrev:
138 138 raise util.Abort(_('internal calling inconsistency'))
139 139
140 # Raise IOError if necessary (i.e. deleted files).
141 140 if not os.path.lexists(os.path.join(self.tmppath, name)):
142 raise IOError
141 return None, None
143 142
144 143 return self._getfile(name, rev)
145 144
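
This hunk is the pattern the changeset applies across all converter sources: getfile() now returns (None, None) for files that are absent in a revision (deleted or renamed away) instead of raising IOError. A minimal sketch of a consumer written against the new convention; sink, markremoved and writefile are hypothetical names, not part of the convert API:

def copyfile(source, sink, name, rev):
    # (None, None) now means "no such file in this revision"
    data, mode = source.getfile(name, rev)
    if data is None:
        sink.markremoved(name)            # hypothetical sink method
    else:
        sink.writefile(name, data, mode)  # hypothetical sink method
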
146 145 def getchanges(self, rev):
147 146 self._update(rev)
148 147 changes = []
149 148 copies = {}
150 149
151 150 for f in self.changes[rev].add_files:
152 151 changes.append((f, rev))
153 152
154 153 for f in self.changes[rev].mod_files:
155 154 changes.append((f, rev))
156 155
157 156 for f in self.changes[rev].del_files:
158 157 changes.append((f, rev))
159 158
160 159 for src in self.changes[rev].ren_files:
161 160 to = self.changes[rev].ren_files[src]
162 161 changes.append((src, rev))
163 162 changes.append((to, rev))
164 163 copies[to] = src
165 164
166 165 for src in self.changes[rev].ren_dirs:
167 166 to = self.changes[rev].ren_dirs[src]
168 167 chgs, cps = self._rendirchanges(src, to)
169 168 changes += [(f, rev) for f in chgs]
170 169 copies.update(cps)
171 170
172 171 self.lastrev = rev
173 172 return sorted(set(changes)), copies
174 173
175 174 def getcommit(self, rev):
176 175 changes = self.changes[rev]
177 176 return commit(author=changes.author, date=changes.date,
178 177 desc=changes.summary, parents=self.parents[rev], rev=rev)
179 178
180 179 def gettags(self):
181 180 return self.tags
182 181
183 182 def _execute(self, cmd, *args, **kwargs):
184 183 cmdline = [self.execmd, cmd]
185 184 cmdline += args
186 185 cmdline = [util.shellquote(arg) for arg in cmdline]
187 186 cmdline += ['>', os.devnull, '2>', os.devnull]
188 187 cmdline = util.quotecommand(' '.join(cmdline))
189 188 self.ui.debug(cmdline, '\n')
190 189 return os.system(cmdline)
191 190
192 191 def _update(self, rev):
193 192 self.ui.debug('applying revision %s...\n' % rev)
194 193 changeset, status = self.runlines('replay', '-d', self.tmppath,
195 194 rev)
196 195 if status:
197 196 # Something went wrong while merging (baz or tla
198 197 # issue?), get latest revision and try from there
199 198 shutil.rmtree(self.tmppath, ignore_errors=True)
200 199 self._obtainrevision(rev)
201 200 else:
202 201 old_rev = self.parents[rev][0]
203 202 self.ui.debug('computing changeset between %s and %s...\n'
204 203 % (old_rev, rev))
205 204 self._parsechangeset(changeset, rev)
206 205
207 206 def _getfile(self, name, rev):
208 207 mode = os.lstat(os.path.join(self.tmppath, name)).st_mode
209 208 if stat.S_ISLNK(mode):
210 209 data = os.readlink(os.path.join(self.tmppath, name))
211 210 mode = mode and 'l' or ''
212 211 else:
213 212 data = open(os.path.join(self.tmppath, name), 'rb').read()
214 213 mode = (mode & 0111) and 'x' or ''
215 214 return data, mode
216 215
217 216 def _exclude(self, name):
218 217 exclude = ['{arch}', '.arch-ids', '.arch-inventory']
219 218 for exc in exclude:
220 219 if name.find(exc) != -1:
221 220 return True
222 221 return False
223 222
224 223 def _readcontents(self, path):
225 224 files = []
226 225 contents = os.listdir(path)
227 226 while len(contents) > 0:
228 227 c = contents.pop()
229 228 p = os.path.join(path, c)
230 229 # os.walk could be used, but here we avoid internal GNU
231 230 # Arch files and directories, thus saving a lot of time.
232 231 if not self._exclude(p):
233 232 if os.path.isdir(p):
234 233 contents += [os.path.join(c, f) for f in os.listdir(p)]
235 234 else:
236 235 files.append(c)
237 236 return files
238 237
239 238 def _rendirchanges(self, src, dest):
240 239 changes = []
241 240 copies = {}
242 241 files = self._readcontents(os.path.join(self.tmppath, dest))
243 242 for f in files:
244 243 s = os.path.join(src, f)
245 244 d = os.path.join(dest, f)
246 245 changes.append(s)
247 246 changes.append(d)
248 247 copies[d] = s
249 248 return changes, copies
250 249
251 250 def _obtainrevision(self, rev):
252 251 self.ui.debug('obtaining revision %s...\n' % rev)
253 252 output = self._execute('get', rev, self.tmppath)
254 253 self.checkexit(output)
255 254 self.ui.debug('analyzing revision %s...\n' % rev)
256 255 files = self._readcontents(self.tmppath)
257 256 self.changes[rev].add_files += files
258 257
259 258 def _stripbasepath(self, path):
260 259 if path.startswith('./'):
261 260 return path[2:]
262 261 return path
263 262
264 263 def _parsecatlog(self, data, rev):
265 264 try:
266 265 catlog = self.catlogparser.parsestr(data)
267 266
268 267 # Commit date
269 268 self.changes[rev].date = util.datestr(
270 269 util.strdate(catlog['Standard-date'],
271 270 '%Y-%m-%d %H:%M:%S'))
272 271
273 272 # Commit author
274 273 self.changes[rev].author = self.recode(catlog['Creator'])
275 274
276 275 # Commit description
277 276 self.changes[rev].summary = '\n\n'.join((catlog['Summary'],
278 277 catlog.get_payload()))
279 278 self.changes[rev].summary = self.recode(self.changes[rev].summary)
280 279
281 280 # Commit revision origin when dealing with a branch or tag
282 281 if 'Continuation-of' in catlog:
283 282 self.changes[rev].continuationof = self.recode(
284 283 catlog['Continuation-of'])
285 284 except Exception:
286 285 raise util.Abort(_('could not parse cat-log of %s') % rev)
287 286
288 287 def _parsechangeset(self, data, rev):
289 288 for l in data:
290 289 l = l.strip()
291 290 # Added file (ignore added directory)
292 291 if l.startswith('A') and not l.startswith('A/'):
293 292 file = self._stripbasepath(l[1:].strip())
294 293 if not self._exclude(file):
295 294 self.changes[rev].add_files.append(file)
296 295 # Deleted file (ignore deleted directory)
297 296 elif l.startswith('D') and not l.startswith('D/'):
298 297 file = self._stripbasepath(l[1:].strip())
299 298 if not self._exclude(file):
300 299 self.changes[rev].del_files.append(file)
301 300 # Modified binary file
302 301 elif l.startswith('Mb'):
303 302 file = self._stripbasepath(l[2:].strip())
304 303 if not self._exclude(file):
305 304 self.changes[rev].mod_files.append(file)
306 305 # Modified link
307 306 elif l.startswith('M->'):
308 307 file = self._stripbasepath(l[3:].strip())
309 308 if not self._exclude(file):
310 309 self.changes[rev].mod_files.append(file)
311 310 # Modified file
312 311 elif l.startswith('M'):
313 312 file = self._stripbasepath(l[1:].strip())
314 313 if not self._exclude(file):
315 314 self.changes[rev].mod_files.append(file)
316 315 # Renamed file (or link)
317 316 elif l.startswith('=>'):
318 317 files = l[2:].strip().split(' ')
319 318 if len(files) == 1:
320 319 files = l[2:].strip().split('\t')
321 320 src = self._stripbasepath(files[0])
322 321 dst = self._stripbasepath(files[1])
323 322 if not self._exclude(src) and not self._exclude(dst):
324 323 self.changes[rev].ren_files[src] = dst
325 324 # Conversion from file to link or from link to file (modified)
326 325 elif l.startswith('ch'):
327 326 file = self._stripbasepath(l[2:].strip())
328 327 if not self._exclude(file):
329 328 self.changes[rev].mod_files.append(file)
330 329 # Renamed directory
331 330 elif l.startswith('/>'):
332 331 dirs = l[2:].strip().split(' ')
333 332 if len(dirs) == 1:
334 333 dirs = l[2:].strip().split('\t')
335 334 src = self._stripbasepath(dirs[0])
336 335 dst = self._stripbasepath(dirs[1])
337 336 if not self._exclude(src) and not self._exclude(dst):
338 337 self.changes[rev].ren_dirs[src] = dst
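
For reference, _parsechangeset() above dispatches purely on the line prefixes that baz/tla emit in a changeset. Below is a toy classifier mirroring those checks, for illustration only; note that 'Mb' and 'M->' must be tested before the bare 'M', exactly as in the method:

def classify(line):
    """Mirror of the prefix checks in _parsechangeset (illustration only;
    the real method also strips './' prefixes and excludes {arch}
    metadata)."""
    l = line.strip()
    if l.startswith('A') and not l.startswith('A/'):
        return 'added file'
    elif l.startswith('D') and not l.startswith('D/'):
        return 'deleted file'
    elif l.startswith('Mb'):
        return 'modified binary file'
    elif l.startswith('M->'):
        return 'modified link'
    elif l.startswith('M'):
        return 'modified file'
    elif l.startswith('=>'):
        return 'renamed file'
    elif l.startswith('ch'):
        return 'file/link conversion'
    elif l.startswith('/>'):
        return 'renamed directory'
    return 'ignored'

# classify('Mb images/logo.png') -> 'modified binary file'
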
@@ -1,470 +1,472 @@
1 1 # hg.py - hg backend for convert extension
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 # Notes for hg->hg conversion:
9 9 #
10 10 # * Old versions of Mercurial didn't trim the whitespace from the ends
11 11 # of commit messages, but new versions do. Changesets created by
12 12 # those older versions, then converted, may thus have different
13 13 # hashes for changesets that are otherwise identical.
14 14 #
15 15 # * Using "--config convert.hg.saverev=true" will make the source
16 16 # identifier to be stored in the converted revision. This will cause
17 17 # the converted revision to have a different identity than the
18 18 # source.
19 19
20 20
21 21 import os, time, cStringIO
22 22 from mercurial.i18n import _
23 23 from mercurial.node import bin, hex, nullid
24 24 from mercurial import hg, util, context, bookmarks, error, scmutil
25 25
26 26 from common import NoRepo, commit, converter_source, converter_sink
27 27
28 28 import re
29 29 sha1re = re.compile(r'\b[0-9a-f]{6,40}\b')
30 30
31 31 class mercurial_sink(converter_sink):
32 32 def __init__(self, ui, path):
33 33 converter_sink.__init__(self, ui, path)
34 34 self.branchnames = ui.configbool('convert', 'hg.usebranchnames', True)
35 35 self.clonebranches = ui.configbool('convert', 'hg.clonebranches', False)
36 36 self.tagsbranch = ui.config('convert', 'hg.tagsbranch', 'default')
37 37 self.lastbranch = None
38 38 if os.path.isdir(path) and len(os.listdir(path)) > 0:
39 39 try:
40 40 self.repo = hg.repository(self.ui, path)
41 41 if not self.repo.local():
42 42 raise NoRepo(_('%s is not a local Mercurial repository')
43 43 % path)
44 44 except error.RepoError, err:
45 45 ui.traceback()
46 46 raise NoRepo(err.args[0])
47 47 else:
48 48 try:
49 49 ui.status(_('initializing destination %s repository\n') % path)
50 50 self.repo = hg.repository(self.ui, path, create=True)
51 51 if not self.repo.local():
52 52 raise NoRepo(_('%s is not a local Mercurial repository')
53 53 % path)
54 54 self.created.append(path)
55 55 except error.RepoError:
56 56 ui.traceback()
57 57 raise NoRepo(_("could not create hg repository %s as sink")
58 58 % path)
59 59 self.lock = None
60 60 self.wlock = None
61 61 self.filemapmode = False
62 62
63 63 def before(self):
64 64 self.ui.debug('run hg sink pre-conversion action\n')
65 65 self.wlock = self.repo.wlock()
66 66 self.lock = self.repo.lock()
67 67
68 68 def after(self):
69 69 self.ui.debug('run hg sink post-conversion action\n')
70 70 if self.lock:
71 71 self.lock.release()
72 72 if self.wlock:
73 73 self.wlock.release()
74 74
75 75 def revmapfile(self):
76 76 return self.repo.join("shamap")
77 77
78 78 def authorfile(self):
79 79 return self.repo.join("authormap")
80 80
81 81 def setbranch(self, branch, pbranches):
82 82 if not self.clonebranches:
83 83 return
84 84
85 85 setbranch = (branch != self.lastbranch)
86 86 self.lastbranch = branch
87 87 if not branch:
88 88 branch = 'default'
89 89 pbranches = [(b[0], b[1] and b[1] or 'default') for b in pbranches]
90 90 pbranch = pbranches and pbranches[0][1] or 'default'
91 91
92 92 branchpath = os.path.join(self.path, branch)
93 93 if setbranch:
94 94 self.after()
95 95 try:
96 96 self.repo = hg.repository(self.ui, branchpath)
97 97 except Exception:
98 98 self.repo = hg.repository(self.ui, branchpath, create=True)
99 99 self.before()
100 100
101 101 # pbranches may bring revisions from other branches (merge parents)
102 102 # Make sure we have them, or pull them.
103 103 missings = {}
104 104 for b in pbranches:
105 105 try:
106 106 self.repo.lookup(b[0])
107 107 except Exception:
108 108 missings.setdefault(b[1], []).append(b[0])
109 109
110 110 if missings:
111 111 self.after()
112 112 for pbranch, heads in sorted(missings.iteritems()):
113 113 pbranchpath = os.path.join(self.path, pbranch)
114 114 prepo = hg.peer(self.ui, {}, pbranchpath)
115 115 self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch))
116 116 self.repo.pull(prepo, [prepo.lookup(h) for h in heads])
117 117 self.before()
118 118
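
setbranch() only takes effect when convert.hg.clonebranches is enabled; in that mode every named branch is converted into its own repository under the destination path, and merge parents living on other branches are pulled in from the sibling clones. An illustrative invocation (paths made up):

    hg convert --config convert.hg.clonebranches=true src-repo dst
    # -> one repository per branch: dst/default, dst/stable, ...
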
119 119 def _rewritetags(self, source, revmap, data):
120 120 fp = cStringIO.StringIO()
121 121 for line in data.splitlines():
122 122 s = line.split(' ', 1)
123 123 if len(s) != 2:
124 124 continue
125 125 revid = revmap.get(source.lookuprev(s[0]))
126 126 if not revid:
127 127 continue
128 128 fp.write('%s %s\n' % (revid, s[1]))
129 129 return fp.getvalue()
130 130
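
_rewritetags() maps each '<node> <tag>' line of a source .hgtags file through the conversion revmap and drops tags whose revision was not converted. A small illustration with made-up hashes and a stub source providing the one method used:

class stubsource(object):
    """Illustration only: the single method _rewritetags calls."""
    def lookuprev(self, rev):
        return rev

revmap = {'0a1b2c': '9f8e7d'}
data = '0a1b2c v1.0\nffffff v2.0\n'
# sink._rewritetags(stubsource(), revmap, data) -> '9f8e7d v1.0\n'
# (the v2.0 line is dropped: 'ffffff' was not converted)
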
131 131 def putcommit(self, files, copies, parents, commit, source, revmap):
132 132
133 133 files = dict(files)
134 134 def getfilectx(repo, memctx, f):
135 135 v = files[f]
136 136 data, mode = source.getfile(f, v)
137 if data is None:
138 return None
137 139 if f == '.hgtags':
138 140 data = self._rewritetags(source, revmap, data)
139 141 return context.memfilectx(self.repo, f, data, 'l' in mode,
140 142 'x' in mode, copies.get(f))
141 143
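
The None check added here depends on the memctx side of this changeset: a file callback may now return None to tell memctx that the file is removed in the commit being built, instead of raising IOError. A minimal sketch of a callback written against that convention (filedata is a hypothetical mapping; memfilectx's optional link/exec/copy arguments are left at their defaults):

def makefilectxfn(filedata):
    # filedata: {path: bytes or None}; None means "removed here"
    def getfilectx(repo, memctx, path):
        data = filedata[path]
        if data is None:
            return None   # memctx records the file as removed
        return context.memfilectx(repo, path, data)
    return getfilectx
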
142 144 pl = []
143 145 for p in parents:
144 146 if p not in pl:
145 147 pl.append(p)
146 148 parents = pl
147 149 nparents = len(parents)
148 150 if self.filemapmode and nparents == 1:
149 151 m1node = self.repo.changelog.read(bin(parents[0]))[0]
150 152 parent = parents[0]
151 153
152 154 if len(parents) < 2:
153 155 parents.append(nullid)
154 156 if len(parents) < 2:
155 157 parents.append(nullid)
156 158 p2 = parents.pop(0)
157 159
158 160 text = commit.desc
159 161
160 162 sha1s = re.findall(sha1re, text)
161 163 for sha1 in sha1s:
162 164 oldrev = source.lookuprev(sha1)
163 165 newrev = revmap.get(oldrev)
164 166 if newrev is not None:
165 167 text = text.replace(sha1, newrev[:len(sha1)])
166 168
167 169 extra = commit.extra.copy()
168 170
169 171 for label in ('source', 'transplant_source', 'rebase_source'):
170 172 node = extra.get(label)
171 173
172 174 if node is None:
173 175 continue
174 176
175 177 # Only transplant stores its reference in binary
176 178 if label == 'transplant_source':
177 179 node = hex(node)
178 180
179 181 newrev = revmap.get(node)
180 182 if newrev is not None:
181 183 if label == 'transplant_source':
182 184 newrev = bin(newrev)
183 185
184 186 extra[label] = newrev
185 187
186 188 if self.branchnames and commit.branch:
187 189 extra['branch'] = commit.branch
188 190 if commit.rev:
189 191 extra['convert_revision'] = commit.rev
190 192
191 193 while parents:
192 194 p1 = p2
193 195 p2 = parents.pop(0)
194 196 ctx = context.memctx(self.repo, (p1, p2), text, files.keys(),
195 197 getfilectx, commit.author, commit.date, extra)
196 198 self.repo.commitctx(ctx)
197 199 text = "(octopus merge fixup)\n"
198 200 p2 = hex(self.repo.changelog.tip())
199 201
200 202 if self.filemapmode and nparents == 1:
201 203 man = self.repo.manifest
202 204 mnode = self.repo.changelog.read(bin(p2))[0]
203 205 closed = 'close' in commit.extra
204 206 if not closed and not man.cmp(m1node, man.revision(mnode)):
205 207 self.ui.status(_("filtering out empty revision\n"))
206 208 self.repo.rollback(force=True)
207 209 return parent
208 210 return p2
209 211
210 212 def puttags(self, tags):
211 213 try:
212 214 parentctx = self.repo[self.tagsbranch]
213 215 tagparent = parentctx.node()
214 216 except error.RepoError:
215 217 parentctx = None
216 218 tagparent = nullid
217 219
218 220 oldlines = set()
219 221 for branch, heads in self.repo.branchmap().iteritems():
220 222 for h in heads:
221 223 if '.hgtags' in self.repo[h]:
222 224 oldlines.update(
223 225 set(self.repo[h]['.hgtags'].data().splitlines(True)))
224 226 oldlines = sorted(list(oldlines))
225 227
226 228 newlines = sorted([("%s %s\n" % (tags[tag], tag)) for tag in tags])
227 229 if newlines == oldlines:
228 230 return None, None
229 231
230 232 # if the old and new tags match, then there is nothing to update
231 233 oldtags = set()
232 234 newtags = set()
233 235 for line in oldlines:
234 236 s = line.strip().split(' ', 1)
235 237 if len(s) != 2:
236 238 continue
237 239 oldtags.add(s[1])
238 240 for line in newlines:
239 241 s = line.strip().split(' ', 1)
240 242 if len(s) != 2:
241 243 continue
242 244 if s[1] not in oldtags:
243 245 newtags.add(s[1].strip())
244 246
245 247 if not newtags:
246 248 return None, None
247 249
248 250 data = "".join(newlines)
249 251 def getfilectx(repo, memctx, f):
250 252 return context.memfilectx(repo, f, data, False, False, None)
251 253
252 254 self.ui.status(_("updating tags\n"))
253 255 date = "%s 0" % int(time.mktime(time.gmtime()))
254 256 extra = {'branch': self.tagsbranch}
255 257 ctx = context.memctx(self.repo, (tagparent, None), "update tags",
256 258 [".hgtags"], getfilectx, "convert-repo", date,
257 259 extra)
258 260 self.repo.commitctx(ctx)
259 261 return hex(self.repo.changelog.tip()), hex(tagparent)
260 262
261 263 def setfilemapmode(self, active):
262 264 self.filemapmode = active
263 265
264 266 def putbookmarks(self, updatedbookmark):
265 267 if not len(updatedbookmark):
266 268 return
267 269
268 270 self.ui.status(_("updating bookmarks\n"))
269 271 destmarks = self.repo._bookmarks
270 272 for bookmark in updatedbookmark:
271 273 destmarks[bookmark] = bin(updatedbookmark[bookmark])
272 274 destmarks.write()
273 275
274 276 def hascommitfrommap(self, rev):
275 277 # the exact semantics of clonebranches is unclear so we can't say no
276 278 return rev in self.repo or self.clonebranches
277 279
278 280 def hascommitforsplicemap(self, rev):
279 281 if rev not in self.repo and self.clonebranches:
280 282 raise util.Abort(_('revision %s not found in destination '
281 283 'repository (lookups with clonebranches=true '
282 284 'are not implemented)') % rev)
283 285 return rev in self.repo
284 286
285 287 class mercurial_source(converter_source):
286 288 def __init__(self, ui, path, rev=None):
287 289 converter_source.__init__(self, ui, path, rev)
288 290 self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors', False)
289 291 self.ignored = set()
290 292 self.saverev = ui.configbool('convert', 'hg.saverev', False)
291 293 try:
292 294 self.repo = hg.repository(self.ui, path)
293 295 # try to provoke an exception if this isn't really a hg
294 296 # repo, but some other bogus compatible-looking url
295 297 if not self.repo.local():
296 298 raise error.RepoError
297 299 except error.RepoError:
298 300 ui.traceback()
299 301 raise NoRepo(_("%s is not a local Mercurial repository") % path)
300 302 self.lastrev = None
301 303 self.lastctx = None
302 304 self._changescache = None
303 305 self.convertfp = None
304 306 # Restrict converted revisions to startrev descendants
305 307 startnode = ui.config('convert', 'hg.startrev')
306 308 hgrevs = ui.config('convert', 'hg.revs')
307 309 if hgrevs is None:
308 310 if startnode is not None:
309 311 try:
310 312 startnode = self.repo.lookup(startnode)
311 313 except error.RepoError:
312 314 raise util.Abort(_('%s is not a valid start revision')
313 315 % startnode)
314 316 startrev = self.repo.changelog.rev(startnode)
315 317 children = {startnode: 1}
316 318 for r in self.repo.changelog.descendants([startrev]):
317 319 children[self.repo.changelog.node(r)] = 1
318 320 self.keep = children.__contains__
319 321 else:
320 322 self.keep = util.always
321 323 if rev:
322 324 self._heads = [self.repo[rev].node()]
323 325 else:
324 326 self._heads = self.repo.heads()
325 327 else:
326 328 if rev or startnode is not None:
327 329 raise util.Abort(_('hg.revs cannot be combined with '
328 330 'hg.startrev or --rev'))
329 331 nodes = set()
330 332 parents = set()
331 333 for r in scmutil.revrange(self.repo, [hgrevs]):
332 334 ctx = self.repo[r]
333 335 nodes.add(ctx.node())
334 336 parents.update(p.node() for p in ctx.parents())
335 337 self.keep = nodes.__contains__
336 338 self._heads = nodes - parents
337 339
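
The constructor above supports two mutually exclusive ways of limiting an hg->hg conversion: convert.hg.startrev (convert only descendants of the given revision) and convert.hg.revs (an arbitrary revset, which may not be combined with hg.startrev or --rev). Illustrative invocations:

    hg convert --config convert.hg.startrev=1a2b3c src-repo dst-repo
    hg convert --config convert.hg.revs='tip~10::tip' src-repo dst-repo
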
338 340 def changectx(self, rev):
339 341 if self.lastrev != rev:
340 342 self.lastctx = self.repo[rev]
341 343 self.lastrev = rev
342 344 return self.lastctx
343 345
344 346 def parents(self, ctx):
345 347 return [p for p in ctx.parents() if p and self.keep(p.node())]
346 348
347 349 def getheads(self):
348 350 return [hex(h) for h in self._heads if self.keep(h)]
349 351
350 352 def getfile(self, name, rev):
351 353 try:
352 354 fctx = self.changectx(rev)[name]
353 355 return fctx.data(), fctx.flags()
354 except error.LookupError, err:
355 raise IOError(err)
356 except error.LookupError:
357 return None, None
356 358
357 359 def getchanges(self, rev):
358 360 ctx = self.changectx(rev)
359 361 parents = self.parents(ctx)
360 362 if not parents:
361 363 files = sorted(ctx.manifest())
362 364 # getcopies() is not needed for roots, but it is a simple way to
363 365 # detect missing revlogs and abort on errors or populate
364 366 # self.ignored
365 367 self.getcopies(ctx, parents, files)
366 368 return [(f, rev) for f in files if f not in self.ignored], {}
367 369 if self._changescache and self._changescache[0] == rev:
368 370 m, a, r = self._changescache[1]
369 371 else:
370 372 m, a, r = self.repo.status(parents[0].node(), ctx.node())[:3]
371 373 # getcopies() detects missing revlogs early, run it before
372 374 # filtering the changes.
373 375 copies = self.getcopies(ctx, parents, m + a)
374 376 changes = [(name, rev) for name in m + a + r
375 377 if name not in self.ignored]
376 378 return sorted(changes), copies
377 379
378 380 def getcopies(self, ctx, parents, files):
379 381 copies = {}
380 382 for name in files:
381 383 if name in self.ignored:
382 384 continue
383 385 try:
384 386 copysource, _copynode = ctx.filectx(name).renamed()
385 387 if copysource in self.ignored:
386 388 continue
387 389 # Ignore copy sources not in parent revisions
388 390 found = False
389 391 for p in parents:
390 392 if copysource in p:
391 393 found = True
392 394 break
393 395 if not found:
394 396 continue
395 397 copies[name] = copysource
396 398 except TypeError:
397 399 pass
398 400 except error.LookupError, e:
399 401 if not self.ignoreerrors:
400 402 raise
401 403 self.ignored.add(name)
402 404 self.ui.warn(_('ignoring: %s\n') % e)
403 405 return copies
404 406
405 407 def getcommit(self, rev):
406 408 ctx = self.changectx(rev)
407 409 parents = [p.hex() for p in self.parents(ctx)]
408 410 if self.saverev:
409 411 crev = rev
410 412 else:
411 413 crev = None
412 414 return commit(author=ctx.user(),
413 415 date=util.datestr(ctx.date(), '%Y-%m-%d %H:%M:%S %1%2'),
414 416 desc=ctx.description(), rev=crev, parents=parents,
415 417 branch=ctx.branch(), extra=ctx.extra(),
416 418 sortkey=ctx.rev())
417 419
418 420 def gettags(self):
419 421 # This will get written to .hgtags, filter non global tags out.
420 422 tags = [t for t in self.repo.tagslist()
421 423 if self.repo.tagtype(t[0]) == 'global']
422 424 return dict([(name, hex(node)) for name, node in tags
423 425 if self.keep(node)])
424 426
425 427 def getchangedfiles(self, rev, i):
426 428 ctx = self.changectx(rev)
427 429 parents = self.parents(ctx)
428 430 if not parents and i is None:
429 431 i = 0
430 432 changes = [], ctx.manifest().keys(), []
431 433 else:
432 434 i = i or 0
433 435 changes = self.repo.status(parents[i].node(), ctx.node())[:3]
434 436 changes = [[f for f in l if f not in self.ignored] for l in changes]
435 437
436 438 if i == 0:
437 439 self._changescache = (rev, changes)
438 440
439 441 return changes[0] + changes[1] + changes[2]
440 442
441 443 def converted(self, rev, destrev):
442 444 if self.convertfp is None:
443 445 self.convertfp = open(self.repo.join('shamap'), 'a')
444 446 self.convertfp.write('%s %s\n' % (destrev, rev))
445 447 self.convertfp.flush()
446 448
447 449 def before(self):
448 450 self.ui.debug('run hg source pre-conversion action\n')
449 451
450 452 def after(self):
451 453 self.ui.debug('run hg source post-conversion action\n')
452 454
453 455 def hasnativeorder(self):
454 456 return True
455 457
456 458 def hasnativeclose(self):
457 459 return True
458 460
459 461 def lookuprev(self, rev):
460 462 try:
461 463 return hex(self.repo.lookup(rev))
462 464 except error.RepoError:
463 465 return None
464 466
465 467 def getbookmarks(self):
466 468 return bookmarks.listbookmarks(self.repo)
467 469
468 470 def checkrevformat(self, revstr, mapname='splicemap'):
469 471 """ Mercurial revision strings are 40-byte hex hashes """
470 472 self.checkhexformat(revstr, mapname)
@@ -1,359 +1,359 @@
1 1 # monotone.py - monotone support for the convert extension
2 2 #
3 3 # Copyright 2008, 2009 Mikkel Fahnoe Jorgensen <mikkel@dvide.com> and
4 4 # others
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 import os, re
10 10 from mercurial import util
11 11 from common import NoRepo, commit, converter_source, checktool
12 12 from common import commandline
13 13 from mercurial.i18n import _
14 14
15 15 class monotone_source(converter_source, commandline):
16 16 def __init__(self, ui, path=None, rev=None):
17 17 converter_source.__init__(self, ui, path, rev)
18 18 commandline.__init__(self, ui, 'mtn')
19 19
20 20 self.ui = ui
21 21 self.path = path
22 22 self.automatestdio = False
23 23 self.rev = rev
24 24
25 25 norepo = NoRepo(_("%s does not look like a monotone repository")
26 26 % path)
27 27 if not os.path.exists(os.path.join(path, '_MTN')):
28 28 # Could be a monotone repository (SQLite db file)
29 29 try:
30 30 f = file(path, 'rb')
31 31 header = f.read(16)
32 32 f.close()
33 33 except IOError:
34 34 header = ''
35 35 if header != 'SQLite format 3\x00':
36 36 raise norepo
37 37
38 38 # regular expressions for parsing monotone output
39 39 space = r'\s*'
40 40 name = r'\s+"((?:\\"|[^"])*)"\s*'
41 41 value = name
42 42 revision = r'\s+\[(\w+)\]\s*'
43 43 lines = r'(?:.|\n)+'
44 44
45 45 self.dir_re = re.compile(space + "dir" + name)
46 46 self.file_re = re.compile(space + "file" + name +
47 47 "content" + revision)
48 48 self.add_file_re = re.compile(space + "add_file" + name +
49 49 "content" + revision)
50 50 self.patch_re = re.compile(space + "patch" + name +
51 51 "from" + revision + "to" + revision)
52 52 self.rename_re = re.compile(space + "rename" + name + "to" + name)
53 53 self.delete_re = re.compile(space + "delete" + name)
54 54 self.tag_re = re.compile(space + "tag" + name + "revision" +
55 55 revision)
56 56 self.cert_re = re.compile(lines + space + "name" + name +
57 57 "value" + value)
58 58
59 59 attr = space + "file" + lines + space + "attr" + space
60 60 self.attr_execute_re = re.compile(attr + '"mtn:execute"' +
61 61 space + '"true"')
62 62
63 63 # cached data
64 64 self.manifest_rev = None
65 65 self.manifest = None
66 66 self.files = None
67 67 self.dirs = None
68 68
69 69 checktool('mtn', abort=False)
70 70
71 71 def mtnrun(self, *args, **kwargs):
72 72 if self.automatestdio:
73 73 return self.mtnrunstdio(*args, **kwargs)
74 74 else:
75 75 return self.mtnrunsingle(*args, **kwargs)
76 76
77 77 def mtnrunsingle(self, *args, **kwargs):
78 78 kwargs['d'] = self.path
79 79 return self.run0('automate', *args, **kwargs)
80 80
81 81 def mtnrunstdio(self, *args, **kwargs):
82 82 # Prepare the command in automate stdio format
83 83 command = []
84 84 for k, v in kwargs.iteritems():
85 85 command.append("%s:%s" % (len(k), k))
86 86 if v:
87 87 command.append("%s:%s" % (len(v), v))
88 88 if command:
89 89 command.insert(0, 'o')
90 90 command.append('e')
91 91
92 92 command.append('l')
93 93 for arg in args:
94 94 command += "%s:%s" % (len(arg), arg)
95 95 command.append('e')
96 96 command = ''.join(command)
97 97
98 98 self.ui.debug("mtn: sending '%s'\n" % command)
99 99 self.mtnwritefp.write(command)
100 100 self.mtnwritefp.flush()
101 101
102 102 return self.mtnstdioreadcommandoutput(command)
103 103
104 104 def mtnstdioreadpacket(self):
105 105 read = None
106 106 commandnbr = ''
107 107 while read != ':':
108 108 read = self.mtnreadfp.read(1)
109 109 if not read:
110 110 raise util.Abort(_('bad mtn packet - no end of commandnbr'))
111 111 commandnbr += read
112 112 commandnbr = commandnbr[:-1]
113 113
114 114 stream = self.mtnreadfp.read(1)
115 115 if stream not in 'mewptl':
116 116 raise util.Abort(_('bad mtn packet - bad stream type %s') % stream)
117 117
118 118 read = self.mtnreadfp.read(1)
119 119 if read != ':':
120 120 raise util.Abort(_('bad mtn packet - no divider before size'))
121 121
122 122 read = None
123 123 lengthstr = ''
124 124 while read != ':':
125 125 read = self.mtnreadfp.read(1)
126 126 if not read:
127 127 raise util.Abort(_('bad mtn packet - no end of packet size'))
128 128 lengthstr += read
129 129 try:
130 130 length = long(lengthstr[:-1])
131 131 except TypeError:
132 132 raise util.Abort(_('bad mtn packet - bad packet size %s')
133 133 % lengthstr)
134 134
135 135 read = self.mtnreadfp.read(length)
136 136 if len(read) != length:
137 137 raise util.Abort(_("bad mtn packet - unable to read full packet "
138 138 "read %s of %s") % (len(read), length))
139 139
140 140 return (commandnbr, stream, length, read)
141 141
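
mtnstdioreadpacket() reads the mtn automate stdio framing byte by byte: a command number, a stream letter ('m'ain, 'e'rror, 'w'arning, 'p'rogress, 't'icker, 'l'ast), and a payload size, colon-separated and followed by the payload. A toy parser over an in-memory string, for illustration only:

def parsepacket(buf):
    """Parse one '<cmdnbr>:<stream>:<size>:<payload>' packet
    (illustration only; the real reader loops on a pipe)."""
    cmdnbr, rest = buf.split(':', 1)
    stream = rest[0]                        # one of 'mewptl'
    sizestr, rest = rest[2:].split(':', 1)  # rest[1] is the ':' divider
    size = int(sizestr)
    return cmdnbr, stream, size, rest[:size]

# parsepacket('0:m:5:hello') -> ('0', 'm', 5, 'hello')
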
142 142 def mtnstdioreadcommandoutput(self, command):
143 143 retval = []
144 144 while True:
145 145 commandnbr, stream, length, output = self.mtnstdioreadpacket()
146 146 self.ui.debug('mtn: read packet %s:%s:%s\n' %
147 147 (commandnbr, stream, length))
148 148
149 149 if stream == 'l':
150 150 # End of command
151 151 if output != '0':
152 152 raise util.Abort(_("mtn command '%s' returned %s") %
153 153 (command, output))
154 154 break
155 155 elif stream in 'ew':
156 156 # Error, warning output
157 157 self.ui.warn(_('%s error:\n') % self.command)
158 158 self.ui.warn(output)
159 159 elif stream == 'p':
160 160 # Progress messages
161 161 self.ui.debug('mtn: ' + output)
162 162 elif stream == 'm':
163 163 # Main stream - command output
164 164 retval.append(output)
165 165
166 166 return ''.join(retval)
167 167
168 168 def mtnloadmanifest(self, rev):
169 169 if self.manifest_rev == rev:
170 170 return
171 171 self.manifest = self.mtnrun("get_manifest_of", rev).split("\n\n")
172 172 self.manifest_rev = rev
173 173 self.files = {}
174 174 self.dirs = {}
175 175
176 176 for e in self.manifest:
177 177 m = self.file_re.match(e)
178 178 if m:
179 179 attr = ""
180 180 name = m.group(1)
181 181 node = m.group(2)
182 182 if self.attr_execute_re.match(e):
183 183 attr += "x"
184 184 self.files[name] = (node, attr)
185 185 m = self.dir_re.match(e)
186 186 if m:
187 187 self.dirs[m.group(1)] = True
188 188
189 189 def mtnisfile(self, name, rev):
190 190 # a non-file could be a directory or a deleted or renamed file
191 191 self.mtnloadmanifest(rev)
192 192 return name in self.files
193 193
194 194 def mtnisdir(self, name, rev):
195 195 self.mtnloadmanifest(rev)
196 196 return name in self.dirs
197 197
198 198 def mtngetcerts(self, rev):
199 199 certs = {"author":"<missing>", "date":"<missing>",
200 200 "changelog":"<missing>", "branch":"<missing>"}
201 201 certlist = self.mtnrun("certs", rev)
202 202 # mtn < 0.45:
203 203 # key "test@selenic.com"
204 204 # mtn >= 0.45:
205 205 # key [ff58a7ffb771907c4ff68995eada1c4da068d328]
206 206 certlist = re.split('\n\n key ["\[]', certlist)
207 207 for e in certlist:
208 208 m = self.cert_re.match(e)
209 209 if m:
210 210 name, value = m.groups()
211 211 value = value.replace(r'\"', '"')
212 212 value = value.replace(r'\\', '\\')
213 213 certs[name] = value
214 214 # Monotone may have subsecond dates: 2005-02-05T09:39:12.364306
215 215 # and all times are stored in UTC
216 216 certs["date"] = certs["date"].split('.')[0] + " UTC"
217 217 return certs
218 218
219 219 # implement the converter_source interface:
220 220
221 221 def getheads(self):
222 222 if not self.rev:
223 223 return self.mtnrun("leaves").splitlines()
224 224 else:
225 225 return [self.rev]
226 226
227 227 def getchanges(self, rev):
228 228 revision = self.mtnrun("get_revision", rev).split("\n\n")
229 229 files = {}
230 230 ignoremove = {}
231 231 renameddirs = []
232 232 copies = {}
233 233 for e in revision:
234 234 m = self.add_file_re.match(e)
235 235 if m:
236 236 files[m.group(1)] = rev
237 237 ignoremove[m.group(1)] = rev
238 238 m = self.patch_re.match(e)
239 239 if m:
240 240 files[m.group(1)] = rev
241 241 # Delete/rename is handled later when the convert engine
242 242 # discovers a None value from getfile,
243 243 # but only if we add the "from" file to the list of changes.
244 244 m = self.delete_re.match(e)
245 245 if m:
246 246 files[m.group(1)] = rev
247 247 m = self.rename_re.match(e)
248 248 if m:
249 249 toname = m.group(2)
250 250 fromname = m.group(1)
251 251 if self.mtnisfile(toname, rev):
252 252 ignoremove[toname] = 1
253 253 copies[toname] = fromname
254 254 files[toname] = rev
255 255 files[fromname] = rev
256 256 elif self.mtnisdir(toname, rev):
257 257 renameddirs.append((fromname, toname))
258 258
259 259 # Directory renames can be handled only once we have recorded
260 260 # all new files
261 261 for fromdir, todir in renameddirs:
262 262 renamed = {}
263 263 for tofile in self.files:
264 264 if tofile in ignoremove:
265 265 continue
266 266 if tofile.startswith(todir + '/'):
267 267 renamed[tofile] = fromdir + tofile[len(todir):]
268 268 # Avoid chained moves like:
269 269 # d1(/a) => d3/d1(/a)
270 270 # d2 => d3
271 271 ignoremove[tofile] = 1
272 272 for tofile, fromfile in renamed.items():
273 273 self.ui.debug (_("copying file in renamed directory "
274 274 "from '%s' to '%s'")
275 275 % (fromfile, tofile), '\n')
276 276 files[tofile] = rev
277 277 copies[tofile] = fromfile
278 278 for fromfile in renamed.values():
279 279 files[fromfile] = rev
280 280
281 281 return (files.items(), copies)
282 282
283 283 def getfile(self, name, rev):
284 284 if not self.mtnisfile(name, rev):
285 raise IOError # file was deleted or renamed
285 return None, None
286 286 try:
287 287 data = self.mtnrun("get_file_of", name, r=rev)
288 288 except Exception:
289 raise IOError # file was deleted or renamed
289 return None, None
290 290 self.mtnloadmanifest(rev)
291 291 node, attr = self.files.get(name, (None, ""))
292 292 return data, attr
293 293
294 294 def getcommit(self, rev):
295 295 extra = {}
296 296 certs = self.mtngetcerts(rev)
297 297 if certs.get('suspend') == certs["branch"]:
298 298 extra['close'] = '1'
299 299 return commit(
300 300 author=certs["author"],
301 301 date=util.datestr(util.strdate(certs["date"], "%Y-%m-%dT%H:%M:%S")),
302 302 desc=certs["changelog"],
303 303 rev=rev,
304 304 parents=self.mtnrun("parents", rev).splitlines(),
305 305 branch=certs["branch"],
306 306 extra=extra)
307 307
308 308 def gettags(self):
309 309 tags = {}
310 310 for e in self.mtnrun("tags").split("\n\n"):
311 311 m = self.tag_re.match(e)
312 312 if m:
313 313 tags[m.group(1)] = m.group(2)
314 314 return tags
315 315
316 316 def getchangedfiles(self, rev, i):
317 317 # This function is only needed to support --filemap
318 318 # ... and we don't support that
319 319 raise NotImplementedError
320 320
321 321 def before(self):
322 322 # Check if we have a new enough version to use automate stdio
323 323 version = 0.0
324 324 try:
325 325 versionstr = self.mtnrunsingle("interface_version")
326 326 version = float(versionstr)
327 327 except Exception:
328 328 raise util.Abort(_("unable to determine mtn automate interface "
329 329 "version"))
330 330
331 331 if version >= 12.0:
332 332 self.automatestdio = True
333 333 self.ui.debug("mtn automate version %s - using automate stdio\n" %
334 334 version)
335 335
336 336 # launch the long-running automate stdio process
337 337 self.mtnwritefp, self.mtnreadfp = self._run2('automate', 'stdio',
338 338 '-d', self.path)
339 339 # read the headers
340 340 read = self.mtnreadfp.readline()
341 341 if read != 'format-version: 2\n':
342 342 raise util.Abort(_('mtn automate stdio header unexpected: %s')
343 343 % read)
344 344 while read != '\n':
345 345 read = self.mtnreadfp.readline()
346 346 if not read:
347 347 raise util.Abort(_("failed to reach end of mtn automate "
348 348 "stdio headers"))
349 349 else:
350 350 self.ui.debug("mtn automate version %s - not using automate stdio "
351 351 "(automate >= 12.0 - mtn >= 0.46 is needed)\n" % version)
352 352
353 353 def after(self):
354 354 if self.automatestdio:
355 355 self.mtnwritefp.close()
356 356 self.mtnwritefp = None
357 357 self.mtnreadfp.close()
358 358 self.mtnreadfp = None
359 359
@@ -1,205 +1,205 @@
1 1 # Perforce source for convert extension.
2 2 #
3 3 # Copyright 2009, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from mercurial import util
9 9 from mercurial.i18n import _
10 10
11 11 from common import commit, converter_source, checktool, NoRepo
12 12 import marshal
13 13 import re
14 14
15 15 def loaditer(f):
16 16 "Yield the dictionary objects generated by p4"
17 17 try:
18 18 while True:
19 19 d = marshal.load(f)
20 20 if not d:
21 21 break
22 22 yield d
23 23 except EOFError:
24 24 pass
25 25
26 26 class p4_source(converter_source):
27 27 def __init__(self, ui, path, rev=None):
28 28 super(p4_source, self).__init__(ui, path, rev=rev)
29 29
30 30 if "/" in path and not path.startswith('//'):
31 31 raise NoRepo(_('%s does not look like a P4 repository') % path)
32 32
33 33 checktool('p4', abort=False)
34 34
35 35 self.p4changes = {}
36 36 self.heads = {}
37 37 self.changeset = {}
38 38 self.files = {}
39 39 self.tags = {}
40 40 self.lastbranch = {}
41 41 self.parent = {}
42 42 self.encoding = "latin_1"
43 43 self.depotname = {} # mapping from local name to depot name
44 44 self.re_type = re.compile(
45 45 "([a-z]+)?(text|binary|symlink|apple|resource|unicode|utf\d+)"
46 46 "(\+\w+)?$")
47 47 self.re_keywords = re.compile(
48 48 r"\$(Id|Header|Date|DateTime|Change|File|Revision|Author)"
49 49 r":[^$\n]*\$")
50 50 self.re_keywords_old = re.compile("\$(Id|Header):[^$\n]*\$")
51 51
52 52 self._parse(ui, path)
53 53
54 54 def _parse_view(self, path):
55 55 "Read changes affecting the path"
56 56 cmd = 'p4 -G changes -s submitted %s' % util.shellquote(path)
57 57 stdout = util.popen(cmd, mode='rb')
58 58 for d in loaditer(stdout):
59 59 c = d.get("change", None)
60 60 if c:
61 61 self.p4changes[c] = True
62 62
63 63 def _parse(self, ui, path):
64 64 "Prepare list of P4 filenames and revisions to import"
65 65 ui.status(_('reading p4 views\n'))
66 66
67 67 # read client spec or view
68 68 if "/" in path:
69 69 self._parse_view(path)
70 70 if path.startswith("//") and path.endswith("/..."):
71 71 views = {path[:-3]:""}
72 72 else:
73 73 views = {"//": ""}
74 74 else:
75 75 cmd = 'p4 -G client -o %s' % util.shellquote(path)
76 76 clientspec = marshal.load(util.popen(cmd, mode='rb'))
77 77
78 78 views = {}
79 79 for client in clientspec:
80 80 if client.startswith("View"):
81 81 sview, cview = clientspec[client].split()
82 82 self._parse_view(sview)
83 83 if sview.endswith("...") and cview.endswith("..."):
84 84 sview = sview[:-3]
85 85 cview = cview[:-3]
86 86 cview = cview[2:]
87 87 cview = cview[cview.find("/") + 1:]
88 88 views[sview] = cview
89 89
90 90 # list of changes that affect our source files
91 91 self.p4changes = self.p4changes.keys()
92 92 self.p4changes.sort(key=int)
93 93
94 94 # list with depot pathnames, longest first
95 95 vieworder = views.keys()
96 96 vieworder.sort(key=len, reverse=True)
97 97
98 98 # handle revision limiting
99 99 startrev = self.ui.config('convert', 'p4.startrev', default=0)
100 100 self.p4changes = [x for x in self.p4changes
101 101 if ((not startrev or int(x) >= int(startrev)) and
102 102 (not self.rev or int(x) <= int(self.rev)))]
103 103
104 104 # now read the full changelists to get the list of file revisions
105 105 ui.status(_('collecting p4 changelists\n'))
106 106 lastid = None
107 107 for change in self.p4changes:
108 108 cmd = "p4 -G describe -s %s" % change
109 109 stdout = util.popen(cmd, mode='rb')
110 110 d = marshal.load(stdout)
111 111 desc = self.recode(d.get("desc", ""))
112 112 shortdesc = desc.split("\n", 1)[0]
113 113 t = '%s %s' % (d["change"], repr(shortdesc)[1:-1])
114 114 ui.status(util.ellipsis(t, 80) + '\n')
115 115
116 116 if lastid:
117 117 parents = [lastid]
118 118 else:
119 119 parents = []
120 120
121 121 date = (int(d["time"]), 0) # timezone not set
122 122 c = commit(author=self.recode(d["user"]),
123 123 date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
124 124 parents=parents, desc=desc, branch='',
125 125 extra={"p4": change})
126 126
127 127 files = []
128 128 i = 0
129 129 while ("depotFile%d" % i) in d and ("rev%d" % i) in d:
130 130 oldname = d["depotFile%d" % i]
131 131 filename = None
132 132 for v in vieworder:
133 133 if oldname.startswith(v):
134 134 filename = views[v] + oldname[len(v):]
135 135 break
136 136 if filename:
137 137 files.append((filename, d["rev%d" % i]))
138 138 self.depotname[filename] = oldname
139 139 i += 1
140 140 self.changeset[change] = c
141 141 self.files[change] = files
142 142 lastid = change
143 143
144 144 if lastid:
145 145 self.heads = [lastid]
146 146
147 147 def getheads(self):
148 148 return self.heads
149 149
150 150 def getfile(self, name, rev):
151 151 cmd = 'p4 -G print %s' \
152 152 % util.shellquote("%s#%s" % (self.depotname[name], rev))
153 153 stdout = util.popen(cmd, mode='rb')
154 154
155 155 mode = None
156 156 contents = ""
157 157 keywords = None
158 158
159 159 for d in loaditer(stdout):
160 160 code = d["code"]
161 161 data = d.get("data")
162 162
163 163 if code == "error":
164 164 raise IOError(d["generic"], data)
165 165
166 166 elif code == "stat":
167 167 if d.get("action") == "purge":
168 168 return None, None
169 169 p4type = self.re_type.match(d["type"])
170 170 if p4type:
171 171 mode = ""
172 172 flags = (p4type.group(1) or "") + (p4type.group(3) or "")
173 173 if "x" in flags:
174 174 mode = "x"
175 175 if p4type.group(2) == "symlink":
176 176 mode = "l"
177 177 if "ko" in flags:
178 178 keywords = self.re_keywords_old
179 179 elif "k" in flags:
180 180 keywords = self.re_keywords
181 181
182 182 elif code == "text" or code == "binary":
183 183 contents += data
184 184
185 185 if mode is None:
186 raise IOError(0, "bad stat")
186 return None, None
187 187
188 188 if keywords:
189 189 contents = keywords.sub("$\\1$", contents)
190 190 if mode == "l" and contents.endswith("\n"):
191 191 contents = contents[:-1]
192 192
193 193 return contents, mode
194 194
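
When a file's Perforce type carries the 'k' or 'ko' flag, getfile() collapses expanded RCS-style keywords so the converted contents do not embed Perforce revision metadata. For illustration (made-up input):

import re
re_keywords = re.compile(r"\$(Id|Header|Date|DateTime|Change|File|Revision|Author)"
                         r":[^$\n]*\$")
print(re_keywords.sub("$\\1$", "$Id: //depot/src/a.c#3 $"))  # -> $Id$
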
195 195 def getchanges(self, rev):
196 196 return self.files[rev], {}
197 197
198 198 def getcommit(self, rev):
199 199 return self.changeset[rev]
200 200
201 201 def gettags(self):
202 202 return self.tags
203 203
204 204 def getchangedfiles(self, rev, i):
205 205 return sorted([x[0] for x in self.files[rev]])
@@ -1,1315 +1,1314 @@
1 1 # Subversion 1.4/1.5 Python API backend
2 2 #
3 3 # Copyright(C) 2007 Daniel Holth et al
4 4
5 5 import os, re, sys, tempfile, urllib, urllib2
6 6 import xml.dom.minidom
7 7 import cPickle as pickle
8 8
9 9 from mercurial import strutil, scmutil, util, encoding
10 10 from mercurial.i18n import _
11 11
12 12 propertycache = util.propertycache
13 13
14 14 # Subversion stuff. Works best with very recent Python SVN bindings
15 15 # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
16 16 # these bindings.
17 17
18 18 from cStringIO import StringIO
19 19
20 20 from common import NoRepo, MissingTool, commit, encodeargs, decodeargs
21 21 from common import commandline, converter_source, converter_sink, mapfile
22 22 from common import makedatetimestamp
23 23
24 24 try:
25 25 from svn.core import SubversionException, Pool
26 26 import svn
27 27 import svn.client
28 28 import svn.core
29 29 import svn.ra
30 30 import svn.delta
31 31 import transport
32 32 import warnings
33 33 warnings.filterwarnings('ignore',
34 34 module='svn.core',
35 35 category=DeprecationWarning)
36 36
37 37 except ImportError:
38 38 svn = None
39 39
40 40 class SvnPathNotFound(Exception):
41 41 pass
42 42
43 43 def revsplit(rev):
44 44 """Parse a revision string and return (uuid, path, revnum).
45 45 >>> revsplit('svn:a2147622-4a9f-4db4-a8d3-13562ff547b2'
46 46 ... '/proj%20B/mytrunk/mytrunk@1')
47 47 ('a2147622-4a9f-4db4-a8d3-13562ff547b2', '/proj%20B/mytrunk/mytrunk', 1)
48 48 >>> revsplit('svn:8af66a51-67f5-4354-b62c-98d67cc7be1d@1')
49 49 ('', '', 1)
50 50 >>> revsplit('@7')
51 51 ('', '', 7)
52 52 >>> revsplit('7')
53 53 ('', '', 0)
54 54 >>> revsplit('bad')
55 55 ('', '', 0)
56 56 """
57 57 parts = rev.rsplit('@', 1)
58 58 revnum = 0
59 59 if len(parts) > 1:
60 60 revnum = int(parts[1])
61 61 parts = parts[0].split('/', 1)
62 62 uuid = ''
63 63 mod = ''
64 64 if len(parts) > 1 and parts[0].startswith('svn:'):
65 65 uuid = parts[0][4:]
66 66 mod = '/' + parts[1]
67 67 return uuid, mod, revnum
68 68
69 69 def quote(s):
70 70 # As of svn 1.7, many svn calls expect "canonical" paths. In
71 71 # theory, we should call svn.core.*canonicalize() on all paths
72 72 # before passing them to the API. Instead, we assume the base url
73 73 # is canonical and copy the behaviour of svn URL encoding function
74 74 # so we can extend it safely with new components. The "safe"
75 75 # characters were taken from the "svn_uri__char_validity" table in
76 76 # libsvn_subr/path.c.
77 77 return urllib.quote(s, "!$&'()*+,-./:=@_~")
78 78
79 79 def geturl(path):
80 80 try:
81 81 return svn.client.url_from_path(svn.core.svn_path_canonicalize(path))
82 82 except SubversionException:
83 83 # svn.client.url_from_path() fails with local repositories
84 84 pass
85 85 if os.path.isdir(path):
86 86 path = os.path.normpath(os.path.abspath(path))
87 87 if os.name == 'nt':
88 88 path = '/' + util.normpath(path)
89 89 # Module URL is later compared with the repository URL returned
90 90 # by svn API, which is UTF-8.
91 91 path = encoding.tolocal(path)
92 92 path = 'file://%s' % quote(path)
93 93 return svn.core.svn_path_canonicalize(path)
94 94
95 95 def optrev(number):
96 96 optrev = svn.core.svn_opt_revision_t()
97 97 optrev.kind = svn.core.svn_opt_revision_number
98 98 optrev.value.number = number
99 99 return optrev
100 100
101 101 class changedpath(object):
102 102 def __init__(self, p):
103 103 self.copyfrom_path = p.copyfrom_path
104 104 self.copyfrom_rev = p.copyfrom_rev
105 105 self.action = p.action
106 106
107 107 def get_log_child(fp, url, paths, start, end, limit=0,
108 108 discover_changed_paths=True, strict_node_history=False):
109 109 protocol = -1
110 110 def receiver(orig_paths, revnum, author, date, message, pool):
111 111 paths = {}
112 112 if orig_paths is not None:
113 113 for k, v in orig_paths.iteritems():
114 114 paths[k] = changedpath(v)
115 115 pickle.dump((paths, revnum, author, date, message),
116 116 fp, protocol)
117 117
118 118 try:
119 119 # Use an ra of our own so that our parent can consume
120 120 # our results without confusing the server.
121 121 t = transport.SvnRaTransport(url=url)
122 122 svn.ra.get_log(t.ra, paths, start, end, limit,
123 123 discover_changed_paths,
124 124 strict_node_history,
125 125 receiver)
126 126 except IOError:
127 127 # Caller may interrupt the iteration
128 128 pickle.dump(None, fp, protocol)
129 129 except Exception, inst:
130 130 pickle.dump(str(inst), fp, protocol)
131 131 else:
132 132 pickle.dump(None, fp, protocol)
133 133 fp.close()
134 134 # With a large history, the cleanup process goes crazy and suddenly
135 135 # consumes a *huge* amount of memory. Since the output file has already
136 136 # been closed, there is no need for a clean termination.
137 137 os._exit(0)
138 138
139 139 def debugsvnlog(ui, **opts):
140 140 """Fetch the SVN log in a subprocess and channel the results back
141 141 to the parent to avoid memory collection issues.
142 142 """
143 143 if svn is None:
144 144 raise util.Abort(_('debugsvnlog could not load Subversion python '
145 145 'bindings'))
146 146
147 147 util.setbinary(sys.stdin)
148 148 util.setbinary(sys.stdout)
149 149 args = decodeargs(sys.stdin.read())
150 150 get_log_child(sys.stdout, *args)
151 151
152 152 class logstream(object):
153 153 """Interruptible revision log iterator."""
154 154 def __init__(self, stdout):
155 155 self._stdout = stdout
156 156
157 157 def __iter__(self):
158 158 while True:
159 159 try:
160 160 entry = pickle.load(self._stdout)
161 161 except EOFError:
162 162 raise util.Abort(_('Mercurial failed to run itself, check'
163 163 ' hg executable is in PATH'))
164 164 try:
165 165 orig_paths, revnum, author, date, message = entry
166 166 except (TypeError, ValueError):
167 167 if entry is None:
168 168 break
169 169 raise util.Abort(_("log stream exception '%s'") % entry)
170 170 yield entry
171 171
172 172 def close(self):
173 173 if self._stdout:
174 174 self._stdout.close()
175 175 self._stdout = None
176 176
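
get_log_child() and logstream speak a simple pickle protocol: the child dumps zero or more (paths, revnum, author, date, message) tuples, then None as a terminator (or an error string on failure). A self-contained illustration feeding logstream from an in-memory buffer instead of a subprocess pipe:

import cPickle as pickle
from cStringIO import StringIO

buf = StringIO()
pickle.dump(({}, 42, 'alice', '2014-01-01', 'msg'), buf, -1)
pickle.dump(None, buf, -1)          # end-of-stream marker
buf.seek(0)
for entry in logstream(buf):
    print(entry[1])                 # -> 42
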
177 177 class directlogstream(list):
178 178 """Direct revision log iterator.
179 179 This can be used for debugging and development but it will probably leak
180 180 memory and is not suitable for real conversions."""
181 181 def __init__(self, url, paths, start, end, limit=0,
182 182 discover_changed_paths=True, strict_node_history=False):
183 183
184 184 def receiver(orig_paths, revnum, author, date, message, pool):
185 185 paths = {}
186 186 if orig_paths is not None:
187 187 for k, v in orig_paths.iteritems():
188 188 paths[k] = changedpath(v)
189 189 self.append((paths, revnum, author, date, message))
190 190
191 191 # Use an ra of our own so that our parent can consume
192 192 # our results without confusing the server.
193 193 t = transport.SvnRaTransport(url=url)
194 194 svn.ra.get_log(t.ra, paths, start, end, limit,
195 195 discover_changed_paths,
196 196 strict_node_history,
197 197 receiver)
198 198
199 199 def close(self):
200 200 pass
201 201
202 202 # Check to see if the given path is a local Subversion repo. Verify this by
203 203 # looking for several svn-specific files and directories in the given
204 204 # directory.
205 205 def filecheck(ui, path, proto):
206 206 for x in ('locks', 'hooks', 'format', 'db'):
207 207 if not os.path.exists(os.path.join(path, x)):
208 208 return False
209 209 return True
210 210
211 211 # Check to see if a given path is the root of an svn repo over http. We verify
212 212 # this by requesting a version-controlled URL we know can't exist and looking
213 213 # for the svn-specific "not found" XML.
214 214 def httpcheck(ui, path, proto):
215 215 try:
216 216 opener = urllib2.build_opener()
217 217 rsp = opener.open('%s://%s/!svn/ver/0/.svn' % (proto, path))
218 218 data = rsp.read()
219 219 except urllib2.HTTPError, inst:
220 220 if inst.code != 404:
221 221 # Except for 404 we cannot know for sure this is not an svn repo
222 222 ui.warn(_('svn: cannot probe remote repository, assume it could '
223 223 'be a subversion repository. Use --source-type if you '
224 224 'know better.\n'))
225 225 return True
226 226 data = inst.fp.read()
227 227 except Exception:
228 228 # Could be urllib2.URLError if the URL is invalid or anything else.
229 229 return False
230 230 return '<m:human-readable errcode="160013">' in data
231 231
232 232 protomap = {'http': httpcheck,
233 233 'https': httpcheck,
234 234 'file': filecheck,
235 235 }
236 236 def issvnurl(ui, url):
237 237 try:
238 238 proto, path = url.split('://', 1)
239 239 if proto == 'file':
240 240 if (os.name == 'nt' and path[:1] == '/' and path[1:2].isalpha()
241 241 and path[2:6].lower() == '%3a/'):
242 242 path = path[:2] + ':/' + path[6:]
243 243 path = urllib.url2pathname(path)
244 244 except ValueError:
245 245 proto = 'file'
246 246 path = os.path.abspath(url)
247 247 if proto == 'file':
248 248 path = util.pconvert(path)
249 249 check = protomap.get(proto, lambda *args: False)
250 250 while '/' in path:
251 251 if check(ui, path, proto):
252 252 return True
253 253 path = path.rsplit('/', 1)[0]
254 254 return False
255 255
256 256 # SVN conversion code stolen from bzr-svn and tailor
257 257 #
258 258 # Subversion looks like a versioned filesystem, branches structures
259 259 # are defined by conventions and not enforced by the tool. First,
260 260 # we define the potential branches (modules) as "trunk" and "branches"
261 261 # children directories. Revisions are then identified by their
262 262 # module and revision number (and a repository identifier).
263 263 #
264 264 # The revision graph is really a tree (or a forest). By default, a
265 265 # revision parent is the previous revision in the same module. If the
266 266 # module directory is copied/moved from another module then the
267 267 # revision is the module root and its parent the source revision in
268 268 # the parent module. A revision has at most one parent.
269 269 #
270 270 class svn_source(converter_source):
271 271 def __init__(self, ui, url, rev=None):
272 272 super(svn_source, self).__init__(ui, url, rev=rev)
273 273
274 274 if not (url.startswith('svn://') or url.startswith('svn+ssh://') or
275 275 (os.path.exists(url) and
276 276 os.path.exists(os.path.join(url, '.svn'))) or
277 277 issvnurl(ui, url)):
278 278 raise NoRepo(_("%s does not look like a Subversion repository")
279 279 % url)
280 280 if svn is None:
281 281 raise MissingTool(_('could not load Subversion python bindings'))
282 282
283 283 try:
284 284 version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
285 285 if version < (1, 4):
286 286 raise MissingTool(_('Subversion python bindings %d.%d found, '
287 287 '1.4 or later required') % version)
288 288 except AttributeError:
289 289 raise MissingTool(_('Subversion python bindings are too old, 1.4 '
290 290 'or later required'))
291 291
292 292 self.lastrevs = {}
293 293
294 294 latest = None
295 295 try:
296 296 # Support file://path@rev syntax. Useful e.g. to convert
297 297 # deleted branches.
298 298 at = url.rfind('@')
299 299 if at >= 0:
300 300 latest = int(url[at + 1:])
301 301 url = url[:at]
302 302 except ValueError:
303 303 pass
304 304 self.url = geturl(url)
305 305 self.encoding = 'UTF-8' # Subversion is always nominal UTF-8
306 306 try:
307 307 self.transport = transport.SvnRaTransport(url=self.url)
308 308 self.ra = self.transport.ra
309 309 self.ctx = self.transport.client
310 310 self.baseurl = svn.ra.get_repos_root(self.ra)
311 311 # Module is either empty or a repository path starting with
312 312 # a slash and not ending with a slash.
313 313 self.module = urllib.unquote(self.url[len(self.baseurl):])
314 314 self.prevmodule = None
315 315 self.rootmodule = self.module
316 316 self.commits = {}
317 317 self.paths = {}
318 318 self.uuid = svn.ra.get_uuid(self.ra)
319 319 except SubversionException:
320 320 ui.traceback()
321 321 raise NoRepo(_("%s does not look like a Subversion repository")
322 322 % self.url)
323 323
324 324 if rev:
325 325 try:
326 326 latest = int(rev)
327 327 except ValueError:
328 328 raise util.Abort(_('svn: revision %s is not an integer') % rev)
329 329
330 330 self.trunkname = self.ui.config('convert', 'svn.trunk',
331 331 'trunk').strip('/')
332 332 self.startrev = self.ui.config('convert', 'svn.startrev', default=0)
333 333 try:
334 334 self.startrev = int(self.startrev)
335 335 if self.startrev < 0:
336 336 self.startrev = 0
337 337 except ValueError:
338 338 raise util.Abort(_('svn: start revision %s is not an integer')
339 339 % self.startrev)
340 340
341 341 try:
342 342 self.head = self.latest(self.module, latest)
343 343 except SvnPathNotFound:
344 344 self.head = None
345 345 if not self.head:
346 346 raise util.Abort(_('no revision found in module %s')
347 347 % self.module)
348 348 self.last_changed = self.revnum(self.head)
349 349
350 350 self._changescache = None
351 351
352 352 if os.path.exists(os.path.join(url, '.svn/entries')):
353 353 self.wc = url
354 354 else:
355 355 self.wc = None
356 356 self.convertfp = None
357 357
358 358 def setrevmap(self, revmap):
359 359 lastrevs = {}
360 360 for revid in revmap.iterkeys():
361 361 uuid, module, revnum = revsplit(revid)
362 362 lastrevnum = lastrevs.setdefault(module, revnum)
363 363 if revnum > lastrevnum:
364 364 lastrevs[module] = revnum
365 365 self.lastrevs = lastrevs
366 366
367 367 def exists(self, path, optrev):
368 368 try:
369 369 svn.client.ls(self.url.rstrip('/') + '/' + quote(path),
370 370 optrev, False, self.ctx)
371 371 return True
372 372 except SubversionException:
373 373 return False
374 374
375 375 def getheads(self):
376 376
377 377 def isdir(path, revnum):
378 378 kind = self._checkpath(path, revnum)
379 379 return kind == svn.core.svn_node_dir
380 380
381 381 def getcfgpath(name, rev):
382 382 cfgpath = self.ui.config('convert', 'svn.' + name)
383 383 if cfgpath is not None and cfgpath.strip() == '':
384 384 return None
385 385 path = (cfgpath or name).strip('/')
386 386 if not self.exists(path, rev):
387 387 if self.module.endswith(path) and name == 'trunk':
388 388 # we are converting from inside this directory
389 389 return None
390 390 if cfgpath:
391 391 raise util.Abort(_('expected %s to be at %r, but not found')
392 392 % (name, path))
393 393 return None
394 394 self.ui.note(_('found %s at %r\n') % (name, path))
395 395 return path
396 396
397 397 rev = optrev(self.last_changed)
398 398 oldmodule = ''
399 399 trunk = getcfgpath('trunk', rev)
400 400 self.tags = getcfgpath('tags', rev)
401 401 branches = getcfgpath('branches', rev)
402 402
403 403 # If the project has a trunk or branches, we will extract heads
404 404 # from them. We keep the project root otherwise.
405 405 if trunk:
406 406 oldmodule = self.module or ''
407 407 self.module += '/' + trunk
408 408 self.head = self.latest(self.module, self.last_changed)
409 409 if not self.head:
410 410 raise util.Abort(_('no revision found in module %s')
411 411 % self.module)
412 412
413 413 # First head in the list is the module's head
414 414 self.heads = [self.head]
415 415 if self.tags is not None:
416 416             self.tags = '%s/%s' % (oldmodule, (self.tags or 'tags'))
417 417
418 418 # Check if branches bring a few more heads to the list
419 419 if branches:
420 420 rpath = self.url.strip('/')
421 421 branchnames = svn.client.ls(rpath + '/' + quote(branches),
422 422 rev, False, self.ctx)
423 423 for branch in sorted(branchnames):
424 424 module = '%s/%s/%s' % (oldmodule, branches, branch)
425 425 if not isdir(module, self.last_changed):
426 426 continue
427 427 brevid = self.latest(module, self.last_changed)
428 428 if not brevid:
429 429 self.ui.note(_('ignoring empty branch %s\n') % branch)
430 430 continue
431 431 self.ui.note(_('found branch %s at %d\n') %
432 432 (branch, self.revnum(brevid)))
433 433 self.heads.append(brevid)
434 434
435 435 if self.startrev and self.heads:
436 436 if len(self.heads) > 1:
437 437 raise util.Abort(_('svn: start revision is not supported '
438 438 'with more than one branch'))
439 439 revnum = self.revnum(self.heads[0])
440 440 if revnum < self.startrev:
441 441 raise util.Abort(
442 442 _('svn: no revision found after start revision %d')
443 443 % self.startrev)
444 444
445 445 return self.heads
446 446
447 447 def getchanges(self, rev):
448 448 if self._changescache and self._changescache[0] == rev:
449 449 return self._changescache[1]
450 450 self._changescache = None
451 451 (paths, parents) = self.paths[rev]
452 452 if parents:
453 453 files, self.removed, copies = self.expandpaths(rev, paths, parents)
454 454 else:
455 455 # Perform a full checkout on roots
456 456 uuid, module, revnum = revsplit(rev)
457 457 entries = svn.client.ls(self.baseurl + quote(module),
458 458 optrev(revnum), True, self.ctx)
459 459 files = [n for n, e in entries.iteritems()
460 460 if e.kind == svn.core.svn_node_file]
461 461 copies = {}
462 462 self.removed = set()
463 463
464 464 files.sort()
465 465 files = zip(files, [rev] * len(files))
466 466
467 467 # caller caches the result, so free it here to release memory
468 468 del self.paths[rev]
469 469 return (files, copies)
470 470
471 471 def getchangedfiles(self, rev, i):
472 472 changes = self.getchanges(rev)
473 473 self._changescache = (rev, changes)
474 474 return [f[0] for f in changes[0]]
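    # Note: getchangedfiles() also primes self._changescache, so an
    # immediately following getchanges() call for the same revision is
    # answered from the cache instead of recomputing the path list.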
475 475
476 476 def getcommit(self, rev):
477 477 if rev not in self.commits:
478 478 uuid, module, revnum = revsplit(rev)
479 479 self.module = module
480 480 self.reparent(module)
481 481 # We assume that:
482 482 # - requests for revisions after "stop" come from the
483 483 # revision graph backward traversal. Cache all of them
484 484         #   down to stop; they will be used eventually.
485 485         # - requests for revisions before "stop" come to get the
486 486         #   parents of isolated branches. Just fetch what is needed.
487 487 stop = self.lastrevs.get(module, 0)
488 488 if revnum < stop:
489 489 stop = revnum + 1
490 490 self._fetch_revisions(revnum, stop)
491 491 if rev not in self.commits:
492 492 raise util.Abort(_('svn: revision %s not found') % revnum)
493 493 revcommit = self.commits[rev]
494 494 # caller caches the result, so free it here to release memory
495 495 del self.commits[rev]
496 496 return revcommit
497 497
498 498 def checkrevformat(self, revstr, mapname='splicemap'):
499 499         """fail if the revision format does not match the expected format"""
500 500 if not re.match(r'svn:[0-9a-f]{8,8}-[0-9a-f]{4,4}-'
501 501 '[0-9a-f]{4,4}-[0-9a-f]{4,4}-[0-9a-f]'
502 502 '{12,12}(.*)\@[0-9]+$',revstr):
503 503 raise util.Abort(_('%s entry %s is not a valid revision'
504 504 ' identifier') % (mapname, revstr))
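    # Illustration (hypothetical UUID): a splicemap entry such as
    #   svn:12345678-1234-1234-1234-123456789012/trunk@42
    # passes the check above, while anything else aborts the conversion.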
505 505
506 506 def gettags(self):
507 507 tags = {}
508 508 if self.tags is None:
509 509 return tags
510 510
511 511         # svn tags are just a convention: project branches left in a
512 512         # 'tags' directory. There is no other relationship than
513 513         # ancestry, which is expensive to discover and makes them hard
514 514         # to update incrementally. Worse, past revisions may be
515 515         # referenced by tags far away in the future, requiring a deep
516 516         # history traversal on every calculation. The current code
517 517         # performs a single backward traversal, tracking moves within
518 518         # the tags directory (tag renaming) and recording a new tag
519 519         # every time a project is copied from outside the tags
520 520         # directory. It also lists deleted tags; this behaviour may
521 521         # change in the future.
522 522 pendings = []
523 523 tagspath = self.tags
524 524 start = svn.ra.get_latest_revnum(self.ra)
525 525 stream = self._getlog([self.tags], start, self.startrev)
526 526 try:
527 527 for entry in stream:
528 528 origpaths, revnum, author, date, message = entry
529 529 if not origpaths:
530 530 origpaths = []
531 531 copies = [(e.copyfrom_path, e.copyfrom_rev, p) for p, e
532 532 in origpaths.iteritems() if e.copyfrom_path]
533 533 # Apply moves/copies from more specific to general
534 534 copies.sort(reverse=True)
535 535
536 536 srctagspath = tagspath
537 537 if copies and copies[-1][2] == tagspath:
538 538 # Track tags directory moves
539 539 srctagspath = copies.pop()[0]
540 540
541 541 for source, sourcerev, dest in copies:
542 542 if not dest.startswith(tagspath + '/'):
543 543 continue
544 544 for tag in pendings:
545 545 if tag[0].startswith(dest):
546 546 tagpath = source + tag[0][len(dest):]
547 547 tag[:2] = [tagpath, sourcerev]
548 548 break
549 549 else:
550 550 pendings.append([source, sourcerev, dest])
551 551
552 552 # Filter out tags with children coming from different
553 553 # parts of the repository like:
554 554 # /tags/tag.1 (from /trunk:10)
555 555 # /tags/tag.1/foo (from /branches/foo:12)
556 556                 # Here /tags/tag.1 is discarded, as well as its children.
557 557 # It happens with tools like cvs2svn. Such tags cannot
558 558 # be represented in mercurial.
559 559 addeds = dict((p, e.copyfrom_path) for p, e
560 560 in origpaths.iteritems()
561 561 if e.action == 'A' and e.copyfrom_path)
562 562 badroots = set()
563 563 for destroot in addeds:
564 564 for source, sourcerev, dest in pendings:
565 565 if (not dest.startswith(destroot + '/')
566 566 or source.startswith(addeds[destroot] + '/')):
567 567 continue
568 568 badroots.add(destroot)
569 569 break
570 570
571 571 for badroot in badroots:
572 572 pendings = [p for p in pendings if p[2] != badroot
573 573 and not p[2].startswith(badroot + '/')]
574 574
575 575 # Tell tag renamings from tag creations
576 576 renamings = []
577 577 for source, sourcerev, dest in pendings:
578 578 tagname = dest.split('/')[-1]
579 579 if source.startswith(srctagspath):
580 580 renamings.append([source, sourcerev, tagname])
581 581 continue
582 582 if tagname in tags:
583 583 # Keep the latest tag value
584 584 continue
585 585                     # The "from" revision may be fake; get one with changes
586 586 try:
587 587 tagid = self.latest(source, sourcerev)
588 588 if tagid and tagname not in tags:
589 589 tags[tagname] = tagid
590 590 except SvnPathNotFound:
591 591 # It happens when we are following directories
592 592 # we assumed were copied with their parents
593 593 # but were really created in the tag
594 594 # directory.
595 595 pass
596 596 pendings = renamings
597 597 tagspath = srctagspath
598 598 finally:
599 599 stream.close()
600 600 return tags
601 601
602 602 def converted(self, rev, destrev):
603 603 if not self.wc:
604 604 return
605 605 if self.convertfp is None:
606 606 self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'),
607 607 'a')
608 608 self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev)))
609 609 self.convertfp.flush()
610 610
611 611 def revid(self, revnum, module=None):
612 612 return 'svn:%s%s@%s' % (self.uuid, module or self.module, revnum)
613 613
614 614 def revnum(self, rev):
615 615 return int(rev.split('@')[-1])
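    # Illustration of the identifier scheme (hypothetical values):
    # revid(42, '/trunk') yields 'svn:<uuid>/trunk@42', and revnum()
    # recovers the trailing revision number 42 from that string.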
616 616
617 617 def latest(self, path, stop=None):
618 618         """Find the latest revid affecting path, up to the stop
619 619         revision number. If stop is None, default to the repository's
620 620         latest revision. It may return a revision in a different
621 621         module, since a branch may be moved without a change being
622 622         reported. Return None if the computed module does not belong
623 623         to the rootmodule subtree.
624 624         """
625 625 def findchanges(path, start, stop=None):
626 626 stream = self._getlog([path], start, stop or 1)
627 627 try:
628 628 for entry in stream:
629 629 paths, revnum, author, date, message = entry
630 630 if stop is None and paths:
631 631 # We do not know the latest changed revision,
632 632                         # so keep the first one with changed paths.
633 633 break
634 634 if revnum <= stop:
635 635 break
636 636
637 637 for p in paths:
638 638 if (not path.startswith(p) or
639 639 not paths[p].copyfrom_path):
640 640 continue
641 641 newpath = paths[p].copyfrom_path + path[len(p):]
642 642 self.ui.debug("branch renamed from %s to %s at %d\n" %
643 643 (path, newpath, revnum))
644 644 path = newpath
645 645 break
646 646 if not paths:
647 647 revnum = None
648 648 return revnum, path
649 649 finally:
650 650 stream.close()
651 651
652 652 if not path.startswith(self.rootmodule):
653 653 # Requests on foreign branches may be forbidden at server level
654 654 self.ui.debug('ignoring foreign branch %r\n' % path)
655 655 return None
656 656
657 657 if stop is None:
658 658 stop = svn.ra.get_latest_revnum(self.ra)
659 659 try:
660 660 prevmodule = self.reparent('')
661 661 dirent = svn.ra.stat(self.ra, path.strip('/'), stop)
662 662 self.reparent(prevmodule)
663 663 except SubversionException:
664 664 dirent = None
665 665 if not dirent:
666 666 raise SvnPathNotFound(_('%s not found up to revision %d')
667 667 % (path, stop))
668 668
669 669 # stat() gives us the previous revision on this line of
670 670 # development, but it might be in *another module*. Fetch the
671 671 # log and detect renames down to the latest revision.
672 672 revnum, realpath = findchanges(path, stop, dirent.created_rev)
673 673 if revnum is None:
674 674             # Tools like svnsync can create empty revisions, for
675 675             # instance when synchronizing only a subtree. The
676 676             # created_rev of such revisions keeps its original value
677 677             # even though all changes have disappeared, and can be
678 678             # returned by ra.stat(), at least when stating the root
679 679             # module. In that case, do not trust created_rev and scan
680 680             # the whole history.
681 681 revnum, realpath = findchanges(path, stop)
682 682 if revnum is None:
683 683 self.ui.debug('ignoring empty branch %r\n' % realpath)
684 684 return None
685 685
686 686 if not realpath.startswith(self.rootmodule):
687 687 self.ui.debug('ignoring foreign branch %r\n' % realpath)
688 688 return None
689 689 return self.revid(revnum, realpath)
690 690
691 691 def reparent(self, module):
692 692 """Reparent the svn transport and return the previous parent."""
693 693 if self.prevmodule == module:
694 694 return module
695 695 svnurl = self.baseurl + quote(module)
696 696 prevmodule = self.prevmodule
697 697 if prevmodule is None:
698 698 prevmodule = ''
699 699 self.ui.debug("reparent to %s\n" % svnurl)
700 700 svn.ra.reparent(self.ra, svnurl)
701 701 self.prevmodule = module
702 702 return prevmodule
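    # Usage sketch: reparent('') points the RA session at the repository
    # root; callers save the returned previous module and restore it
    # afterwards (see latest() above and _checkpath() below).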
703 703
704 704 def expandpaths(self, rev, paths, parents):
705 705 changed, removed = set(), set()
706 706 copies = {}
707 707
708 708 new_module, revnum = revsplit(rev)[1:]
709 709 if new_module != self.module:
710 710 self.module = new_module
711 711 self.reparent(self.module)
712 712
713 713 for i, (path, ent) in enumerate(paths):
714 714 self.ui.progress(_('scanning paths'), i, item=path,
715 715 total=len(paths))
716 716 entrypath = self.getrelpath(path)
717 717
718 718 kind = self._checkpath(entrypath, revnum)
719 719 if kind == svn.core.svn_node_file:
720 720 changed.add(self.recode(entrypath))
721 721 if not ent.copyfrom_path or not parents:
722 722 continue
723 723 # Copy sources not in parent revisions cannot be
724 724                 # represented; ignore their origin for now
725 725 pmodule, prevnum = revsplit(parents[0])[1:]
726 726 if ent.copyfrom_rev < prevnum:
727 727 continue
728 728 copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule)
729 729 if not copyfrom_path:
730 730 continue
731 731 self.ui.debug("copied to %s from %s@%s\n" %
732 732 (entrypath, copyfrom_path, ent.copyfrom_rev))
733 733 copies[self.recode(entrypath)] = self.recode(copyfrom_path)
734 734 elif kind == 0: # gone, but had better be a deleted *file*
735 735 self.ui.debug("gone from %s\n" % ent.copyfrom_rev)
736 736 pmodule, prevnum = revsplit(parents[0])[1:]
737 737 parentpath = pmodule + "/" + entrypath
738 738 fromkind = self._checkpath(entrypath, prevnum, pmodule)
739 739
740 740 if fromkind == svn.core.svn_node_file:
741 741 removed.add(self.recode(entrypath))
742 742 elif fromkind == svn.core.svn_node_dir:
743 743 oroot = parentpath.strip('/')
744 744 nroot = path.strip('/')
745 745 children = self._iterfiles(oroot, prevnum)
746 746 for childpath in children:
747 747 childpath = childpath.replace(oroot, nroot)
748 748 childpath = self.getrelpath("/" + childpath, pmodule)
749 749 if childpath:
750 750 removed.add(self.recode(childpath))
751 751 else:
752 752 self.ui.debug('unknown path in revision %d: %s\n' % \
753 753 (revnum, path))
754 754 elif kind == svn.core.svn_node_dir:
755 755 if ent.action == 'M':
756 756 # If the directory just had a prop change,
757 757 # then we shouldn't need to look for its children.
758 758 continue
759 759 if ent.action == 'R' and parents:
760 760 # If a directory is replacing a file, mark the previous
761 761 # file as deleted
762 762 pmodule, prevnum = revsplit(parents[0])[1:]
763 763 pkind = self._checkpath(entrypath, prevnum, pmodule)
764 764 if pkind == svn.core.svn_node_file:
765 765 removed.add(self.recode(entrypath))
766 766 elif pkind == svn.core.svn_node_dir:
767 767 # We do not know what files were kept or removed,
768 768                         # so mark them all as changed.
769 769 for childpath in self._iterfiles(pmodule, prevnum):
770 770 childpath = self.getrelpath("/" + childpath)
771 771 if childpath:
772 772 changed.add(self.recode(childpath))
773 773
774 774 for childpath in self._iterfiles(path, revnum):
775 775 childpath = self.getrelpath("/" + childpath)
776 776 if childpath:
777 777 changed.add(self.recode(childpath))
778 778
779 779 # Handle directory copies
780 780 if not ent.copyfrom_path or not parents:
781 781 continue
782 782 # Copy sources not in parent revisions cannot be
783 783                 # represented; ignore their origin for now
784 784 pmodule, prevnum = revsplit(parents[0])[1:]
785 785 if ent.copyfrom_rev < prevnum:
786 786 continue
787 787 copyfrompath = self.getrelpath(ent.copyfrom_path, pmodule)
788 788 if not copyfrompath:
789 789 continue
790 790 self.ui.debug("mark %s came from %s:%d\n"
791 791 % (path, copyfrompath, ent.copyfrom_rev))
792 792 children = self._iterfiles(ent.copyfrom_path, ent.copyfrom_rev)
793 793 for childpath in children:
794 794 childpath = self.getrelpath("/" + childpath, pmodule)
795 795 if not childpath:
796 796 continue
797 797 copytopath = path + childpath[len(copyfrompath):]
798 798 copytopath = self.getrelpath(copytopath)
799 799 copies[self.recode(copytopath)] = self.recode(childpath)
800 800
801 801 self.ui.progress(_('scanning paths'), None)
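        # Removed files are folded into the changed list below so the
        # sink still iterates over them; getfile() then reports them as
        # missing by returning (None, None).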
802 802 changed.update(removed)
803 803 return (list(changed), removed, copies)
804 804
805 805 def _fetch_revisions(self, from_revnum, to_revnum):
806 806 if from_revnum < to_revnum:
807 807 from_revnum, to_revnum = to_revnum, from_revnum
808 808
809 809 self.child_cset = None
810 810
811 811 def parselogentry(orig_paths, revnum, author, date, message):
812 812 """Return the parsed commit object or None, and True if
813 813 the revision is a branch root.
814 814 """
815 815 self.ui.debug("parsing revision %d (%d changes)\n" %
816 816 (revnum, len(orig_paths)))
817 817
818 818 branched = False
819 819 rev = self.revid(revnum)
820 820 # branch log might return entries for a parent we already have
821 821
822 822 if rev in self.commits or revnum < to_revnum:
823 823 return None, branched
824 824
825 825 parents = []
826 826 # check whether this revision is the start of a branch or part
827 827 # of a branch renaming
828 828 orig_paths = sorted(orig_paths.iteritems())
829 829 root_paths = [(p, e) for p, e in orig_paths
830 830 if self.module.startswith(p)]
831 831 if root_paths:
832 832 path, ent = root_paths[-1]
833 833 if ent.copyfrom_path:
834 834 branched = True
835 835 newpath = ent.copyfrom_path + self.module[len(path):]
836 836 # ent.copyfrom_rev may not be the actual last revision
837 837 previd = self.latest(newpath, ent.copyfrom_rev)
838 838 if previd is not None:
839 839 prevmodule, prevnum = revsplit(previd)[1:]
840 840 if prevnum >= self.startrev:
841 841 parents = [previd]
842 842 self.ui.note(
843 843 _('found parent of branch %s at %d: %s\n') %
844 844 (self.module, prevnum, prevmodule))
845 845 else:
846 846 self.ui.debug("no copyfrom path, don't know what to do.\n")
847 847
848 848 paths = []
849 849 # filter out unrelated paths
850 850 for path, ent in orig_paths:
851 851 if self.getrelpath(path) is None:
852 852 continue
853 853 paths.append((path, ent))
854 854
855 855 # Example SVN datetime. Includes microseconds.
856 856 # ISO-8601 conformant
857 857 # '2007-01-04T17:35:00.902377Z'
858 858 date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])
859 859 if self.ui.configbool('convert', 'localtimezone'):
860 860 date = makedatetimestamp(date[0])
861 861
862 862 log = message and self.recode(message) or ''
863 863 author = author and self.recode(author) or ''
864 864 try:
865 865 branch = self.module.split("/")[-1]
866 866 if branch == self.trunkname:
867 867 branch = None
868 868 except IndexError:
869 869 branch = None
870 870
871 871 cset = commit(author=author,
872 872 date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
873 873 desc=log,
874 874 parents=parents,
875 875 branch=branch,
876 876 rev=rev)
877 877
878 878 self.commits[rev] = cset
879 879 # The parents list is *shared* among self.paths and the
880 880 # commit object. Both will be updated below.
881 881 self.paths[rev] = (paths, cset.parents)
882 882 if self.child_cset and not self.child_cset.parents:
883 883 self.child_cset.parents[:] = [rev]
884 884 self.child_cset = cset
885 885 return cset, branched
886 886
887 887 self.ui.note(_('fetching revision log for "%s" from %d to %d\n') %
888 888 (self.module, from_revnum, to_revnum))
889 889
890 890 try:
891 891 firstcset = None
892 892 lastonbranch = False
893 893 stream = self._getlog([self.module], from_revnum, to_revnum)
894 894 try:
895 895 for entry in stream:
896 896 paths, revnum, author, date, message = entry
897 897 if revnum < self.startrev:
898 898 lastonbranch = True
899 899 break
900 900 if not paths:
901 901 self.ui.debug('revision %d has no entries\n' % revnum)
902 902 # If we ever leave the loop on an empty
903 903 # revision, do not try to get a parent branch
904 904 lastonbranch = lastonbranch or revnum == 0
905 905 continue
906 906 cset, lastonbranch = parselogentry(paths, revnum, author,
907 907 date, message)
908 908 if cset:
909 909 firstcset = cset
910 910 if lastonbranch:
911 911 break
912 912 finally:
913 913 stream.close()
914 914
915 915 if not lastonbranch and firstcset and not firstcset.parents:
916 916 # The first revision of the sequence (the last fetched one)
917 917 # has invalid parents if not a branch root. Find the parent
918 918 # revision now, if any.
919 919 try:
920 920 firstrevnum = self.revnum(firstcset.rev)
921 921 if firstrevnum > 1:
922 922 latest = self.latest(self.module, firstrevnum - 1)
923 923 if latest:
924 924 firstcset.parents.append(latest)
925 925 except SvnPathNotFound:
926 926 pass
927 927 except SubversionException, (inst, num):
928 928 if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION:
929 929 raise util.Abort(_('svn: branch has no revision %s')
930 930 % to_revnum)
931 931 raise
932 932
933 933 def getfile(self, file, rev):
934 934 # TODO: ra.get_file transmits the whole file instead of diffs.
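        # Contract: return (data, mode) for an existing file, or
        # (None, None) when the file is missing or was removed in this
        # revision; callers check "data is None" (see putcommit below).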
935 935 if file in self.removed:
936 raise IOError
936 return None, None
937 937 mode = ''
938 938 try:
939 939 new_module, revnum = revsplit(rev)[1:]
940 940 if self.module != new_module:
941 941 self.module = new_module
942 942 self.reparent(self.module)
943 943 io = StringIO()
944 944 info = svn.ra.get_file(self.ra, file, revnum, io)
945 945 data = io.getvalue()
946 946 # ra.get_file() seems to keep a reference on the input buffer
947 947 # preventing collection. Release it explicitly.
948 948 io.close()
949 949 if isinstance(info, list):
950 950 info = info[-1]
951 951 mode = ("svn:executable" in info) and 'x' or ''
952 952 mode = ("svn:special" in info) and 'l' or mode
953 953 except SubversionException, e:
954 954 notfound = (svn.core.SVN_ERR_FS_NOT_FOUND,
955 955 svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND)
956 956 if e.apr_err in notfound: # File not found
957 raise IOError
957 return None, None
958 958 raise
959 959 if mode == 'l':
960 960 link_prefix = "link "
961 961 if data.startswith(link_prefix):
962 962 data = data[len(link_prefix):]
963 963 return data, mode
964 964
965 965 def _iterfiles(self, path, revnum):
966 966 """Enumerate all files in path at revnum, recursively."""
967 967 path = path.strip('/')
968 968 pool = Pool()
969 969 rpath = '/'.join([self.baseurl, quote(path)]).strip('/')
970 970 entries = svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool)
971 971 if path:
972 972 path += '/'
973 973 return ((path + p) for p, e in entries.iteritems()
974 974 if e.kind == svn.core.svn_node_file)
975 975
976 976 def getrelpath(self, path, module=None):
977 977 if module is None:
978 978 module = self.module
979 979 # Given the repository url of this wc, say
980 980 # "http://server/plone/CMFPlone/branches/Plone-2_0-branch"
981 981 # extract the "entry" portion (a relative path) from what
982 982 # svn log --xml says, i.e.
983 983 # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py"
984 984 # that is to say "tests/PloneTestCase.py"
985 985 if path.startswith(module):
986 986 relative = path.rstrip('/')[len(module):]
987 987 if relative.startswith('/'):
988 988 return relative[1:]
989 989 elif relative == '':
990 990 return relative
991 991
992 992 # The path is outside our tracked tree...
993 993 self.ui.debug('%r is not under %r, ignoring\n' % (path, module))
994 994 return None
995 995
996 996 def _checkpath(self, path, revnum, module=None):
997 997 if module is not None:
998 998 prevmodule = self.reparent('')
999 999 path = module + '/' + path
1000 1000 try:
1001 1001 # ra.check_path does not like leading slashes very much, it leads
1002 1002 # to PROPFIND subversion errors
1003 1003 return svn.ra.check_path(self.ra, path.strip('/'), revnum)
1004 1004 finally:
1005 1005 if module is not None:
1006 1006 self.reparent(prevmodule)
1007 1007
1008 1008 def _getlog(self, paths, start, end, limit=0, discover_changed_paths=True,
1009 1009 strict_node_history=False):
1010 1010 # Normalize path names, svn >= 1.5 only wants paths relative to
1011 1011 # supplied URL
1012 1012 relpaths = []
1013 1013 for p in paths:
1014 1014 if not p.startswith('/'):
1015 1015 p = self.module + '/' + p
1016 1016 relpaths.append(p.strip('/'))
1017 1017 args = [self.baseurl, relpaths, start, end, limit,
1018 1018 discover_changed_paths, strict_node_history]
1019 1019 # undocumented feature: debugsvnlog can be disabled
1020 1020 if not self.ui.configbool('convert', 'svn.debugsvnlog', True):
1021 1021 return directlogstream(*args)
1022 1022 arg = encodeargs(args)
1023 1023 hgexe = util.hgexecutable()
1024 1024 cmd = '%s debugsvnlog' % util.shellquote(hgexe)
1025 1025 stdin, stdout = util.popen2(util.quotecommand(cmd))
1026 1026 stdin.write(arg)
1027 1027 try:
1028 1028 stdin.close()
1029 1029 except IOError:
1030 1030             raise util.Abort(_('Mercurial failed to run itself, check'
1031 1031                                ' that the hg executable is in PATH'))
1032 1032 return logstream(stdout)
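    # The default path above re-runs Mercurial as "hg debugsvnlog" in a
    # child process, passing the encoded arguments over stdin and reading
    # log records back from its stdout; setting convert.svn.debugsvnlog=0
    # streams the log directly in-process instead.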
1033 1033
1034 1034 pre_revprop_change = '''#!/bin/sh
1035 1035
1036 1036 REPOS="$1"
1037 1037 REV="$2"
1038 1038 USER="$3"
1039 1039 PROPNAME="$4"
1040 1040 ACTION="$5"
1041 1041
1042 1042 if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
1043 1043 if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi
1044 1044 if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi
1045 1045
1046 1046 echo "Changing prohibited revision property" >&2
1047 1047 exit 1
1048 1048 '''
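# Subversion refuses revision property changes unless a
# pre-revprop-change hook explicitly allows them; svn_sink installs the
# script above into repositories it creates so that it can set the
# hg:convert-rev and hg:convert-branch revprops after each commit.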
1049 1049
1050 1050 class svn_sink(converter_sink, commandline):
1051 1051     commit_re = re.compile(r'Committed revision (\d+)\.', re.M)
1052 1052 uuid_re = re.compile(r'Repository UUID:\s*(\S+)', re.M)
1053 1053
1054 1054 def prerun(self):
1055 1055 if self.wc:
1056 1056 os.chdir(self.wc)
1057 1057
1058 1058 def postrun(self):
1059 1059 if self.wc:
1060 1060 os.chdir(self.cwd)
1061 1061
1062 1062 def join(self, name):
1063 1063 return os.path.join(self.wc, '.svn', name)
1064 1064
1065 1065 def revmapfile(self):
1066 1066 return self.join('hg-shamap')
1067 1067
1068 1068 def authorfile(self):
1069 1069 return self.join('hg-authormap')
1070 1070
1071 1071 def __init__(self, ui, path):
1072 1072
1073 1073 converter_sink.__init__(self, ui, path)
1074 1074 commandline.__init__(self, ui, 'svn')
1075 1075 self.delete = []
1076 1076 self.setexec = []
1077 1077 self.delexec = []
1078 1078 self.copies = []
1079 1079 self.wc = None
1080 1080 self.cwd = os.getcwd()
1081 1081
1082 1082 created = False
1083 1083 if os.path.isfile(os.path.join(path, '.svn', 'entries')):
1084 1084 self.wc = os.path.realpath(path)
1085 1085 self.run0('update')
1086 1086 else:
1087 1087 if not re.search(r'^(file|http|https|svn|svn\+ssh)\://', path):
1088 1088 path = os.path.realpath(path)
1089 1089 if os.path.isdir(os.path.dirname(path)):
1090 1090 if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
1091 1091 ui.status(_('initializing svn repository %r\n') %
1092 1092 os.path.basename(path))
1093 1093 commandline(ui, 'svnadmin').run0('create', path)
1094 1094 created = path
1095 1095 path = util.normpath(path)
1096 1096 if not path.startswith('/'):
1097 1097 path = '/' + path
1098 1098 path = 'file://' + path
1099 1099
1100 1100 wcpath = os.path.join(os.getcwd(), os.path.basename(path) + '-wc')
1101 1101 ui.status(_('initializing svn working copy %r\n')
1102 1102 % os.path.basename(wcpath))
1103 1103 self.run0('checkout', path, wcpath)
1104 1104
1105 1105 self.wc = wcpath
1106 1106 self.opener = scmutil.opener(self.wc)
1107 1107 self.wopener = scmutil.opener(self.wc)
1108 1108 self.childmap = mapfile(ui, self.join('hg-childmap'))
1109 1109 self.is_exec = util.checkexec(self.wc) and util.isexec or None
1110 1110
1111 1111 if created:
1112 1112 hook = os.path.join(created, 'hooks', 'pre-revprop-change')
1113 1113 fp = open(hook, 'w')
1114 1114 fp.write(pre_revprop_change)
1115 1115 fp.close()
1116 1116 util.setflags(hook, False, True)
1117 1117
1118 1118 output = self.run0('info')
1119 1119 self.uuid = self.uuid_re.search(output).group(1).strip()
1120 1120
1121 1121 def wjoin(self, *names):
1122 1122 return os.path.join(self.wc, *names)
1123 1123
1124 1124 @propertycache
1125 1125 def manifest(self):
1126 1126 # As of svn 1.7, the "add" command fails when receiving
1127 1127 # already tracked entries, so we have to track and filter them
1128 1128 # ourselves.
1129 1129 m = set()
1130 1130 output = self.run0('ls', recursive=True, xml=True)
1131 1131 doc = xml.dom.minidom.parseString(output)
1132 1132 for e in doc.getElementsByTagName('entry'):
1133 1133 for n in e.childNodes:
1134 1134 if n.nodeType != n.ELEMENT_NODE or n.tagName != 'name':
1135 1135 continue
1136 1136 name = ''.join(c.data for c in n.childNodes
1137 1137 if c.nodeType == c.TEXT_NODE)
1138 1138 # Entries are compared with names coming from
1139 1139 # mercurial, so bytes with undefined encoding. Our
1140 1140 # best bet is to assume they are in local
1141 1141 # encoding. They will be passed to command line calls
1142 1142 # later anyway, so they better be.
1143 1143 m.add(encoding.tolocal(name.encode('utf-8')))
1144 1144 break
1145 1145 return m
1146 1146
1147 1147 def putfile(self, filename, flags, data):
1148 1148 if 'l' in flags:
1149 1149 self.wopener.symlink(data, filename)
1150 1150 else:
1151 1151 try:
1152 1152 if os.path.islink(self.wjoin(filename)):
1153 1153 os.unlink(filename)
1154 1154 except OSError:
1155 1155 pass
1156 1156 self.wopener.write(filename, data)
1157 1157
1158 1158 if self.is_exec:
1159 1159 if self.is_exec(self.wjoin(filename)):
1160 1160 if 'x' not in flags:
1161 1161 self.delexec.append(filename)
1162 1162 else:
1163 1163 if 'x' in flags:
1164 1164 self.setexec.append(filename)
1165 1165 util.setflags(self.wjoin(filename), False, 'x' in flags)
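    # The setexec/delexec lists accumulated above are flushed in
    # putcommit() below with "svn propset/propdel svn:executable".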
1166 1166
1167 1167 def _copyfile(self, source, dest):
1168 1168 # SVN's copy command pukes if the destination file exists, but
1169 1169 # our copyfile method expects to record a copy that has
1170 1170 # already occurred. Cross the semantic gap.
1171 1171 wdest = self.wjoin(dest)
1172 1172 exists = os.path.lexists(wdest)
1173 1173 if exists:
1174 1174 fd, tempname = tempfile.mkstemp(
1175 1175 prefix='hg-copy-', dir=os.path.dirname(wdest))
1176 1176 os.close(fd)
1177 1177 os.unlink(tempname)
1178 1178 os.rename(wdest, tempname)
1179 1179 try:
1180 1180 self.run0('copy', source, dest)
1181 1181 finally:
1182 1182 self.manifest.add(dest)
1183 1183 if exists:
1184 1184 try:
1185 1185 os.unlink(wdest)
1186 1186 except OSError:
1187 1187 pass
1188 1188 os.rename(tempname, wdest)
1189 1189
1190 1190 def dirs_of(self, files):
1191 1191 dirs = set()
1192 1192 for f in files:
1193 1193 if os.path.isdir(self.wjoin(f)):
1194 1194 dirs.add(f)
1195 1195 for i in strutil.rfindall(f, '/'):
1196 1196 dirs.add(f[:i])
1197 1197 return dirs
1198 1198
1199 1199 def add_dirs(self, files):
1200 1200 add_dirs = [d for d in sorted(self.dirs_of(files))
1201 1201 if d not in self.manifest]
1202 1202 if add_dirs:
1203 1203 self.manifest.update(add_dirs)
1204 1204 self.xargs(add_dirs, 'add', non_recursive=True, quiet=True)
1205 1205 return add_dirs
1206 1206
1207 1207 def add_files(self, files):
1208 1208 files = [f for f in files if f not in self.manifest]
1209 1209 if files:
1210 1210 self.manifest.update(files)
1211 1211 self.xargs(files, 'add', quiet=True)
1212 1212 return files
1213 1213
1214 1214 def tidy_dirs(self, names):
1215 1215 deleted = []
1216 1216 for d in sorted(self.dirs_of(names), reverse=True):
1217 1217 wd = self.wjoin(d)
1218 1218             if os.listdir(wd) == ['.svn']:
1219 1219 self.run0('delete', d)
1220 1220 self.manifest.remove(d)
1221 1221 deleted.append(d)
1222 1222 return deleted
1223 1223
1224 1224 def addchild(self, parent, child):
1225 1225 self.childmap[parent] = child
1226 1226
1227 1227 def revid(self, rev):
1228 1228 return u"svn:%s@%s" % (self.uuid, rev)
1229 1229
1230 1230 def putcommit(self, files, copies, parents, commit, source, revmap):
1231 1231 for parent in parents:
1232 1232 try:
1233 1233 return self.revid(self.childmap[parent])
1234 1234 except KeyError:
1235 1235 pass
1236 1236
1237 1237 # Apply changes to working copy
1238 1238 for f, v in files:
1239 try:
1240 data, mode = source.getfile(f, v)
1241 except IOError:
1239 data, mode = source.getfile(f, v)
1240 if data is None:
1242 1241 self.delete.append(f)
1243 1242 else:
1244 1243 self.putfile(f, mode, data)
1245 1244 if f in copies:
1246 1245 self.copies.append([copies[f], f])
1247 1246 files = [f[0] for f in files]
1248 1247
1249 1248 entries = set(self.delete)
1250 1249 files = frozenset(files)
1251 1250 entries.update(self.add_dirs(files.difference(entries)))
1252 1251 if self.copies:
1253 1252 for s, d in self.copies:
1254 1253 self._copyfile(s, d)
1255 1254 self.copies = []
1256 1255 if self.delete:
1257 1256 self.xargs(self.delete, 'delete')
1258 1257 for f in self.delete:
1259 1258 self.manifest.remove(f)
1260 1259 self.delete = []
1261 1260 entries.update(self.add_files(files.difference(entries)))
1262 1261 entries.update(self.tidy_dirs(entries))
1263 1262 if self.delexec:
1264 1263 self.xargs(self.delexec, 'propdel', 'svn:executable')
1265 1264 self.delexec = []
1266 1265 if self.setexec:
1267 1266 self.xargs(self.setexec, 'propset', 'svn:executable', '*')
1268 1267 self.setexec = []
1269 1268
1270 1269 fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
1271 1270 fp = os.fdopen(fd, 'w')
1272 1271 fp.write(commit.desc)
1273 1272 fp.close()
1274 1273 try:
1275 1274 output = self.run0('commit',
1276 1275 username=util.shortuser(commit.author),
1277 1276 file=messagefile,
1278 1277 encoding='utf-8')
1279 1278 try:
1280 1279 rev = self.commit_re.search(output).group(1)
1281 1280 except AttributeError:
1282 1281 if not files:
1283 1282 return parents[0]
1284 1283 self.ui.warn(_('unexpected svn output:\n'))
1285 1284 self.ui.warn(output)
1286 1285 raise util.Abort(_('unable to cope with svn output'))
1287 1286 if commit.rev:
1288 1287 self.run('propset', 'hg:convert-rev', commit.rev,
1289 1288 revprop=True, revision=rev)
1290 1289 if commit.branch and commit.branch != 'default':
1291 1290 self.run('propset', 'hg:convert-branch', commit.branch,
1292 1291 revprop=True, revision=rev)
1293 1292 for parent in parents:
1294 1293 self.addchild(parent, rev)
1295 1294 return self.revid(rev)
1296 1295 finally:
1297 1296 os.unlink(messagefile)
1298 1297
1299 1298 def puttags(self, tags):
1300 1299 self.ui.warn(_('writing Subversion tags is not yet implemented\n'))
1301 1300 return None, None
1302 1301
1303 1302 def hascommitfrommap(self, rev):
1304 1303         # We trust that revisions referenced in a map are still present
1305 1304 # TODO: implement something better if necessary and feasible
1306 1305 return True
1307 1306
1308 1307 def hascommitforsplicemap(self, rev):
1309 1308 # This is not correct as one can convert to an existing subversion
1310 1309 # repository and childmap would not list all revisions. Too bad.
1311 1310 if rev in self.childmap:
1312 1311 return True
1313 1312 raise util.Abort(_('splice map revision %s not found in subversion '
1314 1313 'child map (revision lookups are not implemented)')
1315 1314 % rev)
@@ -1,944 +1,944 b''
1 1 # histedit.py - interactive history editing for mercurial
2 2 #
3 3 # Copyright 2009 Augie Fackler <raf@durin42.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """interactive history editing
8 8
9 9 With this extension installed, Mercurial gains one new command: histedit. Usage
10 10 is as follows, assuming the following history::
11 11
12 12 @ 3[tip] 7c2fd3b9020c 2009-04-27 18:04 -0500 durin42
13 13 | Add delta
14 14 |
15 15 o 2 030b686bedc4 2009-04-27 18:04 -0500 durin42
16 16 | Add gamma
17 17 |
18 18 o 1 c561b4e977df 2009-04-27 18:04 -0500 durin42
19 19 | Add beta
20 20 |
21 21 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
22 22 Add alpha
23 23
24 24 If you were to run ``hg histedit c561b4e977df``, you would see the following
25 25 file open in your editor::
26 26
27 27 pick c561b4e977df Add beta
28 28 pick 030b686bedc4 Add gamma
29 29 pick 7c2fd3b9020c Add delta
30 30
31 31 # Edit history between c561b4e977df and 7c2fd3b9020c
32 32 #
33 33 # Commits are listed from least to most recent
34 34 #
35 35 # Commands:
36 36 # p, pick = use commit
37 37 # e, edit = use commit, but stop for amending
38 38 # f, fold = use commit, but combine it with the one above
39 39 # r, roll = like fold, but discard this commit's description
40 40 # d, drop = remove commit from history
41 41 # m, mess = edit message without changing commit content
42 42 #
43 43
44 44 In this file, lines beginning with ``#`` are ignored. You must specify a rule
45 45 for each revision in your history. For example, if you had meant to add gamma
46 46 before beta, and then wanted to add delta in the same revision as beta, you
47 47 would reorganize the file to look like this::
48 48
49 49 pick 030b686bedc4 Add gamma
50 50 pick c561b4e977df Add beta
51 51 fold 7c2fd3b9020c Add delta
52 52
53 53 # Edit history between c561b4e977df and 7c2fd3b9020c
54 54 #
55 55 # Commits are listed from least to most recent
56 56 #
57 57 # Commands:
58 58 # p, pick = use commit
59 59 # e, edit = use commit, but stop for amending
60 60 # f, fold = use commit, but combine it with the one above
61 61 # r, roll = like fold, but discard this commit's description
62 62 # d, drop = remove commit from history
63 63 # m, mess = edit message without changing commit content
64 64 #
65 65
66 66 At which point you close the editor and ``histedit`` starts working. When you
67 67 specify a ``fold`` operation, ``histedit`` will open an editor when it folds
68 68 those revisions together, offering you a chance to clean up the commit message::
69 69
70 70 Add beta
71 71 ***
72 72 Add delta
73 73
74 74 Edit the commit message to your liking, then close the editor. For
75 75 this example, let's assume that the commit message was changed to
76 76 ``Add beta and delta.`` After histedit has run and had a chance to
77 77 remove any old or temporary revisions it needed, the history looks
78 78 like this::
79 79
80 80 @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42
81 81 | Add beta and delta.
82 82 |
83 83 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
84 84 | Add gamma
85 85 |
86 86 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
87 87 Add alpha
88 88
89 89 Note that ``histedit`` does *not* remove any revisions (even its own temporary
90 90 ones) until after it has completed all the editing operations, so it will
91 91 probably perform several strip operations when it's done. For the above example,
92 92 it had to run strip twice. Strip can be slow depending on a variety of factors,
93 93 so you might need to be a little patient. You can choose to keep the original
94 94 revisions by passing the ``--keep`` flag.
95 95
96 96 The ``edit`` operation will drop you back to a command prompt,
97 97 allowing you to edit files freely, or even use ``hg record`` to commit
98 98 some changes as a separate commit. When you're done, any remaining
99 99 uncommitted changes will be committed as well. When done, run ``hg
100 100 histedit --continue`` to finish this step. You'll be prompted for a
101 101 new commit message, but the default commit message will be the
102 102 original message for the ``edit``-ed revision.
103 103
104 104 The ``message`` operation will give you a chance to revise a commit
105 105 message without changing the contents. It's a shortcut for doing
106 106 ``edit`` immediately followed by `hg histedit --continue``.
107 107
108 108 If ``histedit`` encounters a conflict when moving a revision (while
109 109 handling ``pick`` or ``fold``), it'll stop in a similar manner to
110 110 ``edit`` with the difference that it won't prompt you for a commit
111 111 message when done. If you decide at this point that you don't like how
112 112 much work it will be to rearrange history, or that you made a mistake,
113 113 you can use ``hg histedit --abort`` to abandon the new changes you
114 114 have made and return to the state before you attempted to edit your
115 115 history.
116 116
117 117 If we clone the histedit-ed example repository above and add four more
118 118 changes, such that we have the following history::
119 119
120 120 @ 6[tip] 038383181893 2009-04-27 18:04 -0500 stefan
121 121 | Add theta
122 122 |
123 123 o 5 140988835471 2009-04-27 18:04 -0500 stefan
124 124 | Add eta
125 125 |
126 126 o 4 122930637314 2009-04-27 18:04 -0500 stefan
127 127 | Add zeta
128 128 |
129 129 o 3 836302820282 2009-04-27 18:04 -0500 stefan
130 130 | Add epsilon
131 131 |
132 132 o 2 989b4d060121 2009-04-27 18:04 -0500 durin42
133 133 | Add beta and delta.
134 134 |
135 135 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
136 136 | Add gamma
137 137 |
138 138 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
139 139 Add alpha
140 140
141 141 If you run ``hg histedit --outgoing`` on the clone then it is the same
142 142 as running ``hg histedit 836302820282``. If you plan to push to a
143 143 repository that Mercurial does not detect to be related to the source
144 144 repo, you can add a ``--force`` option.
145 145 """
146 146
147 147 try:
148 148 import cPickle as pickle
149 149 pickle.dump # import now
150 150 except ImportError:
151 151 import pickle
152 152 import os
153 153 import sys
154 154
155 155 from mercurial import cmdutil
156 156 from mercurial import discovery
157 157 from mercurial import error
158 158 from mercurial import copies
159 159 from mercurial import context
160 160 from mercurial import hg
161 161 from mercurial import node
162 162 from mercurial import repair
163 163 from mercurial import scmutil
164 164 from mercurial import util
165 165 from mercurial import obsolete
166 166 from mercurial import merge as mergemod
167 167 from mercurial.lock import release
168 168 from mercurial.i18n import _
169 169
170 170 cmdtable = {}
171 171 command = cmdutil.command(cmdtable)
172 172
173 173 testedwith = 'internal'
174 174
175 175 # i18n: command names and abbreviations must remain untranslated
176 176 editcomment = _("""# Edit history between %s and %s
177 177 #
178 178 # Commits are listed from least to most recent
179 179 #
180 180 # Commands:
181 181 # p, pick = use commit
182 182 # e, edit = use commit, but stop for amending
183 183 # f, fold = use commit, but combine it with the one above
184 184 # r, roll = like fold, but discard this commit's description
185 185 # d, drop = remove commit from history
186 186 # m, mess = edit message without changing commit content
187 187 #
188 188 """)
189 189
190 190 def commitfuncfor(repo, src):
191 191 """Build a commit function for the replacement of <src>
192 192
193 193     This function ensures we apply the same treatment to all changesets.
194 194
195 195 - Add a 'histedit_source' entry in extra.
196 196
197 197     Note that fold has its own separate logic because its handling is a bit
198 198 different and not easily factored out of the fold method.
199 199 """
200 200 phasemin = src.phase()
201 201 def commitfunc(**kwargs):
202 202 phasebackup = repo.ui.backupconfig('phases', 'new-commit')
203 203 try:
204 204 repo.ui.setconfig('phases', 'new-commit', phasemin,
205 205 'histedit')
206 206 extra = kwargs.get('extra', {}).copy()
207 207 extra['histedit_source'] = src.hex()
208 208 kwargs['extra'] = extra
209 209 return repo.commit(**kwargs)
210 210 finally:
211 211 repo.ui.restoreconfig(phasebackup)
212 212 return commitfunc
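# Usage sketch, as in pick() and message() below: build the commit
# function once per source changeset, then call it like repo.commit():
#   commit = commitfuncfor(repo, oldctx)
#   n = commit(text=oldctx.description(), user=oldctx.user(),
#              date=oldctx.date(), extra=oldctx.extra())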
213 213
214 214 def applychanges(ui, repo, ctx, opts):
215 215 """Merge changeset from ctx (only) in the current working directory"""
216 216 wcpar = repo.dirstate.parents()[0]
217 217 if ctx.p1().node() == wcpar:
218 218         # editing is "in place"; we do not need to make any merge,
219 219         # just apply changes on the parent for editing
220 220 cmdutil.revert(ui, repo, ctx, (wcpar, node.nullid), all=True)
221 221 stats = None
222 222 else:
223 223 try:
224 224 # ui.forcemerge is an internal variable, do not document
225 225 repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
226 226 'histedit')
227 227 stats = mergemod.update(repo, ctx.node(), True, True, False,
228 228 ctx.p1().node())
229 229 finally:
230 230 repo.ui.setconfig('ui', 'forcemerge', '', 'histedit')
231 231 repo.setparents(wcpar, node.nullid)
232 232 repo.dirstate.write()
233 233 # fix up dirstate for copies and renames
234 234 cmdutil.duplicatecopies(repo, ctx.rev(), ctx.p1().rev())
235 235 return stats
236 236
237 237 def collapse(repo, first, last, commitopts):
238 238     """collapse the set of revisions from first to last as a new one.
239 239
240 240 Expected commit options are:
241 241 - message
242 242 - date
243 243 - username
244 244 Commit message is edited in all cases.
245 245
246 246 This function works in memory."""
247 247 ctxs = list(repo.set('%d::%d', first, last))
248 248 if not ctxs:
249 249 return None
250 250 base = first.parents()[0]
251 251
252 252 # commit a new version of the old changeset, including the update
253 253 # collect all files which might be affected
254 254 files = set()
255 255 for ctx in ctxs:
256 256 files.update(ctx.files())
257 257
258 258 # Recompute copies (avoid recording a -> b -> a)
259 259 copied = copies.pathcopies(base, last)
260 260
261 261 # prune files which were reverted by the updates
262 262 def samefile(f):
263 263 if f in last.manifest():
264 264 a = last.filectx(f)
265 265 if f in base.manifest():
266 266 b = base.filectx(f)
267 267 return (a.data() == b.data()
268 268 and a.flags() == b.flags())
269 269 else:
270 270 return False
271 271 else:
272 272 return f not in base.manifest()
273 273 files = [f for f in files if not samefile(f)]
274 274 # commit version of these files as defined by head
275 275 headmf = last.manifest()
276 276 def filectxfn(repo, ctx, path):
277 277 if path in headmf:
278 278 fctx = last[path]
279 279 flags = fctx.flags()
280 280 mctx = context.memfilectx(repo,
281 281 fctx.path(), fctx.data(),
282 282 islink='l' in flags,
283 283 isexec='x' in flags,
284 284 copied=copied.get(path))
285 285 return mctx
286 raise IOError()
286 return None
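        # Returning None (instead of raising IOError) tells memctx that
        # the file is absent from this revision, mirroring the
        # (None, None) convention used by the converter sources.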
287 287
288 288 if commitopts.get('message'):
289 289 message = commitopts['message']
290 290 else:
291 291 message = first.description()
292 292 user = commitopts.get('user')
293 293 date = commitopts.get('date')
294 294 extra = commitopts.get('extra')
295 295
296 296 parents = (first.p1().node(), first.p2().node())
297 297 editor = None
298 298 if not commitopts.get('rollup'):
299 299 editor = cmdutil.getcommiteditor(edit=True, editform='histedit.fold')
300 300 new = context.memctx(repo,
301 301 parents=parents,
302 302 text=message,
303 303 files=files,
304 304 filectxfn=filectxfn,
305 305 user=user,
306 306 date=date,
307 307 extra=extra,
308 308 editor=editor)
309 309 return repo.commitctx(new)
310 310
311 311 def pick(ui, repo, ctx, ha, opts):
312 312 oldctx = repo[ha]
313 313 if oldctx.parents()[0] == ctx:
314 314 ui.debug('node %s unchanged\n' % ha)
315 315 return oldctx, []
316 316 hg.update(repo, ctx.node())
317 317 stats = applychanges(ui, repo, oldctx, opts)
318 318 if stats and stats[3] > 0:
319 319 raise error.InterventionRequired(_('Fix up the change and run '
320 320 'hg histedit --continue'))
321 321 # drop the second merge parent
322 322 commit = commitfuncfor(repo, oldctx)
323 323 n = commit(text=oldctx.description(), user=oldctx.user(),
324 324 date=oldctx.date(), extra=oldctx.extra())
325 325 if n is None:
326 326 ui.warn(_('%s: empty changeset\n')
327 327 % node.hex(ha))
328 328 return ctx, []
329 329 new = repo[n]
330 330 return new, [(oldctx.node(), (n,))]
331 331
332 332
333 333 def edit(ui, repo, ctx, ha, opts):
334 334 oldctx = repo[ha]
335 335 hg.update(repo, ctx.node())
336 336 applychanges(ui, repo, oldctx, opts)
337 337 raise error.InterventionRequired(
338 338 _('Make changes as needed, you may commit or record as needed now.\n'
339 339 'When you are finished, run hg histedit --continue to resume.'))
340 340
341 341 def rollup(ui, repo, ctx, ha, opts):
342 342 rollupopts = opts.copy()
343 343 rollupopts['rollup'] = True
344 344 return fold(ui, repo, ctx, ha, rollupopts)
345 345
346 346 def fold(ui, repo, ctx, ha, opts):
347 347 oldctx = repo[ha]
348 348 hg.update(repo, ctx.node())
349 349 stats = applychanges(ui, repo, oldctx, opts)
350 350 if stats and stats[3] > 0:
351 351 raise error.InterventionRequired(
352 352 _('Fix up the change and run hg histedit --continue'))
353 353 n = repo.commit(text='fold-temp-revision %s' % ha, user=oldctx.user(),
354 354 date=oldctx.date(), extra=oldctx.extra())
355 355 if n is None:
356 356         ui.warn(_('%s: empty changeset\n')
357 357 % node.hex(ha))
358 358 return ctx, []
359 359 return finishfold(ui, repo, ctx, oldctx, n, opts, [])
360 360
361 361 def finishfold(ui, repo, ctx, oldctx, newnode, opts, internalchanges):
362 362 parent = ctx.parents()[0].node()
363 363 hg.update(repo, parent)
364 364 ### prepare new commit data
365 365 commitopts = opts.copy()
366 366 commitopts['user'] = ctx.user()
367 367 # commit message
368 368 if opts.get('rollup'):
369 369 newmessage = ctx.description()
370 370 else:
371 371 newmessage = '\n***\n'.join(
372 372 [ctx.description()] +
373 373 [repo[r].description() for r in internalchanges] +
374 374 [oldctx.description()]) + '\n'
375 375 commitopts['message'] = newmessage
376 376 # date
377 377 commitopts['date'] = max(ctx.date(), oldctx.date())
378 378 extra = ctx.extra().copy()
379 379 # histedit_source
380 380     # note: ctx is likely a temporary commit, but that's the best we can do here
381 381 # This is sufficient to solve issue3681 anyway
382 382 extra['histedit_source'] = '%s,%s' % (ctx.hex(), oldctx.hex())
383 383 commitopts['extra'] = extra
384 384 phasebackup = repo.ui.backupconfig('phases', 'new-commit')
385 385 try:
386 386 phasemin = max(ctx.phase(), oldctx.phase())
387 387 repo.ui.setconfig('phases', 'new-commit', phasemin, 'histedit')
388 388 n = collapse(repo, ctx, repo[newnode], commitopts)
389 389 finally:
390 390 repo.ui.restoreconfig(phasebackup)
391 391 if n is None:
392 392 return ctx, []
393 393 hg.update(repo, n)
394 394 replacements = [(oldctx.node(), (newnode,)),
395 395 (ctx.node(), (n,)),
396 396 (newnode, (n,)),
397 397 ]
398 398 for ich in internalchanges:
399 399 replacements.append((ich, (n,)))
400 400 return repo[n], replacements
401 401
402 402 def drop(ui, repo, ctx, ha, opts):
403 403 return ctx, [(repo[ha].node(), ())]
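# In a replacements entry, an empty successor tuple (as returned by drop
# above) marks the precursor as dropped; _histedit below prints
# "histedit: %s is dropped" for such entries when post-processing.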
404 404
405 405
406 406 def message(ui, repo, ctx, ha, opts):
407 407 oldctx = repo[ha]
408 408 hg.update(repo, ctx.node())
409 409 stats = applychanges(ui, repo, oldctx, opts)
410 410 if stats and stats[3] > 0:
411 411 raise error.InterventionRequired(
412 412 _('Fix up the change and run hg histedit --continue'))
413 413 message = oldctx.description()
414 414 commit = commitfuncfor(repo, oldctx)
415 415 editor = cmdutil.getcommiteditor(edit=True, editform='histedit.mess')
416 416 new = commit(text=message, user=oldctx.user(), date=oldctx.date(),
417 417 extra=oldctx.extra(),
418 418 editor=editor)
419 419 newctx = repo[new]
420 420 if oldctx.node() != newctx.node():
421 421 return newctx, [(oldctx.node(), (new,))]
422 422 # We didn't make an edit, so just indicate no replaced nodes
423 423 return newctx, []
424 424
425 425 def findoutgoing(ui, repo, remote=None, force=False, opts={}):
426 426 """utility function to find the first outgoing changeset
427 427
428 428 Used by initialisation code"""
429 429 dest = ui.expandpath(remote or 'default-push', remote or 'default')
430 430 dest, revs = hg.parseurl(dest, None)[:2]
431 431 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
432 432
433 433 revs, checkout = hg.addbranchrevs(repo, repo, revs, None)
434 434 other = hg.peer(repo, opts, dest)
435 435
436 436 if revs:
437 437 revs = [repo.lookup(rev) for rev in revs]
438 438
439 439 outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force)
440 440 if not outgoing.missing:
441 441 raise util.Abort(_('no outgoing ancestors'))
442 442 roots = list(repo.revs("roots(%ln)", outgoing.missing))
443 443 if 1 < len(roots):
444 444 msg = _('there are ambiguous outgoing revisions')
445 445 hint = _('see "hg help histedit" for more detail')
446 446 raise util.Abort(msg, hint=hint)
447 447 return repo.lookup(roots[0])
448 448
449 449 actiontable = {'p': pick,
450 450 'pick': pick,
451 451 'e': edit,
452 452 'edit': edit,
453 453 'f': fold,
454 454 'fold': fold,
455 455 'r': rollup,
456 456 'roll': rollup,
457 457 'd': drop,
458 458 'drop': drop,
459 459 'm': message,
460 460 'mess': message,
461 461 }
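# Each parsed rule line yields an (action, hash) pair; the keys above
# dispatch each action to its implementation in the main loop of
# _histedit() (actfunc = actiontable[action]).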
462 462
463 463 @command('histedit',
464 464 [('', 'commands', '',
465 465 _('Read history edits from the specified file.')),
466 466 ('c', 'continue', False, _('continue an edit already in progress')),
467 467 ('k', 'keep', False,
468 468 _("don't strip old nodes after edit is complete")),
469 469 ('', 'abort', False, _('abort an edit in progress')),
470 470 ('o', 'outgoing', False, _('changesets not found in destination')),
471 471 ('f', 'force', False,
472 472 _('force outgoing even for unrelated repositories')),
473 473 ('r', 'rev', [], _('first revision to be edited'))],
474 474 _("ANCESTOR | --outgoing [URL]"))
475 475 def histedit(ui, repo, *freeargs, **opts):
476 476 """interactively edit changeset history
477 477
478 478 This command edits changesets between ANCESTOR and the parent of
479 479 the working directory.
480 480
481 481 With --outgoing, this edits changesets not found in the
482 482 destination repository. If URL of the destination is omitted, the
483 483 'default-push' (or 'default') path will be used.
484 484
485 485     For safety, this command also aborts if there are ambiguous
486 486     outgoing revisions which may confuse users: for example, when there
487 487     are multiple branches containing outgoing revisions.
488 488
489 489     Use "min(outgoing() and ::.)" or a similar revset specification
490 490     instead of --outgoing to specify the edit target revision exactly
491 491     in such an ambiguous situation. See :hg:`help revsets` for details
492 492     about selecting revisions.
493 493
494 494 Returns 0 on success, 1 if user intervention is required (not only
495 495 for intentional "edit" command, but also for resolving unexpected
496 496 conflicts).
497 497 """
498 498 lock = wlock = None
499 499 try:
500 500 wlock = repo.wlock()
501 501 lock = repo.lock()
502 502 _histedit(ui, repo, *freeargs, **opts)
503 503 finally:
504 504 release(lock, wlock)
505 505
506 506 def _histedit(ui, repo, *freeargs, **opts):
507 507 # TODO only abort if we try and histedit mq patches, not just
508 508 # blanket if mq patches are applied somewhere
509 509 mq = getattr(repo, 'mq', None)
510 510 if mq and mq.applied:
511 511 raise util.Abort(_('source has mq patches applied'))
512 512
513 513 # basic argument incompatibility processing
514 514 outg = opts.get('outgoing')
515 515 cont = opts.get('continue')
516 516 abort = opts.get('abort')
517 517 force = opts.get('force')
518 518 rules = opts.get('commands', '')
519 519 revs = opts.get('rev', [])
520 520     goal = 'new' # this invocation's goal: one of 'new', 'continue', 'abort'
521 521 if force and not outg:
522 522 raise util.Abort(_('--force only allowed with --outgoing'))
523 523 if cont:
524 524 if util.any((outg, abort, revs, freeargs, rules)):
525 525 raise util.Abort(_('no arguments allowed with --continue'))
526 526 goal = 'continue'
527 527 elif abort:
528 528 if util.any((outg, revs, freeargs, rules)):
529 529 raise util.Abort(_('no arguments allowed with --abort'))
530 530 goal = 'abort'
531 531 else:
532 532 if os.path.exists(os.path.join(repo.path, 'histedit-state')):
533 533 raise util.Abort(_('history edit already in progress, try '
534 534 '--continue or --abort'))
535 535 if outg:
536 536 if revs:
537 537 raise util.Abort(_('no revisions allowed with --outgoing'))
538 538 if len(freeargs) > 1:
539 539 raise util.Abort(
540 540 _('only one repo argument allowed with --outgoing'))
541 541 else:
542 542 revs.extend(freeargs)
543 543 if len(revs) != 1:
544 544 raise util.Abort(
545 545 _('histedit requires exactly one ancestor revision'))
546 546
547 547
548 548 if goal == 'continue':
549 549 (parentctxnode, rules, keep, topmost, replacements) = readstate(repo)
550 550 parentctx = repo[parentctxnode]
551 551 parentctx, repl = bootstrapcontinue(ui, repo, parentctx, rules, opts)
552 552 replacements.extend(repl)
553 553 elif goal == 'abort':
554 554 (parentctxnode, rules, keep, topmost, replacements) = readstate(repo)
555 555 mapping, tmpnodes, leafs, _ntm = processreplacement(repo, replacements)
556 556 ui.debug('restore wc to old parent %s\n' % node.short(topmost))
557 557 # check whether we should update away
558 558 parentnodes = [c.node() for c in repo[None].parents()]
559 559 for n in leafs | set([parentctxnode]):
560 560 if n in parentnodes:
561 561 hg.clean(repo, topmost)
562 562 break
563 563 else:
564 564 pass
565 565 cleanupnode(ui, repo, 'created', tmpnodes)
566 566 cleanupnode(ui, repo, 'temp', leafs)
567 567 os.unlink(os.path.join(repo.path, 'histedit-state'))
568 568 return
569 569 else:
570 570 cmdutil.checkunfinished(repo)
571 571 cmdutil.bailifchanged(repo)
572 572
573 573 topmost, empty = repo.dirstate.parents()
574 574 if outg:
575 575 if freeargs:
576 576 remote = freeargs[0]
577 577 else:
578 578 remote = None
579 579 root = findoutgoing(ui, repo, remote, force, opts)
580 580 else:
581 581 rr = list(repo.set('roots(%ld)', scmutil.revrange(repo, revs)))
582 582 if len(rr) != 1:
583 583 raise util.Abort(_('The specified revisions must have '
584 584 'exactly one common root'))
585 585 root = rr[0].node()
586 586
587 587 keep = opts.get('keep', False)
588 588 revs = between(repo, root, topmost, keep)
589 589 if not revs:
590 590 raise util.Abort(_('%s is not an ancestor of working directory') %
591 591 node.short(root))
592 592
593 593 ctxs = [repo[r] for r in revs]
594 594 if not rules:
595 595 rules = '\n'.join([makedesc(c) for c in ctxs])
596 596 rules += '\n\n'
597 597 rules += editcomment % (node.short(root), node.short(topmost))
598 598 rules = ui.edit(rules, ui.username())
599 599 # Save edit rules in .hg/histedit-last-edit.txt in case
600 600 # the user needs to ask for help after something
601 601 # surprising happens.
602 602 f = open(repo.join('histedit-last-edit.txt'), 'w')
603 603 f.write(rules)
604 604 f.close()
605 605 else:
606 606 if rules == '-':
607 607 f = sys.stdin
608 608 else:
609 609 f = open(rules)
610 610 rules = f.read()
611 611 f.close()
612 612 rules = [l for l in (r.strip() for r in rules.splitlines())
613 613 if l and not l.startswith('#')]
614 614 rules = verifyrules(rules, repo, ctxs)
615 615
616 616 parentctx = repo[root].parents()[0]
617 617 replacements = []
618 618
619 619
620 620 while rules:
621 621 writestate(repo, parentctx.node(), rules, keep, topmost, replacements)
622 622 action, ha = rules.pop(0)
623 623 ui.debug('histedit: processing %s %s\n' % (action, ha))
624 624 actfunc = actiontable[action]
625 625 parentctx, replacement_ = actfunc(ui, repo, parentctx, ha, opts)
626 626 replacements.extend(replacement_)
627 627
628 628 hg.update(repo, parentctx.node())
629 629
630 630 mapping, tmpnodes, created, ntm = processreplacement(repo, replacements)
631 631 if mapping:
632 632 for prec, succs in mapping.iteritems():
633 633 if not succs:
634 634 ui.debug('histedit: %s is dropped\n' % node.short(prec))
635 635 else:
636 636 ui.debug('histedit: %s is replaced by %s\n' % (
637 637 node.short(prec), node.short(succs[0])))
638 638 if len(succs) > 1:
639 639 m = 'histedit: %s'
640 640 for n in succs[1:]:
641 641 ui.debug(m % node.short(n))
642 642
643 643 if not keep:
644 644 if mapping:
645 645 movebookmarks(ui, repo, mapping, topmost, ntm)
646 646 # TODO update mq state
647 647 if obsolete._enabled:
648 648 markers = []
649 649 # sort by revision number because it sounds "right"
650 650 for prec in sorted(mapping, key=repo.changelog.rev):
651 651 succs = mapping[prec]
652 652 markers.append((repo[prec],
653 653 tuple(repo[s] for s in succs)))
654 654 if markers:
655 655 obsolete.createmarkers(repo, markers)
656 656 else:
657 657 cleanupnode(ui, repo, 'replaced', mapping)
658 658
659 659 cleanupnode(ui, repo, 'temp', tmpnodes)
660 660 os.unlink(os.path.join(repo.path, 'histedit-state'))
661 661 if os.path.exists(repo.sjoin('undo')):
662 662 os.unlink(repo.sjoin('undo'))
663 663
664 664 def gatherchildren(repo, ctx):
665 665 # are there any new commits between the expected parent and "."?
666 666 #
667 667 # note: does not take non-linear new changes into account (but the
668 668 # previous implementation didn't use them anyway (issue3655))
669 669 newchildren = [c.node() for c in repo.set('(%d::.)', ctx)]
670 670 if ctx.node() != node.nullid:
671 671 if not newchildren:
672 672 # `ctx` should match but no result. This means that
673 673 # currentnode is not a descendant from ctx.
674 674 msg = _('%s is not an ancestor of working directory')
675 675 hint = _('use "histedit --abort" to clear broken state')
676 676 raise util.Abort(msg % ctx, hint=hint)
677 677 newchildren.pop(0) # remove ctx
678 678 return newchildren
679 679
680 680 def bootstrapcontinue(ui, repo, parentctx, rules, opts):
681 681 action, currentnode = rules.pop(0)
682 682 ctx = repo[currentnode]
683 683
684 684 newchildren = gatherchildren(repo, parentctx)
685 685
686 686 # Commit dirty working directory if necessary
687 687 new = None
688 688 m, a, r, d = repo.status()[:4]
689 689 if m or a or r or d:
690 690 # prepare the message for the commit to come
691 691 if action in ('f', 'fold', 'r', 'roll'):
692 692 message = 'fold-temp-revision %s' % currentnode
693 693 else:
694 694 message = ctx.description()
695 695 editopt = action in ('e', 'edit', 'm', 'mess')
696 696 canonaction = {'e': 'edit', 'm': 'mess', 'p': 'pick'}
697 697 editform = 'histedit.%s' % canonaction.get(action, action)
698 698 editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
699 699 commit = commitfuncfor(repo, ctx)
700 700 new = commit(text=message, user=ctx.user(),
701 701 date=ctx.date(), extra=ctx.extra(),
702 702 editor=editor)
703 703 if new is not None:
704 704 newchildren.append(new)
705 705
706 706 replacements = []
707 707 # track replacements
708 708 if ctx.node() not in newchildren:
709 709 # note: new children may be empty when the changeset is dropped.
710 710 # this happens e.g. during a conflicting pick where we revert content
711 711 # to the parent.
712 712 replacements.append((ctx.node(), tuple(newchildren)))
713 713
714 714 if action in ('f', 'fold', 'r', 'roll'):
715 715 if newchildren:
716 716 # finalize fold operation if applicable
717 717 if new is None:
718 718 new = newchildren[-1]
719 719 else:
720 720 newchildren.pop() # remove new from internal changes
721 721 foldopts = opts
722 722 if action in ('r', 'roll'):
723 723 foldopts = foldopts.copy()
724 724 foldopts['rollup'] = True
725 725 parentctx, repl = finishfold(ui, repo, parentctx, ctx, new,
726 726 foldopts, newchildren)
727 727 replacements.extend(repl)
728 728 else:
729 729 # newchildren is empty if the fold did not result in any commit
730 730 # this happens when all folded changes are discarded during the
731 731 # merge.
732 732 replacements.append((ctx.node(), (parentctx.node(),)))
733 733 elif newchildren:
734 734 # otherwise update "parentctx" before proceeding to further operations
735 735 parentctx = repo[newchildren[-1]]
736 736 return parentctx, replacements
737 737
738 738
739 739 def between(repo, old, new, keep):
740 740 """select and validate the set of revision to edit
741 741
742 742 When keep is false, the specified set can't have children."""
743 743 ctxs = list(repo.set('%n::%n', old, new))
744 744 if ctxs and not keep:
745 745 if (not obsolete._enabled and
746 746 repo.revs('(%ld::) - (%ld)', ctxs, ctxs)):
747 747 raise util.Abort(_('cannot edit history that would orphan nodes'))
748 748 if repo.revs('(%ld) and merge()', ctxs):
749 749 raise util.Abort(_('cannot edit history that contains merges'))
750 750 root = ctxs[0] # list is already sorted by repo.set
751 751 if not root.phase():
752 752 raise util.Abort(_('cannot edit immutable changeset: %s') % root)
753 753 return [c.node() for c in ctxs]
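# E.g. with keep=False, the checks above refuse a range that has
# descendants outside itself (it would orphan nodes, unless obsolete
# markers are enabled), that contains merges, or whose root changeset
# is immutable (public phase).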
754 754
755 755
756 756 def writestate(repo, parentnode, rules, keep, topmost, replacements):
757 757 fp = open(os.path.join(repo.path, 'histedit-state'), 'w')
758 758 pickle.dump((parentnode, rules, keep, topmost, replacements), fp)
759 759 fp.close()
760 760
761 761 def readstate(repo):
762 762 """Returns a tuple of (parentnode, rules, keep, topmost, replacements).
763 763 """
764 764 fp = open(os.path.join(repo.path, 'histedit-state'))
765 765 return pickle.load(fp)
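# A hypothetical sketch of the round-trip above, using plain data in
# place of real nodes (the state file is an ordinary pickle of the
# 5-tuple written by writestate):
#
#   >>> import pickle
#   >>> blob = pickle.dumps(('parentnode', [['pick', '0123abcdef45']],
#   ...                      False, 'topmostnode', []))
#   >>> pickle.loads(blob)[1]
#   [['pick', '0123abcdef45']]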
766 766
767 767
768 768 def makedesc(c):
769 769 """build a initial action line for a ctx `c`
770 770
771 771 line are in the form:
772 772
773 773 pick <hash> <rev> <summary>
774 774 """
775 775 summary = ''
776 776 if c.description():
777 777 summary = c.description().splitlines()[0]
778 778 line = 'pick %s %d %s' % (c, c.rev(), summary)
779 779 # trim to 80 columns so the line is not unreasonably wide in editors
780 780 return util.ellipsis(line, 80)
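# A minimal illustration of the action line built above, with
# hypothetical hash/rev/summary values:
#
#   >>> 'pick %s %d %s' % ('0123abcdef45', 42, 'fix frobnicator edge case')
#   'pick 0123abcdef45 42 fix frobnicator edge case'
#
# util.ellipsis() then truncates anything longer than 80 columns.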
781 781
782 782 def verifyrules(rules, repo, ctxs):
783 783 """Verify that there exists exactly one edit rule per given changeset.
784 784
785 785 Will abort if there are too many or too few rules, a malformed rule,
786 786 or a rule on a changeset outside of the user-given range.
787 787 """
788 788 parsed = []
789 789 expected = set(str(c) for c in ctxs)
790 790 seen = set()
791 791 for r in rules:
792 792 if ' ' not in r:
793 793 raise util.Abort(_('malformed line "%s"') % r)
794 794 action, rest = r.split(' ', 1)
795 795 ha = rest.strip().split(' ', 1)[0]
796 796 try:
797 797 ha = str(repo[ha]) # ensure it's a short hash
798 798 except error.RepoError:
799 799 raise util.Abort(_('unknown changeset %s listed') % ha)
800 800 if ha not in expected:
801 801 raise util.Abort(
802 802 _('may not use changesets other than the ones listed'))
803 803 if ha in seen:
804 804 raise util.Abort(_('duplicated command for changeset %s') % ha)
805 805 seen.add(ha)
806 806 if action not in actiontable:
807 807 raise util.Abort(_('unknown action "%s"') % action)
808 808 parsed.append([action, ha])
809 809 missing = sorted(expected - seen) # sort to stabilize output
810 810 if missing:
811 811 raise util.Abort(_('missing rules for changeset %s') % missing[0],
812 812 hint=_('do you want to use the drop action?'))
813 813 return parsed
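# A sketch of the per-line parsing above, on a hypothetical rule string:
#
#   >>> r = 'pick 0123abcdef45 42 fix frobnicator edge case'
#   >>> action, rest = r.split(' ', 1)
#   >>> rest.strip().split(' ', 1)[0]
#   '0123abcdef45'
#   >>> action
#   'pick'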
814 814
815 815 def processreplacement(repo, replacements):
816 816 """process the list of replacements to return
817 817
818 818 1) the final mapping between original and created nodes
819 819 2) the list of temporary nodes created by histedit
820 820 3) the list of new commits created by histedit"""
821 821 allsuccs = set()
822 822 replaced = set()
823 823 fullmapping = {}
824 824 # initialise basic sets
825 825 # fullmapping records all operations recorded in replacements
826 826 for rep in replacements:
827 827 allsuccs.update(rep[1])
828 828 replaced.add(rep[0])
829 829 fullmapping.setdefault(rep[0], set()).update(rep[1])
830 830 new = allsuccs - replaced
831 831 tmpnodes = allsuccs & replaced
832 832 # Reduce fullmapping into a direct relation between original nodes
833 833 # and the final nodes created during history editing.
834 834 # Dropped changesets are replaced by an empty list.
835 835 toproceed = set(fullmapping)
836 836 final = {}
837 837 while toproceed:
838 838 for x in list(toproceed):
839 839 succs = fullmapping[x]
840 840 for s in list(succs):
841 841 if s in toproceed:
842 842 # non-final node with unknown closure
843 843 # We can't process this now
844 844 break
845 845 elif s in final:
846 846 # non-final node, replace with closure
847 847 succs.remove(s)
848 848 succs.update(final[s])
849 849 else:
850 850 final[x] = succs
851 851 toproceed.remove(x)
852 852 # remove tmpnodes from final mapping
853 853 for n in tmpnodes:
854 854 del final[n]
855 855 # we expect all changes involved in final to exist in the repo
856 856 # turn `final` values into lists (topologically sorted)
857 857 nm = repo.changelog.nodemap
858 858 for prec, succs in final.items():
859 859 final[prec] = sorted(succs, key=nm.get)
860 860
861 861 # compute the topmost element (necessary for bookmarks)
862 862 if new:
863 863 newtopmost = sorted(new, key=repo.changelog.rev)[-1]
864 864 elif not final:
865 865 # Nothing was rewritten at all; we won't need `newtopmost`:
866 866 # it is the same as `oldtopmost` and `processreplacement` knows it
867 867 newtopmost = None
868 868 else:
869 869 # everybody died. The newtopmost is the parent of the root.
870 870 newtopmost = repo[sorted(final, key=repo.changelog.rev)[0]].p1().node()
871 871
872 872 return final, tmpnodes, new, newtopmost
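# The reduction above, sketched on toy data (single-character strings
# standing in for real nodes): 'A' was rewritten into temporary node
# 'T', which was then folded into 'B', so the final mapping collapses
# 'A' directly onto 'B' and 'T' is reported as temporary:
#
#   >>> replacements = [('A', ('T',)), ('T', ('B',))]
#   >>> allsuccs = set(); replaced = set()
#   >>> for rep in replacements:
#   ...     allsuccs.update(rep[1]); replaced.add(rep[0])
#   >>> sorted(allsuccs - replaced)   # new commits
#   ['B']
#   >>> sorted(allsuccs & replaced)   # temporary nodes
#   ['T']
#
# and the closure substitution then yields final == {'A': ['B']}.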
873 873
874 874 def movebookmarks(ui, repo, mapping, oldtopmost, newtopmost):
875 875 """Move bookmark from old to newly created node"""
876 876 if not mapping:
877 877 # if nothing got rewritten there is no purpose for this function
878 878 return
879 879 moves = []
880 880 for bk, old in sorted(repo._bookmarks.iteritems()):
881 881 if old == oldtopmost:
882 882 # special case: ensure bookmarks stay on tip.
883 883 #
884 884 # This is arguably a feature and we may only want that for the
885 885 # active bookmark. But the behavior is kept compatible with the old
886 886 # version for now.
887 887 moves.append((bk, newtopmost))
888 888 continue
889 889 base = old
890 890 new = mapping.get(base, None)
891 891 if new is None:
892 892 continue
893 893 while not new:
894 894 # base is killed, try its parent
895 895 base = repo[base].p1().node()
896 896 new = mapping.get(base, (base,))
897 897 # nothing to move
898 898 moves.append((bk, new[-1]))
899 899 if moves:
900 900 marks = repo._bookmarks
901 901 for mark, new in moves:
902 902 old = marks[mark]
903 903 ui.note(_('histedit: moving bookmarks %s from %s to %s\n')
904 904 % (mark, node.short(old), node.short(new)))
905 905 marks[mark] = new
906 906 marks.write()
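# The fallback walk above on toy data: a bookmark on a dropped node 'B'
# (mapped to an empty tuple) slides to the replacement of its nearest
# surviving ancestor. With the hypothetical values
#
#   mapping = {'B': (), 'A': ('A2',)}   # 'B' dropped, 'A' rewritten
#
# and 'A' being the parent of 'B': new = mapping.get('B') is (), which
# is falsy, so the loop retries with base = 'A' and finds ('A2',);
# the bookmark ends up on 'A2'.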
907 907
908 908 def cleanupnode(ui, repo, name, nodes):
909 909 """strip a group of nodes from the repository
910 910
911 911 The set of nodes to strip may contain unknown nodes."""
912 912 ui.debug('should strip %s nodes %s\n' %
913 913 (name, ', '.join([node.short(n) for n in nodes])))
914 914 lock = None
915 915 try:
916 916 lock = repo.lock()
917 917 # Find all nodes that need to be stripped
918 918 # (we use %lr instead of %ln to silently ignore unknown items)
919 919 nm = repo.changelog.nodemap
920 920 nodes = [n for n in nodes if n in nm]
921 921 roots = [c.node() for c in repo.set("roots(%ln)", nodes)]
922 922 for c in roots:
923 923 # We should process nodes in reverse order to strip tip-most first,
924 924 # but this triggers a bug in the changegroup hook.
925 925 # Doing so would also reduce bundle overhead.
926 926 repair.strip(ui, repo, c)
927 927 finally:
928 928 release(lock)
929 929
930 930 def summaryhook(ui, repo):
931 931 if not os.path.exists(repo.join('histedit-state')):
932 932 return
933 933 (parentctxnode, rules, keep, topmost, replacements) = readstate(repo)
934 934 if rules:
935 935 # i18n: column positioning for "hg summary"
936 936 ui.write(_('hist: %s (histedit --continue)\n') %
937 937 (ui.label(_('%d remaining'), 'histedit.remaining') %
938 938 len(rules)))
939 939
940 940 def extsetup(ui):
941 941 cmdutil.summaryhooks.add('histedit', summaryhook)
942 942 cmdutil.unfinishedstates.append(
943 943 ['histedit-state', False, True, _('histedit in progress'),
944 944 _("use 'hg histedit --continue' or 'hg histedit --abort'")])
@@ -1,573 +1,573 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 10
11 11 import os, errno
12 12 import shutil
13 13
14 14 from mercurial import util, match as match_, hg, node, context, error, \
15 15 cmdutil, scmutil, commands
16 16 from mercurial.i18n import _
17 17 from mercurial.lock import release
18 18
19 19 import lfutil
20 20 import basestore
21 21
22 22 # -- Commands ----------------------------------------------------------
23 23
24 24 cmdtable = {}
25 25 command = cmdutil.command(cmdtable)
26 26
27 27 @command('lfconvert',
28 28 [('s', 'size', '',
29 29 _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
30 30 ('', 'to-normal', False,
31 31 _('convert from a largefiles repo to a normal repo')),
32 32 ],
33 33 _('hg lfconvert SOURCE DEST [FILE ...]'),
34 34 norepo=True,
35 35 inferrepo=True)
36 36 def lfconvert(ui, src, dest, *pats, **opts):
37 37 '''convert a normal repository to a largefiles repository
38 38
39 39 Convert repository SOURCE to a new repository DEST, identical to
40 40 SOURCE except that certain files will be converted as largefiles:
41 41 specifically, any file that matches any PATTERN *or* whose size is
42 42 above the minimum size threshold is converted as a largefile. The
43 43 size used to determine whether or not to track a file as a
44 44 largefile is the size of the first version of the file. The
45 45 minimum size can be specified either with --size or in
46 46 configuration as ``largefiles.size``.
47 47
48 48 After running this command you will need to make sure that
49 49 largefiles is enabled anywhere you intend to push the new
50 50 repository.
51 51
52 52 Use --to-normal to convert largefiles back to normal files; after
53 53 this, the DEST repository can be used without largefiles at all.'''
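# Illustrative invocations (hypothetical paths), per the help text above:
#
#   hg lfconvert --size 10 bigrepo bigrepo-largefiles
#   hg lfconvert --to-normal bigrepo-largefiles bigrepo-plain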
54 54
55 55 if opts['to_normal']:
56 56 tolfile = False
57 57 else:
58 58 tolfile = True
59 59 size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
60 60
61 61 if not hg.islocal(src):
62 62 raise util.Abort(_('%s is not a local Mercurial repo') % src)
63 63 if not hg.islocal(dest):
64 64 raise util.Abort(_('%s is not a local Mercurial repo') % dest)
65 65
66 66 rsrc = hg.repository(ui, src)
67 67 ui.status(_('initializing destination %s\n') % dest)
68 68 rdst = hg.repository(ui, dest, create=True)
69 69
70 70 success = False
71 71 dstwlock = dstlock = None
72 72 try:
73 73 # Lock the destination to prevent modification while it is being converted.
74 74 # Don't need to lock src because we are just reading from its history
75 75 # which can't change.
76 76 dstwlock = rdst.wlock()
77 77 dstlock = rdst.lock()
78 78
79 79 # Get a list of all changesets in the source. The easy way to do this
80 80 # is to simply walk the changelog, using changelog.nodesbetween().
81 81 # Take a look at mercurial/revlog.py:639 for more details.
82 82 # Use a generator instead of a list to decrease memory usage
83 83 ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
84 84 rsrc.heads())[0])
85 85 revmap = {node.nullid: node.nullid}
86 86 if tolfile:
87 87 lfiles = set()
88 88 normalfiles = set()
89 89 if not pats:
90 90 pats = ui.configlist(lfutil.longname, 'patterns', default=[])
91 91 if pats:
92 92 matcher = match_.match(rsrc.root, '', list(pats))
93 93 else:
94 94 matcher = None
95 95
96 96 lfiletohash = {}
97 97 for ctx in ctxs:
98 98 ui.progress(_('converting revisions'), ctx.rev(),
99 99 unit=_('revision'), total=rsrc['tip'].rev())
100 100 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
101 101 lfiles, normalfiles, matcher, size, lfiletohash)
102 102 ui.progress(_('converting revisions'), None)
103 103
104 104 if os.path.exists(rdst.wjoin(lfutil.shortname)):
105 105 shutil.rmtree(rdst.wjoin(lfutil.shortname))
106 106
107 107 for f in lfiletohash.keys():
108 108 if os.path.isfile(rdst.wjoin(f)):
109 109 os.unlink(rdst.wjoin(f))
110 110 try:
111 111 os.removedirs(os.path.dirname(rdst.wjoin(f)))
112 112 except OSError:
113 113 pass
114 114
115 115 # If there were any files converted to largefiles, add largefiles
116 116 # to the destination repository's requirements.
117 117 if lfiles:
118 118 rdst.requirements.add('largefiles')
119 119 rdst._writerequirements()
120 120 else:
121 121 for ctx in ctxs:
122 122 ui.progress(_('converting revisions'), ctx.rev(),
123 123 unit=_('revision'), total=rsrc['tip'].rev())
124 124 _addchangeset(ui, rsrc, rdst, ctx, revmap)
125 125
126 126 ui.progress(_('converting revisions'), None)
127 127 success = True
128 128 finally:
129 129 rdst.dirstate.clear()
130 130 release(dstlock, dstwlock)
131 131 if not success:
132 132 # we failed, remove the new directory
133 133 shutil.rmtree(rdst.root)
134 134
135 135 def _addchangeset(ui, rsrc, rdst, ctx, revmap):
136 136 # Convert src parents to dst parents
137 137 parents = _convertparents(ctx, revmap)
138 138
139 139 # Generate list of changed files
140 140 files = _getchangedfiles(ctx, parents)
141 141
142 142 def getfilectx(repo, memctx, f):
143 143 if lfutil.standin(f) in files:
144 144 # if the file isn't in the manifest then it was removed
145 145 # or renamed; return None to indicate this
146 146 try:
147 147 fctx = ctx.filectx(lfutil.standin(f))
148 148 except error.LookupError:
149 raise IOError
149 return None
150 150 renamed = fctx.renamed()
151 151 if renamed:
152 152 renamed = lfutil.splitstandin(renamed[0])
153 153
154 154 hash = fctx.data().strip()
155 155 path = lfutil.findfile(rsrc, hash)
156 156
157 157 # If one file is missing, likely all files from this rev are missing
158 158 if path is None:
159 159 cachelfiles(ui, rsrc, ctx.node())
160 160 path = lfutil.findfile(rsrc, hash)
161 161
162 162 if path is None:
163 163 raise util.Abort(
164 164 _("missing largefile \'%s\' from revision %s")
165 165 % (f, node.hex(ctx.node())))
166 166
167 167 data = ''
168 168 fd = None
169 169 try:
170 170 fd = open(path, 'rb')
171 171 data = fd.read()
172 172 finally:
173 173 if fd:
174 174 fd.close()
175 175 return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
176 176 'x' in fctx.flags(), renamed)
177 177 else:
178 178 return _getnormalcontext(repo, ctx, f, revmap)
179 179
180 180 dstfiles = []
181 181 for file in files:
182 182 if lfutil.isstandin(file):
183 183 dstfiles.append(lfutil.splitstandin(file))
184 184 else:
185 185 dstfiles.append(file)
186 186 # Commit
187 187 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
188 188
189 189 def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
190 190 matcher, size, lfiletohash):
191 191 # Convert src parents to dst parents
192 192 parents = _convertparents(ctx, revmap)
193 193
194 194 # Generate list of changed files
195 195 files = _getchangedfiles(ctx, parents)
196 196
197 197 dstfiles = []
198 198 for f in files:
199 199 if f not in lfiles and f not in normalfiles:
200 200 islfile = _islfile(f, ctx, matcher, size)
201 201 # If this file was renamed or copied then copy
202 202 # the largefile-ness of its predecessor
203 203 if f in ctx.manifest():
204 204 fctx = ctx.filectx(f)
205 205 renamed = fctx.renamed()
206 206 renamedlfile = renamed and renamed[0] in lfiles
207 207 islfile |= renamedlfile
208 208 if 'l' in fctx.flags():
209 209 if renamedlfile:
210 210 raise util.Abort(
211 211 _('renamed/copied largefile %s becomes symlink')
212 212 % f)
213 213 islfile = False
214 214 if islfile:
215 215 lfiles.add(f)
216 216 else:
217 217 normalfiles.add(f)
218 218
219 219 if f in lfiles:
220 220 dstfiles.append(lfutil.standin(f))
221 221 # the largefile is in the manifest if it has not been removed/renamed
222 222 if f in ctx.manifest():
223 223 fctx = ctx.filectx(f)
224 224 if 'l' in fctx.flags():
225 225 renamed = fctx.renamed()
226 226 if renamed and renamed[0] in lfiles:
227 227 raise util.Abort(_('largefile %s becomes symlink') % f)
228 228
229 229 # largefile was modified, update standins
230 230 m = util.sha1('')
231 231 m.update(ctx[f].data())
232 232 hash = m.hexdigest()
233 233 if f not in lfiletohash or lfiletohash[f] != hash:
234 234 rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
235 235 executable = 'x' in ctx[f].flags()
236 236 lfutil.writestandin(rdst, lfutil.standin(f), hash,
237 237 executable)
238 238 lfiletohash[f] = hash
239 239 else:
240 240 # normal file
241 241 dstfiles.append(f)
242 242
243 243 def getfilectx(repo, memctx, f):
244 244 if lfutil.isstandin(f):
245 245 # if the file isn't in the manifest then it was removed
246 246 # or renamed; return None to indicate this
247 247 srcfname = lfutil.splitstandin(f)
248 248 try:
249 249 fctx = ctx.filectx(srcfname)
250 250 except error.LookupError:
251 raise IOError
251 return None
252 252 renamed = fctx.renamed()
253 253 if renamed:
254 254 # standin is always a largefile because largefile-ness
255 255 # doesn't change after rename or copy
256 256 renamed = lfutil.standin(renamed[0])
257 257
258 258 return context.memfilectx(repo, f, lfiletohash[srcfname] + '\n',
259 259 'l' in fctx.flags(), 'x' in fctx.flags(),
260 260 renamed)
261 261 else:
262 262 return _getnormalcontext(repo, ctx, f, revmap)
263 263
264 264 # Commit
265 265 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
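# Standins are small tracked files whose content is just the largefile's
# SHA-1 hex digest plus a newline (see the util.sha1 computation above),
# e.g. the digest of empty content:
#
#   >>> import hashlib
#   >>> hashlib.sha1('').hexdigest()
#   'da39a3ee5e6b4b0d3255bfef95601890afd80709'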
266 266
267 267 def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
268 268 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
269 269 getfilectx, ctx.user(), ctx.date(), ctx.extra())
270 270 ret = rdst.commitctx(mctx)
271 271 rdst.setparents(ret)
272 272 revmap[ctx.node()] = rdst.changelog.tip()
273 273
274 274 # Generate list of changed files
275 275 def _getchangedfiles(ctx, parents):
276 276 files = set(ctx.files())
277 277 if node.nullid not in parents:
278 278 mc = ctx.manifest()
279 279 mp1 = ctx.parents()[0].manifest()
280 280 mp2 = ctx.parents()[1].manifest()
281 281 files |= (set(mp1) | set(mp2)) - set(mc)
282 282 for f in mc:
283 283 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
284 284 files.add(f)
285 285 return files
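# E.g. for a merge where parent 1 has f@a, parent 2 has f@b, and the
# merge result keeps f@a: f is still added to files, because
# mc[f] != mp2.get(f) -- a file is only skipped when both parents
# already agree with the merge result.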
286 286
287 287 # Convert src parents to dst parents
288 288 def _convertparents(ctx, revmap):
289 289 parents = []
290 290 for p in ctx.parents():
291 291 parents.append(revmap[p.node()])
292 292 while len(parents) < 2:
293 293 parents.append(node.nullid)
294 294 return parents
295 295
296 296 # Get memfilectx for a normal file
297 297 def _getnormalcontext(repo, ctx, f, revmap):
298 298 try:
299 299 fctx = ctx.filectx(f)
300 300 except error.LookupError:
301 raise IOError
301 return None
302 302 renamed = fctx.renamed()
303 303 if renamed:
304 304 renamed = renamed[0]
305 305
306 306 data = fctx.data()
307 307 if f == '.hgtags':
308 308 data = _converttags(repo.ui, revmap, data)
309 309 return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
310 310 'x' in fctx.flags(), renamed)
311 311
312 312 # Remap tag data using a revision map
313 313 def _converttags(ui, revmap, data):
314 314 newdata = []
315 315 for line in data.splitlines():
316 316 try:
317 317 id, name = line.split(' ', 1)
318 318 except ValueError:
319 319 ui.warn(_('skipping incorrectly formatted tag %s\n')
320 320 % line)
321 321 continue
322 322 try:
323 323 newid = node.bin(id)
324 324 except TypeError:
325 325 ui.warn(_('skipping incorrectly formatted id %s\n')
326 326 % id)
327 327 continue
328 328 try:
329 329 newdata.append('%s %s\n' % (node.hex(revmap[newid]),
330 330 name))
331 331 except KeyError:
332 332 ui.warn(_('no mapping for id %s\n') % id)
333 333 continue
334 334 return ''.join(newdata)
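# A self-contained sketch of the remapping above, with hypothetical
# 40-hex ids (node.bin/node.hex are thin wrappers around binascii):
#
#   >>> from binascii import unhexlify, hexlify
#   >>> old, new = '11' * 20, '22' * 20
#   >>> revmap = {unhexlify(old): unhexlify(new)}
#   >>> id, name = ('%s v1.0' % old).split(' ', 1)
#   >>> hexlify(revmap[unhexlify(id)]) == new and name == 'v1.0'
#   True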
335 335
336 336 def _islfile(file, ctx, matcher, size):
337 337 '''Return true if file should be considered a largefile, i.e.
338 338 matcher matches it or it is larger than size.'''
339 339 # never store special .hg* files as largefiles
340 340 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
341 341 return False
342 342 if matcher and matcher(file):
343 343 return True
344 344 try:
345 345 return ctx.filectx(file).size() >= size * 1024 * 1024
346 346 except error.LookupError:
347 347 return False
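# E.g. with size=10, a 12 MB file, or any file matched by the configured
# patterns, is converted as a largefile; .hgtags, .hgignore and .hgsigs
# never are.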
348 348
349 349 def uploadlfiles(ui, rsrc, rdst, files):
350 350 '''upload largefiles to the central store'''
351 351
352 352 if not files:
353 353 return
354 354
355 355 store = basestore._openstore(rsrc, rdst, put=True)
356 356
357 357 at = 0
358 358 ui.debug("sending statlfile command for %d largefiles\n" % len(files))
359 359 retval = store.exists(files)
360 360 files = filter(lambda h: not retval[h], files)
361 361 ui.debug("%d largefiles need to be uploaded\n" % len(files))
362 362
363 363 for hash in files:
364 364 ui.progress(_('uploading largefiles'), at, unit='largefile',
365 365 total=len(files))
366 366 source = lfutil.findfile(rsrc, hash)
367 367 if not source:
368 368 raise util.Abort(_('largefile %s missing from store'
369 369 ' (needs to be uploaded)') % hash)
370 370 # XXX check for errors here
371 371 store.put(source, hash)
372 372 at += 1
373 373 ui.progress(_('uploading largefiles'), None)
374 374
375 375 def verifylfiles(ui, repo, all=False, contents=False):
376 376 '''Verify that every largefile revision in the current changeset
377 377 exists in the central store. With --contents, also verify that
378 378 the contents of each local largefile file revision are correct (SHA-1 hash
379 379 matches the revision ID). With --all, check every changeset in
380 380 this repository.'''
381 381 if all:
382 382 # Pass a list to the function rather than an iterator because we know a
383 383 # list will work.
384 384 revs = range(len(repo))
385 385 else:
386 386 revs = ['.']
387 387
388 388 store = basestore._openstore(repo)
389 389 return store.verify(revs, contents=contents)
390 390
391 391 def cachelfiles(ui, repo, node, filelist=None):
392 392 '''cachelfiles ensures that all largefiles needed by the specified revision
393 393 are present in the repository's largefile cache.
394 394
395 395 returns a tuple (cached, missing). cached is the list of files downloaded
396 396 by this operation; missing is the list of files that were needed but could
397 397 not be found.'''
398 398 lfiles = lfutil.listlfiles(repo, node)
399 399 if filelist:
400 400 lfiles = set(lfiles) & set(filelist)
401 401 toget = []
402 402
403 403 for lfile in lfiles:
404 404 try:
405 405 expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
406 406 except IOError, err:
407 407 if err.errno == errno.ENOENT:
408 408 continue # node must be None and standin wasn't found in wctx
409 409 raise
410 410 if not lfutil.findfile(repo, expectedhash):
411 411 toget.append((lfile, expectedhash))
412 412
413 413 if toget:
414 414 store = basestore._openstore(repo)
415 415 ret = store.get(toget)
416 416 return ret
417 417
418 418 return ([], [])
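# Typical use -- callers such as downloadlfiles() below tally the two
# result lists per revision:
#
#   cached, missing = cachelfiles(ui, repo, ctx.node())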
419 419
420 420 def downloadlfiles(ui, repo, rev=None):
421 421 matchfn = scmutil.match(repo[None],
422 422 [repo.wjoin(lfutil.shortname)], {})
423 423 def prepare(ctx, fns):
424 424 pass
425 425 totalsuccess = 0
426 426 totalmissing = 0
427 427 if rev != []: # walkchangerevs on empty list would return all revs
428 428 for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev' : rev},
429 429 prepare):
430 430 success, missing = cachelfiles(ui, repo, ctx.node())
431 431 totalsuccess += len(success)
432 432 totalmissing += len(missing)
433 433 ui.status(_("%d additional largefiles cached\n") % totalsuccess)
434 434 if totalmissing > 0:
435 435 ui.status(_("%d largefiles failed to download\n") % totalmissing)
436 436 return totalsuccess, totalmissing
437 437
438 438 def updatelfiles(ui, repo, filelist=None, printmessage=True,
439 439 normallookup=False):
440 440 wlock = repo.wlock()
441 441 try:
442 442 lfdirstate = lfutil.openlfdirstate(ui, repo)
443 443 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
444 444
445 445 if filelist is not None:
446 446 filelist = set(filelist)
447 447 lfiles = [f for f in lfiles if f in filelist]
448 448
449 449 update = {}
450 450 updated, removed = 0, 0
451 451 for lfile in lfiles:
452 452 abslfile = repo.wjoin(lfile)
453 453 absstandin = repo.wjoin(lfutil.standin(lfile))
454 454 if os.path.exists(absstandin):
455 455 if (os.path.exists(absstandin + '.orig') and
456 456 os.path.exists(abslfile)):
457 457 shutil.copyfile(abslfile, abslfile + '.orig')
458 458 util.unlinkpath(absstandin + '.orig')
459 459 expecthash = lfutil.readstandin(repo, lfile)
460 460 if (expecthash != '' and
461 461 (not os.path.exists(abslfile) or
462 462 expecthash != lfutil.hashfile(abslfile))):
463 463 if lfile not in repo[None]: # not switched to normal file
464 464 util.unlinkpath(abslfile, ignoremissing=True)
465 465 # use normallookup() to allocate an entry in the largefiles
466 466 # dirstate, because the lack of one misleads
467 467 # lfilesrepo.status() into reporting such cache-missing
468 468 # files as REMOVED.
469 469 lfdirstate.normallookup(lfile)
470 470 update[lfile] = expecthash
471 471 else:
472 472 # Remove lfiles for which the standin is deleted, unless the
473 473 # lfile is added to the repository again. This happens when a
474 474 # largefile is converted back to a normal file: the standin
475 475 # disappears, but a new (normal) file appears as the lfile.
476 476 if (os.path.exists(abslfile) and
477 477 repo.dirstate.normalize(lfile) not in repo[None]):
478 478 util.unlinkpath(abslfile)
479 479 removed += 1
480 480
481 481 # largefile processing might be slow and get interrupted - be prepared
482 482 lfdirstate.write()
483 483
484 484 if lfiles:
485 485 if printmessage:
486 486 ui.status(_('getting changed largefiles\n'))
487 487 cachelfiles(ui, repo, None, lfiles)
488 488
489 489 for lfile in lfiles:
490 490 update1 = 0
491 491
492 492 expecthash = update.get(lfile)
493 493 if expecthash:
494 494 if not lfutil.copyfromcache(repo, expecthash, lfile):
495 495 # failed ... but already removed and set to normallookup
496 496 continue
497 497 # Synchronize largefile dirstate to the last modified
498 498 # time of the file
499 499 lfdirstate.normal(lfile)
500 500 update1 = 1
501 501
502 502 # copy the state of largefile standin from the repository's
503 503 # dirstate to its state in the lfdirstate.
504 504 abslfile = repo.wjoin(lfile)
505 505 absstandin = repo.wjoin(lfutil.standin(lfile))
506 506 if os.path.exists(absstandin):
507 507 mode = os.stat(absstandin).st_mode
508 508 if mode != os.stat(abslfile).st_mode:
509 509 os.chmod(abslfile, mode)
510 510 update1 = 1
511 511
512 512 updated += update1
513 513
514 514 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
515 515
516 516 if filelist is not None:
517 517 # If "local largefile" is chosen at file merging, it is
518 518 # not listed in "filelist" (= dirstate syncing is
519 519 # omitted), because the standin file is not changed before and
520 520 # after merging.
521 521 # But the status of such files may have to be changed by
522 522 # merging. For example, locally modified ("M") largefile
523 523 # has to become re-added("A"), if it is "normal" file in
524 524 # the target revision of linear-merging.
525 525 for lfile in lfdirstate:
526 526 if lfile not in filelist:
527 527 lfutil.synclfdirstate(repo, lfdirstate, lfile, True)
528 528
529 529 lfdirstate.write()
530 530 if printmessage and lfiles:
531 531 ui.status(_('%d largefiles updated, %d removed\n') % (updated,
532 532 removed))
533 533 finally:
534 534 wlock.release()
535 535
536 536 @command('lfpull',
537 537 [('r', 'rev', [], _('pull largefiles for these revisions'))
538 538 ] + commands.remoteopts,
539 539 _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
540 540 def lfpull(ui, repo, source="default", **opts):
541 541 """pull largefiles for the specified revisions from the specified source
542 542
543 543 Pull largefiles that are referenced from local changesets but missing
544 544 locally, pulling from a remote repository to the local cache.
545 545
546 546 If SOURCE is omitted, the 'default' path will be used.
547 547 See :hg:`help urls` for more information.
548 548
549 549 .. container:: verbose
550 550
551 551 Some examples:
552 552
553 553 - pull largefiles for all branch heads::
554 554
555 555 hg lfpull -r "head() and not closed()"
556 556
557 557 - pull largefiles on the default branch::
558 558
559 559 hg lfpull -r "branch(default)"
560 560 """
561 561 repo.lfpullsource = source
562 562
563 563 revs = opts.get('rev', [])
564 564 if not revs:
565 565 raise util.Abort(_('no revisions specified'))
566 566 revs = scmutil.revrange(repo, revs)
567 567
568 568 numcached = 0
569 569 for rev in revs:
570 570 ui.note(_('pulling largefiles for revision %s\n') % rev)
571 571 (cached, missing) = cachelfiles(ui, repo, rev)
572 572 numcached += len(cached)
573 573 ui.status(_("%d largefiles cached\n") % numcached)
@@ -1,2725 +1,2725 b''
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import os, sys, errno, re, tempfile
11 11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
12 12 import match as matchmod
13 13 import context, repair, graphmod, revset, phases, obsolete, pathutil
14 14 import changelog
15 15 import bookmarks
16 16 import lock as lockmod
17 17
18 18 def parsealiases(cmd):
19 19 return cmd.lstrip("^").split("|")
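# E.g. parsealiases("^log|history") == ['log', 'history'] -- the leading
# '^' (which marks a command for the short help list) is stripped before
# splitting on '|'.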
20 20
21 21 def findpossible(cmd, table, strict=False):
22 22 """
23 23 Return cmd -> (aliases, command table entry)
24 24 for each matching command.
25 25 Return debug commands (or their aliases) only if no normal command matches.
26 26 """
27 27 choice = {}
28 28 debugchoice = {}
29 29
30 30 if cmd in table:
31 31 # short-circuit exact matches, "log" alias beats "^log|history"
32 32 keys = [cmd]
33 33 else:
34 34 keys = table.keys()
35 35
36 36 for e in keys:
37 37 aliases = parsealiases(e)
38 38 found = None
39 39 if cmd in aliases:
40 40 found = cmd
41 41 elif not strict:
42 42 for a in aliases:
43 43 if a.startswith(cmd):
44 44 found = a
45 45 break
46 46 if found is not None:
47 47 if aliases[0].startswith("debug") or found.startswith("debug"):
48 48 debugchoice[found] = (aliases, table[e])
49 49 else:
50 50 choice[found] = (aliases, table[e])
51 51
52 52 if not choice and debugchoice:
53 53 choice = debugchoice
54 54
55 55 return choice
56 56
57 57 def findcmd(cmd, table, strict=True):
58 58 """Return (aliases, command table entry) for command string."""
59 59 choice = findpossible(cmd, table, strict)
60 60
61 61 if cmd in choice:
62 62 return choice[cmd]
63 63
64 64 if len(choice) > 1:
65 65 clist = choice.keys()
66 66 clist.sort()
67 67 raise error.AmbiguousCommand(cmd, clist)
68 68
69 69 if choice:
70 70 return choice.values()[0]
71 71
72 72 raise error.UnknownCommand(cmd)
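# E.g. with strict=False, 'st' resolves to 'status' when no other
# visible command starts with 'st'; a prefix matching several commands
# raises AmbiguousCommand with the sorted candidate list.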
73 73
74 74 def findrepo(p):
75 75 while not os.path.isdir(os.path.join(p, ".hg")):
76 76 oldp, p = p, os.path.dirname(p)
77 77 if p == oldp:
78 78 return None
79 79
80 80 return p
81 81
82 82 def bailifchanged(repo):
83 83 if repo.dirstate.p2() != nullid:
84 84 raise util.Abort(_('outstanding uncommitted merge'))
85 85 modified, added, removed, deleted = repo.status()[:4]
86 86 if modified or added or removed or deleted:
87 87 raise util.Abort(_('uncommitted changes'))
88 88 ctx = repo[None]
89 89 for s in sorted(ctx.substate):
90 90 if ctx.sub(s).dirty():
91 91 raise util.Abort(_("uncommitted changes in subrepo %s") % s)
92 92
93 93 def logmessage(ui, opts):
94 94 """ get the log message according to -m and -l option """
95 95 message = opts.get('message')
96 96 logfile = opts.get('logfile')
97 97
98 98 if message and logfile:
99 99 raise util.Abort(_('options --message and --logfile are mutually '
100 100 'exclusive'))
101 101 if not message and logfile:
102 102 try:
103 103 if logfile == '-':
104 104 message = ui.fin.read()
105 105 else:
106 106 message = '\n'.join(util.readfile(logfile).splitlines())
107 107 except IOError, inst:
108 108 raise util.Abort(_("can't read commit message '%s': %s") %
109 109 (logfile, inst.strerror))
110 110 return message
111 111
112 112 def mergeeditform(ctxorbool, baseform):
113 113 """build appropriate editform from ctxorbool and baseform
114 114
115 115 'ctxorbool' is either a ctx to be committed or a bool indicating
116 116 whether a merge is being committed.
117 117
118 118 This returns editform 'baseform' with a '.merge' suffix if a merge
119 119 is being committed, or with a '.normal' suffix otherwise.
120 120 """
121 121 if isinstance(ctxorbool, bool):
122 122 if ctxorbool:
123 123 return baseform + ".merge"
124 124 elif 1 < len(ctxorbool.parents()):
125 125 return baseform + ".merge"
126 126
127 127 return baseform + ".normal"
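# For instance, mergeeditform(True, 'import.normal') returns
# 'import.normal.merge', while a single-parent ctx yields
# 'import.normal.normal'.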
128 128
129 129 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
130 130 editform='', **opts):
131 131 """get appropriate commit message editor according to '--edit' option
132 132
133 133 'finishdesc' is a function to be called with the edited commit
134 134 message (= 'description' of the new changeset) just after editing,
135 135 but before checking emptiness. It should return the actual text to
136 136 be stored into history. This allows changing the description
137 137 before storing.
138 138
139 139 'extramsg' is an extra message to be shown in the editor instead of
140 140 the 'Leave message empty to abort commit' line. The 'HG: ' prefix
141 141 and EOL are automatically added.
142 142
143 143 'editform' is a dot-separated list of names, to distinguish
144 144 the purpose of commit text editing.
145 145
146 146 'getcommiteditor' returns 'commitforceeditor' regardless of
147 147 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
148 148 they are specific to usage in MQ.
149 149 """
150 150 if edit or finishdesc or extramsg:
151 151 return lambda r, c, s: commitforceeditor(r, c, s,
152 152 finishdesc=finishdesc,
153 153 extramsg=extramsg,
154 154 editform=editform)
155 155 elif editform:
156 156 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
157 157 else:
158 158 return commiteditor
159 159
160 160 def loglimit(opts):
161 161 """get the log limit according to option -l/--limit"""
162 162 limit = opts.get('limit')
163 163 if limit:
164 164 try:
165 165 limit = int(limit)
166 166 except ValueError:
167 167 raise util.Abort(_('limit must be a positive integer'))
168 168 if limit <= 0:
169 169 raise util.Abort(_('limit must be positive'))
170 170 else:
171 171 limit = None
172 172 return limit
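# E.g. loglimit({'limit': '5'}) == 5 and loglimit({'limit': ''}) is
# None, while '0', '-1' or 'abc' raise Abort: the limit must be a
# positive integer.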
173 173
174 174 def makefilename(repo, pat, node, desc=None,
175 175 total=None, seqno=None, revwidth=None, pathname=None):
176 176 node_expander = {
177 177 'H': lambda: hex(node),
178 178 'R': lambda: str(repo.changelog.rev(node)),
179 179 'h': lambda: short(node),
180 180 'm': lambda: re.sub('[^\w]', '_', str(desc))
181 181 }
182 182 expander = {
183 183 '%': lambda: '%',
184 184 'b': lambda: os.path.basename(repo.root),
185 185 }
186 186
187 187 try:
188 188 if node:
189 189 expander.update(node_expander)
190 190 if node:
191 191 expander['r'] = (lambda:
192 192 str(repo.changelog.rev(node)).zfill(revwidth or 0))
193 193 if total is not None:
194 194 expander['N'] = lambda: str(total)
195 195 if seqno is not None:
196 196 expander['n'] = lambda: str(seqno)
197 197 if total is not None and seqno is not None:
198 198 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
199 199 if pathname is not None:
200 200 expander['s'] = lambda: os.path.basename(pathname)
201 201 expander['d'] = lambda: os.path.dirname(pathname) or '.'
202 202 expander['p'] = lambda: pathname
203 203
204 204 newname = []
205 205 patlen = len(pat)
206 206 i = 0
207 207 while i < patlen:
208 208 c = pat[i]
209 209 if c == '%':
210 210 i += 1
211 211 c = pat[i]
212 212 c = expander[c]()
213 213 newname.append(c)
214 214 i += 1
215 215 return ''.join(newname)
216 216 except KeyError, inst:
217 217 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
218 218 inst.args[0])
219 219
220 220 def makefileobj(repo, pat, node=None, desc=None, total=None,
221 221 seqno=None, revwidth=None, mode='wb', modemap=None,
222 222 pathname=None):
223 223
224 224 writable = mode not in ('r', 'rb')
225 225
226 226 if not pat or pat == '-':
227 227 fp = writable and repo.ui.fout or repo.ui.fin
228 228 if util.safehasattr(fp, 'fileno'):
229 229 return os.fdopen(os.dup(fp.fileno()), mode)
230 230 else:
231 231 # if this fp can't be duped properly, return
232 232 # a dummy object that can be closed
233 233 class wrappedfileobj(object):
234 234 noop = lambda x: None
235 235 def __init__(self, f):
236 236 self.f = f
237 237 def __getattr__(self, attr):
238 238 if attr == 'close':
239 239 return self.noop
240 240 else:
241 241 return getattr(self.f, attr)
242 242
243 243 return wrappedfileobj(fp)
244 244 if util.safehasattr(pat, 'write') and writable:
245 245 return pat
246 246 if util.safehasattr(pat, 'read') and 'r' in mode:
247 247 return pat
248 248 fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
249 249 if modemap is not None:
250 250 mode = modemap.get(fn, mode)
251 251 if mode == 'wb':
252 252 modemap[fn] = 'ab'
253 253 return open(fn, mode)
254 254
255 255 def openrevlog(repo, cmd, file_, opts):
256 256 """opens the changelog, manifest, a filelog or a given revlog"""
257 257 cl = opts['changelog']
258 258 mf = opts['manifest']
259 259 msg = None
260 260 if cl and mf:
261 261 msg = _('cannot specify --changelog and --manifest at the same time')
262 262 elif cl or mf:
263 263 if file_:
264 264 msg = _('cannot specify filename with --changelog or --manifest')
265 265 elif not repo:
266 266 msg = _('cannot specify --changelog or --manifest '
267 267 'without a repository')
268 268 if msg:
269 269 raise util.Abort(msg)
270 270
271 271 r = None
272 272 if repo:
273 273 if cl:
274 274 r = repo.unfiltered().changelog
275 275 elif mf:
276 276 r = repo.manifest
277 277 elif file_:
278 278 filelog = repo.file(file_)
279 279 if len(filelog):
280 280 r = filelog
281 281 if not r:
282 282 if not file_:
283 283 raise error.CommandError(cmd, _('invalid arguments'))
284 284 if not os.path.isfile(file_):
285 285 raise util.Abort(_("revlog '%s' not found") % file_)
286 286 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
287 287 file_[:-2] + ".i")
288 288 return r
289 289
290 290 def copy(ui, repo, pats, opts, rename=False):
291 291 # called with the repo lock held
292 292 #
293 293 # hgsep => pathname that uses "/" to separate directories
294 294 # ossep => pathname that uses os.sep to separate directories
295 295 cwd = repo.getcwd()
296 296 targets = {}
297 297 after = opts.get("after")
298 298 dryrun = opts.get("dry_run")
299 299 wctx = repo[None]
300 300
301 301 def walkpat(pat):
302 302 srcs = []
303 303 badstates = after and '?' or '?r'
304 304 m = scmutil.match(repo[None], [pat], opts, globbed=True)
305 305 for abs in repo.walk(m):
306 306 state = repo.dirstate[abs]
307 307 rel = m.rel(abs)
308 308 exact = m.exact(abs)
309 309 if state in badstates:
310 310 if exact and state == '?':
311 311 ui.warn(_('%s: not copying - file is not managed\n') % rel)
312 312 if exact and state == 'r':
313 313 ui.warn(_('%s: not copying - file has been marked for'
314 314 ' remove\n') % rel)
315 315 continue
316 316 # abs: hgsep
317 317 # rel: ossep
318 318 srcs.append((abs, rel, exact))
319 319 return srcs
320 320
321 321 # abssrc: hgsep
322 322 # relsrc: ossep
323 323 # otarget: ossep
324 324 def copyfile(abssrc, relsrc, otarget, exact):
325 325 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
326 326 if '/' in abstarget:
327 327 # We cannot normalize abstarget itself, as this would prevent
328 328 # case-only renames, like a => A.
329 329 abspath, absname = abstarget.rsplit('/', 1)
330 330 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
331 331 reltarget = repo.pathto(abstarget, cwd)
332 332 target = repo.wjoin(abstarget)
333 333 src = repo.wjoin(abssrc)
334 334 state = repo.dirstate[abstarget]
335 335
336 336 scmutil.checkportable(ui, abstarget)
337 337
338 338 # check for collisions
339 339 prevsrc = targets.get(abstarget)
340 340 if prevsrc is not None:
341 341 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
342 342 (reltarget, repo.pathto(abssrc, cwd),
343 343 repo.pathto(prevsrc, cwd)))
344 344 return
345 345
346 346 # check for overwrites
347 347 exists = os.path.lexists(target)
348 348 samefile = False
349 349 if exists and abssrc != abstarget:
350 350 if (repo.dirstate.normalize(abssrc) ==
351 351 repo.dirstate.normalize(abstarget)):
352 352 if not rename:
353 353 ui.warn(_("%s: can't copy - same file\n") % reltarget)
354 354 return
355 355 exists = False
356 356 samefile = True
357 357
358 358 if not after and exists or after and state in 'mn':
359 359 if not opts['force']:
360 360 ui.warn(_('%s: not overwriting - file exists\n') %
361 361 reltarget)
362 362 return
363 363
364 364 if after:
365 365 if not exists:
366 366 if rename:
367 367 ui.warn(_('%s: not recording move - %s does not exist\n') %
368 368 (relsrc, reltarget))
369 369 else:
370 370 ui.warn(_('%s: not recording copy - %s does not exist\n') %
371 371 (relsrc, reltarget))
372 372 return
373 373 elif not dryrun:
374 374 try:
375 375 if exists:
376 376 os.unlink(target)
377 377 targetdir = os.path.dirname(target) or '.'
378 378 if not os.path.isdir(targetdir):
379 379 os.makedirs(targetdir)
380 380 if samefile:
381 381 tmp = target + "~hgrename"
382 382 os.rename(src, tmp)
383 383 os.rename(tmp, target)
384 384 else:
385 385 util.copyfile(src, target)
386 386 srcexists = True
387 387 except IOError, inst:
388 388 if inst.errno == errno.ENOENT:
389 389 ui.warn(_('%s: deleted in working copy\n') % relsrc)
390 390 srcexists = False
391 391 else:
392 392 ui.warn(_('%s: cannot copy - %s\n') %
393 393 (relsrc, inst.strerror))
394 394 return True # report a failure
395 395
396 396 if ui.verbose or not exact:
397 397 if rename:
398 398 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
399 399 else:
400 400 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
401 401
402 402 targets[abstarget] = abssrc
403 403
404 404 # fix up dirstate
405 405 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
406 406 dryrun=dryrun, cwd=cwd)
407 407 if rename and not dryrun:
408 408 if not after and srcexists and not samefile:
409 409 util.unlinkpath(repo.wjoin(abssrc))
410 410 wctx.forget([abssrc])
411 411
412 412 # pat: ossep
413 413 # dest: ossep
414 414 # srcs: list of (hgsep, hgsep, ossep, bool)
415 415 # return: function that takes hgsep and returns ossep
416 416 def targetpathfn(pat, dest, srcs):
417 417 if os.path.isdir(pat):
418 418 abspfx = pathutil.canonpath(repo.root, cwd, pat)
419 419 abspfx = util.localpath(abspfx)
420 420 if destdirexists:
421 421 striplen = len(os.path.split(abspfx)[0])
422 422 else:
423 423 striplen = len(abspfx)
424 424 if striplen:
425 425 striplen += len(os.sep)
426 426 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
427 427 elif destdirexists:
428 428 res = lambda p: os.path.join(dest,
429 429 os.path.basename(util.localpath(p)))
430 430 else:
431 431 res = lambda p: dest
432 432 return res
433 433
434 434 # pat: ossep
435 435 # dest: ossep
436 436 # srcs: list of (hgsep, hgsep, ossep, bool)
437 437 # return: function that takes hgsep and returns ossep
438 438 def targetpathafterfn(pat, dest, srcs):
439 439 if matchmod.patkind(pat):
440 440 # a mercurial pattern
441 441 res = lambda p: os.path.join(dest,
442 442 os.path.basename(util.localpath(p)))
443 443 else:
444 444 abspfx = pathutil.canonpath(repo.root, cwd, pat)
445 445 if len(abspfx) < len(srcs[0][0]):
446 446 # A directory. Either the target path contains the last
447 447 # component of the source path or it does not.
448 448 def evalpath(striplen):
449 449 score = 0
450 450 for s in srcs:
451 451 t = os.path.join(dest, util.localpath(s[0])[striplen:])
452 452 if os.path.lexists(t):
453 453 score += 1
454 454 return score
455 455
456 456 abspfx = util.localpath(abspfx)
457 457 striplen = len(abspfx)
458 458 if striplen:
459 459 striplen += len(os.sep)
460 460 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
461 461 score = evalpath(striplen)
462 462 striplen1 = len(os.path.split(abspfx)[0])
463 463 if striplen1:
464 464 striplen1 += len(os.sep)
465 465 if evalpath(striplen1) > score:
466 466 striplen = striplen1
467 467 res = lambda p: os.path.join(dest,
468 468 util.localpath(p)[striplen:])
469 469 else:
470 470 # a file
471 471 if destdirexists:
472 472 res = lambda p: os.path.join(dest,
473 473 os.path.basename(util.localpath(p)))
474 474 else:
475 475 res = lambda p: dest
476 476 return res
477 477
478 478
479 479 pats = scmutil.expandpats(pats)
480 480 if not pats:
481 481 raise util.Abort(_('no source or destination specified'))
482 482 if len(pats) == 1:
483 483 raise util.Abort(_('no destination specified'))
484 484 dest = pats.pop()
485 485 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
486 486 if not destdirexists:
487 487 if len(pats) > 1 or matchmod.patkind(pats[0]):
488 488 raise util.Abort(_('with multiple sources, destination must be an '
489 489 'existing directory'))
490 490 if util.endswithsep(dest):
491 491 raise util.Abort(_('destination %s is not a directory') % dest)
492 492
493 493 tfn = targetpathfn
494 494 if after:
495 495 tfn = targetpathafterfn
496 496 copylist = []
497 497 for pat in pats:
498 498 srcs = walkpat(pat)
499 499 if not srcs:
500 500 continue
501 501 copylist.append((tfn(pat, dest, srcs), srcs))
502 502 if not copylist:
503 503 raise util.Abort(_('no files to copy'))
504 504
505 505 errors = 0
506 506 for targetpath, srcs in copylist:
507 507 for abssrc, relsrc, exact in srcs:
508 508 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
509 509 errors += 1
510 510
511 511 if errors:
512 512 ui.warn(_('(consider using --after)\n'))
513 513
514 514 return errors != 0
515 515
516 516 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
517 517 runargs=None, appendpid=False):
518 518 '''Run a command as a service.'''
519 519
520 520 def writepid(pid):
521 521 if opts['pid_file']:
522 522 mode = appendpid and 'a' or 'w'
523 523 fp = open(opts['pid_file'], mode)
524 524 fp.write(str(pid) + '\n')
525 525 fp.close()
526 526
527 527 if opts['daemon'] and not opts['daemon_pipefds']:
528 528 # Signal child process startup with file removal
529 529 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
530 530 os.close(lockfd)
531 531 try:
532 532 if not runargs:
533 533 runargs = util.hgcmd() + sys.argv[1:]
534 534 runargs.append('--daemon-pipefds=%s' % lockpath)
535 535 # Don't pass --cwd to the child process, because we've already
536 536 # changed directory.
537 537 for i in xrange(1, len(runargs)):
538 538 if runargs[i].startswith('--cwd='):
539 539 del runargs[i]
540 540 break
541 541 elif runargs[i].startswith('--cwd'):
542 542 del runargs[i:i + 2]
543 543 break
544 544 def condfn():
545 545 return not os.path.exists(lockpath)
546 546 pid = util.rundetached(runargs, condfn)
547 547 if pid < 0:
548 548 raise util.Abort(_('child process failed to start'))
549 549 writepid(pid)
550 550 finally:
551 551 try:
552 552 os.unlink(lockpath)
553 553 except OSError, e:
554 554 if e.errno != errno.ENOENT:
555 555 raise
556 556 if parentfn:
557 557 return parentfn(pid)
558 558 else:
559 559 return
560 560
561 561 if initfn:
562 562 initfn()
563 563
564 564 if not opts['daemon']:
565 565 writepid(os.getpid())
566 566
567 567 if opts['daemon_pipefds']:
568 568 lockpath = opts['daemon_pipefds']
569 569 try:
570 570 os.setsid()
571 571 except AttributeError:
572 572 pass
573 573 os.unlink(lockpath)
574 574 util.hidewindow()
575 575 sys.stdout.flush()
576 576 sys.stderr.flush()
577 577
578 578 nullfd = os.open(os.devnull, os.O_RDWR)
579 579 logfilefd = nullfd
580 580 if logfile:
581 581 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
582 582 os.dup2(nullfd, 0)
583 583 os.dup2(logfilefd, 1)
584 584 os.dup2(logfilefd, 2)
585 585 if nullfd not in (0, 1, 2):
586 586 os.close(nullfd)
587 587 if logfile and logfilefd not in (0, 1, 2):
588 588 os.close(logfilefd)
589 589
590 590 if runfn:
591 591 return runfn()
592 592
593 593 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
594 594 """Utility function used by commands.import to import a single patch
595 595
596 596 This function is explicitly defined here to help the evolve extension
597 597 wrap this part of the import logic.
598 598
599 599 The API is currently a bit ugly because it is a simple code translation
600 600 from the import command. Feel free to make it better.
601 601
602 602 :hunk: a patch (as a binary string)
603 603 :parents: nodes that will be parent of the created commit
604 604 :opts: the full dict of options passed to the import command
605 605 :msgs: list to save commit message to.
606 606 (used in case we need to save it when failing)
607 607 :updatefunc: a function that updates a repo to a given node
608 608 updatefunc(<repo>, <node>)
609 609 """
610 610 tmpname, message, user, date, branch, nodeid, p1, p2 = \
611 611 patch.extract(ui, hunk)
612 612
613 613 update = not opts.get('bypass')
614 614 strip = opts["strip"]
615 615 sim = float(opts.get('similarity') or 0)
616 616 if not tmpname:
617 617 return (None, None, False)
618 618 msg = _('applied to working directory')
619 619
620 620 rejects = False
621 621
622 622 try:
623 623 cmdline_message = logmessage(ui, opts)
624 624 if cmdline_message:
625 625 # pick up the cmdline msg
626 626 message = cmdline_message
627 627 elif message:
628 628 # pick up the patch msg
629 629 message = message.strip()
630 630 else:
631 631 # launch the editor
632 632 message = None
633 633 ui.debug('message:\n%s\n' % message)
634 634
635 635 if len(parents) == 1:
636 636 parents.append(repo[nullid])
637 637 if opts.get('exact'):
638 638 if not nodeid or not p1:
639 639 raise util.Abort(_('not a Mercurial patch'))
640 640 p1 = repo[p1]
641 641 p2 = repo[p2 or nullid]
642 642 elif p2:
643 643 try:
644 644 p1 = repo[p1]
645 645 p2 = repo[p2]
646 646 # Without any options, consider p2 only if the
647 647 # patch is being applied on top of the recorded
648 648 # first parent.
649 649 if p1 != parents[0]:
650 650 p1 = parents[0]
651 651 p2 = repo[nullid]
652 652 except error.RepoError:
653 653 p1, p2 = parents
654 654 else:
655 655 p1, p2 = parents
656 656
657 657 n = None
658 658 if update:
659 659 if p1 != parents[0]:
660 660 updatefunc(repo, p1.node())
661 661 if p2 != parents[1]:
662 662 repo.setparents(p1.node(), p2.node())
663 663
664 664 if opts.get('exact') or opts.get('import_branch'):
665 665 repo.dirstate.setbranch(branch or 'default')
666 666
667 667 partial = opts.get('partial', False)
668 668 files = set()
669 669 try:
670 670 patch.patch(ui, repo, tmpname, strip=strip, files=files,
671 671 eolmode=None, similarity=sim / 100.0)
672 672 except patch.PatchError, e:
673 673 if not partial:
674 674 raise util.Abort(str(e))
675 675 # a partial import was requested and some hunks were rejected
676 676 rejects = True
677 677
678 678 files = list(files)
679 679 if opts.get('no_commit'):
680 680 if message:
681 681 msgs.append(message)
682 682 else:
683 683 if opts.get('exact') or p2:
684 684 # If you got here, you either used --force and know what you are
685 685 # doing, or used --exact, or applied a merge patch while updated
686 686 # to its first parent.
687 687 m = None
688 688 else:
689 689 m = scmutil.matchfiles(repo, files or [])
690 690 editform = mergeeditform(repo[None], 'import.normal')
691 691 if opts.get('exact'):
692 692 editor = None
693 693 else:
694 694 editor = getcommiteditor(editform=editform, **opts)
695 695 n = repo.commit(message, opts.get('user') or user,
696 696 opts.get('date') or date, match=m,
697 697 editor=editor, force=partial)
698 698 else:
699 699 if opts.get('exact') or opts.get('import_branch'):
700 700 branch = branch or 'default'
701 701 else:
702 702 branch = p1.branch()
703 703 store = patch.filestore()
704 704 try:
705 705 files = set()
706 706 try:
707 707 patch.patchrepo(ui, repo, p1, store, tmpname, strip,
708 708 files, eolmode=None)
709 709 except patch.PatchError, e:
710 710 raise util.Abort(str(e))
711 711 if opts.get('exact'):
712 712 editor = None
713 713 else:
714 714 editor = getcommiteditor(editform='import.bypass')
715 715 memctx = context.makememctx(repo, (p1.node(), p2.node()),
716 716 message,
717 717 opts.get('user') or user,
718 718 opts.get('date') or date,
719 719 branch, files, store,
720 720 editor=editor)
721 721 n = memctx.commit()
722 722 finally:
723 723 store.close()
724 724 if opts.get('exact') and hex(n) != nodeid:
725 725 raise util.Abort(_('patch is damaged or loses information'))
726 726 if n:
727 727 # i18n: refers to a short changeset id
728 728 msg = _('created %s') % short(n)
729 729 return (msg, n, rejects)
730 730 finally:
731 731 os.unlink(tmpname)
732 732
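# Illustrative sketch (not part of this changeset): one way a caller in the
# style of commands.import might drive tryimportone().  The wrapper name,
# the opts contents and the use of hg.update() are assumptions for the
# example only; opts must at least carry 'strip'.
def _exampletryimportone(ui, repo, hunk, opts):
    from mercurial import hg  # imported locally, sketch only
    msgs = []  # collects commit messages when --no-commit is used
    def updatefunc(repo, node):
        hg.update(repo, node)  # move the working copy when required
    parents = repo[None].parents()
    return tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc)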
733 733 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
734 734 opts=None):
735 735 '''export changesets as hg patches.'''
736 736
737 737 total = len(revs)
738 738 revwidth = max([len(str(rev)) for rev in revs])
739 739 filemode = {}
740 740
741 741 def single(rev, seqno, fp):
742 742 ctx = repo[rev]
743 743 node = ctx.node()
744 744 parents = [p.node() for p in ctx.parents() if p]
745 745 branch = ctx.branch()
746 746 if switch_parent:
747 747 parents.reverse()
748 748 prev = (parents and parents[0]) or nullid
749 749
750 750 shouldclose = False
751 751 if not fp and len(template) > 0:
752 752 desc_lines = ctx.description().rstrip().split('\n')
753 753 desc = desc_lines[0] # Commit always has a first line.
754 754 fp = makefileobj(repo, template, node, desc=desc, total=total,
755 755 seqno=seqno, revwidth=revwidth, mode='wb',
756 756 modemap=filemode)
757 757 if fp != template:
758 758 shouldclose = True
759 759 if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
760 760 repo.ui.note("%s\n" % fp.name)
761 761
762 762 if not fp:
763 763 write = repo.ui.write
764 764 else:
765 765 def write(s, **kw):
766 766 fp.write(s)
767 767
768 768
769 769 write("# HG changeset patch\n")
770 770 write("# User %s\n" % ctx.user())
771 771 write("# Date %d %d\n" % ctx.date())
772 772 write("# %s\n" % util.datestr(ctx.date()))
773 773 if branch and branch != 'default':
774 774 write("# Branch %s\n" % branch)
775 775 write("# Node ID %s\n" % hex(node))
776 776 write("# Parent %s\n" % hex(prev))
777 777 if len(parents) > 1:
778 778 write("# Parent %s\n" % hex(parents[1]))
779 779 write(ctx.description().rstrip())
780 780 write("\n\n")
781 781
782 782 for chunk, label in patch.diffui(repo, prev, node, opts=opts):
783 783 write(chunk, label=label)
784 784
785 785 if shouldclose:
786 786 fp.close()
787 787
788 788 for seqno, rev in enumerate(revs):
789 789 single(rev, seqno + 1, fp)
790 790
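# A minimal usage sketch (assumptions: a repository with at least two
# revisions; '%h' in the template expands to the short hash when the
# output file is created by makefileobj):
def _exampleexport(ui, repo):
    revs = scmutil.revrange(repo, ['-2:'])  # the two most recent changesets
    export(repo, revs, template='hg-%h.patch', opts=patch.diffopts(ui))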
791 791 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
792 792 changes=None, stat=False, fp=None, prefix='',
793 793 listsubrepos=False):
794 794 '''show diff or diffstat.'''
795 795 if fp is None:
796 796 write = ui.write
797 797 else:
798 798 def write(s, **kw):
799 799 fp.write(s)
800 800
801 801 if stat:
802 802 diffopts = diffopts.copy(context=0)
803 803 width = 80
804 804 if not ui.plain():
805 805 width = ui.termwidth()
806 806 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
807 807 prefix=prefix)
808 808 for chunk, label in patch.diffstatui(util.iterlines(chunks),
809 809 width=width,
810 810 git=diffopts.git):
811 811 write(chunk, label=label)
812 812 else:
813 813 for chunk, label in patch.diffui(repo, node1, node2, match,
814 814 changes, diffopts, prefix=prefix):
815 815 write(chunk, label=label)
816 816
817 817 if listsubrepos:
818 818 ctx1 = repo[node1]
819 819 ctx2 = repo[node2]
820 820 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
821 821 tempnode2 = node2
822 822 try:
823 823 if node2 is not None:
824 824 tempnode2 = ctx2.substate[subpath][1]
825 825 except KeyError:
826 826 # A subrepo that existed in node1 was deleted between node1 and
827 827 # node2 (inclusive). Thus, ctx2's substate won't contain that
828 828 # subpath. The best we can do is to ignore it.
829 829 tempnode2 = None
830 830 submatch = matchmod.narrowmatcher(subpath, match)
831 831 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
832 832 stat=stat, fp=fp, prefix=prefix)
833 833
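# Usage sketch (a hypothetical helper, not part of this file's API): render
# a diffstat of the working directory against its first parent; node2=None
# compares against the working copy, and fp=None writes straight to ui.
def _examplediffstat(ui, repo):
    diffordiffstat(ui, repo, patch.diffopts(ui), repo['.'].node(), None,
                   scmutil.matchall(repo), stat=True)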
834 834 class changeset_printer(object):
835 835 '''show changeset information when templating not requested.'''
836 836
837 837 def __init__(self, ui, repo, patch, diffopts, buffered):
838 838 self.ui = ui
839 839 self.repo = repo
840 840 self.buffered = buffered
841 841 self.patch = patch
842 842 self.diffopts = diffopts
843 843 self.header = {}
844 844 self.hunk = {}
845 845 self.lastheader = None
846 846 self.footer = None
847 847
848 848 def flush(self, rev):
849 849 if rev in self.header:
850 850 h = self.header[rev]
851 851 if h != self.lastheader:
852 852 self.lastheader = h
853 853 self.ui.write(h)
854 854 del self.header[rev]
855 855 if rev in self.hunk:
856 856 self.ui.write(self.hunk[rev])
857 857 del self.hunk[rev]
858 858 return 1
859 859 return 0
860 860
861 861 def close(self):
862 862 if self.footer:
863 863 self.ui.write(self.footer)
864 864
865 865 def show(self, ctx, copies=None, matchfn=None, **props):
866 866 if self.buffered:
867 867 self.ui.pushbuffer()
868 868 self._show(ctx, copies, matchfn, props)
869 869 self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
870 870 else:
871 871 self._show(ctx, copies, matchfn, props)
872 872
873 873 def _show(self, ctx, copies, matchfn, props):
874 874 '''show a single changeset or file revision'''
875 875 changenode = ctx.node()
876 876 rev = ctx.rev()
877 877
878 878 if self.ui.quiet:
879 879 self.ui.write("%d:%s\n" % (rev, short(changenode)),
880 880 label='log.node')
881 881 return
882 882
883 883 log = self.repo.changelog
884 884 date = util.datestr(ctx.date())
885 885
886 886 hexfunc = self.ui.debugflag and hex or short
887 887
888 888 parents = [(p, hexfunc(log.node(p)))
889 889 for p in self._meaningful_parentrevs(log, rev)]
890 890
891 891 # i18n: column positioning for "hg log"
892 892 self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
893 893 label='log.changeset changeset.%s' % ctx.phasestr())
894 894
895 895 branch = ctx.branch()
896 896 # don't show the default branch name
897 897 if branch != 'default':
898 898 # i18n: column positioning for "hg log"
899 899 self.ui.write(_("branch: %s\n") % branch,
900 900 label='log.branch')
901 901 for bookmark in self.repo.nodebookmarks(changenode):
902 902 # i18n: column positioning for "hg log"
903 903 self.ui.write(_("bookmark: %s\n") % bookmark,
904 904 label='log.bookmark')
905 905 for tag in self.repo.nodetags(changenode):
906 906 # i18n: column positioning for "hg log"
907 907 self.ui.write(_("tag: %s\n") % tag,
908 908 label='log.tag')
909 909 if self.ui.debugflag and ctx.phase():
910 910 # i18n: column positioning for "hg log"
911 911 self.ui.write(_("phase: %s\n") % _(ctx.phasestr()),
912 912 label='log.phase')
913 913 for parent in parents:
914 914 # i18n: column positioning for "hg log"
915 915 self.ui.write(_("parent: %d:%s\n") % parent,
916 916 label='log.parent changeset.%s' % ctx.phasestr())
917 917
918 918 if self.ui.debugflag:
919 919 mnode = ctx.manifestnode()
920 920 # i18n: column positioning for "hg log"
921 921 self.ui.write(_("manifest: %d:%s\n") %
922 922 (self.repo.manifest.rev(mnode), hex(mnode)),
923 923 label='ui.debug log.manifest')
924 924 # i18n: column positioning for "hg log"
925 925 self.ui.write(_("user: %s\n") % ctx.user(),
926 926 label='log.user')
927 927 # i18n: column positioning for "hg log"
928 928 self.ui.write(_("date: %s\n") % date,
929 929 label='log.date')
930 930
931 931 if self.ui.debugflag:
932 932 files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
933 933 for key, value in zip([# i18n: column positioning for "hg log"
934 934 _("files:"),
935 935 # i18n: column positioning for "hg log"
936 936 _("files+:"),
937 937 # i18n: column positioning for "hg log"
938 938 _("files-:")], files):
939 939 if value:
940 940 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
941 941 label='ui.debug log.files')
942 942 elif ctx.files() and self.ui.verbose:
943 943 # i18n: column positioning for "hg log"
944 944 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
945 945 label='ui.note log.files')
946 946 if copies and self.ui.verbose:
947 947 copies = ['%s (%s)' % c for c in copies]
948 948 # i18n: column positioning for "hg log"
949 949 self.ui.write(_("copies: %s\n") % ' '.join(copies),
950 950 label='ui.note log.copies')
951 951
952 952 extra = ctx.extra()
953 953 if extra and self.ui.debugflag:
954 954 for key, value in sorted(extra.items()):
955 955 # i18n: column positioning for "hg log"
956 956 self.ui.write(_("extra: %s=%s\n")
957 957 % (key, value.encode('string_escape')),
958 958 label='ui.debug log.extra')
959 959
960 960 description = ctx.description().strip()
961 961 if description:
962 962 if self.ui.verbose:
963 963 self.ui.write(_("description:\n"),
964 964 label='ui.note log.description')
965 965 self.ui.write(description,
966 966 label='ui.note log.description')
967 967 self.ui.write("\n\n")
968 968 else:
969 969 # i18n: column positioning for "hg log"
970 970 self.ui.write(_("summary: %s\n") %
971 971 description.splitlines()[0],
972 972 label='log.summary')
973 973 self.ui.write("\n")
974 974
975 975 self.showpatch(changenode, matchfn)
976 976
977 977 def showpatch(self, node, matchfn):
978 978 if not matchfn:
979 979 matchfn = self.patch
980 980 if matchfn:
981 981 stat = self.diffopts.get('stat')
982 982 diff = self.diffopts.get('patch')
983 983 diffopts = patch.diffopts(self.ui, self.diffopts)
984 984 prev = self.repo.changelog.parents(node)[0]
985 985 if stat:
986 986 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
987 987 match=matchfn, stat=True)
988 988 if diff:
989 989 if stat:
990 990 self.ui.write("\n")
991 991 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
992 992 match=matchfn, stat=False)
993 993 self.ui.write("\n")
994 994
995 995 def _meaningful_parentrevs(self, log, rev):
996 996 """Return list of meaningful (or all if debug) parentrevs for rev.
997 997
998 998 For merges (two non-nullrev revisions) both parents are meaningful.
999 999 Otherwise the first parent revision is considered meaningful if it
1000 1000 is not the preceding revision.
1001 1001 """
1002 1002 parents = log.parentrevs(rev)
1003 1003 if not self.ui.debugflag and parents[1] == nullrev:
1004 1004 if parents[0] >= rev - 1:
1005 1005 parents = []
1006 1006 else:
1007 1007 parents = [parents[0]]
1008 1008 return parents
1009 1009
1010 1010
1011 1011 class changeset_templater(changeset_printer):
1012 1012 '''format changeset information.'''
1013 1013
1014 1014 def __init__(self, ui, repo, patch, diffopts, tmpl, mapfile, buffered):
1015 1015 changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
1016 1016 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
1017 1017 defaulttempl = {
1018 1018 'parent': '{rev}:{node|formatnode} ',
1019 1019 'manifest': '{rev}:{node|formatnode}',
1020 1020 'file_copy': '{name} ({source})',
1021 1021 'extra': '{key}={value|stringescape}'
1022 1022 }
1023 1023 # filecopy is preserved for compatibility reasons
1024 1024 defaulttempl['filecopy'] = defaulttempl['file_copy']
1025 1025 self.t = templater.templater(mapfile, {'formatnode': formatnode},
1026 1026 cache=defaulttempl)
1027 1027 if tmpl:
1028 1028 self.t.cache['changeset'] = tmpl
1029 1029
1030 1030 self.cache = {}
1031 1031
1032 1032 def _meaningful_parentrevs(self, ctx):
1033 1033 """Return list of meaningful (or all if debug) parentrevs for rev.
1034 1034 """
1035 1035 parents = ctx.parents()
1036 1036 if len(parents) > 1:
1037 1037 return parents
1038 1038 if self.ui.debugflag:
1039 1039 return [parents[0], self.repo['null']]
1040 1040 if parents[0].rev() >= ctx.rev() - 1:
1041 1041 return []
1042 1042 return parents
1043 1043
1044 1044 def _show(self, ctx, copies, matchfn, props):
1045 1045 '''show a single changeset or file revision'''
1046 1046
1047 1047 showlist = templatekw.showlist
1048 1048
1049 1049 # showparents() behaviour depends on ui trace level which
1050 1050 # causes unexpected behaviours at templating level and makes
1051 1051 # it harder to extract into a standalone function. Its
1052 1052 # behaviour cannot be changed so leave it here for now.
1053 1053 def showparents(**args):
1054 1054 ctx = args['ctx']
1055 1055 parents = [[('rev', p.rev()), ('node', p.hex())]
1056 1056 for p in self._meaningful_parentrevs(ctx)]
1057 1057 return showlist('parent', parents, **args)
1058 1058
1059 1059 props = props.copy()
1060 1060 props.update(templatekw.keywords)
1061 1061 props['parents'] = showparents
1062 1062 props['templ'] = self.t
1063 1063 props['ctx'] = ctx
1064 1064 props['repo'] = self.repo
1065 1065 props['revcache'] = {'copies': copies}
1066 1066 props['cache'] = self.cache
1067 1067
1068 1068 # find correct templates for current mode
1069 1069
1070 1070 tmplmodes = [
1071 1071 (True, None),
1072 1072 (self.ui.verbose, 'verbose'),
1073 1073 (self.ui.quiet, 'quiet'),
1074 1074 (self.ui.debugflag, 'debug'),
1075 1075 ]
1076 1076
1077 1077 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
1078 1078 for mode, postfix in tmplmodes:
1079 1079 for type in types:
1080 1080 cur = postfix and ('%s_%s' % (type, postfix)) or type
1081 1081 if mode and cur in self.t:
1082 1082 types[type] = cur
1083 1083
1084 1084 try:
1085 1085
1086 1086 # write header
1087 1087 if types['header']:
1088 1088 h = templater.stringify(self.t(types['header'], **props))
1089 1089 if self.buffered:
1090 1090 self.header[ctx.rev()] = h
1091 1091 else:
1092 1092 if self.lastheader != h:
1093 1093 self.lastheader = h
1094 1094 self.ui.write(h)
1095 1095
1096 1096 # write changeset metadata, then patch if requested
1097 1097 key = types['changeset']
1098 1098 self.ui.write(templater.stringify(self.t(key, **props)))
1099 1099 self.showpatch(ctx.node(), matchfn)
1100 1100
1101 1101 if types['footer']:
1102 1102 if not self.footer:
1103 1103 self.footer = templater.stringify(self.t(types['footer'],
1104 1104 **props))
1105 1105
1106 1106 except KeyError, inst:
1107 1107 msg = _("%s: no key named '%s'")
1108 1108 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
1109 1109 except SyntaxError, inst:
1110 1110 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1111 1111
1112 1112 def gettemplate(ui, tmpl, style):
1113 1113 """
1114 1114 Find the template matching the given template spec or style.
1115 1115 """
1116 1116
1117 1117 # ui settings
1118 1118 if not tmpl and not style:
1119 1119 tmpl = ui.config('ui', 'logtemplate')
1120 1120 if tmpl:
1121 1121 try:
1122 1122 tmpl = templater.parsestring(tmpl)
1123 1123 except SyntaxError:
1124 1124 tmpl = templater.parsestring(tmpl, quoted=False)
1125 1125 return tmpl, None
1126 1126 else:
1127 1127 style = util.expandpath(ui.config('ui', 'style', ''))
1128 1128
1129 1129 if style:
1130 1130 mapfile = style
1131 1131 if not os.path.split(mapfile)[0]:
1132 1132 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1133 1133 or templater.templatepath(mapfile))
1134 1134 if mapname:
1135 1135 mapfile = mapname
1136 1136 return None, mapfile
1137 1137
1138 1138 if not tmpl:
1139 1139 return None, None
1140 1140
1141 1141 # looks like a literal template?
1142 1142 if '{' in tmpl:
1143 1143 return tmpl, None
1144 1144
1145 1145 # perhaps a stock style?
1146 1146 if not os.path.split(tmpl)[0]:
1147 1147 mapname = (templater.templatepath('map-cmdline.' + tmpl)
1148 1148 or templater.templatepath(tmpl))
1149 1149 if mapname and os.path.isfile(mapname):
1150 1150 return None, mapname
1151 1151
1152 1152 # perhaps it's a reference to [templates]
1153 1153 t = ui.config('templates', tmpl)
1154 1154 if t:
1155 1155 try:
1156 1156 tmpl = templater.parsestring(t)
1157 1157 except SyntaxError:
1158 1158 tmpl = templater.parsestring(t, quoted=False)
1159 1159 return tmpl, None
1160 1160
1161 1161 if tmpl == 'list':
1162 1162 ui.write(_("available styles: %s\n") % templater.stylelist())
1163 1163 raise util.Abort(_("specify a template"))
1164 1164
1165 1165 # perhaps it's a path to a map or a template
1166 1166 if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
1167 1167 # is it a mapfile for a style?
1168 1168 if os.path.basename(tmpl).startswith("map-"):
1169 1169 return None, os.path.realpath(tmpl)
1170 1170 tmpl = open(tmpl).read()
1171 1171 return tmpl, None
1172 1172
1173 1173 # constant string?
1174 1174 return tmpl, None
1175 1175
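# A small sketch of the resolution rules above (the 'compact' style is a
# stock map file shipped with Mercurial; everything else here is just an
# illustration):
def _exampletemplatespec(ui):
    tmpl, mapfile = gettemplate(ui, '{rev}:{node|short}\n', None)
    assert mapfile is None  # contains '{', so treated as a literal template
    tmpl, mapfile = gettemplate(ui, None, 'compact')
    assert tmpl is None     # style name resolved to a map file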
1176 1176 def show_changeset(ui, repo, opts, buffered=False):
1177 1177 """show one changeset using template or regular display.
1178 1178
1179 1179 Display format will be the first non-empty hit of:
1180 1180 1. option 'template'
1181 1181 2. option 'style'
1182 1182 3. [ui] setting 'logtemplate'
1183 1183 4. [ui] setting 'style'
1184 1184 If all of these values are either unset or the empty string,
1185 1185 regular display via changeset_printer() is done.
1186 1186 """
1187 1187 # options
1188 1188 patch = None
1189 1189 if opts.get('patch') or opts.get('stat'):
1190 1190 patch = scmutil.matchall(repo)
1191 1191
1192 1192 tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
1193 1193
1194 1194 if not tmpl and not mapfile:
1195 1195 return changeset_printer(ui, repo, patch, opts, buffered)
1196 1196
1197 1197 try:
1198 1198 t = changeset_templater(ui, repo, patch, opts, tmpl, mapfile, buffered)
1199 1199 except SyntaxError, inst:
1200 1200 raise util.Abort(inst.args[0])
1201 1201 return t
1202 1202
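# Minimal sketch: build a displayer with empty options (so the user's
# configured logtemplate/style, if any, decides the format) and show the
# working directory's parent.
def _exampleshowchangeset(ui, repo):
    displayer = show_changeset(ui, repo, {})
    displayer.show(repo['.'])
    displayer.close()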
1203 1203 def showmarker(ui, marker):
1204 1204 """utility function to display obsolescence marker in a readable way
1205 1205
1206 1206 To be used by debug function."""
1207 1207 ui.write(hex(marker.precnode()))
1208 1208 for repl in marker.succnodes():
1209 1209 ui.write(' ')
1210 1210 ui.write(hex(repl))
1211 1211 ui.write(' %X ' % marker.flags())
1212 1212 parents = marker.parentnodes()
1213 1213 if parents is not None:
1214 1214 ui.write('{%s} ' % ', '.join(hex(p) for p in parents))
1215 1215 ui.write('(%s) ' % util.datestr(marker.date()))
1216 1216 ui.write('{%s}' % (', '.join('%r: %r' % t for t in
1217 1217 sorted(marker.metadata().items())
1218 1218 if t[0] != 'date')))
1219 1219 ui.write('\n')
1220 1220
1221 1221 def finddate(ui, repo, date):
1222 1222 """Find the tipmost changeset that matches the given date spec"""
1223 1223
1224 1224 df = util.matchdate(date)
1225 1225 m = scmutil.matchall(repo)
1226 1226 results = {}
1227 1227
1228 1228 def prep(ctx, fns):
1229 1229 d = ctx.date()
1230 1230 if df(d[0]):
1231 1231 results[ctx.rev()] = d
1232 1232
1233 1233 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
1234 1234 rev = ctx.rev()
1235 1235 if rev in results:
1236 1236 ui.status(_("found revision %s from %s\n") %
1237 1237 (rev, util.datestr(results[rev])))
1238 1238 return str(rev)
1239 1239
1240 1240 raise util.Abort(_("revision matching date not found"))
1241 1241
1242 1242 def increasingwindows(windowsize=8, sizelimit=512):
1243 1243 while True:
1244 1244 yield windowsize
1245 1245 if windowsize < sizelimit:
1246 1246 windowsize *= 2
1247 1247
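# The growth pattern, concretely: windows double from 8 and are pinned at
# sizelimit once reached (a pure illustration).
def _examplewindows():
    import itertools
    return list(itertools.islice(increasingwindows(), 9))
    # -> [8, 16, 32, 64, 128, 256, 512, 512, 512]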
1248 1248 class FileWalkError(Exception):
1249 1249 pass
1250 1250
1251 1251 def walkfilerevs(repo, match, follow, revs, fncache):
1252 1252 '''Walks the file history for the matched files.
1253 1253
1254 1254 Returns the changeset revs that are involved in the file history.
1255 1255
1256 1256 Throws FileWalkError if the file history can't be walked using
1257 1257 filelogs alone.
1258 1258 '''
1259 1259 wanted = set()
1260 1260 copies = []
1261 1261 minrev, maxrev = min(revs), max(revs)
1262 1262 def filerevgen(filelog, last):
1263 1263 """
1264 1264 Only files, no patterns. Check the history of each file.
1265 1265
1266 1266 Examines filelog entries within the minrev/maxrev linkrev range and
1267 1267 returns an iterator yielding (linkrev, parentlinkrevs, copied)
1268 1268 tuples in backwards order.
1269 1269 """
1270 1270 cl_count = len(repo)
1271 1271 revs = []
1272 1272 for j in xrange(0, last + 1):
1273 1273 linkrev = filelog.linkrev(j)
1274 1274 if linkrev < minrev:
1275 1275 continue
1276 1276 # only yield rev for which we have the changelog, it can
1277 1277 # happen while doing "hg log" during a pull or commit
1278 1278 if linkrev >= cl_count:
1279 1279 break
1280 1280
1281 1281 parentlinkrevs = []
1282 1282 for p in filelog.parentrevs(j):
1283 1283 if p != nullrev:
1284 1284 parentlinkrevs.append(filelog.linkrev(p))
1285 1285 n = filelog.node(j)
1286 1286 revs.append((linkrev, parentlinkrevs,
1287 1287 follow and filelog.renamed(n)))
1288 1288
1289 1289 return reversed(revs)
1290 1290 def iterfiles():
1291 1291 pctx = repo['.']
1292 1292 for filename in match.files():
1293 1293 if follow:
1294 1294 if filename not in pctx:
1295 1295 raise util.Abort(_('cannot follow file not in parent '
1296 1296 'revision: "%s"') % filename)
1297 1297 yield filename, pctx[filename].filenode()
1298 1298 else:
1299 1299 yield filename, None
1300 1300 for filename_node in copies:
1301 1301 yield filename_node
1302 1302
1303 1303 for file_, node in iterfiles():
1304 1304 filelog = repo.file(file_)
1305 1305 if not len(filelog):
1306 1306 if node is None:
1307 1307 # A zero count may be a directory or deleted file, so
1308 1308 # try to find matching entries on the slow path.
1309 1309 if follow:
1310 1310 raise util.Abort(
1311 1311 _('cannot follow nonexistent file: "%s"') % file_)
1312 1312 raise FileWalkError("Cannot walk via filelog")
1313 1313 else:
1314 1314 continue
1315 1315
1316 1316 if node is None:
1317 1317 last = len(filelog) - 1
1318 1318 else:
1319 1319 last = filelog.rev(node)
1320 1320
1321 1321
1322 1322 # keep track of all ancestors of the file
1323 1323 ancestors = set([filelog.linkrev(last)])
1324 1324
1325 1325 # iterate from latest to oldest revision
1326 1326 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1327 1327 if not follow:
1328 1328 if rev > maxrev:
1329 1329 continue
1330 1330 else:
1331 1331 # Note that last might not be the first interesting
1332 1332 # rev to us:
1333 1333 # if the file has been changed after maxrev, we'll
1334 1334 # have linkrev(last) > maxrev, and we still need
1335 1335 # to explore the file graph
1336 1336 if rev not in ancestors:
1337 1337 continue
1338 1338 # XXX insert 1327 fix here
1339 1339 if flparentlinkrevs:
1340 1340 ancestors.update(flparentlinkrevs)
1341 1341
1342 1342 fncache.setdefault(rev, []).append(file_)
1343 1343 wanted.add(rev)
1344 1344 if copied:
1345 1345 copies.append(copied)
1346 1346
1347 1347 return wanted
1348 1348
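# Usage sketch ('README' is a hypothetical file name; assumes a non-empty
# repository): collect the revisions touching one file, falling back when
# the filelog alone cannot answer.
def _examplewalkfilerevs(repo):
    fncache = {}
    m = scmutil.matchfiles(repo, ['README'])
    try:
        return walkfilerevs(repo, m, False, repo.revs('all()'), fncache)
    except FileWalkError:
        return None  # caller must take the slow changelog-scanning path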
1349 1349 def walkchangerevs(repo, match, opts, prepare):
1350 1350 '''Iterate over files and the revs in which they changed.
1351 1351
1352 1352 Callers most commonly need to iterate backwards over the history
1353 1353 in which they are interested. Doing so has awful (quadratic-looking)
1354 1354 performance, so we use iterators in a "windowed" way.
1355 1355
1356 1356 We walk a window of revisions in the desired order. Within the
1357 1357 window, we first walk forwards to gather data, then in the desired
1358 1358 order (usually backwards) to display it.
1359 1359
1360 1360 This function returns an iterator yielding contexts. Before
1361 1361 yielding each context, the iterator will first call the prepare
1362 1362 function on each context in the window in forward order.'''
1363 1363
1364 1364 follow = opts.get('follow') or opts.get('follow_first')
1365 1365
1366 1366 if opts.get('rev'):
1367 1367 revs = scmutil.revrange(repo, opts.get('rev'))
1368 1368 elif follow:
1369 1369 revs = repo.revs('reverse(:.)')
1370 1370 else:
1371 1371 revs = revset.spanset(repo)
1372 1372 revs.reverse()
1373 1373 if not revs:
1374 1374 return []
1375 1375 wanted = set()
1376 1376 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1377 1377 fncache = {}
1378 1378 change = repo.changectx
1379 1379
1380 1380 # First step is to fill wanted, the set of revisions that we want to yield.
1381 1381 # When it does not induce extra cost, we also fill fncache for revisions in
1382 1382 # wanted: a cache of filenames that were changed (ctx.files()) and that
1383 1383 # match the file filtering conditions.
1384 1384
1385 1385 if not slowpath and not match.files():
1386 1386 # No files, no patterns. Display all revs.
1387 1387 wanted = revs
1388 1388
1389 1389 if not slowpath and match.files():
1390 1390 # We only have to read through the filelog to find wanted revisions
1391 1391
1392 1392 try:
1393 1393 wanted = walkfilerevs(repo, match, follow, revs, fncache)
1394 1394 except FileWalkError:
1395 1395 slowpath = True
1396 1396
1397 1397 # We decided to fall back to the slowpath because at least one
1398 1398 # of the paths was not a file. Check to see if at least one of them
1399 1399 # existed in history, otherwise simply return
1400 1400 for path in match.files():
1401 1401 if path == '.' or path in repo.store:
1402 1402 break
1403 1403 else:
1404 1404 return []
1405 1405
1406 1406 if slowpath:
1407 1407 # We have to read the changelog to match filenames against
1408 1408 # changed files
1409 1409
1410 1410 if follow:
1411 1411 raise util.Abort(_('can only follow copies/renames for explicit '
1412 1412 'filenames'))
1413 1413
1414 1414 # The slow path checks files modified in every changeset.
1415 1415 # This is really slow on large repos, so compute the set lazily.
1416 1416 class lazywantedset(object):
1417 1417 def __init__(self):
1418 1418 self.set = set()
1419 1419 self.revs = set(revs)
1420 1420
1421 1421 # No need to worry about locality here because it will be accessed
1422 1422 # in the same order as the increasing window below.
1423 1423 def __contains__(self, value):
1424 1424 if value in self.set:
1425 1425 return True
1426 1426 elif not value in self.revs:
1427 1427 return False
1428 1428 else:
1429 1429 self.revs.discard(value)
1430 1430 ctx = change(value)
1431 1431 matches = filter(match, ctx.files())
1432 1432 if matches:
1433 1433 fncache[value] = matches
1434 1434 self.set.add(value)
1435 1435 return True
1436 1436 return False
1437 1437
1438 1438 def discard(self, value):
1439 1439 self.revs.discard(value)
1440 1440 self.set.discard(value)
1441 1441
1442 1442 wanted = lazywantedset()
1443 1443
1444 1444 class followfilter(object):
1445 1445 def __init__(self, onlyfirst=False):
1446 1446 self.startrev = nullrev
1447 1447 self.roots = set()
1448 1448 self.onlyfirst = onlyfirst
1449 1449
1450 1450 def match(self, rev):
1451 1451 def realparents(rev):
1452 1452 if self.onlyfirst:
1453 1453 return repo.changelog.parentrevs(rev)[0:1]
1454 1454 else:
1455 1455 return filter(lambda x: x != nullrev,
1456 1456 repo.changelog.parentrevs(rev))
1457 1457
1458 1458 if self.startrev == nullrev:
1459 1459 self.startrev = rev
1460 1460 return True
1461 1461
1462 1462 if rev > self.startrev:
1463 1463 # forward: all descendants
1464 1464 if not self.roots:
1465 1465 self.roots.add(self.startrev)
1466 1466 for parent in realparents(rev):
1467 1467 if parent in self.roots:
1468 1468 self.roots.add(rev)
1469 1469 return True
1470 1470 else:
1471 1471 # backwards: all parents
1472 1472 if not self.roots:
1473 1473 self.roots.update(realparents(self.startrev))
1474 1474 if rev in self.roots:
1475 1475 self.roots.remove(rev)
1476 1476 self.roots.update(realparents(rev))
1477 1477 return True
1478 1478
1479 1479 return False
1480 1480
1481 1481 # it might be worthwhile to do this in the iterator if the rev range
1482 1482 # is descending and the prune args are all within that range
1483 1483 for rev in opts.get('prune', ()):
1484 1484 rev = repo[rev].rev()
1485 1485 ff = followfilter()
1486 1486 stop = min(revs[0], revs[-1])
1487 1487 for x in xrange(rev, stop - 1, -1):
1488 1488 if ff.match(x):
1489 1489 wanted = wanted - [x]
1490 1490
1491 1491 # Now that wanted is correctly initialized, we can iterate over the
1492 1492 # revision range, yielding only revisions in wanted.
1493 1493 def iterate():
1494 1494 if follow and not match.files():
1495 1495 ff = followfilter(onlyfirst=opts.get('follow_first'))
1496 1496 def want(rev):
1497 1497 return ff.match(rev) and rev in wanted
1498 1498 else:
1499 1499 def want(rev):
1500 1500 return rev in wanted
1501 1501
1502 1502 it = iter(revs)
1503 1503 stopiteration = False
1504 1504 for windowsize in increasingwindows():
1505 1505 nrevs = []
1506 1506 for i in xrange(windowsize):
1507 1507 try:
1508 1508 rev = it.next()
1509 1509 if want(rev):
1510 1510 nrevs.append(rev)
1511 1511 except StopIteration:
1512 1512 stopiteration = True
1513 1513 break
1514 1514 for rev in sorted(nrevs):
1515 1515 fns = fncache.get(rev)
1516 1516 ctx = change(rev)
1517 1517 if not fns:
1518 1518 def fns_generator():
1519 1519 for f in ctx.files():
1520 1520 if match(f):
1521 1521 yield f
1522 1522 fns = fns_generator()
1523 1523 prepare(ctx, fns)
1524 1524 for rev in nrevs:
1525 1525 yield change(rev)
1526 1526
1527 1527 if stopiteration:
1528 1528 break
1529 1529
1530 1530 return iterate()
1531 1531
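# Sketch of the calling protocol, much like finddate() above uses it:
# prepare() is invoked on every context in a window before the window's
# contexts are yielded in the desired order.
def _examplewalkchangerevs(ui, repo):
    collected = []
    def prepare(ctx, fns):
        collected.append((ctx.rev(), list(fns)))  # per-revision bookkeeping
    for ctx in walkchangerevs(repo, scmutil.matchall(repo),
                              {'rev': None}, prepare):
        pass  # display or post-process ctx here
    return collected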
1532 1532 def _makefollowlogfilematcher(repo, files, followfirst):
1533 1533 # When displaying a revision with --patch --follow FILE, we have
1534 1534 # to know which file of the revision must be diffed. With
1535 1535 # --follow, we want the names of the ancestors of FILE in the
1536 1536 # revision, stored in "fcache". "fcache" is populated by
1537 1537 # reproducing the graph traversal already done by --follow revset
1538 1538 # and relating linkrevs to file names (which is not "correct" but
1539 1539 # good enough).
1540 1540 fcache = {}
1541 1541 fcacheready = [False]
1542 1542 pctx = repo['.']
1543 1543
1544 1544 def populate():
1545 1545 for fn in files:
1546 1546 for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
1547 1547 for c in i:
1548 1548 fcache.setdefault(c.linkrev(), set()).add(c.path())
1549 1549
1550 1550 def filematcher(rev):
1551 1551 if not fcacheready[0]:
1552 1552 # Lazy initialization
1553 1553 fcacheready[0] = True
1554 1554 populate()
1555 1555 return scmutil.matchfiles(repo, fcache.get(rev, []))
1556 1556
1557 1557 return filematcher
1558 1558
1559 1559 def _makenofollowlogfilematcher(repo, pats, opts):
1560 1560 '''hook for extensions to override the filematcher for non-follow cases'''
1561 1561 return None
1562 1562
1563 1563 def _makelogrevset(repo, pats, opts, revs):
1564 1564 """Return (expr, filematcher) where expr is a revset string built
1565 1565 from log options and file patterns or None. If --stat or --patch
1566 1566 are not passed filematcher is None. Otherwise it is a callable
1567 1567 taking a revision number and returning a match object filtering
1568 1568 the files to be detailed when displaying the revision.
1569 1569 """
1570 1570 opt2revset = {
1571 1571 'no_merges': ('not merge()', None),
1572 1572 'only_merges': ('merge()', None),
1573 1573 '_ancestors': ('ancestors(%(val)s)', None),
1574 1574 '_fancestors': ('_firstancestors(%(val)s)', None),
1575 1575 '_descendants': ('descendants(%(val)s)', None),
1576 1576 '_fdescendants': ('_firstdescendants(%(val)s)', None),
1577 1577 '_matchfiles': ('_matchfiles(%(val)s)', None),
1578 1578 'date': ('date(%(val)r)', None),
1579 1579 'branch': ('branch(%(val)r)', ' or '),
1580 1580 '_patslog': ('filelog(%(val)r)', ' or '),
1581 1581 '_patsfollow': ('follow(%(val)r)', ' or '),
1582 1582 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
1583 1583 'keyword': ('keyword(%(val)r)', ' or '),
1584 1584 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
1585 1585 'user': ('user(%(val)r)', ' or '),
1586 1586 }
1587 1587
1588 1588 opts = dict(opts)
1589 1589 # follow or not follow?
1590 1590 follow = opts.get('follow') or opts.get('follow_first')
1591 1591 followfirst = opts.get('follow_first') and 1 or 0
1592 1592 # --follow with FILE behaviour depends on revs...
1593 1593 it = iter(revs)
1594 1594 startrev = it.next()
1595 1595 try:
1596 1596 followdescendants = startrev < it.next()
1597 1597 except StopIteration:
1598 1598 followdescendants = False
1599 1599
1600 1600 # branch and only_branch are really aliases and must be handled at
1601 1601 # the same time
1602 1602 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
1603 1603 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
1604 1604 # pats/include/exclude are passed to match.match() directly in
1605 1605 # _matchfiles() revset but walkchangerevs() builds its matcher with
1606 1606 # scmutil.match(). The difference is input pats are globbed on
1607 1607 # platforms without shell expansion (windows).
1608 1608 pctx = repo[None]
1609 1609 match, pats = scmutil.matchandpats(pctx, pats, opts)
1610 1610 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1611 1611 if not slowpath:
1612 1612 for f in match.files():
1613 1613 if follow and f not in pctx:
1614 1614 # If the file exists, it may be a directory, so let it
1615 1615 # take the slow path.
1616 1616 if os.path.exists(repo.wjoin(f)):
1617 1617 slowpath = True
1618 1618 continue
1619 1619 else:
1620 1620 raise util.Abort(_('cannot follow file not in parent '
1621 1621 'revision: "%s"') % f)
1622 1622 filelog = repo.file(f)
1623 1623 if not filelog:
1624 1624 # A zero count may be a directory or deleted file, so
1625 1625 # try to find matching entries on the slow path.
1626 1626 if follow:
1627 1627 raise util.Abort(
1628 1628 _('cannot follow nonexistent file: "%s"') % f)
1629 1629 slowpath = True
1630 1630
1631 1631 # We decided to fall back to the slowpath because at least one
1632 1632 # of the paths was not a file. Check to see if at least one of them
1633 1633 # existed in history - in that case, we'll continue down the
1634 1634 # slowpath; otherwise, we can turn off the slowpath
1635 1635 if slowpath:
1636 1636 for path in match.files():
1637 1637 if path == '.' or path in repo.store:
1638 1638 break
1639 1639 else:
1640 1640 slowpath = False
1641 1641
1642 1642 if slowpath:
1643 1643 # See walkchangerevs() slow path.
1644 1644 #
1645 1645 # pats/include/exclude cannot be represented as separate
1646 1646 # revset expressions as their filtering logic applies at file
1647 1647 # level. For instance "-I a -X a" matches a revision touching
1648 1648 # "a" and "b" while "file(a) and not file(b)" does
1649 1649 # not. Besides, filesets are evaluated against the working
1650 1650 # directory.
1651 1651 matchargs = ['r:', 'd:relpath']
1652 1652 for p in pats:
1653 1653 matchargs.append('p:' + p)
1654 1654 for p in opts.get('include', []):
1655 1655 matchargs.append('i:' + p)
1656 1656 for p in opts.get('exclude', []):
1657 1657 matchargs.append('x:' + p)
1658 1658 matchargs = ','.join(('%r' % p) for p in matchargs)
1659 1659 opts['_matchfiles'] = matchargs
1660 1660 else:
1661 1661 if follow:
1662 1662 fpats = ('_patsfollow', '_patsfollowfirst')
1663 1663 fnopats = (('_ancestors', '_fancestors'),
1664 1664 ('_descendants', '_fdescendants'))
1665 1665 if pats:
1666 1666 # follow() revset interprets its file argument as a
1667 1667 # manifest entry, so use match.files(), not pats.
1668 1668 opts[fpats[followfirst]] = list(match.files())
1669 1669 else:
1670 1670 opts[fnopats[followdescendants][followfirst]] = str(startrev)
1671 1671 else:
1672 1672 opts['_patslog'] = list(pats)
1673 1673
1674 1674 filematcher = None
1675 1675 if opts.get('patch') or opts.get('stat'):
1676 1676 # When following files, track renames via a special matcher.
1677 1677 # If we're forced to take the slowpath it means we're following
1678 1678 # at least one pattern/directory, so don't bother with rename tracking.
1679 1679 if follow and not match.always() and not slowpath:
1681 1681 # _makefollowlogfilematcher expects its files argument to be relative to
1681 1681 # the repo root, so use match.files(), not pats.
1682 1682 filematcher = _makefollowlogfilematcher(repo, match.files(),
1683 1683 followfirst)
1684 1684 else:
1685 1685 filematcher = _makenofollowlogfilematcher(repo, pats, opts)
1686 1686 if filematcher is None:
1687 1687 filematcher = lambda rev: match
1688 1688
1689 1689 expr = []
1690 1690 for op, val in opts.iteritems():
1691 1691 if not val:
1692 1692 continue
1693 1693 if op not in opt2revset:
1694 1694 continue
1695 1695 revop, andor = opt2revset[op]
1696 1696 if '%(val)' not in revop:
1697 1697 expr.append(revop)
1698 1698 else:
1699 1699 if not isinstance(val, list):
1700 1700 e = revop % {'val': val}
1701 1701 else:
1702 1702 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
1703 1703 expr.append(e)
1704 1704
1705 1705 if expr:
1706 1706 expr = '(' + ' and '.join(expr) + ')'
1707 1707 else:
1708 1708 expr = None
1709 1709 return expr, filematcher
1710 1710
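# An illustration of the expansion (hypothetical option values; assumes a
# non-empty repository): list-valued options are joined with their own
# separator, then all clauses are joined with 'and', so expr is, up to
# dict ordering, "(keyword('bug') and (user('alice') or user('bob')))".
def _examplelogrevset(repo):
    opts = {'keyword': ['bug'], 'user': ['alice', 'bob']}
    expr, filematcher = _makelogrevset(repo, [], opts, repo.revs('all()'))
    return expr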
1711 1711 def getgraphlogrevs(repo, pats, opts):
1712 1712 """Return (revs, expr, filematcher) where revs is an iterable of
1713 1713 revision numbers, expr is a revset string built from log options
1714 1714 and file patterns or None, and used to filter 'revs'. If --stat or
1715 1715 --patch are not passed filematcher is None. Otherwise it is a
1716 1716 callable taking a revision number and returning a match object
1717 1717 filtering the files to be detailed when displaying the revision.
1718 1718 """
1719 1719 if not len(repo):
1720 1720 return [], None, None
1721 1721 limit = loglimit(opts)
1722 1722 # Default --rev value depends on --follow but --follow behaviour
1723 1723 # depends on revisions resolved from --rev...
1724 1724 follow = opts.get('follow') or opts.get('follow_first')
1725 1725 possiblyunsorted = False # whether revs might need sorting
1726 1726 if opts.get('rev'):
1727 1727 revs = scmutil.revrange(repo, opts['rev'])
1728 1728 # Don't sort here because _makelogrevset might depend on the
1729 1729 # order of revs
1730 1730 possiblyunsorted = True
1731 1731 else:
1732 1732 if follow and len(repo) > 0:
1733 1733 revs = repo.revs('reverse(:.)')
1734 1734 else:
1735 1735 revs = revset.spanset(repo)
1736 1736 revs.reverse()
1737 1737 if not revs:
1738 1738 return revset.baseset(), None, None
1739 1739 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
1740 1740 if possiblyunsorted:
1741 1741 revs.sort(reverse=True)
1742 1742 if expr:
1743 1743 # Revset matchers often operate faster on revisions in changelog
1744 1744 # order, because most filters deal with the changelog.
1745 1745 revs.reverse()
1746 1746 matcher = revset.match(repo.ui, expr)
1747 1747 # Revset matches can reorder revisions. "A or B" typically returns
1748 1748 # the revision matching A then the revision matching B. Sort again
1749 1749 # to fix that.
1750 1750 revs = matcher(repo, revs)
1751 1751 revs.sort(reverse=True)
1752 1752 if limit is not None:
1753 1753 limitedrevs = revset.baseset()
1754 1754 for idx, rev in enumerate(revs):
1755 1755 if idx >= limit:
1756 1756 break
1757 1757 limitedrevs.append(rev)
1758 1758 revs = limitedrevs
1759 1759
1760 1760 return revs, expr, filematcher
1761 1761
1762 1762 def getlogrevs(repo, pats, opts):
1763 1763 """Return (revs, expr, filematcher) where revs is an iterable of
1764 1764 revision numbers, expr is a revset string built from log options
1765 1765 and file patterns or None, and used to filter 'revs'. If --stat or
1766 1766 --patch are not passed filematcher is None. Otherwise it is a
1767 1767 callable taking a revision number and returning a match object
1768 1768 filtering the files to be detailed when displaying the revision.
1769 1769 """
1770 1770 limit = loglimit(opts)
1771 1771 # Default --rev value depends on --follow but --follow behaviour
1772 1772 # depends on revisions resolved from --rev...
1773 1773 follow = opts.get('follow') or opts.get('follow_first')
1774 1774 if opts.get('rev'):
1775 1775 revs = scmutil.revrange(repo, opts['rev'])
1776 1776 elif follow:
1777 1777 revs = repo.revs('reverse(:.)')
1778 1778 else:
1779 1779 revs = revset.spanset(repo)
1780 1780 revs.reverse()
1781 1781 if not revs:
1782 1782 return revset.baseset([]), None, None
1783 1783 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
1784 1784 if expr:
1785 1785 # Revset matchers often operate faster on revisions in changelog
1786 1786 # order, because most filters deal with the changelog.
1787 1787 if not opts.get('rev'):
1788 1788 revs.reverse()
1789 1789 matcher = revset.match(repo.ui, expr)
1790 1790 # Revset matches can reorder revisions. "A or B" typically returns
1791 1791 # the revision matching A then the revision matching B. Sort again
1792 1792 # to fix that.
1793 1793 revs = matcher(repo, revs)
1794 1794 if not opts.get('rev'):
1795 1795 revs.sort(reverse=True)
1796 1796 if limit is not None:
1797 1797 count = 0
1798 1798 limitedrevs = revset.baseset([])
1799 1799 it = iter(revs)
1800 1800 while count < limit:
1801 1801 try:
1802 1802 limitedrevs.append(it.next())
1803 1803 except StopIteration:
1804 1804 break
1805 1805 count += 1
1806 1806 revs = limitedrevs
1807 1807
1808 1808 return revs, expr, filematcher
1809 1809
1810 1810 def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
1811 1811 filematcher=None):
1812 1812 seen, state = [], graphmod.asciistate()
1813 1813 for rev, type, ctx, parents in dag:
1814 1814 char = 'o'
1815 1815 if ctx.node() in showparents:
1816 1816 char = '@'
1817 1817 elif ctx.obsolete():
1818 1818 char = 'x'
1819 1819 copies = None
1820 1820 if getrenamed and ctx.rev():
1821 1821 copies = []
1822 1822 for fn in ctx.files():
1823 1823 rename = getrenamed(fn, ctx.rev())
1824 1824 if rename:
1825 1825 copies.append((fn, rename[0]))
1826 1826 revmatchfn = None
1827 1827 if filematcher is not None:
1828 1828 revmatchfn = filematcher(ctx.rev())
1829 1829 displayer.show(ctx, copies=copies, matchfn=revmatchfn)
1830 1830 lines = displayer.hunk.pop(rev).split('\n')
1831 1831 if not lines[-1]:
1832 1832 del lines[-1]
1833 1833 displayer.flush(rev)
1834 1834 edges = edgefn(type, char, lines, seen, rev, parents)
1835 1835 for type, char, lines, coldata in edges:
1836 1836 graphmod.ascii(ui, state, type, char, lines, coldata)
1837 1837 displayer.close()
1838 1838
1839 1839 def graphlog(ui, repo, *pats, **opts):
1840 1840 # Parameters are identical to log command ones
1841 1841 revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
1842 1842 revdag = graphmod.dagwalker(repo, revs)
1843 1843
1844 1844 getrenamed = None
1845 1845 if opts.get('copies'):
1846 1846 endrev = None
1847 1847 if opts.get('rev'):
1848 1848 endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
1849 1849 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
1850 1850 displayer = show_changeset(ui, repo, opts, buffered=True)
1851 1851 showparents = [ctx.node() for ctx in repo[None].parents()]
1852 1852 displaygraph(ui, revdag, displayer, showparents,
1853 1853 graphmod.asciiedges, getrenamed, filematcher)
1854 1854
1855 1855 def checkunsupportedgraphflags(pats, opts):
1856 1856 for op in ["newest_first"]:
1857 1857 if op in opts and opts[op]:
1858 1858 raise util.Abort(_("-G/--graph option is incompatible with --%s")
1859 1859 % op.replace("_", "-"))
1860 1860
1861 1861 def graphrevs(repo, nodes, opts):
1862 1862 limit = loglimit(opts)
1863 1863 nodes.reverse()
1864 1864 if limit is not None:
1865 1865 nodes = nodes[:limit]
1866 1866 return graphmod.nodes(repo, nodes)
1867 1867
1868 1868 def add(ui, repo, match, dryrun, listsubrepos, prefix, explicitonly):
1869 1869 join = lambda f: os.path.join(prefix, f)
1870 1870 bad = []
1871 1871 oldbad = match.bad
1872 1872 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
1873 1873 names = []
1874 1874 wctx = repo[None]
1875 1875 cca = None
1876 1876 abort, warn = scmutil.checkportabilityalert(ui)
1877 1877 if abort or warn:
1878 1878 cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
1879 1879 for f in repo.walk(match):
1880 1880 exact = match.exact(f)
1881 1881 if exact or not explicitonly and f not in repo.dirstate:
1882 1882 if cca:
1883 1883 cca(f)
1884 1884 names.append(f)
1885 1885 if ui.verbose or not exact:
1886 1886 ui.status(_('adding %s\n') % match.rel(join(f)))
1887 1887
1888 1888 for subpath in sorted(wctx.substate):
1889 1889 sub = wctx.sub(subpath)
1890 1890 try:
1891 1891 submatch = matchmod.narrowmatcher(subpath, match)
1892 1892 if listsubrepos:
1893 1893 bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
1894 1894 False))
1895 1895 else:
1896 1896 bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
1897 1897 True))
1898 1898 except error.LookupError:
1899 1899 ui.status(_("skipping missing subrepository: %s\n")
1900 1900 % join(subpath))
1901 1901
1902 1902 if not dryrun:
1903 1903 rejected = wctx.add(names, prefix)
1904 1904 bad.extend(f for f in rejected if f in match.files())
1905 1905 return bad
1906 1906
1907 1907 def forget(ui, repo, match, prefix, explicitonly):
1908 1908 join = lambda f: os.path.join(prefix, f)
1909 1909 bad = []
1910 1910 oldbad = match.bad
1911 1911 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
1912 1912 wctx = repo[None]
1913 1913 forgot = []
1914 1914 s = repo.status(match=match, clean=True)
1915 1915 forget = sorted(s[0] + s[1] + s[3] + s[6])
1916 1916 if explicitonly:
1917 1917 forget = [f for f in forget if match.exact(f)]
1918 1918
1919 1919 for subpath in sorted(wctx.substate):
1920 1920 sub = wctx.sub(subpath)
1921 1921 try:
1922 1922 submatch = matchmod.narrowmatcher(subpath, match)
1923 1923 subbad, subforgot = sub.forget(ui, submatch, prefix)
1924 1924 bad.extend([subpath + '/' + f for f in subbad])
1925 1925 forgot.extend([subpath + '/' + f for f in subforgot])
1926 1926 except error.LookupError:
1927 1927 ui.status(_("skipping missing subrepository: %s\n")
1928 1928 % join(subpath))
1929 1929
1930 1930 if not explicitonly:
1931 1931 for f in match.files():
1932 1932 if f not in repo.dirstate and not os.path.isdir(match.rel(join(f))):
1933 1933 if f not in forgot:
1934 1934 if os.path.exists(match.rel(join(f))):
1935 1935 ui.warn(_('not removing %s: '
1936 1936 'file is already untracked\n')
1937 1937 % match.rel(join(f)))
1938 1938 bad.append(f)
1939 1939
1940 1940 for f in forget:
1941 1941 if ui.verbose or not match.exact(f):
1942 1942 ui.status(_('removing %s\n') % match.rel(join(f)))
1943 1943
1944 1944 rejected = wctx.forget(forget, prefix)
1945 1945 bad.extend(f for f in rejected if f in match.files())
1946 1946 forgot.extend(forget)
1947 1947 return bad, forgot
1948 1948
1949 1949 def cat(ui, repo, ctx, matcher, prefix, **opts):
1950 1950 err = 1
1951 1951
1952 1952 def write(path):
1953 1953 fp = makefileobj(repo, opts.get('output'), ctx.node(),
1954 1954 pathname=os.path.join(prefix, path))
1955 1955 data = ctx[path].data()
1956 1956 if opts.get('decode'):
1957 1957 data = repo.wwritedata(path, data)
1958 1958 fp.write(data)
1959 1959 fp.close()
1960 1960
1961 1961 # Automation often uses hg cat on single files, so special case it
1962 1962 # for performance to avoid the cost of parsing the manifest.
1963 1963 if len(matcher.files()) == 1 and not matcher.anypats():
1964 1964 file = matcher.files()[0]
1965 1965 mf = repo.manifest
1966 1966 mfnode = ctx._changeset[0]
1967 1967 if mf.find(mfnode, file)[0]:
1968 1968 write(file)
1969 1969 return 0
1970 1970
1971 1971 # Don't warn about "missing" files that are really in subrepos
1972 1972 bad = matcher.bad
1973 1973
1974 1974 def badfn(path, msg):
1975 1975 for subpath in ctx.substate:
1976 1976 if path.startswith(subpath):
1977 1977 return
1978 1978 bad(path, msg)
1979 1979
1980 1980 matcher.bad = badfn
1981 1981
1982 1982 for abs in ctx.walk(matcher):
1983 1983 write(abs)
1984 1984 err = 0
1985 1985
1986 1986 matcher.bad = bad
1987 1987
1988 1988 for subpath in sorted(ctx.substate):
1989 1989 sub = ctx.sub(subpath)
1990 1990 try:
1991 1991 submatch = matchmod.narrowmatcher(subpath, matcher)
1992 1992
1993 1993 if not sub.cat(ui, submatch, os.path.join(prefix, sub._path),
1994 1994 **opts):
1995 1995 err = 0
1996 1996 except error.RepoLookupError:
1997 1997 ui.status(_("skipping missing subrepository: %s\n")
1998 1998 % os.path.join(prefix, subpath))
1999 1999
2000 2000 return err
2001 2001
2002 2002 def duplicatecopies(repo, rev, fromrev, skiprev=None):
2003 2003 '''reproduce copies from fromrev to rev in the dirstate
2004 2004
2005 2005 If skiprev is specified, it's a revision that should be used to
2006 2006 filter copy records. Any copies that occur between fromrev and
2007 2007 skiprev will not be duplicated, even if they appear in the set of
2008 2008 copies between fromrev and rev.
2009 2009 '''
2010 2010 exclude = {}
2011 2011 if skiprev is not None:
2012 2012 exclude = copies.pathcopies(repo[fromrev], repo[skiprev])
2013 2013 for dst, src in copies.pathcopies(repo[fromrev], repo[rev]).iteritems():
2014 2014 # copies.pathcopies returns backward renames, so dst might not
2015 2015 # actually be in the dirstate
2016 2016 if dst in exclude:
2017 2017 continue
2018 2018 if repo.dirstate[dst] in "nma":
2019 2019 repo.dirstate.copy(src, dst)
2020 2020
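# Worked illustration (revision numbers are hypothetical): replay copy
# records from revision 40 onto the dirstate at revision 42, but drop any
# copy already recorded between 40 and 41 (for instance by an intermediate
# commit that a rebase created itself).
def _exampleduplicatecopies(repo):
    duplicatecopies(repo, 42, 40, skiprev=41)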
2021 2021 def commit(ui, repo, commitfunc, pats, opts):
2022 2022 '''commit the specified files or all outstanding changes'''
2023 2023 date = opts.get('date')
2024 2024 if date:
2025 2025 opts['date'] = util.parsedate(date)
2026 2026 message = logmessage(ui, opts)
2027 2027
2028 2028 # extract addremove carefully -- this function can be called from a command
2029 2029 # that doesn't support addremove
2030 2030 if opts.get('addremove'):
2031 2031 scmutil.addremove(repo, pats, opts)
2032 2032
2033 2033 return commitfunc(ui, repo, message,
2034 2034 scmutil.match(repo[None], pats, opts), opts)
2035 2035
2036 2036 def amend(ui, repo, commitfunc, old, extra, pats, opts):
2037 2037 ui.note(_('amending changeset %s\n') % old)
2038 2038 base = old.p1()
2039 2039
2040 2040 wlock = lock = newid = None
2041 2041 try:
2042 2042 wlock = repo.wlock()
2043 2043 lock = repo.lock()
2044 2044 tr = repo.transaction('amend')
2045 2045 try:
2046 2046 # See if we got a message from -m or -l, if not, open the editor
2047 2047 # with the message of the changeset to amend
2048 2048 message = logmessage(ui, opts)
2049 2049 # ensure logfile does not conflict with later enforcement of the
2050 2050 # message. potential logfile content has been processed by
2051 2051 # `logmessage` anyway.
2052 2052 opts.pop('logfile')
2053 2053 # First, do a regular commit to record all changes in the working
2054 2054 # directory (if there are any)
2055 2055 ui.callhooks = False
2056 2056 currentbookmark = repo._bookmarkcurrent
2057 2057 try:
2058 2058 repo._bookmarkcurrent = None
2059 2059 opts['message'] = 'temporary amend commit for %s' % old
2060 2060 node = commit(ui, repo, commitfunc, pats, opts)
2061 2061 finally:
2062 2062 repo._bookmarkcurrent = currentbookmark
2063 2063 ui.callhooks = True
2064 2064 ctx = repo[node]
2065 2065
2066 2066 # Participating changesets:
2067 2067 #
2068 2068 # node/ctx o - new (intermediate) commit that contains changes
2069 2069 # | from working dir to go into amending commit
2070 2070 # | (or a workingctx if there were no changes)
2071 2071 # |
2072 2072 # old o - changeset to amend
2073 2073 # |
2074 2074 # base o - parent of amending changeset
2075 2075
2076 2076 # Update extra dict from amended commit (e.g. to preserve graft
2077 2077 # source)
2078 2078 extra.update(old.extra())
2079 2079
2080 2080 # Also update it from the intermediate commit or from the wctx
2081 2081 extra.update(ctx.extra())
2082 2082
2083 2083 if len(old.parents()) > 1:
2084 2084 # ctx.files() isn't reliable for merges, so fall back to the
2085 2085 # slower repo.status() method
2086 2086 files = set([fn for st in repo.status(base, old)[:3]
2087 2087 for fn in st])
2088 2088 else:
2089 2089 files = set(old.files())
2090 2090
2091 2091 # Second, we use either the commit we just did, or if there were no
2092 2092 # changes the parent of the working directory as the version of the
2093 2093 # files in the final amend commit
2094 2094 if node:
2095 2095 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
2096 2096
2097 2097 user = ctx.user()
2098 2098 date = ctx.date()
2099 2099 # Recompute copies (avoid recording a -> b -> a)
2100 2100 copied = copies.pathcopies(base, ctx)
2101 2101
2102 2102 # Prune files which were reverted by the updates: if old
2103 2103 # introduced file X and our intermediate commit, node,
2104 2104 # renamed that file, then those two files are the same and
2105 2105 # we can discard X from our list of files. Likewise if X
2106 2106 # was deleted, it's no longer relevant
2107 2107 files.update(ctx.files())
2108 2108
2109 2109 def samefile(f):
2110 2110 if f in ctx.manifest():
2111 2111 a = ctx.filectx(f)
2112 2112 if f in base.manifest():
2113 2113 b = base.filectx(f)
2114 2114 return (not a.cmp(b)
2115 2115 and a.flags() == b.flags())
2116 2116 else:
2117 2117 return False
2118 2118 else:
2119 2119 return f not in base.manifest()
2120 2120 files = [f for f in files if not samefile(f)]
2121 2121
2122 2122 def filectxfn(repo, ctx_, path):
2123 2123 try:
2124 2124 fctx = ctx[path]
2125 2125 flags = fctx.flags()
2126 2126 mctx = context.memfilectx(repo,
2127 2127 fctx.path(), fctx.data(),
2128 2128 islink='l' in flags,
2129 2129 isexec='x' in flags,
2130 2130 copied=copied.get(path))
2131 2131 return mctx
2132 2132 except KeyError:
2133 raise IOError
2133 return None
2134 2134 else:
2135 2135 ui.note(_('copying changeset %s to %s\n') % (old, base))
2136 2136
2137 2137 # Use version of files as in the old cset
2138 2138 def filectxfn(repo, ctx_, path):
2139 2139 try:
2140 2140 return old.filectx(path)
2141 2141 except KeyError:
2142 raise IOError
2142 return None
2143 2143
2144 2144 user = opts.get('user') or old.user()
2145 2145 date = opts.get('date') or old.date()
2146 2146 editform = mergeeditform(old, 'commit.amend')
2147 2147 editor = getcommiteditor(editform=editform, **opts)
2148 2148 if not message:
2149 2149 editor = getcommiteditor(edit=True, editform=editform)
2150 2150 message = old.description()
2151 2151
2152 2152 pureextra = extra.copy()
2153 2153 extra['amend_source'] = old.hex()
2154 2154
2155 2155 new = context.memctx(repo,
2156 2156 parents=[base.node(), old.p2().node()],
2157 2157 text=message,
2158 2158 files=files,
2159 2159 filectxfn=filectxfn,
2160 2160 user=user,
2161 2161 date=date,
2162 2162 extra=extra,
2163 2163 editor=editor)
2164 2164
2165 2165 newdesc = changelog.stripdesc(new.description())
2166 2166 if ((not node)
2167 2167 and newdesc == old.description()
2168 2168 and user == old.user()
2169 2169 and date == old.date()
2170 2170 and pureextra == old.extra()):
2171 2171 # nothing changed. continuing here would create a new node
2172 2172 # anyway because of the amend_source noise.
2173 2173 #
2174 2174 # This is not what we expect from amend.
2175 2175 return old.node()
2176 2176
2177 2177 ph = repo.ui.config('phases', 'new-commit', phases.draft)
2178 2178 try:
2179 2179 if opts.get('secret'):
2180 2180 commitphase = 'secret'
2181 2181 else:
2182 2182 commitphase = old.phase()
2183 2183 repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
2184 2184 newid = repo.commitctx(new)
2185 2185 finally:
2186 2186 repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
2187 2187 if newid != old.node():
2188 2188 # Reroute the working copy parent to the new changeset
2189 2189 repo.setparents(newid, nullid)
2190 2190
2191 2191 # Move bookmarks from old parent to amend commit
2192 2192 bms = repo.nodebookmarks(old.node())
2193 2193 if bms:
2194 2194 marks = repo._bookmarks
2195 2195 for bm in bms:
2196 2196 marks[bm] = newid
2197 2197 marks.write()
2198 2198 # commit the whole amend process
2199 2199 if obsolete._enabled and newid != old.node():
2200 2200 # mark the new changeset as successor of the rewritten one
2201 2201 new = repo[newid]
2202 2202 obs = [(old, (new,))]
2203 2203 if node:
2204 2204 obs.append((ctx, ()))
2205 2205
2206 2206 obsolete.createmarkers(repo, obs)
2207 2207 tr.close()
2208 2208 finally:
2209 2209 tr.release()
2210 2210 if (not obsolete._enabled) and newid != old.node():
2211 2211 # Strip the intermediate commit (if there was one) and the amended
2212 2212 # commit
2213 2213 if node:
2214 2214 ui.note(_('stripping intermediate changeset %s\n') % ctx)
2215 2215 ui.note(_('stripping amended changeset %s\n') % old)
2216 2216 repair.strip(ui, repo, old.node(), topic='amend-backup')
2217 2217 finally:
2218 2218 if newid is None:
2219 2219 repo.dirstate.invalidate()
2220 2220 lockmod.release(lock, wlock)
2221 2221 return newid
2222 2222
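# A minimal sketch (not part of this change) of the convention the two
# filectxfn helpers above now follow: return a memfilectx for files
# present in the source context and None for missing ones, instead of
# overloading IOError to mean "missing". 'srcctx' is a hypothetical
# source changectx and 'copied' an optional {dest: source} copy map.
def _makefilectxfn(srcctx, copied=None):
    from mercurial import context  # already imported in this module
    copies = copied or {}
    def filectxfn(repo, memctx, path):
        if path not in srcctx:
            return None                 # missing: record file as removed
        fctx = srcctx[path]
        flags = fctx.flags()
        return context.memfilectx(repo, fctx.path(), fctx.data(),
                                  islink='l' in flags,
                                  isexec='x' in flags,
                                  copied=copies.get(path))
    return filectxfn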
2223 2223 def commiteditor(repo, ctx, subs, editform=''):
2224 2224 if ctx.description():
2225 2225 return ctx.description()
2226 2226 return commitforceeditor(repo, ctx, subs, editform=editform)
2227 2227
2228 2228 def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
2229 2229 editform=''):
2230 2230 if not extramsg:
2231 2231 extramsg = _("Leave message empty to abort commit.")
2232 2232
2233 2233 forms = [e for e in editform.split('.') if e]
2234 2234 forms.insert(0, 'changeset')
2235 2235 while forms:
2236 2236 tmpl = repo.ui.config('committemplate', '.'.join(forms))
2237 2237 if tmpl:
2238 2238 committext = buildcommittemplate(repo, ctx, subs, extramsg, tmpl)
2239 2239 break
2240 2240 forms.pop()
2241 2241 else:
2242 2242 committext = buildcommittext(repo, ctx, subs, extramsg)
2243 2243
2244 2244 # run editor in the repository root
2245 2245 olddir = os.getcwd()
2246 2246 os.chdir(repo.root)
2247 2247 text = repo.ui.edit(committext, ctx.user(), ctx.extra(), editform=editform)
2248 2248 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
2249 2249 os.chdir(olddir)
2250 2250
2251 2251 if finishdesc:
2252 2252 text = finishdesc(text)
2253 2253 if not text.strip():
2254 2254 raise util.Abort(_("empty commit message"))
2255 2255
2256 2256 return text
2257 2257
2258 2258 def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
2259 2259 ui = repo.ui
2260 2260 tmpl, mapfile = gettemplate(ui, tmpl, None)
2261 2261
2262 2262 try:
2263 2263 t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)
2264 2264 except SyntaxError, inst:
2265 2265 raise util.Abort(inst.args[0])
2266 2266
2267 2267 for k, v in repo.ui.configitems('committemplate'):
2268 2268 if k != 'changeset':
2269 2269 t.t.cache[k] = v
2270 2270
2271 2271 if not extramsg:
2272 2272 extramsg = '' # ensure that extramsg is string
2273 2273
2274 2274 ui.pushbuffer()
2275 2275 t.show(ctx, extramsg=extramsg)
2276 2276 return ui.popbuffer()
2277 2277
2278 2278 def buildcommittext(repo, ctx, subs, extramsg):
2279 2279 edittext = []
2280 2280 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
2281 2281 if ctx.description():
2282 2282 edittext.append(ctx.description())
2283 2283 edittext.append("")
2284 2284 edittext.append("") # Empty line between message and comments.
2285 2285 edittext.append(_("HG: Enter commit message."
2286 2286 " Lines beginning with 'HG:' are removed."))
2287 2287 edittext.append("HG: %s" % extramsg)
2288 2288 edittext.append("HG: --")
2289 2289 edittext.append(_("HG: user: %s") % ctx.user())
2290 2290 if ctx.p2():
2291 2291 edittext.append(_("HG: branch merge"))
2292 2292 if ctx.branch():
2293 2293 edittext.append(_("HG: branch '%s'") % ctx.branch())
2294 2294 if bookmarks.iscurrent(repo):
2295 2295 edittext.append(_("HG: bookmark '%s'") % repo._bookmarkcurrent)
2296 2296 edittext.extend([_("HG: subrepo %s") % s for s in subs])
2297 2297 edittext.extend([_("HG: added %s") % f for f in added])
2298 2298 edittext.extend([_("HG: changed %s") % f for f in modified])
2299 2299 edittext.extend([_("HG: removed %s") % f for f in removed])
2300 2300 if not added and not modified and not removed:
2301 2301 edittext.append(_("HG: no files changed"))
2302 2302 edittext.append("")
2303 2303
2304 2304 return "\n".join(edittext)
2305 2305
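# Illustrative only (hypothetical user and file name): for a commit that
# changes one file on branch 'default', the edit buffer built above comes
# out roughly as:
#
#   <existing description, if any>
#   <blank>
#   <blank>
#   HG: Enter commit message. Lines beginning with 'HG:' are removed.
#   HG: Leave message empty to abort commit.
#   HG: --
#   HG: user: alice
#   HG: branch 'default'
#   HG: changed foo.py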
2306 2306 def commitstatus(repo, node, branch, bheads=None, opts={}):
2307 2307 ctx = repo[node]
2308 2308 parents = ctx.parents()
2309 2309
2310 2310 if (not opts.get('amend') and bheads and node not in bheads and not
2311 2311 [x for x in parents if x.node() in bheads and x.branch() == branch]):
2312 2312 repo.ui.status(_('created new head\n'))
2313 2313 # The message is not printed for initial roots. For the other
2314 2314 # changesets, it is printed in the following situations:
2315 2315 #
2316 2316 # Par column: for the 2 parents with ...
2317 2317 # N: null or no parent
2318 2318 # B: parent is on another named branch
2319 2319 # C: parent is a regular non head changeset
2320 2320 # H: parent was a branch head of the current branch
2321 2321 # Msg column: whether we print "created new head" message
2322 2322 # In the following, it is assumed that there already exists some
2323 2323 # initial branch heads of the current branch, otherwise nothing is
2324 2324 # printed anyway.
2325 2325 #
2326 2326 # Par Msg Comment
2327 2327 # N N y additional topo root
2328 2328 #
2329 2329 # B N y additional branch root
2330 2330 # C N y additional topo head
2331 2331 # H N n usual case
2332 2332 #
2333 2333 # B B y weird additional branch root
2334 2334 # C B y branch merge
2335 2335 # H B n merge with named branch
2336 2336 #
2337 2337 # C C y additional head from merge
2338 2338 # C H n merge with a head
2339 2339 #
2340 2340 # H H n head merge: head count decreases
2341 2341
2342 2342 if not opts.get('close_branch'):
2343 2343 for r in parents:
2344 2344 if r.closesbranch() and r.branch() == branch:
2345 2345 repo.ui.status(_('reopening closed branch head %d\n') % r)
2346 2346
2347 2347 if repo.ui.debugflag:
2348 2348 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
2349 2349 elif repo.ui.verbose:
2350 2350 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2351 2351
2352 2352 def revert(ui, repo, ctx, parents, *pats, **opts):
2353 2353 parent, p2 = parents
2354 2354 node = ctx.node()
2355 2355
2356 2356 mf = ctx.manifest()
2357 2357 if node == p2:
2358 2358 parent = p2
2359 2359 if node == parent:
2360 2360 pmf = mf
2361 2361 else:
2362 2362 pmf = None
2363 2363
2364 2364 # need all matching names in dirstate and manifest of target rev,
2365 2365 # so have to walk both. do not print errors if files exist in one
2366 2366 # but not the other.
2367 2367
2368 2368 # `names` is a mapping for all elements in working copy and target revision
2369 2369 # The mapping is in the form:
2370 2370 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2371 2371 names = {}
2372 2372
2373 2373 wlock = repo.wlock()
2374 2374 try:
2375 2375 ## filling of the `names` mapping
2376 2376 # walk dirstate to fill `names`
2377 2377
2378 2378 m = scmutil.match(repo[None], pats, opts)
2379 2379 m.bad = lambda x, y: False
2380 2380 for abs in repo.walk(m):
2381 2381 names[abs] = m.rel(abs), m.exact(abs)
2382 2382
2383 2383 # walk target manifest to fill `names`
2384 2384
2385 2385 def badfn(path, msg):
2386 2386 if path in names:
2387 2387 return
2388 2388 if path in ctx.substate:
2389 2389 return
2390 2390 path_ = path + '/'
2391 2391 for f in names:
2392 2392 if f.startswith(path_):
2393 2393 return
2394 2394 ui.warn("%s: %s\n" % (m.rel(path), msg))
2395 2395
2396 2396 m = scmutil.match(ctx, pats, opts)
2397 2397 m.bad = badfn
2398 2398 for abs in ctx.walk(m):
2399 2399 if abs not in names:
2400 2400 names[abs] = m.rel(abs), m.exact(abs)
2401 2401
2402 2402 # get the list of subrepos that must be reverted
2403 2403 targetsubs = sorted(s for s in ctx.substate if m(s))
2404 2404
2405 2405 # Find the status of all files in `names`.
2406 2406 m = scmutil.matchfiles(repo, names)
2407 2407
2408 2408 changes = repo.status(node1=node, match=m,
2409 2409 unknown=True, ignored=True, clean=True)
2410 2410 modified = set(changes[0])
2411 2411 added = set(changes[1])
2412 2412 removed = set(changes[2])
2413 2413 _deleted = set(changes[3])
2414 2414 unknown = set(changes[4])
2415 2415 unknown.update(changes[5])
2416 2416 clean = set(changes[6])
2417 2417
2418 2418 # split between files known in target manifest and the others
2419 2419 smf = set(mf)
2420 2420
2421 2421 # determine the exact nature of the deleted files
2422 2422 _deletedadded = _deleted - smf
2423 2423 _deletedmodified = _deleted - _deletedadded
2424 2424 added |= _deletedadded
2425 2425 modified |= _deletedmodified
2426 2426
2427 2427 # We need to account for the state of the file in the dirstate.
2428 2428 #
2429 2429 # Even when we revert against something other than the parent, this
2430 2430 # will slightly alter the behavior of revert (doing a backup or not,
2431 2431 # deleting or just forgetting, etc)
2432 2432 if parent == node:
2433 2433 dsmodified = modified
2434 2434 dsadded = added
2435 2435 dsremoved = removed
2436 2436 modified, added, removed = set(), set(), set()
2437 2437 else:
2438 2438 changes = repo.status(node1=parent, match=m)
2439 2439 dsmodified = set(changes[0])
2440 2440 dsadded = set(changes[1])
2441 2441 dsremoved = set(changes[2])
2442 2442
2443 2443 # only take into account removes between wc and target
2444 2444 clean |= dsremoved - removed
2445 2445 dsremoved &= removed
2446 2446 # distinguish between dirstate removes and others
2447 2447 removed -= dsremoved
2448 2448
2449 2449 # tell newly modified files apart.
2450 2450 dsmodified &= modified
2451 2451 dsmodified |= modified & dsadded # dirstate-added files may need backup
2452 2452 modified -= dsmodified
2453 2453
2454 2454 # There are three categories of added files
2455 2455 #
2456 2456 # 1. addition that just happened in the dirstate
2457 2457 # (should be forgotten)
2458 2458 # 2. file is added since target revision and has local changes
2459 2459 # (should be backed up and removed)
2460 2460 # 3. file is added since target revision and is clean
2461 2461 # (should be removed)
2462 2462 #
2463 2463 # However we do not need to split them yet. The current revert code
2464 2464 # will automatically recognize (1) when performing the operation, and
2465 2465 # the backup system is currently unable to handle (2).
2466 2466 #
2467 2467 # So we just put them all in the same group.
2468 2468 dsadded = added
2469 2469
2470 2470 # in case of merge, files that are actually added can be reported as
2471 2471 # modified, we need to post process the result
2472 2472 if p2 != nullid:
2473 2473 if pmf is None:
2474 2474 # only need parent manifest in the merge case,
2475 2475 # so do not read it by default
2476 2476 pmf = repo[parent].manifest()
2477 2477 mergeadd = dsmodified - set(pmf)
2478 2478 dsadded |= mergeadd
2479 2479 dsmodified -= mergeadd
2480 2480
2481 2481 # if f is a rename, update `names` to also revert the source
2482 2482 cwd = repo.getcwd()
2483 2483 for f in dsadded:
2484 2484 src = repo.dirstate.copied(f)
2485 2485 # XXX should we check for rename down to target node?
2486 2486 if src and src not in names and repo.dirstate[src] == 'r':
2487 2487 dsremoved.add(src)
2488 2488 names[src] = (repo.pathto(src, cwd), True)
2489 2489
2490 2490 ## computation of the actions to perform on `names` content.
2491 2491
2492 2492 def removeforget(abs):
2493 2493 if repo.dirstate[abs] == 'a':
2494 2494 return _('forgetting %s\n')
2495 2495 return _('removing %s\n')
2496 2496
2497 2497 # actions to be actually performed by revert
2498 2498 # (<list of files>, <message>) tuple
2499 2499 actions = {'revert': ([], _('reverting %s\n')),
2500 2500 'add': ([], _('adding %s\n')),
2501 2501 'remove': ([], removeforget),
2502 2502 'undelete': ([], _('undeleting %s\n')),
2503 2503 'noop': (None, _('no changes needed to %s\n')),
2504 2504 'unknown': (None, _('file not managed: %s\n')),
2505 2505 }
2506 2506
2507 2507
2508 2508 # should we do a backup?
2509 2509 backup = not opts.get('no_backup')
2510 2510 discard = False
2511 2511
2512 2512 disptable = (
2513 2513 # dispatch table:
2514 2514 # file state
2515 2515 # action
2516 2516 # make backup
2517 2517 (modified, actions['revert'], discard),
2518 2518 (dsmodified, actions['revert'], backup),
2519 2519 (dsadded, actions['remove'], backup),
2520 2520 (removed, actions['add'], backup),
2521 2521 (dsremoved, actions['undelete'], backup),
2522 2522 (clean, actions['noop'], discard),
2523 2523 (unknown, actions['unknown'], discard),
2524 2524 )
2525 2525
2526 2526 for abs, (rel, exact) in sorted(names.items()):
2527 2527 # target file to be touched on disk (relative to cwd)
2528 2528 target = repo.wjoin(abs)
2529 2529 # search the entry in the dispatch table.
2530 2530 # if the file is in any of these sets, it was touched in the working
2531 2531 # directory parent and we are sure it needs to be reverted.
2532 2532 for table, (xlist, msg), dobackup in disptable:
2533 2533 if abs not in table:
2534 2534 continue
2535 2535 if xlist is not None:
2536 2536 xlist.append(abs)
2537 2537 if (dobackup and os.path.lexists(target) and
2538 2538 abs in ctx and repo[None][abs].cmp(ctx[abs])):
2539 2539 bakname = "%s.orig" % rel
2540 2540 ui.note(_('saving current version of %s as %s\n') %
2541 2541 (rel, bakname))
2542 2542 if not opts.get('dry_run'):
2543 2543 util.rename(target, bakname)
2544 2544 if ui.verbose or not exact:
2545 2545 if not isinstance(msg, basestring):
2546 2546 msg = msg(abs)
2547 2547 ui.status(msg % rel)
2548 2548 elif exact:
2549 2549 ui.warn(msg % rel)
2550 2550 break
2551 2551
2552 2552
2553 2553 if not opts.get('dry_run'):
2554 2554 _performrevert(repo, parents, ctx, actions)
2555 2555
2556 2556 if targetsubs:
2557 2557 # Revert the subrepos on the revert list
2558 2558 for sub in targetsubs:
2559 2559 ctx.sub(sub).revert(ui, ctx.substate[sub], *pats, **opts)
2560 2560 finally:
2561 2561 wlock.release()
2562 2562
2563 2563 def _performrevert(repo, parents, ctx, actions):
2564 2564 """function that actually perform all the actions computed for revert
2565 2565
2566 2566 This is an independent function to let extensions plug in and react to
2567 2567 the imminent revert.
2568 2568
2569 2569 Make sure you have the working directory locked when calling this function.
2570 2570 """
2571 2571 parent, p2 = parents
2572 2572 node = ctx.node()
2573 2573 def checkout(f):
2574 2574 fc = ctx[f]
2575 2575 repo.wwrite(f, fc.data(), fc.flags())
2576 2576
2577 2577 audit_path = pathutil.pathauditor(repo.root)
2578 2578 for f in actions['remove'][0]:
2579 2579 if repo.dirstate[f] == 'a':
2580 2580 repo.dirstate.drop(f)
2581 2581 continue
2582 2582 audit_path(f)
2583 2583 try:
2584 2584 util.unlinkpath(repo.wjoin(f))
2585 2585 except OSError:
2586 2586 pass
2587 2587 repo.dirstate.remove(f)
2588 2588
2589 2589 normal = None
2590 2590 if node == parent:
2591 2591 # We're reverting to our parent. If possible, we'd like status
2592 2592 # to report the file as clean. We have to use normallookup for
2593 2593 # merges to avoid losing information about merged/dirty files.
2594 2594 if p2 != nullid:
2595 2595 normal = repo.dirstate.normallookup
2596 2596 else:
2597 2597 normal = repo.dirstate.normal
2598 2598 for f in actions['revert'][0]:
2599 2599 checkout(f)
2600 2600 if normal:
2601 2601 normal(f)
2602 2602
2603 2603 for f in actions['add'][0]:
2604 2604 checkout(f)
2605 2605 repo.dirstate.add(f)
2606 2606
2607 2607 normal = repo.dirstate.normallookup
2608 2608 if node == parent and p2 == nullid:
2609 2609 normal = repo.dirstate.normal
2610 2610 for f in actions['undelete'][0]:
2611 2611 checkout(f)
2612 2612 normal(f)
2613 2613
2614 2614 copied = copies.pathcopies(repo[parent], ctx)
2615 2615
2616 2616 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
2617 2617 if f in copied:
2618 2618 repo.dirstate.copy(copied[f], f)
2619 2619
2620 2620 def command(table):
2621 2621 """Returns a function object to be used as a decorator for making commands.
2622 2622
2623 2623 This function receives a command table as its argument. The table should
2624 2624 be a dict.
2625 2625
2626 2626 The returned function can be used as a decorator for adding commands
2627 2627 to that command table. This function accepts multiple arguments to define
2628 2628 a command.
2629 2629
2630 2630 The first argument is the command name.
2631 2631
2632 2632 The options argument is an iterable of tuples defining command arguments.
2633 2633 See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.
2634 2634
2635 2635 The synopsis argument defines a short, one line summary of how to use the
2636 2636 command. This shows up in the help output.
2637 2637
2638 2638 The norepo argument defines whether the command does not require a
2639 2639 local repository. Most commands operate against a repository, thus the
2640 2640 default is False.
2641 2641
2642 2642 The optionalrepo argument defines whether the command optionally requires
2643 2643 a local repository.
2644 2644
2645 2645 The inferrepo argument defines whether to try to find a repository from the
2646 2646 command line arguments. If True, arguments will be examined for potential
2647 2647 repository locations. See ``findrepo()``. If a repository is found, it
2648 2648 will be used.
2649 2649 """
2650 2650 def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
2651 2651 inferrepo=False):
2652 2652 def decorator(func):
2653 2653 if synopsis:
2654 2654 table[name] = func, list(options), synopsis
2655 2655 else:
2656 2656 table[name] = func, list(options)
2657 2657
2658 2658 if norepo:
2659 2659 # Avoid import cycle.
2660 2660 import commands
2661 2661 commands.norepo += ' %s' % ' '.join(parsealiases(name))
2662 2662
2663 2663 if optionalrepo:
2664 2664 import commands
2665 2665 commands.optionalrepo += ' %s' % ' '.join(parsealiases(name))
2666 2666
2667 2667 if inferrepo:
2668 2668 import commands
2669 2669 commands.inferrepo += ' %s' % ' '.join(parsealiases(name))
2670 2670
2671 2671 return func
2672 2672 return decorator
2673 2673
2674 2674 return cmd
2675 2675
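# Usage sketch for the factory above (hypothetical 'hello' command; real
# extensions do this at the top of their own module):
def _examplecmdtable():
    cmdtable = {}
    example = command(cmdtable)

    @example('hello', [('g', 'greeting', 'Hello', _('text to emit'))],
             _('hg hello [-g TEXT]'))
    def hello(ui, repo, *args, **opts):
        ui.write("%s, world!\n" % opts['greeting'])

    return cmdtable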
2676 2676 # a list of (ui, repo, otherpeer, opts, missing) functions called by
2677 2677 # commands.outgoing. "missing" is "missing" of the result of
2678 2678 # "findcommonoutgoing()"
2679 2679 outgoinghooks = util.hooks()
2680 2680
2681 2681 # a list of (ui, repo) functions called by commands.summary
2682 2682 summaryhooks = util.hooks()
2683 2683
2684 2684 # a list of (ui, repo, opts, changes) functions called by commands.summary.
2685 2685 #
2686 2686 # functions should return tuple of booleans below, if 'changes' is None:
2687 2687 # (whether-incomings-are-needed, whether-outgoings-are-needed)
2688 2688 #
2689 2689 # otherwise, 'changes' is a tuple of tuples below:
2690 2690 # - (sourceurl, sourcebranch, sourcepeer, incoming)
2691 2691 # - (desturl, destbranch, destpeer, outgoing)
2692 2692 summaryremotehooks = util.hooks()
2693 2693
2694 2694 # A list of state files kept by multistep operations like graft.
2695 2695 # Since graft cannot be aborted, it is considered 'clearable' by update.
2696 2696 # note: bisect is intentionally excluded
2697 2697 # (state file, clearable, allowcommit, error, hint)
2698 2698 unfinishedstates = [
2699 2699 ('graftstate', True, False, _('graft in progress'),
2700 2700 _("use 'hg graft --continue' or 'hg update' to abort")),
2701 2701 ('updatestate', True, False, _('last update was interrupted'),
2702 2702 _("use 'hg update' to get a consistent checkout"))
2703 2703 ]
2704 2704
2705 2705 def checkunfinished(repo, commit=False):
2706 2706 '''Look for an unfinished multistep operation, like graft, and abort
2707 2707 if found. It's probably good to check this right before
2708 2708 bailifchanged().
2709 2709 '''
2710 2710 for f, clearable, allowcommit, msg, hint in unfinishedstates:
2711 2711 if commit and allowcommit:
2712 2712 continue
2713 2713 if repo.vfs.exists(f):
2714 2714 raise util.Abort(msg, hint=hint)
2715 2715
2716 2716 def clearunfinished(repo):
2717 2717 '''Check for unfinished operations (as above), and clear the ones
2718 2718 that are clearable.
2719 2719 '''
2720 2720 for f, clearable, allowcommit, msg, hint in unfinishedstates:
2721 2721 if not clearable and repo.vfs.exists(f):
2722 2722 raise util.Abort(msg, hint=hint)
2723 2723 for f, clearable, allowcommit, msg, hint in unfinishedstates:
2724 2724 if clearable and repo.vfs.exists(f):
2725 2725 util.unlink(repo.join(f))
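# Call-pattern sketch (hypothetical caller): commands that must not run
# in the middle of a multistep operation check first, while 'hg update'
# instead calls clearunfinished() to clear the clearable state files:
def _guardedoperation(repo):
    checkunfinished(repo)  # aborts with a hint if e.g. graftstate exists
    bailifchanged(repo)    # the usual companion check, defined earlier
    # ... perform the operation ...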
@@ -1,1694 +1,1699 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, short, hex, bin
9 9 from i18n import _
10 10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 11 import match as matchmod
12 12 import os, errno, stat
13 13 import obsolete as obsmod
14 14 import repoview
15 15 import fileset
16 16 import revlog
17 17
18 18 propertycache = util.propertycache
19 19
20 20 class basectx(object):
21 21 """A basectx object represents the common logic for its children:
22 22 changectx: read-only context that is already present in the repo,
23 23 workingctx: a context that represents the working directory and can
24 24 be committed,
25 25 memctx: a context that represents changes in-memory and can also
26 26 be committed."""
27 27 def __new__(cls, repo, changeid='', *args, **kwargs):
28 28 if isinstance(changeid, basectx):
29 29 return changeid
30 30
31 31 o = super(basectx, cls).__new__(cls)
32 32
33 33 o._repo = repo
34 34 o._rev = nullrev
35 35 o._node = nullid
36 36
37 37 return o
38 38
39 39 def __str__(self):
40 40 return short(self.node())
41 41
42 42 def __int__(self):
43 43 return self.rev()
44 44
45 45 def __repr__(self):
46 46 return "<%s %s>" % (type(self).__name__, str(self))
47 47
48 48 def __eq__(self, other):
49 49 try:
50 50 return type(self) == type(other) and self._rev == other._rev
51 51 except AttributeError:
52 52 return False
53 53
54 54 def __ne__(self, other):
55 55 return not (self == other)
56 56
57 57 def __contains__(self, key):
58 58 return key in self._manifest
59 59
60 60 def __getitem__(self, key):
61 61 return self.filectx(key)
62 62
63 63 def __iter__(self):
64 64 for f in sorted(self._manifest):
65 65 yield f
66 66
67 67 def _manifestmatches(self, match, s):
68 68 """generate a new manifest filtered by the match argument
69 69
70 70 This method is for internal use only and mainly exists to provide an
71 71 object oriented way for other contexts to customize the manifest
72 72 generation.
73 73 """
74 74 if match.always():
75 75 return self.manifest().copy()
76 76
77 77 files = match.files()
78 78 if (match.matchfn == match.exact or
79 79 (not match.anypats() and util.all(fn in self for fn in files))):
80 80 return self.manifest().intersectfiles(files)
81 81
82 82 mf = self.manifest().copy()
83 83 for fn in mf.keys():
84 84 if not match(fn):
85 85 del mf[fn]
86 86 return mf
87 87
88 88 def _matchstatus(self, other, s, match, listignored, listclean,
89 89 listunknown):
90 90 """return match.always if match is none
91 91
92 92 This internal method provides a way for child objects to override the
93 93 match operator.
94 94 """
95 95 return match or matchmod.always(self._repo.root, self._repo.getcwd())
96 96
97 97 def _prestatus(self, other, s, match, listignored, listclean, listunknown):
98 98 """provide a hook to allow child objects to preprocess status results
99 99
100 100 For example, this allows other contexts, such as workingctx, to query
101 101 the dirstate before comparing the manifests.
102 102 """
103 103 # load earliest manifest first for caching reasons
104 104 if self.rev() < other.rev():
105 105 self.manifest()
106 106 return s
107 107
108 108 def _poststatus(self, other, s, match, listignored, listclean, listunknown):
109 109 """provide a hook to allow child objects to postprocess status results
110 110
111 111 For example, this allows other contexts, such as workingctx, to filter
113 113 suspect symlinks in the case of FAT32 and NTFS filesystems.
113 113 """
114 114 return s
115 115
116 116 def _buildstatus(self, other, s, match, listignored, listclean,
117 117 listunknown):
118 118 """build a status with respect to another context"""
119 119 mf1 = other._manifestmatches(match, s)
120 120 mf2 = self._manifestmatches(match, s)
121 121
122 122 modified, added, clean = [], [], []
123 123 deleted, unknown, ignored = s[3], s[4], s[5]
124 124 withflags = mf1.withflags() | mf2.withflags()
125 125 for fn, mf2node in mf2.iteritems():
126 126 if fn in mf1:
127 127 if (fn not in deleted and
128 128 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
129 129 (mf1[fn] != mf2node and
130 130 (mf2node or self[fn].cmp(other[fn]))))):
131 131 modified.append(fn)
132 132 elif listclean:
133 133 clean.append(fn)
134 134 del mf1[fn]
135 135 elif fn not in deleted:
136 136 added.append(fn)
137 137 removed = mf1.keys()
138 138 if removed:
139 139 # need to filter files if they are already reported as removed
140 140 unknown = [fn for fn in unknown if fn not in mf1]
141 141 ignored = [fn for fn in ignored if fn not in mf1]
142 142
143 143 return [modified, added, removed, deleted, unknown, ignored, clean]
144 144
145 145 @propertycache
146 146 def substate(self):
147 147 return subrepo.state(self, self._repo.ui)
148 148
149 149 def subrev(self, subpath):
150 150 return self.substate[subpath][1]
151 151
152 152 def rev(self):
153 153 return self._rev
154 154 def node(self):
155 155 return self._node
156 156 def hex(self):
157 157 return hex(self.node())
158 158 def manifest(self):
159 159 return self._manifest
160 160 def phasestr(self):
161 161 return phases.phasenames[self.phase()]
162 162 def mutable(self):
163 163 return self.phase() > phases.public
164 164
165 165 def getfileset(self, expr):
166 166 return fileset.getfileset(self, expr)
167 167
168 168 def obsolete(self):
169 169 """True if the changeset is obsolete"""
170 170 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
171 171
172 172 def extinct(self):
173 173 """True if the changeset is extinct"""
174 174 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
175 175
176 176 def unstable(self):
177 177 """True if the changeset is not obsolete but it's ancestor are"""
178 178 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
179 179
180 180 def bumped(self):
181 181 """True if the changeset try to be a successor of a public changeset
182 182
183 183 Only non-public and non-obsolete changesets may be bumped.
184 184 """
185 185 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
186 186
187 187 def divergent(self):
188 188 """Is a successors of a changeset with multiple possible successors set
189 189
190 190 Only non-public and non-obsolete changesets may be divergent.
191 191 """
192 192 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
193 193
194 194 def troubled(self):
195 195 """True if the changeset is either unstable, bumped or divergent"""
196 196 return self.unstable() or self.bumped() or self.divergent()
197 197
198 198 def troubles(self):
199 199 """return the list of troubles affecting this changesets.
200 200
201 201 Troubles are returned as strings. possible values are:
202 202 - unstable,
203 203 - bumped,
204 204 - divergent.
205 205 """
206 206 troubles = []
207 207 if self.unstable():
208 208 troubles.append('unstable')
209 209 if self.bumped():
210 210 troubles.append('bumped')
211 211 if self.divergent():
212 212 troubles.append('divergent')
213 213 return troubles
214 214
215 215 def parents(self):
216 216 """return contexts for each parent changeset"""
217 217 return self._parents
218 218
219 219 def p1(self):
220 220 return self._parents[0]
221 221
222 222 def p2(self):
223 223 if len(self._parents) == 2:
224 224 return self._parents[1]
225 225 return changectx(self._repo, -1)
226 226
227 227 def _fileinfo(self, path):
228 228 if '_manifest' in self.__dict__:
229 229 try:
230 230 return self._manifest[path], self._manifest.flags(path)
231 231 except KeyError:
232 232 raise error.ManifestLookupError(self._node, path,
233 233 _('not found in manifest'))
234 234 if '_manifestdelta' in self.__dict__ or path in self.files():
235 235 if path in self._manifestdelta:
236 236 return (self._manifestdelta[path],
237 237 self._manifestdelta.flags(path))
238 238 node, flag = self._repo.manifest.find(self._changeset[0], path)
239 239 if not node:
240 240 raise error.ManifestLookupError(self._node, path,
241 241 _('not found in manifest'))
242 242
243 243 return node, flag
244 244
245 245 def filenode(self, path):
246 246 return self._fileinfo(path)[0]
247 247
248 248 def flags(self, path):
249 249 try:
250 250 return self._fileinfo(path)[1]
251 251 except error.LookupError:
252 252 return ''
253 253
254 254 def sub(self, path):
255 255 return subrepo.subrepo(self, path)
256 256
257 257 def match(self, pats=[], include=None, exclude=None, default='glob'):
258 258 r = self._repo
259 259 return matchmod.match(r.root, r.getcwd(), pats,
260 260 include, exclude, default,
261 261 auditor=r.auditor, ctx=self)
262 262
263 263 def diff(self, ctx2=None, match=None, **opts):
264 264 """Returns a diff generator for the given contexts and matcher"""
265 265 if ctx2 is None:
266 266 ctx2 = self.p1()
267 267 if ctx2 is not None:
268 268 ctx2 = self._repo[ctx2]
269 269 diffopts = patch.diffopts(self._repo.ui, opts)
270 270 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
271 271
272 272 @propertycache
273 273 def _dirs(self):
274 274 return scmutil.dirs(self._manifest)
275 275
276 276 def dirs(self):
277 277 return self._dirs
278 278
279 279 def dirty(self, missing=False, merge=True, branch=True):
280 280 return False
281 281
282 282 def status(self, other=None, match=None, listignored=False,
283 283 listclean=False, listunknown=False, listsubrepos=False):
284 284 """return status of files between two nodes or node and working
285 285 directory.
286 286
287 287 If other is None, compare this node with working directory.
288 288
289 289 returns (modified, added, removed, deleted, unknown, ignored, clean)
290 290 """
291 291
292 292 ctx1 = self
293 293 ctx2 = self._repo[other]
294 294
295 295 # This next code block is, admittedly, fragile logic that tests for
296 296 # reversing the contexts and wouldn't need to exist if it weren't for
297 297 # the fast (and common) code path of comparing the working directory
298 298 # with its first parent.
299 299 #
300 300 # What we're aiming for here is the ability to call:
301 301 #
302 302 # workingctx.status(parentctx)
303 303 #
304 304 # If we always built the manifest for each context and compared those,
305 305 # then we'd be done. But the special case of the above call means we
306 306 # just copy the manifest of the parent.
307 307 reversed = False
308 308 if (not isinstance(ctx1, changectx)
309 309 and isinstance(ctx2, changectx)):
310 310 reversed = True
311 311 ctx1, ctx2 = ctx2, ctx1
312 312
313 313 r = [[], [], [], [], [], [], []]
314 314 match = ctx2._matchstatus(ctx1, r, match, listignored, listclean,
315 315 listunknown)
316 316 r = ctx2._prestatus(ctx1, r, match, listignored, listclean, listunknown)
317 317 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
318 318 listunknown)
319 319 r = ctx2._poststatus(ctx1, r, match, listignored, listclean,
320 320 listunknown)
321 321
322 322 if reversed:
323 323 # reverse added and removed
324 324 r[1], r[2] = r[2], r[1]
325 325
326 326 if listsubrepos:
327 327 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
328 328 rev2 = ctx2.subrev(subpath)
329 329 try:
330 330 submatch = matchmod.narrowmatcher(subpath, match)
331 331 s = sub.status(rev2, match=submatch, ignored=listignored,
332 332 clean=listclean, unknown=listunknown,
333 333 listsubrepos=True)
334 334 for rfiles, sfiles in zip(r, s):
335 335 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
336 336 except error.LookupError:
337 337 self._repo.ui.status(_("skipping missing "
338 338 "subrepository: %s\n") % subpath)
339 339
340 340 for l in r:
341 341 l.sort()
342 342
343 343 # we return a tuple to signify that this list isn't changing
344 344 return tuple(r)
345 345
346 346
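# Usage sketch (hypothetical helper): compare a changeset with the working
# directory and unpack the 7-tuple documented in status() above:
def _statusexample(repo):
    st = repo['.'].status(listignored=True, listclean=True,
                          listunknown=True)
    modified, added, removed, deleted, unknown, ignored, clean = st
    return modified, added, removed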
347 347 def makememctx(repo, parents, text, user, date, branch, files, store,
348 348 editor=None):
349 349 def getfilectx(repo, memctx, path):
350 data, (islink, isexec), copied = store.getfile(path)
350 data, mode, copied = store.getfile(path)
351 if data is None:
352 return None
353 islink, isexec = mode
351 354 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
352 355 copied=copied, memctx=memctx)
353 356 extra = {}
354 357 if branch:
355 358 extra['branch'] = encoding.fromlocal(branch)
356 359 ctx = memctx(repo, parents, text, files, getfilectx, user,
357 360 date, extra, editor)
358 361 return ctx
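# A minimal sketch (hypothetical 'dictstore') of the store contract that
# getfilectx above relies on after this change: getfile(path) returns
# (data, (islink, isexec), copied), with data=None for a missing file,
# so getfilectx can return None instead of raising IOError.
class dictstore(object):
    def __init__(self, files):
        # files: {path: (data, (islink, isexec), copied-or-None)}
        self._files = files
    def getfile(self, path):
        try:
            return self._files[path]
        except KeyError:
            return None, None, None  # missing file: no IOError any more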
359 362
360 363 class changectx(basectx):
361 364 """A changecontext object makes access to data related to a particular
362 365 changeset convenient. It represents a read-only context already present in
363 366 the repo."""
364 367 def __init__(self, repo, changeid=''):
365 368 """changeid is a revision number, node, or tag"""
366 369
367 370 # since basectx.__new__ already took care of copying the object, we
368 371 # don't need to do anything in __init__, so we just exit here
369 372 if isinstance(changeid, basectx):
370 373 return
371 374
372 375 if changeid == '':
373 376 changeid = '.'
374 377 self._repo = repo
375 378
376 379 if isinstance(changeid, int):
377 380 try:
378 381 self._node = repo.changelog.node(changeid)
379 382 except IndexError:
380 383 raise error.RepoLookupError(
381 384 _("unknown revision '%s'") % changeid)
382 385 self._rev = changeid
383 386 return
384 387 if isinstance(changeid, long):
385 388 changeid = str(changeid)
386 389 if changeid == '.':
387 390 self._node = repo.dirstate.p1()
388 391 self._rev = repo.changelog.rev(self._node)
389 392 return
390 393 if changeid == 'null':
391 394 self._node = nullid
392 395 self._rev = nullrev
393 396 return
394 397 if changeid == 'tip':
395 398 self._node = repo.changelog.tip()
396 399 self._rev = repo.changelog.rev(self._node)
397 400 return
398 401 if len(changeid) == 20:
399 402 try:
400 403 self._node = changeid
401 404 self._rev = repo.changelog.rev(changeid)
402 405 return
403 406 except LookupError:
404 407 pass
405 408
406 409 try:
407 410 r = int(changeid)
408 411 if str(r) != changeid:
409 412 raise ValueError
410 413 l = len(repo.changelog)
411 414 if r < 0:
412 415 r += l
413 416 if r < 0 or r >= l:
414 417 raise ValueError
415 418 self._rev = r
416 419 self._node = repo.changelog.node(r)
417 420 return
418 421 except (ValueError, OverflowError, IndexError):
419 422 pass
420 423
421 424 if len(changeid) == 40:
422 425 try:
423 426 self._node = bin(changeid)
424 427 self._rev = repo.changelog.rev(self._node)
425 428 return
426 429 except (TypeError, LookupError):
427 430 pass
428 431
429 432 if changeid in repo._bookmarks:
430 433 self._node = repo._bookmarks[changeid]
431 434 self._rev = repo.changelog.rev(self._node)
432 435 return
433 436 if changeid in repo._tagscache.tags:
434 437 self._node = repo._tagscache.tags[changeid]
435 438 self._rev = repo.changelog.rev(self._node)
436 439 return
437 440 try:
438 441 self._node = repo.branchtip(changeid)
439 442 self._rev = repo.changelog.rev(self._node)
440 443 return
441 444 except error.RepoLookupError:
442 445 pass
443 446
444 447 self._node = repo.changelog._partialmatch(changeid)
445 448 if self._node is not None:
446 449 self._rev = repo.changelog.rev(self._node)
447 450 return
448 451
449 452 # lookup failed
450 453 # check if it might have come from damaged dirstate
451 454 #
452 455 # XXX we could avoid the unfiltered if we had a recognizable exception
453 456 # for filtered changeset access
454 457 if changeid in repo.unfiltered().dirstate.parents():
455 458 raise error.Abort(_("working directory has unknown parent '%s'!")
456 459 % short(changeid))
457 460 try:
458 461 if len(changeid) == 20:
459 462 changeid = hex(changeid)
460 463 except TypeError:
461 464 pass
462 465 raise error.RepoLookupError(
463 466 _("unknown revision '%s'") % changeid)
464 467
465 468 def __hash__(self):
466 469 try:
467 470 return hash(self._rev)
468 471 except AttributeError:
469 472 return id(self)
470 473
471 474 def __nonzero__(self):
472 475 return self._rev != nullrev
473 476
474 477 @propertycache
475 478 def _changeset(self):
476 479 return self._repo.changelog.read(self.rev())
477 480
478 481 @propertycache
479 482 def _manifest(self):
480 483 return self._repo.manifest.read(self._changeset[0])
481 484
482 485 @propertycache
483 486 def _manifestdelta(self):
484 487 return self._repo.manifest.readdelta(self._changeset[0])
485 488
486 489 @propertycache
487 490 def _parents(self):
488 491 p = self._repo.changelog.parentrevs(self._rev)
489 492 if p[1] == nullrev:
490 493 p = p[:-1]
491 494 return [changectx(self._repo, x) for x in p]
492 495
493 496 def changeset(self):
494 497 return self._changeset
495 498 def manifestnode(self):
496 499 return self._changeset[0]
497 500
498 501 def user(self):
499 502 return self._changeset[1]
500 503 def date(self):
501 504 return self._changeset[2]
502 505 def files(self):
503 506 return self._changeset[3]
504 507 def description(self):
505 508 return self._changeset[4]
506 509 def branch(self):
507 510 return encoding.tolocal(self._changeset[5].get("branch"))
508 511 def closesbranch(self):
509 512 return 'close' in self._changeset[5]
510 513 def extra(self):
511 514 return self._changeset[5]
512 515 def tags(self):
513 516 return self._repo.nodetags(self._node)
514 517 def bookmarks(self):
515 518 return self._repo.nodebookmarks(self._node)
516 519 def phase(self):
517 520 return self._repo._phasecache.phase(self._repo, self._rev)
518 521 def hidden(self):
519 522 return self._rev in repoview.filterrevs(self._repo, 'visible')
520 523
521 524 def children(self):
522 525 """return contexts for each child changeset"""
523 526 c = self._repo.changelog.children(self._node)
524 527 return [changectx(self._repo, x) for x in c]
525 528
526 529 def ancestors(self):
527 530 for a in self._repo.changelog.ancestors([self._rev]):
528 531 yield changectx(self._repo, a)
529 532
530 533 def descendants(self):
531 534 for d in self._repo.changelog.descendants([self._rev]):
532 535 yield changectx(self._repo, d)
533 536
534 537 def filectx(self, path, fileid=None, filelog=None):
535 538 """get a file context from this changeset"""
536 539 if fileid is None:
537 540 fileid = self.filenode(path)
538 541 return filectx(self._repo, path, fileid=fileid,
539 542 changectx=self, filelog=filelog)
540 543
541 544 def ancestor(self, c2, warn=False):
542 545 """
543 546 return the "best" ancestor context of self and c2
544 547 """
545 548 # deal with workingctxs
546 549 n2 = c2._node
547 550 if n2 is None:
548 551 n2 = c2._parents[0]._node
549 552 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
550 553 if not cahs:
551 554 anc = nullid
552 555 elif len(cahs) == 1:
553 556 anc = cahs[0]
554 557 else:
555 558 for r in self._repo.ui.configlist('merge', 'preferancestor'):
556 559 if r == '*':
557 560 continue
558 561 ctx = changectx(self._repo, r)
559 562 anc = ctx.node()
560 563 if anc in cahs:
561 564 break
562 565 else:
563 566 anc = self._repo.changelog.ancestor(self._node, n2)
564 567 if warn:
565 568 self._repo.ui.status(
566 569 (_("note: using %s as ancestor of %s and %s\n") %
567 570 (short(anc), short(self._node), short(n2))) +
568 571 ''.join(_(" alternatively, use --config "
569 572 "merge.preferancestor=%s\n") %
570 573 short(n) for n in sorted(cahs) if n != anc))
571 574 return changectx(self._repo, anc)
572 575
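# Configuration sketch for the loop above (hypothetical node hash): an
# hgrc entry such as
#
#   [merge]
#   preferancestor = d34db33f
#
# makes the first listed revision that is a common ancestor head win;
# '*' entries are skipped, and if nothing matches, the else branch falls
# back to changelog.ancestor().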
573 576 def descendant(self, other):
574 577 """True if other is descendant of this changeset"""
575 578 return self._repo.changelog.descendant(self._rev, other._rev)
576 579
577 580 def walk(self, match):
578 581 fset = set(match.files())
579 582 # for dirstate.walk, files=['.'] means "walk the whole tree".
580 583 # follow that here, too
581 584 fset.discard('.')
582 585
583 586 # avoid the entire walk if we're only looking for specific files
584 587 if fset and not match.anypats():
585 588 if util.all([fn in self for fn in fset]):
586 589 for fn in sorted(fset):
587 590 if match(fn):
588 591 yield fn
589 592 raise StopIteration
590 593
591 594 for fn in self:
592 595 if fn in fset:
593 596 # specified pattern is the exact name
594 597 fset.remove(fn)
595 598 if match(fn):
596 599 yield fn
597 600 for fn in sorted(fset):
598 601 if fn in self._dirs:
599 602 # specified pattern is a directory
600 603 continue
601 604 match.bad(fn, _('no such file in rev %s') % self)
602 605
603 606 def matches(self, match):
604 607 return self.walk(match)
605 608
606 609 class basefilectx(object):
607 610 """A filecontext object represents the common logic for its children:
608 611 filectx: read-only access to a filerevision that is already present
609 612 in the repo,
610 613 workingfilectx: a filecontext that represents files from the working
611 614 directory,
612 615 memfilectx: a filecontext that represents files in-memory."""
613 616 def __new__(cls, repo, path, *args, **kwargs):
614 617 return super(basefilectx, cls).__new__(cls)
615 618
616 619 @propertycache
617 620 def _filelog(self):
618 621 return self._repo.file(self._path)
619 622
620 623 @propertycache
621 624 def _changeid(self):
622 625 if '_changeid' in self.__dict__:
623 626 return self._changeid
624 627 elif '_changectx' in self.__dict__:
625 628 return self._changectx.rev()
626 629 else:
627 630 return self._filelog.linkrev(self._filerev)
628 631
629 632 @propertycache
630 633 def _filenode(self):
631 634 if '_fileid' in self.__dict__:
632 635 return self._filelog.lookup(self._fileid)
633 636 else:
634 637 return self._changectx.filenode(self._path)
635 638
636 639 @propertycache
637 640 def _filerev(self):
638 641 return self._filelog.rev(self._filenode)
639 642
640 643 @propertycache
641 644 def _repopath(self):
642 645 return self._path
643 646
644 647 def __nonzero__(self):
645 648 try:
646 649 self._filenode
647 650 return True
648 651 except error.LookupError:
649 652 # file is missing
650 653 return False
651 654
652 655 def __str__(self):
653 656 return "%s@%s" % (self.path(), self._changectx)
654 657
655 658 def __repr__(self):
656 659 return "<%s %s>" % (type(self).__name__, str(self))
657 660
658 661 def __hash__(self):
659 662 try:
660 663 return hash((self._path, self._filenode))
661 664 except AttributeError:
662 665 return id(self)
663 666
664 667 def __eq__(self, other):
665 668 try:
666 669 return (type(self) == type(other) and self._path == other._path
667 670 and self._filenode == other._filenode)
668 671 except AttributeError:
669 672 return False
670 673
671 674 def __ne__(self, other):
672 675 return not (self == other)
673 676
674 677 def filerev(self):
675 678 return self._filerev
676 679 def filenode(self):
677 680 return self._filenode
678 681 def flags(self):
679 682 return self._changectx.flags(self._path)
680 683 def filelog(self):
681 684 return self._filelog
682 685 def rev(self):
683 686 return self._changeid
684 687 def linkrev(self):
685 688 return self._filelog.linkrev(self._filerev)
686 689 def node(self):
687 690 return self._changectx.node()
688 691 def hex(self):
689 692 return self._changectx.hex()
690 693 def user(self):
691 694 return self._changectx.user()
692 695 def date(self):
693 696 return self._changectx.date()
694 697 def files(self):
695 698 return self._changectx.files()
696 699 def description(self):
697 700 return self._changectx.description()
698 701 def branch(self):
699 702 return self._changectx.branch()
700 703 def extra(self):
701 704 return self._changectx.extra()
702 705 def phase(self):
703 706 return self._changectx.phase()
704 707 def phasestr(self):
705 708 return self._changectx.phasestr()
706 709 def manifest(self):
707 710 return self._changectx.manifest()
708 711 def changectx(self):
709 712 return self._changectx
710 713
711 714 def path(self):
712 715 return self._path
713 716
714 717 def isbinary(self):
715 718 try:
716 719 return util.binary(self.data())
717 720 except IOError:
718 721 return False
719 722 def isexec(self):
720 723 return 'x' in self.flags()
721 724 def islink(self):
722 725 return 'l' in self.flags()
723 726
724 727 def cmp(self, fctx):
725 728 """compare with other file context
726 729
727 730 returns True if different from fctx.
728 731 """
729 732 if (fctx._filerev is None
730 733 and (self._repo._encodefilterpats
731 734 # if file data starts with '\1\n', an empty metadata block is
732 735 # prepended, which adds 4 bytes to filelog.size().
733 736 or self.size() - 4 == fctx.size())
734 737 or self.size() == fctx.size()):
735 738 return self._filelog.cmp(self._filenode, fctx.data())
736 739
737 740 return True
738 741
739 742 def parents(self):
740 743 _path = self._path
741 744 fl = self._filelog
742 745 pl = [(_path, n, fl) for n in self._filelog.parents(self._filenode)]
743 746
744 747 r = self._filelog.renamed(self._filenode)
745 748 if r:
746 749 pl[0] = (r[0], r[1], None)
747 750
748 751 return [filectx(self._repo, p, fileid=n, filelog=l)
749 752 for p, n, l in pl if n != nullid]
750 753
751 754 def p1(self):
752 755 return self.parents()[0]
753 756
754 757 def p2(self):
755 758 p = self.parents()
756 759 if len(p) == 2:
757 760 return p[1]
758 761 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
759 762
760 763 def annotate(self, follow=False, linenumber=None, diffopts=None):
761 764 '''returns a list of tuples of (ctx, line) for each line
762 765 in the file, where ctx is the filectx of the node where
763 766 that line was last changed.
764 767 If the "linenumber" parameter is not None, this instead
765 768 returns tuples of ((ctx, linenumber), line) for each line.
766 769 In such tuples, linenumber is the line number at the line's
767 770 first appearance in the managed file.
768 771 To reduce annotation cost, a fixed value (False) is used as
769 772 linenumber when the "linenumber" parameter is False instead
770 773 of None.'''
771 774
772 775 if linenumber is None:
773 776 def decorate(text, rev):
774 777 return ([rev] * len(text.splitlines()), text)
775 778 elif linenumber:
776 779 def decorate(text, rev):
777 780 size = len(text.splitlines())
778 781 return ([(rev, i) for i in xrange(1, size + 1)], text)
779 782 else:
780 783 def decorate(text, rev):
781 784 return ([(rev, False)] * len(text.splitlines()), text)
782 785
783 786 def pair(parent, child):
784 787 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
785 788 refine=True)
786 789 for (a1, a2, b1, b2), t in blocks:
787 790 # Changed blocks ('!') or blocks made only of blank lines ('~')
788 791 # belong to the child.
789 792 if t == '=':
790 793 child[0][b1:b2] = parent[0][a1:a2]
791 794 return child
792 795
793 796 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
794 797
795 798 def parents(f):
796 799 pl = f.parents()
797 800
798 801 # Don't return renamed parents if we aren't following.
799 802 if not follow:
800 803 pl = [p for p in pl if p.path() == f.path()]
801 804
802 805 # renamed filectx won't have a filelog yet, so set it
803 806 # from the cache to save time
804 807 for p in pl:
805 808 if not '_filelog' in p.__dict__:
806 809 p._filelog = getlog(p.path())
807 810
808 811 return pl
809 812
810 813 # use linkrev to find the first changeset where self appeared
811 814 if self.rev() != self.linkrev():
812 815 base = self.filectx(self.filenode())
813 816 else:
814 817 base = self
815 818
816 819 # This algorithm would prefer to be recursive, but Python is a
817 820 # bit recursion-hostile. Instead we do an iterative
818 821 # depth-first search.
819 822
820 823 visit = [base]
821 824 hist = {}
822 825 pcache = {}
823 826 needed = {base: 1}
824 827 while visit:
825 828 f = visit[-1]
826 829 pcached = f in pcache
827 830 if not pcached:
828 831 pcache[f] = parents(f)
829 832
830 833 ready = True
831 834 pl = pcache[f]
832 835 for p in pl:
833 836 if p not in hist:
834 837 ready = False
835 838 visit.append(p)
836 839 if not pcached:
837 840 needed[p] = needed.get(p, 0) + 1
838 841 if ready:
839 842 visit.pop()
840 843 reusable = f in hist
841 844 if reusable:
842 845 curr = hist[f]
843 846 else:
844 847 curr = decorate(f.data(), f)
845 848 for p in pl:
846 849 if not reusable:
847 850 curr = pair(hist[p], curr)
848 851 if needed[p] == 1:
849 852 del hist[p]
850 853 del needed[p]
851 854 else:
852 855 needed[p] -= 1
853 856
854 857 hist[f] = curr
855 858 pcache[f] = []
856 859
857 860 return zip(hist[base][0], hist[base][1].splitlines(True))
858 861
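# A standalone sketch of the stack-based pattern used above: keep a node
# on the stack until all of its parents are computed, then pop and
# combine (hypothetical names; the real loop additionally reference-counts
# 'hist' entries via 'needed' so they can be freed early).
def _dagsolve(base, parentsof, compute, combine):
    visit, hist = [base], {}
    while visit:
        f = visit[-1]
        missing = [p for p in parentsof(f) if p not in hist]
        if missing:
            visit.extend(missing)  # depth-first: handle parents first
            continue
        visit.pop()
        if f not in hist:          # a node may be pushed more than once
            curr = compute(f)
            for p in parentsof(f):
                curr = combine(hist[p], curr)
            hist[f] = curr
    return hist[base]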
859 862 def ancestors(self, followfirst=False):
860 863 visit = {}
861 864 c = self
862 865 cut = followfirst and 1 or None
863 866 while True:
864 867 for parent in c.parents()[:cut]:
865 868 visit[(parent.rev(), parent.node())] = parent
866 869 if not visit:
867 870 break
868 871 c = visit.pop(max(visit))
869 872 yield c
870 873
871 874 class filectx(basefilectx):
872 875 """A filecontext object makes access to data related to a particular
873 876 filerevision convenient."""
874 877 def __init__(self, repo, path, changeid=None, fileid=None,
875 878 filelog=None, changectx=None):
876 879 """changeid can be a changeset revision, node, or tag.
877 880 fileid can be a file revision or node."""
878 881 self._repo = repo
879 882 self._path = path
880 883
881 884 assert (changeid is not None
882 885 or fileid is not None
883 886 or changectx is not None), \
884 887 ("bad args: changeid=%r, fileid=%r, changectx=%r"
885 888 % (changeid, fileid, changectx))
886 889
887 890 if filelog is not None:
888 891 self._filelog = filelog
889 892
890 893 if changeid is not None:
891 894 self._changeid = changeid
892 895 if changectx is not None:
893 896 self._changectx = changectx
894 897 if fileid is not None:
895 898 self._fileid = fileid
896 899
897 900 @propertycache
898 901 def _changectx(self):
899 902 try:
900 903 return changectx(self._repo, self._changeid)
901 904 except error.RepoLookupError:
902 905 # Linkrev may point to any revision in the repository. When the
903 906 # repository is filtered this may lead to `filectx` trying to build
904 907 # `changectx` for a filtered revision. In such a case we fall back to
905 908 # creating `changectx` on the unfiltered version of the repository.
906 909 # This fallback should not be an issue because `changectx` from
907 910 # `filectx` are not used in complex operations that care about
908 911 # filtering.
909 912 #
910 913 # This fallback is a cheap and dirty fix that prevents several
911 914 # crashes. It does not ensure the behavior is correct. However the
912 915 # behavior was not correct before filtering either, and "incorrect
913 916 # behavior" is seen as better than "crash"
914 917 #
915 918 # Linkrevs have several serious troubles with filtering that are
916 919 # complicated to solve. Proper handling of the issue here should be
917 920 # considered when solutions to the linkrev issue are on the table.
918 921 return changectx(self._repo.unfiltered(), self._changeid)
919 922
920 923 def filectx(self, fileid):
921 924 '''opens an arbitrary revision of the file without
922 925 opening a new filelog'''
923 926 return filectx(self._repo, self._path, fileid=fileid,
924 927 filelog=self._filelog)
925 928
926 929 def data(self):
927 930 return self._filelog.read(self._filenode)
928 931 def size(self):
929 932 return self._filelog.size(self._filerev)
930 933
931 934 def renamed(self):
932 935 """check if file was actually renamed in this changeset revision
933 936
934 937 If a rename is logged in the file revision, we report a copy for the
935 938 changeset only if the file revision's linkrev points back to the changeset
936 939 in question or both changeset parents contain different file revisions.
937 940 """
938 941
939 942 renamed = self._filelog.renamed(self._filenode)
940 943 if not renamed:
941 944 return renamed
942 945
943 946 if self.rev() == self.linkrev():
944 947 return renamed
945 948
946 949 name = self.path()
947 950 fnode = self._filenode
948 951 for p in self._changectx.parents():
949 952 try:
950 953 if fnode == p.filenode(name):
951 954 return None
952 955 except error.LookupError:
953 956 pass
954 957 return renamed
955 958
956 959 def children(self):
957 960 # hard for renames
958 961 c = self._filelog.children(self._filenode)
959 962 return [filectx(self._repo, self._path, fileid=x,
960 963 filelog=self._filelog) for x in c]
961 964
962 965 class committablectx(basectx):
963 966 """A committablectx object provides common functionality for a context that
964 967 wants the ability to commit, e.g. workingctx or memctx."""
965 968 def __init__(self, repo, text="", user=None, date=None, extra=None,
966 969 changes=None):
967 970 self._repo = repo
968 971 self._rev = None
969 972 self._node = None
970 973 self._text = text
971 974 if date:
972 975 self._date = util.parsedate(date)
973 976 if user:
974 977 self._user = user
975 978 if changes:
976 979 self._status = changes
977 980
978 981 self._extra = {}
979 982 if extra:
980 983 self._extra = extra.copy()
981 984 if 'branch' not in self._extra:
982 985 try:
983 986 branch = encoding.fromlocal(self._repo.dirstate.branch())
984 987 except UnicodeDecodeError:
985 988 raise util.Abort(_('branch name not in UTF-8!'))
986 989 self._extra['branch'] = branch
987 990 if self._extra['branch'] == '':
988 991 self._extra['branch'] = 'default'
989 992
990 993 def __str__(self):
991 994 return str(self._parents[0]) + "+"
992 995
993 996 def __nonzero__(self):
994 997 return True
995 998
996 999 def _buildflagfunc(self):
997 1000 # Create a fallback function for getting file flags when the
998 1001 # filesystem doesn't support them
999 1002
1000 1003 copiesget = self._repo.dirstate.copies().get
1001 1004
1002 1005 if len(self._parents) < 2:
1003 1006 # when we have one parent, it's easy: copy from parent
1004 1007 man = self._parents[0].manifest()
1005 1008 def func(f):
1006 1009 f = copiesget(f, f)
1007 1010 return man.flags(f)
1008 1011 else:
1009 1012 # merges are tricky: we try to reconstruct the unstored
1010 1013 # result from the merge (issue1802)
1011 1014 p1, p2 = self._parents
1012 1015 pa = p1.ancestor(p2)
1013 1016 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1014 1017
1015 1018 def func(f):
1016 1019 f = copiesget(f, f) # may be wrong for merges with copies
1017 1020 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1018 1021 if fl1 == fl2:
1019 1022 return fl1
1020 1023 if fl1 == fla:
1021 1024 return fl2
1022 1025 if fl2 == fla:
1023 1026 return fl1
1024 1027 return '' # punt for conflicts
1025 1028
1026 1029 return func
1027 1030
1028 1031 @propertycache
1029 1032 def _flagfunc(self):
1030 1033 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1031 1034
1032 1035 @propertycache
1033 1036 def _manifest(self):
1034 1037 """generate a manifest corresponding to the values in self._status"""
1035 1038
1036 1039 man = self._parents[0].manifest().copy()
1037 1040 if len(self._parents) > 1:
1038 1041 man2 = self.p2().manifest()
1039 1042 def getman(f):
1040 1043 if f in man:
1041 1044 return man
1042 1045 return man2
1043 1046 else:
1044 1047 getman = lambda f: man
1045 1048
1046 1049 copied = self._repo.dirstate.copies()
1047 1050 ff = self._flagfunc
1048 1051 modified, added, removed, deleted = self._status[:4]
1049 1052 for i, l in (("a", added), ("m", modified)):
1050 1053 for f in l:
1051 1054 orig = copied.get(f, f)
1052 1055 man[f] = getman(orig).get(orig, nullid) + i
1053 1056 try:
1054 1057 man.set(f, ff(f))
1055 1058 except OSError:
1056 1059 pass
1057 1060
1058 1061 for f in deleted + removed:
1059 1062 if f in man:
1060 1063 del man[f]
1061 1064
1062 1065 return man
1063 1066
1064 1067 @propertycache
1065 1068 def _status(self):
1066 1069 return self._repo.status()
1067 1070
1068 1071 @propertycache
1069 1072 def _user(self):
1070 1073 return self._repo.ui.username()
1071 1074
1072 1075 @propertycache
1073 1076 def _date(self):
1074 1077 return util.makedate()
1075 1078
1076 1079 def subrev(self, subpath):
1077 1080 return None
1078 1081
1079 1082 def user(self):
1080 1083 return self._user or self._repo.ui.username()
1081 1084 def date(self):
1082 1085 return self._date
1083 1086 def description(self):
1084 1087 return self._text
1085 1088 def files(self):
1086 1089 return sorted(self._status[0] + self._status[1] + self._status[2])
1087 1090
1088 1091 def modified(self):
1089 1092 return self._status[0]
1090 1093 def added(self):
1091 1094 return self._status[1]
1092 1095 def removed(self):
1093 1096 return self._status[2]
1094 1097 def deleted(self):
1095 1098 return self._status[3]
1096 1099 def unknown(self):
1097 1100 return self._status[4]
1098 1101 def ignored(self):
1099 1102 return self._status[5]
1100 1103 def clean(self):
1101 1104 return self._status[6]
1102 1105 def branch(self):
1103 1106 return encoding.tolocal(self._extra['branch'])
1104 1107 def closesbranch(self):
1105 1108 return 'close' in self._extra
1106 1109 def extra(self):
1107 1110 return self._extra
1108 1111
1109 1112 def tags(self):
1110 1113 t = []
1111 1114 for p in self.parents():
1112 1115 t.extend(p.tags())
1113 1116 return t
1114 1117
1115 1118 def bookmarks(self):
1116 1119 b = []
1117 1120 for p in self.parents():
1118 1121 b.extend(p.bookmarks())
1119 1122 return b
1120 1123
1121 1124 def phase(self):
1122 1125 phase = phases.draft # default phase to draft
1123 1126 for p in self.parents():
1124 1127 phase = max(phase, p.phase())
1125 1128 return phase
1126 1129
1127 1130 def hidden(self):
1128 1131 return False
1129 1132
1130 1133 def children(self):
1131 1134 return []
1132 1135
1133 1136 def flags(self, path):
1134 1137 if '_manifest' in self.__dict__:
1135 1138 try:
1136 1139 return self._manifest.flags(path)
1137 1140 except KeyError:
1138 1141 return ''
1139 1142
1140 1143 try:
1141 1144 return self._flagfunc(path)
1142 1145 except OSError:
1143 1146 return ''
1144 1147
1145 1148 def ancestor(self, c2):
1146 1149 """return the ancestor context of self and c2"""
1147 1150 return self._parents[0].ancestor(c2) # punt on two parents for now
1148 1151
1149 1152 def walk(self, match):
1150 1153 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1151 1154 True, False))
1152 1155
1153 1156 def matches(self, match):
1154 1157 return sorted(self._repo.dirstate.matches(match))
1155 1158
1156 1159 def ancestors(self):
1157 1160 for a in self._repo.changelog.ancestors(
1158 1161 [p.rev() for p in self._parents]):
1159 1162 yield changectx(self._repo, a)
1160 1163
1161 1164 def markcommitted(self, node):
1162 1165 """Perform post-commit cleanup necessary after committing this ctx
1163 1166
1164 1167         Specifically, this updates the backing stores this working context
1165 1168         wraps to reflect the fact that the changes represented by this
1166 1169         workingctx have been committed. For example, it marks
1167 1170 modified and added files as normal in the dirstate.
1168 1171
1169 1172 """
1170 1173
1171 1174 for f in self.modified() + self.added():
1172 1175 self._repo.dirstate.normal(f)
1173 1176 for f in self.removed():
1174 1177 self._repo.dirstate.drop(f)
1175 1178 self._repo.dirstate.setparents(node)
1176 1179
1177 1180 def dirs(self):
1178 1181 return self._repo.dirstate.dirs()
1179 1182
1180 1183 class workingctx(committablectx):
1181 1184 """A workingctx object makes access to data related to
1182 1185 the current working directory convenient.
1183 1186 date - any valid date string or (unixtime, offset), or None.
1184 1187 user - username string, or None.
1185 1188 extra - a dictionary of extra values, or None.
1186 1189 changes - a list of file lists as returned by localrepo.status()
1187 1190 or None to use the repository status.
1188 1191 """
1189 1192 def __init__(self, repo, text="", user=None, date=None, extra=None,
1190 1193 changes=None):
1191 1194 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1192 1195
1193 1196 def __iter__(self):
1194 1197 d = self._repo.dirstate
1195 1198 for f in d:
1196 1199 if d[f] != 'r':
1197 1200 yield f
1198 1201
1199 1202 def __contains__(self, key):
1200 1203 return self._repo.dirstate[key] not in "?r"
1201 1204
1202 1205 @propertycache
1203 1206 def _parents(self):
1204 1207 p = self._repo.dirstate.parents()
1205 1208 if p[1] == nullid:
1206 1209 p = p[:-1]
1207 1210 return [changectx(self._repo, x) for x in p]
1208 1211
1209 1212 def filectx(self, path, filelog=None):
1210 1213 """get a file context from the working directory"""
1211 1214 return workingfilectx(self._repo, path, workingctx=self,
1212 1215 filelog=filelog)
1213 1216
1214 1217 def dirty(self, missing=False, merge=True, branch=True):
1215 1218 "check whether a working directory is modified"
1216 1219 # check subrepos first
1217 1220 for s in sorted(self.substate):
1218 1221 if self.sub(s).dirty():
1219 1222 return True
1220 1223 # check current working dir
1221 1224 return ((merge and self.p2()) or
1222 1225 (branch and self.branch() != self.p1().branch()) or
1223 1226 self.modified() or self.added() or self.removed() or
1224 1227 (missing and self.deleted()))
1225 1228
1226 1229 def add(self, list, prefix=""):
1227 1230 join = lambda f: os.path.join(prefix, f)
1228 1231 wlock = self._repo.wlock()
1229 1232 ui, ds = self._repo.ui, self._repo.dirstate
1230 1233 try:
1231 1234 rejected = []
1232 1235 lstat = self._repo.wvfs.lstat
1233 1236 for f in list:
1234 1237 scmutil.checkportable(ui, join(f))
1235 1238 try:
1236 1239 st = lstat(f)
1237 1240 except OSError:
1238 1241 ui.warn(_("%s does not exist!\n") % join(f))
1239 1242 rejected.append(f)
1240 1243 continue
1241 1244 if st.st_size > 10000000:
1242 1245 ui.warn(_("%s: up to %d MB of RAM may be required "
1243 1246 "to manage this file\n"
1244 1247 "(use 'hg revert %s' to cancel the "
1245 1248 "pending addition)\n")
1246 1249 % (f, 3 * st.st_size // 1000000, join(f)))
1247 1250 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1248 1251 ui.warn(_("%s not added: only files and symlinks "
1249 1252 "supported currently\n") % join(f))
1250 1253 rejected.append(f)
1251 1254 elif ds[f] in 'amn':
1252 1255 ui.warn(_("%s already tracked!\n") % join(f))
1253 1256 elif ds[f] == 'r':
1254 1257 ds.normallookup(f)
1255 1258 else:
1256 1259 ds.add(f)
1257 1260 return rejected
1258 1261 finally:
1259 1262 wlock.release()
1260 1263
1261 1264 def forget(self, files, prefix=""):
1262 1265 join = lambda f: os.path.join(prefix, f)
1263 1266 wlock = self._repo.wlock()
1264 1267 try:
1265 1268 rejected = []
1266 1269 for f in files:
1267 1270 if f not in self._repo.dirstate:
1268 1271 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1269 1272 rejected.append(f)
1270 1273 elif self._repo.dirstate[f] != 'a':
1271 1274 self._repo.dirstate.remove(f)
1272 1275 else:
1273 1276 self._repo.dirstate.drop(f)
1274 1277 return rejected
1275 1278 finally:
1276 1279 wlock.release()
1277 1280
1278 1281 def undelete(self, list):
1279 1282 pctxs = self.parents()
1280 1283 wlock = self._repo.wlock()
1281 1284 try:
1282 1285 for f in list:
1283 1286 if self._repo.dirstate[f] != 'r':
1284 1287 self._repo.ui.warn(_("%s not removed!\n") % f)
1285 1288 else:
1286 1289 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1287 1290 t = fctx.data()
1288 1291 self._repo.wwrite(f, t, fctx.flags())
1289 1292 self._repo.dirstate.normal(f)
1290 1293 finally:
1291 1294 wlock.release()
1292 1295
1293 1296 def copy(self, source, dest):
1294 1297 try:
1295 1298 st = self._repo.wvfs.lstat(dest)
1296 1299 except OSError, err:
1297 1300 if err.errno != errno.ENOENT:
1298 1301 raise
1299 1302 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1300 1303 return
1301 1304 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1302 1305 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1303 1306 "symbolic link\n") % dest)
1304 1307 else:
1305 1308 wlock = self._repo.wlock()
1306 1309 try:
1307 1310 if self._repo.dirstate[dest] in '?r':
1308 1311 self._repo.dirstate.add(dest)
1309 1312 self._repo.dirstate.copy(source, dest)
1310 1313 finally:
1311 1314 wlock.release()
1312 1315
1313 1316 def _filtersuspectsymlink(self, files):
1314 1317 if not files or self._repo.dirstate._checklink:
1315 1318 return files
1316 1319
1317 1320 # Symlink placeholders may get non-symlink-like contents
1318 1321 # via user error or dereferencing by NFS or Samba servers,
1319 1322 # so we filter out any placeholders that don't look like a
1320 1323 # symlink
1321 1324 sane = []
1322 1325 for f in files:
1323 1326 if self.flags(f) == 'l':
1324 1327 d = self[f].data()
1325 1328 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1326 1329 self._repo.ui.debug('ignoring suspect symlink placeholder'
1327 1330 ' "%s"\n' % f)
1328 1331 continue
1329 1332 sane.append(f)
1330 1333 return sane
1331 1334
1332 1335 def _checklookup(self, files):
1333 1336 # check for any possibly clean files
1334 1337 if not files:
1335 1338 return [], []
1336 1339
1337 1340 modified = []
1338 1341 fixup = []
1339 1342 pctx = self._parents[0]
1340 1343 # do a full compare of any files that might have changed
1341 1344 for f in sorted(files):
1342 1345 if (f not in pctx or self.flags(f) != pctx.flags(f)
1343 1346 or pctx[f].cmp(self[f])):
1344 1347 modified.append(f)
1345 1348 else:
1346 1349 fixup.append(f)
1347 1350
1348 1351 # update dirstate for files that are actually clean
1349 1352 if fixup:
1350 1353 try:
1351 1354 # updating the dirstate is optional
1352 1355 # so we don't wait on the lock
1353 1356 # wlock can invalidate the dirstate, so cache normal _after_
1354 1357 # taking the lock
1355 1358 wlock = self._repo.wlock(False)
1356 1359 normal = self._repo.dirstate.normal
1357 1360 try:
1358 1361 for f in fixup:
1359 1362 normal(f)
1360 1363 finally:
1361 1364 wlock.release()
1362 1365 except error.LockError:
1363 1366 pass
1364 1367 return modified, fixup
1365 1368
1366 1369 def _manifestmatches(self, match, s):
1367 1370 """Slow path for workingctx
1368 1371
1369 1372 The fast path is when we compare the working directory to its parent
1370 1373 which means this function is comparing with a non-parent; therefore we
1371 1374 need to build a manifest and return what matches.
1372 1375 """
1373 1376 mf = self._repo['.']._manifestmatches(match, s)
1374 1377 modified, added, removed = s[0:3]
1375 1378 for f in modified + added:
1376 1379 mf[f] = None
1377 1380 mf.set(f, self.flags(f))
1378 1381 for f in removed:
1379 1382 if f in mf:
1380 1383 del mf[f]
1381 1384 return mf
1382 1385
1383 1386 def _prestatus(self, other, s, match, listignored, listclean, listunknown):
1384 1387 """override the parent hook with a dirstate query
1385 1388
1386 1389 We use this prestatus hook to populate the status with information from
1387 1390 the dirstate.
1388 1391 """
1389 1392 # doesn't need to call super; if that changes, be aware that super
1390 1393 # calls self.manifest which would slow down the common case of calling
1391 1394 # status against a workingctx's parent
1392 1395 return self._dirstatestatus(match, listignored, listclean, listunknown)
1393 1396
1394 1397 def _poststatus(self, other, s, match, listignored, listclean, listunknown):
1395 1398 """override the parent hook with a filter for suspect symlinks
1396 1399
1397 1400 We use this poststatus hook to filter out symlinks that might have
1398 1401 accidentally ended up with the entire contents of the file they are
1399 1402         supposed to be linking to.
1400 1403 """
1401 1404 s[0] = self._filtersuspectsymlink(s[0])
1402 1405 self._status = s[:]
1403 1406 return s
1404 1407
1405 1408 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1406 1409 unknown=False):
1407 1410 '''Gets the status from the dirstate -- internal use only.'''
1408 1411 listignored, listclean, listunknown = ignored, clean, unknown
1409 1412 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1410 1413 subrepos = []
1411 1414 if '.hgsub' in self:
1412 1415 subrepos = sorted(self.substate)
1413 1416 s = self._repo.dirstate.status(match, subrepos, listignored,
1414 1417 listclean, listunknown)
1415 1418 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1416 1419
1417 1420 # check for any possibly clean files
1418 1421 if cmp:
1419 1422 modified2, fixup = self._checklookup(cmp)
1420 1423 modified += modified2
1421 1424
1422 1425 # update dirstate for files that are actually clean
1423 1426 if fixup and listclean:
1424 1427 clean += fixup
1425 1428
1426 1429 return [modified, added, removed, deleted, unknown, ignored, clean]
1427 1430
1428 1431 def _buildstatus(self, other, s, match, listignored, listclean,
1429 1432 listunknown):
1430 1433 """build a status with respect to another context
1431 1434
1432 1435 This includes logic for maintaining the fast path of status when
1433 1436 comparing the working directory against its parent, which is to skip
1434 1437 building a new manifest if self (working directory) is not comparing
1435 1438 against its parent (repo['.']).
1436 1439 """
1437 1440 if other != self._repo['.']:
1438 1441 s = super(workingctx, self)._buildstatus(other, s, match,
1439 1442 listignored, listclean,
1440 1443 listunknown)
1441 1444 return s
1442 1445
1443 1446 def _matchstatus(self, other, s, match, listignored, listclean,
1444 1447 listunknown):
1445 1448 """override the match method with a filter for directory patterns
1446 1449
1447 1450 We use inheritance to customize the match.bad method only in cases of
1448 1451 workingctx since it belongs only to the working directory when
1449 1452 comparing against the parent changeset.
1450 1453
1451 1454 If we aren't comparing against the working directory's parent, then we
1452 1455 just use the default match object sent to us.
1453 1456 """
1454 1457 superself = super(workingctx, self)
1455 1458 match = superself._matchstatus(other, s, match, listignored, listclean,
1456 1459 listunknown)
1457 1460 if other != self._repo['.']:
1458 1461 def bad(f, msg):
1459 1462 # 'f' may be a directory pattern from 'match.files()',
1460 1463 # so 'f not in ctx1' is not enough
1461 1464 if f not in other and f not in other.dirs():
1462 1465 self._repo.ui.warn('%s: %s\n' %
1463 1466 (self._repo.dirstate.pathto(f), msg))
1464 1467 match.bad = bad
1465 1468 return match
1466 1469
1467 1470 def status(self, other='.', match=None, listignored=False,
1468 1471 listclean=False, listunknown=False, listsubrepos=False):
1469 1472 # yet to be determined: what to do if 'other' is a 'workingctx' or a
1470 1473 # 'memctx'?
1471 1474 s = super(workingctx, self).status(other, match, listignored, listclean,
1472 1475 listunknown, listsubrepos)
1473 1476         # calling 'super' subtly reversed the contexts, so we flip the results
1474 1477 # (s[1] is 'added' and s[2] is 'removed')
1475 1478 s = list(s)
1476 1479 s[1], s[2] = s[2], s[1]
1477 1480 return tuple(s)
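
The swap above is easiest to see with a concrete, illustrative status tuple
(this helper is not part of the module): the reversed comparison reports a
file that only exists in the working directory as "removed" from other's
side, so flipping s[1] and s[2] restores the working directory's point of
view.

def flip(status):
    s = list(status)
    s[1], s[2] = s[2], s[1]  # swap 'added' and 'removed'
    return tuple(s)

# reversed view: 'new.txt' only in the working dir, 'gone.txt' only in other
reversed_view = ([], ['gone.txt'], ['new.txt'], [], [], [], [])
assert flip(reversed_view)[1] == ['new.txt']   # added in the working dir
assert flip(reversed_view)[2] == ['gone.txt']  # removed in the working dir
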
1478 1481
1479 1482 class committablefilectx(basefilectx):
1480 1483 """A committablefilectx provides common functionality for a file context
1481 1484 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1482 1485 def __init__(self, repo, path, filelog=None, ctx=None):
1483 1486 self._repo = repo
1484 1487 self._path = path
1485 1488 self._changeid = None
1486 1489 self._filerev = self._filenode = None
1487 1490
1488 1491 if filelog is not None:
1489 1492 self._filelog = filelog
1490 1493 if ctx:
1491 1494 self._changectx = ctx
1492 1495
1493 1496 def __nonzero__(self):
1494 1497 return True
1495 1498
1496 1499 def parents(self):
1497 1500 '''return parent filectxs, following copies if necessary'''
1498 1501 def filenode(ctx, path):
1499 1502 return ctx._manifest.get(path, nullid)
1500 1503
1501 1504 path = self._path
1502 1505 fl = self._filelog
1503 1506 pcl = self._changectx._parents
1504 1507 renamed = self.renamed()
1505 1508
1506 1509 if renamed:
1507 1510 pl = [renamed + (None,)]
1508 1511 else:
1509 1512 pl = [(path, filenode(pcl[0], path), fl)]
1510 1513
1511 1514 for pc in pcl[1:]:
1512 1515 pl.append((path, filenode(pc, path), fl))
1513 1516
1514 1517 return [filectx(self._repo, p, fileid=n, filelog=l)
1515 1518 for p, n, l in pl if n != nullid]
1516 1519
1517 1520 def children(self):
1518 1521 return []
1519 1522
1520 1523 class workingfilectx(committablefilectx):
1521 1524 """A workingfilectx object makes access to data related to a particular
1522 1525 file in the working directory convenient."""
1523 1526 def __init__(self, repo, path, filelog=None, workingctx=None):
1524 1527 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1525 1528
1526 1529 @propertycache
1527 1530 def _changectx(self):
1528 1531 return workingctx(self._repo)
1529 1532
1530 1533 def data(self):
1531 1534 return self._repo.wread(self._path)
1532 1535 def renamed(self):
1533 1536 rp = self._repo.dirstate.copied(self._path)
1534 1537 if not rp:
1535 1538 return None
1536 1539 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1537 1540
1538 1541 def size(self):
1539 1542 return self._repo.wvfs.lstat(self._path).st_size
1540 1543 def date(self):
1541 1544 t, tz = self._changectx.date()
1542 1545 try:
1543 1546 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1544 1547 except OSError, err:
1545 1548 if err.errno != errno.ENOENT:
1546 1549 raise
1547 1550 return (t, tz)
1548 1551
1549 1552 def cmp(self, fctx):
1550 1553 """compare with other file context
1551 1554
1552 1555 returns True if different than fctx.
1553 1556 """
1554 1557 # fctx should be a filectx (not a workingfilectx)
1555 1558 # invert comparison to reuse the same code path
1556 1559 return fctx.cmp(self)
1557 1560
1558 1561 def remove(self, ignoremissing=False):
1559 1562 """wraps unlink for a repo's working directory"""
1560 1563 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1561 1564
1562 1565 def write(self, data, flags):
1563 1566 """wraps repo.wwrite"""
1564 1567 self._repo.wwrite(self._path, data, flags)
1565 1568
1566 1569 class memctx(committablectx):
1567 1570 """Use memctx to perform in-memory commits via localrepo.commitctx().
1568 1571
1569 1572 Revision information is supplied at initialization time while
1570 1573     related files data is made available through a callback
1571 1574 mechanism. 'repo' is the current localrepo, 'parents' is a
1572 1575 sequence of two parent revisions identifiers (pass None for every
1573 1576 missing parent), 'text' is the commit message and 'files' lists
1574 1577 names of files touched by the revision (normalized and relative to
1575 1578 repository root).
1576 1579
1577 1580 filectxfn(repo, memctx, path) is a callable receiving the
1578 1581 repository, the current memctx object and the normalized path of
1579 1582 requested file, relative to repository root. It is fired by the
1580 1583     commit function for every file in 'files', but the call order is
1581 1584 undefined. If the file is available in the revision being
1582 1585 committed (updated or added), filectxfn returns a memfilectx
1583 1586 object. If the file was removed, filectxfn raises an
1584 1587 IOError. Moved files are represented by marking the source file
1585 1588 removed and the new file added with copy information (see
1586 1589 memfilectx).
1587 1590
1588 1591 user receives the committer name and defaults to current
1589 1592 repository username, date is the commit date in any format
1590 1593 supported by util.parsedate() and defaults to current date, extra
1591 1594 is a dictionary of metadata or is left empty.
1592 1595 """
1593 1596 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1594 1597 date=None, extra=None, editor=False):
1595 1598 super(memctx, self).__init__(repo, text, user, date, extra)
1596 1599 self._rev = None
1597 1600 self._node = None
1598 1601 parents = [(p or nullid) for p in parents]
1599 1602 p1, p2 = parents
1600 1603 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1601 1604 files = sorted(set(files))
1602 1605 self._status = [files, [], [], [], []]
1603 1606 self._filectxfn = filectxfn
1604 1607 self.substate = {}
1605 1608
1606 1609 # if store is not callable, wrap it in a function
1607 1610 if not callable(filectxfn):
1608 1611 def getfilectx(repo, memctx, path):
1609 1612 fctx = filectxfn[path]
1610 1613 # this is weird but apparently we only keep track of one parent
1611 1614 # (why not only store that instead of a tuple?)
1612 1615 copied = fctx.renamed()
1613 1616 if copied:
1614 1617 copied = copied[0]
1615 1618 return memfilectx(repo, path, fctx.data(),
1616 1619 islink=fctx.islink(), isexec=fctx.isexec(),
1617 1620 copied=copied, memctx=memctx)
1618 1621 self._filectxfn = getfilectx
1619 1622
1620 1623 self._extra = extra and extra.copy() or {}
1621 1624 if self._extra.get('branch', '') == '':
1622 1625 self._extra['branch'] = 'default'
1623 1626
1624 1627 if editor:
1625 1628 self._text = editor(self._repo, self, [])
1626 1629 self._repo.savecommitmessage(self._text)
1627 1630
1628 1631 def filectx(self, path, filelog=None):
1629 """get a file context from the working directory"""
1632 """get a file context from the working directory
1633
1634 Returns None if file doesn't exist and should be removed."""
1630 1635 return self._filectxfn(self._repo, self, path)
1631 1636
1632 1637 def commit(self):
1633 1638 """commit context to the repo"""
1634 1639 return self._repo.commitctx(self)
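
A hedged usage sketch of the API above ('repo' is assumed to be an
already-open localrepository; paths, contents, and the username are
invented, and locking is omitted). Following the updated filectx docstring
in this changeset, the callback returns None for a file that no longer
exists, which marks it removed:

from mercurial import context

def getfilectx(repo, memctx, path):
    if path == 'removed.txt':
        return None  # missing file: mark it removed instead of raising
    return context.memfilectx(repo, path, 'new contents\n',
                              islink=False, isexec=False,
                              copied=None, memctx=memctx)

mctx = context.memctx(repo, (repo['tip'].node(), None),
                      'example in-memory commit',
                      ['touched.txt', 'removed.txt'],
                      getfilectx, user='example <user@example.org>')
node = mctx.commit()  # equivalent to repo.commitctx(mctx)
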
1635 1640
1636 1641 @propertycache
1637 1642 def _manifest(self):
1638 1643 """generate a manifest based on the return values of filectxfn"""
1639 1644
1640 1645 # keep this simple for now; just worry about p1
1641 1646 pctx = self._parents[0]
1642 1647 man = pctx.manifest().copy()
1643 1648
1644 1649 for f, fnode in man.iteritems():
1645 1650 p1node = nullid
1646 1651 p2node = nullid
1647 1652 p = pctx[f].parents() # if file isn't in pctx, check p2?
1648 1653 if len(p) > 0:
1649 1654 p1node = p[0].node()
1650 1655 if len(p) > 1:
1651 1656 p2node = p[1].node()
1652 1657 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1653 1658
1654 1659 return man
1655 1660
1656 1661
1657 1662 class memfilectx(committablefilectx):
1658 1663 """memfilectx represents an in-memory file to commit.
1659 1664
1660 1665     See memctx and committablefilectx for more details.
1661 1666 """
1662 1667 def __init__(self, repo, path, data, islink=False,
1663 1668 isexec=False, copied=None, memctx=None):
1664 1669 """
1665 1670 path is the normalized file path relative to repository root.
1666 1671 data is the file content as a string.
1667 1672 islink is True if the file is a symbolic link.
1668 1673 isexec is True if the file is executable.
1669 1674 copied is the source file path if current file was copied in the
1670 1675 revision being committed, or None."""
1671 1676 super(memfilectx, self).__init__(repo, path, None, memctx)
1672 1677 self._data = data
1673 1678 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1674 1679 self._copied = None
1675 1680 if copied:
1676 1681 self._copied = (copied, nullid)
1677 1682
1678 1683 def data(self):
1679 1684 return self._data
1680 1685 def size(self):
1681 1686 return len(self.data())
1682 1687 def flags(self):
1683 1688 return self._flags
1684 1689 def renamed(self):
1685 1690 return self._copied
1686 1691
1687 1692 def remove(self, ignoremissing=False):
1688 1693 """wraps unlink for a repo's working directory"""
1689 1694 # need to figure out what to do here
1690 1695 del self._changectx[self._path]
1691 1696
1692 1697 def write(self, data, flags):
1693 1698 """wraps repo.wwrite"""
1694 1699 self._data = data
@@ -1,1780 +1,1781 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 propertycache = util.propertycache
22 22 filecache = scmutil.filecache
23 23
24 24 class repofilecache(filecache):
25 25     """All filecache usage on a repo is done for logic that should be unfiltered
26 26 """
27 27
28 28 def __get__(self, repo, type=None):
29 29 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 30 def __set__(self, repo, value):
31 31 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 32 def __delete__(self, repo):
33 33 return super(repofilecache, self).__delete__(repo.unfiltered())
34 34
35 35 class storecache(repofilecache):
36 36 """filecache for files in the store"""
37 37 def join(self, obj, fname):
38 38 return obj.sjoin(fname)
39 39
40 40 class unfilteredpropertycache(propertycache):
41 41     """propertycache that applies to the unfiltered repo only"""
42 42
43 43 def __get__(self, repo, type=None):
44 44 unfi = repo.unfiltered()
45 45 if unfi is repo:
46 46 return super(unfilteredpropertycache, self).__get__(unfi)
47 47 return getattr(unfi, self.name)
48 48
49 49 class filteredpropertycache(propertycache):
50 50     """propertycache that must take filtering into account"""
51 51
52 52 def cachevalue(self, obj, value):
53 53 object.__setattr__(obj, self.name, value)
54 54
55 55
56 56 def hasunfilteredcache(repo, name):
57 57 """check if a repo has an unfilteredpropertycache value for <name>"""
58 58 return name in vars(repo.unfiltered())
59 59
60 60 def unfilteredmethod(orig):
61 61     """decorate a method that always needs to be run on the unfiltered version"""
62 62 def wrapper(repo, *args, **kwargs):
63 63 return orig(repo.unfiltered(), *args, **kwargs)
64 64 return wrapper
65 65
66 66 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
67 67 'unbundle'))
68 68 legacycaps = moderncaps.union(set(['changegroupsubset']))
69 69
70 70 class localpeer(peer.peerrepository):
71 71 '''peer for a local repo; reflects only the most recent API'''
72 72
73 73 def __init__(self, repo, caps=moderncaps):
74 74 peer.peerrepository.__init__(self)
75 75 self._repo = repo.filtered('served')
76 76 self.ui = repo.ui
77 77 self._caps = repo._restrictcapabilities(caps)
78 78 self.requirements = repo.requirements
79 79 self.supportedformats = repo.supportedformats
80 80
81 81 def close(self):
82 82 self._repo.close()
83 83
84 84 def _capabilities(self):
85 85 return self._caps
86 86
87 87 def local(self):
88 88 return self._repo
89 89
90 90 def canpush(self):
91 91 return True
92 92
93 93 def url(self):
94 94 return self._repo.url()
95 95
96 96 def lookup(self, key):
97 97 return self._repo.lookup(key)
98 98
99 99 def branchmap(self):
100 100 return self._repo.branchmap()
101 101
102 102 def heads(self):
103 103 return self._repo.heads()
104 104
105 105 def known(self, nodes):
106 106 return self._repo.known(nodes)
107 107
108 108 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
109 109 format='HG10', **kwargs):
110 110 cg = exchange.getbundle(self._repo, source, heads=heads,
111 111 common=common, bundlecaps=bundlecaps, **kwargs)
112 112 if bundlecaps is not None and 'HG2X' in bundlecaps:
113 113 # When requesting a bundle2, getbundle returns a stream to make the
114 114 # wire level function happier. We need to build a proper object
115 115 # from it in local peer.
116 116 cg = bundle2.unbundle20(self.ui, cg)
117 117 return cg
118 118
119 119 # TODO We might want to move the next two calls into legacypeer and add
120 120 # unbundle instead.
121 121
122 122 def unbundle(self, cg, heads, url):
123 123 """apply a bundle on a repo
124 124
125 125 This function handles the repo locking itself."""
126 126 try:
127 127 cg = exchange.readbundle(self.ui, cg, None)
128 128 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
129 129 if util.safehasattr(ret, 'getchunks'):
130 130 # This is a bundle20 object, turn it into an unbundler.
131 131 # This little dance should be dropped eventually when the API
132 132 # is finally improved.
133 133 stream = util.chunkbuffer(ret.getchunks())
134 134 ret = bundle2.unbundle20(self.ui, stream)
135 135 return ret
136 136 except error.PushRaced, exc:
137 137 raise error.ResponseError(_('push failed:'), str(exc))
138 138
139 139 def lock(self):
140 140 return self._repo.lock()
141 141
142 142 def addchangegroup(self, cg, source, url):
143 143 return changegroup.addchangegroup(self._repo, cg, source, url)
144 144
145 145 def pushkey(self, namespace, key, old, new):
146 146 return self._repo.pushkey(namespace, key, old, new)
147 147
148 148 def listkeys(self, namespace):
149 149 return self._repo.listkeys(namespace)
150 150
151 151 def debugwireargs(self, one, two, three=None, four=None, five=None):
152 152 '''used to test argument passing over the wire'''
153 153 return "%s %s %s %s %s" % (one, two, three, four, five)
154 154
155 155 class locallegacypeer(localpeer):
156 156 '''peer extension which implements legacy methods too; used for tests with
157 157 restricted capabilities'''
158 158
159 159 def __init__(self, repo):
160 160 localpeer.__init__(self, repo, caps=legacycaps)
161 161
162 162 def branches(self, nodes):
163 163 return self._repo.branches(nodes)
164 164
165 165 def between(self, pairs):
166 166 return self._repo.between(pairs)
167 167
168 168 def changegroup(self, basenodes, source):
169 169 return changegroup.changegroup(self._repo, basenodes, source)
170 170
171 171 def changegroupsubset(self, bases, heads, source):
172 172 return changegroup.changegroupsubset(self._repo, bases, heads, source)
173 173
174 174 class localrepository(object):
175 175
176 176 supportedformats = set(('revlogv1', 'generaldelta'))
177 177 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
178 178 'dotencode'))
179 179 openerreqs = set(('revlogv1', 'generaldelta'))
180 180 requirements = ['revlogv1']
181 181 filtername = None
182 182
183 183 bundle2caps = {'HG2X': (),
184 184 'b2x:listkeys': (),
185 185 'b2x:pushkey': (),
186 186 'b2x:changegroup': (),
187 187 }
188 188
189 189 # a list of (ui, featureset) functions.
190 190     # only functions defined in modules of enabled extensions are invoked
191 191 featuresetupfuncs = set()
192 192
193 193 def _baserequirements(self, create):
194 194 return self.requirements[:]
195 195
196 196 def __init__(self, baseui, path=None, create=False):
197 197 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
198 198 self.wopener = self.wvfs
199 199 self.root = self.wvfs.base
200 200 self.path = self.wvfs.join(".hg")
201 201 self.origroot = path
202 202 self.auditor = pathutil.pathauditor(self.root, self._checknested)
203 203 self.vfs = scmutil.vfs(self.path)
204 204 self.opener = self.vfs
205 205 self.baseui = baseui
206 206 self.ui = baseui.copy()
207 207 self.ui.copy = baseui.copy # prevent copying repo configuration
208 208         # A list of callbacks to shape the phase if no data were found.
209 209         # Callbacks are in the form: func(repo, roots) --> processed root.
210 210         # This list is to be filled by extensions during repo setup
211 211 self._phasedefaults = []
212 212 try:
213 213 self.ui.readconfig(self.join("hgrc"), self.root)
214 214 extensions.loadall(self.ui)
215 215 except IOError:
216 216 pass
217 217
218 218 if self.featuresetupfuncs:
219 219 self.supported = set(self._basesupported) # use private copy
220 220 extmods = set(m.__name__ for n, m
221 221 in extensions.extensions(self.ui))
222 222 for setupfunc in self.featuresetupfuncs:
223 223 if setupfunc.__module__ in extmods:
224 224 setupfunc(self.ui, self.supported)
225 225 else:
226 226 self.supported = self._basesupported
227 227
228 228 if not self.vfs.isdir():
229 229 if create:
230 230 if not self.wvfs.exists():
231 231 self.wvfs.makedirs()
232 232 self.vfs.makedir(notindexed=True)
233 233 requirements = self._baserequirements(create)
234 234 if self.ui.configbool('format', 'usestore', True):
235 235 self.vfs.mkdir("store")
236 236 requirements.append("store")
237 237 if self.ui.configbool('format', 'usefncache', True):
238 238 requirements.append("fncache")
239 239 if self.ui.configbool('format', 'dotencode', True):
240 240 requirements.append('dotencode')
241 241 # create an invalid changelog
242 242 self.vfs.append(
243 243 "00changelog.i",
244 244 '\0\0\0\2' # represents revlogv2
245 245 ' dummy changelog to prevent using the old repo layout'
246 246 )
247 247 if self.ui.configbool('format', 'generaldelta', False):
248 248 requirements.append("generaldelta")
249 249 requirements = set(requirements)
250 250 else:
251 251 raise error.RepoError(_("repository %s not found") % path)
252 252 elif create:
253 253 raise error.RepoError(_("repository %s already exists") % path)
254 254 else:
255 255 try:
256 256 requirements = scmutil.readrequires(self.vfs, self.supported)
257 257 except IOError, inst:
258 258 if inst.errno != errno.ENOENT:
259 259 raise
260 260 requirements = set()
261 261
262 262 self.sharedpath = self.path
263 263 try:
264 264 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
265 265 realpath=True)
266 266 s = vfs.base
267 267 if not vfs.exists():
268 268 raise error.RepoError(
269 269 _('.hg/sharedpath points to nonexistent directory %s') % s)
270 270 self.sharedpath = s
271 271 except IOError, inst:
272 272 if inst.errno != errno.ENOENT:
273 273 raise
274 274
275 275 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
276 276 self.spath = self.store.path
277 277 self.svfs = self.store.vfs
278 278 self.sopener = self.svfs
279 279 self.sjoin = self.store.join
280 280 self.vfs.createmode = self.store.createmode
281 281 self._applyrequirements(requirements)
282 282 if create:
283 283 self._writerequirements()
284 284
285 285
286 286 self._branchcaches = {}
287 287 self.filterpats = {}
288 288 self._datafilters = {}
289 289 self._transref = self._lockref = self._wlockref = None
290 290
291 291 # A cache for various files under .hg/ that tracks file changes,
292 292 # (used by the filecache decorator)
293 293 #
294 294 # Maps a property name to its util.filecacheentry
295 295 self._filecache = {}
296 296
297 297         # holds sets of revisions to be filtered
298 298 # should be cleared when something might have changed the filter value:
299 299 # - new changesets,
300 300 # - phase change,
301 301 # - new obsolescence marker,
302 302 # - working directory parent change,
303 303 # - bookmark changes
304 304 self.filteredrevcache = {}
305 305
306 306 def close(self):
307 307 pass
308 308
309 309 def _restrictcapabilities(self, caps):
310 310 # bundle2 is not ready for prime time, drop it unless explicitly
311 311 # required by the tests (or some brave tester)
312 312 if self.ui.configbool('experimental', 'bundle2-exp', False):
313 313 caps = set(caps)
314 314 capsblob = bundle2.encodecaps(self.bundle2caps)
315 315 caps.add('bundle2-exp=' + urllib.quote(capsblob))
316 316 return caps
317 317
318 318 def _applyrequirements(self, requirements):
319 319 self.requirements = requirements
320 320 self.sopener.options = dict((r, 1) for r in requirements
321 321 if r in self.openerreqs)
322 322 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
323 323 if chunkcachesize is not None:
324 324 self.sopener.options['chunkcachesize'] = chunkcachesize
325 325
326 326 def _writerequirements(self):
327 327 reqfile = self.opener("requires", "w")
328 328 for r in sorted(self.requirements):
329 329 reqfile.write("%s\n" % r)
330 330 reqfile.close()
331 331
332 332 def _checknested(self, path):
333 333 """Determine if path is a legal nested repository."""
334 334 if not path.startswith(self.root):
335 335 return False
336 336 subpath = path[len(self.root) + 1:]
337 337 normsubpath = util.pconvert(subpath)
338 338
339 339 # XXX: Checking against the current working copy is wrong in
340 340 # the sense that it can reject things like
341 341 #
342 342 # $ hg cat -r 10 sub/x.txt
343 343 #
344 344 # if sub/ is no longer a subrepository in the working copy
345 345 # parent revision.
346 346 #
347 347 # However, it can of course also allow things that would have
348 348 # been rejected before, such as the above cat command if sub/
349 349 # is a subrepository now, but was a normal directory before.
350 350 # The old path auditor would have rejected by mistake since it
351 351 # panics when it sees sub/.hg/.
352 352 #
353 353 # All in all, checking against the working copy seems sensible
354 354 # since we want to prevent access to nested repositories on
355 355 # the filesystem *now*.
356 356 ctx = self[None]
357 357 parts = util.splitpath(subpath)
358 358 while parts:
359 359 prefix = '/'.join(parts)
360 360 if prefix in ctx.substate:
361 361 if prefix == normsubpath:
362 362 return True
363 363 else:
364 364 sub = ctx.sub(prefix)
365 365 return sub.checknested(subpath[len(prefix) + 1:])
366 366 else:
367 367 parts.pop()
368 368 return False
369 369
370 370 def peer(self):
371 371 return localpeer(self) # not cached to avoid reference cycle
372 372
373 373 def unfiltered(self):
374 374         """Return the unfiltered version of the repository
375 375
376 376         Intended to be overridden by filtered repos."""
377 377 return self
378 378
379 379 def filtered(self, name):
380 380 """Return a filtered version of a repository"""
381 381 # build a new class with the mixin and the current class
382 382 # (possibly subclass of the repo)
383 383 class proxycls(repoview.repoview, self.unfiltered().__class__):
384 384 pass
385 385 return proxycls(self, name)
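
The same dynamic-mixin trick in isolation (illustrative classes, not part
of this module): putting the view class first in the bases lets it override
behaviour while everything else falls through to the concrete repo class.

class view(object):
    def label(self):
        return 'filtered:' + self.name()

class repo(object):
    def name(self):
        return 'repo'

class proxycls(view, repo):
    pass

assert proxycls().label() == 'filtered:repo'
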
386 386
387 387 @repofilecache('bookmarks')
388 388 def _bookmarks(self):
389 389 return bookmarks.bmstore(self)
390 390
391 391 @repofilecache('bookmarks.current')
392 392 def _bookmarkcurrent(self):
393 393 return bookmarks.readcurrent(self)
394 394
395 395 def bookmarkheads(self, bookmark):
396 396 name = bookmark.split('@', 1)[0]
397 397 heads = []
398 398 for mark, n in self._bookmarks.iteritems():
399 399 if mark.split('@', 1)[0] == name:
400 400 heads.append(n)
401 401 return heads
402 402
403 403 @storecache('phaseroots')
404 404 def _phasecache(self):
405 405 return phases.phasecache(self, self._phasedefaults)
406 406
407 407 @storecache('obsstore')
408 408 def obsstore(self):
409 409 store = obsolete.obsstore(self.sopener)
410 410 if store and not obsolete._enabled:
411 411 # message is rare enough to not be translated
412 412 msg = 'obsolete feature not enabled but %i markers found!\n'
413 413 self.ui.warn(msg % len(list(store)))
414 414 return store
415 415
416 416 @storecache('00changelog.i')
417 417 def changelog(self):
418 418 c = changelog.changelog(self.sopener)
419 419 if 'HG_PENDING' in os.environ:
420 420 p = os.environ['HG_PENDING']
421 421 if p.startswith(self.root):
422 422 c.readpending('00changelog.i.a')
423 423 return c
424 424
425 425 @storecache('00manifest.i')
426 426 def manifest(self):
427 427 return manifest.manifest(self.sopener)
428 428
429 429 @repofilecache('dirstate')
430 430 def dirstate(self):
431 431 warned = [0]
432 432 def validate(node):
433 433 try:
434 434 self.changelog.rev(node)
435 435 return node
436 436 except error.LookupError:
437 437 if not warned[0]:
438 438 warned[0] = True
439 439 self.ui.warn(_("warning: ignoring unknown"
440 440 " working parent %s!\n") % short(node))
441 441 return nullid
442 442
443 443 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
444 444
445 445 def __getitem__(self, changeid):
446 446 if changeid is None:
447 447 return context.workingctx(self)
448 448 return context.changectx(self, changeid)
449 449
450 450 def __contains__(self, changeid):
451 451 try:
452 452 return bool(self.lookup(changeid))
453 453 except error.RepoLookupError:
454 454 return False
455 455
456 456 def __nonzero__(self):
457 457 return True
458 458
459 459 def __len__(self):
460 460 return len(self.changelog)
461 461
462 462 def __iter__(self):
463 463 return iter(self.changelog)
464 464
465 465 def revs(self, expr, *args):
466 466 '''Return a list of revisions matching the given revset'''
467 467 expr = revset.formatspec(expr, *args)
468 468 m = revset.match(None, expr)
469 469 return m(self, revset.spanset(self))
470 470
471 471 def set(self, expr, *args):
472 472 '''
473 473 Yield a context for each matching revision, after doing arg
474 474 replacement via revset.formatspec
475 475 '''
476 476 for r in self.revs(expr, *args):
477 477 yield self[r]
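
A hedged usage sketch of revs() and set() (the revsets and the file name
are invented): formatspec() quotes the arguments, so callers never have to
escape values into revset strings by hand.

for rev in repo.revs('ancestors(%s) and not public()', 'tip'):
    print rev                      # integer revision numbers

for ctx in repo.set('file(%s)', 'mercurial/localrepo.py'):
    print ctx.hex()                # changectx objects
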
478 478
479 479 def url(self):
480 480 return 'file:' + self.root
481 481
482 482 def hook(self, name, throw=False, **args):
483 483 """Call a hook, passing this repo instance.
484 484
485 485 This a convenience method to aid invoking hooks. Extensions likely
486 486 won't call this unless they have registered a custom hook or are
487 487 replacing code that is expected to call a hook.
488 488 """
489 489 return hook.hook(self.ui, self, name, throw, **args)
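
A hedged sketch of a typical call ('pretxncommit' is a standard Mercurial
hook; node, p1, and p2 are placeholder binary node ids): keyword arguments
are exposed to external hooks as HG_* environment variables, and throw=True
aborts the operation if the hook fails.

repo.hook('pretxncommit', throw=True, node=hex(node),
          parent1=hex(p1), parent2=hex(p2))
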
490 490
491 491 @unfilteredmethod
492 492 def _tag(self, names, node, message, local, user, date, extra={},
493 493 editor=False):
494 494 if isinstance(names, str):
495 495 names = (names,)
496 496
497 497 branches = self.branchmap()
498 498 for name in names:
499 499 self.hook('pretag', throw=True, node=hex(node), tag=name,
500 500 local=local)
501 501 if name in branches:
502 502 self.ui.warn(_("warning: tag %s conflicts with existing"
503 503 " branch name\n") % name)
504 504
505 505 def writetags(fp, names, munge, prevtags):
506 506 fp.seek(0, 2)
507 507 if prevtags and prevtags[-1] != '\n':
508 508 fp.write('\n')
509 509 for name in names:
510 510 m = munge and munge(name) or name
511 511 if (self._tagscache.tagtypes and
512 512 name in self._tagscache.tagtypes):
513 513 old = self.tags().get(name, nullid)
514 514 fp.write('%s %s\n' % (hex(old), m))
515 515 fp.write('%s %s\n' % (hex(node), m))
516 516 fp.close()
517 517
518 518 prevtags = ''
519 519 if local:
520 520 try:
521 521 fp = self.opener('localtags', 'r+')
522 522 except IOError:
523 523 fp = self.opener('localtags', 'a')
524 524 else:
525 525 prevtags = fp.read()
526 526
527 527 # local tags are stored in the current charset
528 528 writetags(fp, names, None, prevtags)
529 529 for name in names:
530 530 self.hook('tag', node=hex(node), tag=name, local=local)
531 531 return
532 532
533 533 try:
534 534 fp = self.wfile('.hgtags', 'rb+')
535 535 except IOError, e:
536 536 if e.errno != errno.ENOENT:
537 537 raise
538 538 fp = self.wfile('.hgtags', 'ab')
539 539 else:
540 540 prevtags = fp.read()
541 541
542 542 # committed tags are stored in UTF-8
543 543 writetags(fp, names, encoding.fromlocal, prevtags)
544 544
545 545 fp.close()
546 546
547 547 self.invalidatecaches()
548 548
549 549 if '.hgtags' not in self.dirstate:
550 550 self[None].add(['.hgtags'])
551 551
552 552 m = matchmod.exact(self.root, '', ['.hgtags'])
553 553 tagnode = self.commit(message, user, date, extra=extra, match=m,
554 554 editor=editor)
555 555
556 556 for name in names:
557 557 self.hook('tag', node=hex(node), tag=name, local=local)
558 558
559 559 return tagnode
560 560
561 561 def tag(self, names, node, message, local, user, date, editor=False):
562 562 '''tag a revision with one or more symbolic names.
563 563
564 564 names is a list of strings or, when adding a single tag, names may be a
565 565 string.
566 566
567 567 if local is True, the tags are stored in a per-repository file.
568 568 otherwise, they are stored in the .hgtags file, and a new
569 569 changeset is committed with the change.
570 570
571 571 keyword arguments:
572 572
573 573 local: whether to store tags in non-version-controlled file
574 574 (default False)
575 575
576 576 message: commit message to use if committing
577 577
578 578 user: name of user to use if committing
579 579
580 580 date: date tuple to use if committing'''
581 581
582 582 if not local:
583 583 for x in self.status()[:5]:
584 584 if '.hgtags' in x:
585 585 raise util.Abort(_('working copy of .hgtags is changed '
586 586 '(please commit .hgtags manually)'))
587 587
588 588 self.tags() # instantiate the cache
589 589 self._tag(names, node, message, local, user, date, editor=editor)
590 590
591 591 @filteredpropertycache
592 592 def _tagscache(self):
593 593 '''Returns a tagscache object that contains various tags related
594 594 caches.'''
595 595
596 596 # This simplifies its cache management by having one decorated
597 597 # function (this one) and the rest simply fetch things from it.
598 598 class tagscache(object):
599 599 def __init__(self):
600 600 # These two define the set of tags for this repository. tags
601 601 # maps tag name to node; tagtypes maps tag name to 'global' or
602 602 # 'local'. (Global tags are defined by .hgtags across all
603 603 # heads, and local tags are defined in .hg/localtags.)
604 604 # They constitute the in-memory cache of tags.
605 605 self.tags = self.tagtypes = None
606 606
607 607 self.nodetagscache = self.tagslist = None
608 608
609 609 cache = tagscache()
610 610 cache.tags, cache.tagtypes = self._findtags()
611 611
612 612 return cache
613 613
614 614 def tags(self):
615 615 '''return a mapping of tag to node'''
616 616 t = {}
617 617 if self.changelog.filteredrevs:
618 618 tags, tt = self._findtags()
619 619 else:
620 620 tags = self._tagscache.tags
621 621 for k, v in tags.iteritems():
622 622 try:
623 623 # ignore tags to unknown nodes
624 624 self.changelog.rev(v)
625 625 t[k] = v
626 626 except (error.LookupError, ValueError):
627 627 pass
628 628 return t
629 629
630 630 def _findtags(self):
631 631 '''Do the hard work of finding tags. Return a pair of dicts
632 632 (tags, tagtypes) where tags maps tag name to node, and tagtypes
633 633 maps tag name to a string like \'global\' or \'local\'.
634 634 Subclasses or extensions are free to add their own tags, but
635 635 should be aware that the returned dicts will be retained for the
636 636 duration of the localrepo object.'''
637 637
638 638 # XXX what tagtype should subclasses/extensions use? Currently
639 639 # mq and bookmarks add tags, but do not set the tagtype at all.
640 640 # Should each extension invent its own tag type? Should there
641 641 # be one tagtype for all such "virtual" tags? Or is the status
642 642 # quo fine?
643 643
644 644 alltags = {} # map tag name to (node, hist)
645 645 tagtypes = {}
646 646
647 647 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
648 648 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
649 649
650 650 # Build the return dicts. Have to re-encode tag names because
651 651 # the tags module always uses UTF-8 (in order not to lose info
652 652 # writing to the cache), but the rest of Mercurial wants them in
653 653 # local encoding.
654 654 tags = {}
655 655 for (name, (node, hist)) in alltags.iteritems():
656 656 if node != nullid:
657 657 tags[encoding.tolocal(name)] = node
658 658 tags['tip'] = self.changelog.tip()
659 659 tagtypes = dict([(encoding.tolocal(name), value)
660 660 for (name, value) in tagtypes.iteritems()])
661 661 return (tags, tagtypes)
662 662
663 663 def tagtype(self, tagname):
664 664 '''
665 665 return the type of the given tag. result can be:
666 666
667 667 'local' : a local tag
668 668 'global' : a global tag
669 669 None : tag does not exist
670 670 '''
671 671
672 672 return self._tagscache.tagtypes.get(tagname)
673 673
674 674 def tagslist(self):
675 675 '''return a list of tags ordered by revision'''
676 676 if not self._tagscache.tagslist:
677 677 l = []
678 678 for t, n in self.tags().iteritems():
679 679 l.append((self.changelog.rev(n), t, n))
680 680 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
681 681
682 682 return self._tagscache.tagslist
683 683
684 684 def nodetags(self, node):
685 685 '''return the tags associated with a node'''
686 686 if not self._tagscache.nodetagscache:
687 687 nodetagscache = {}
688 688 for t, n in self._tagscache.tags.iteritems():
689 689 nodetagscache.setdefault(n, []).append(t)
690 690 for tags in nodetagscache.itervalues():
691 691 tags.sort()
692 692 self._tagscache.nodetagscache = nodetagscache
693 693 return self._tagscache.nodetagscache.get(node, [])
694 694
695 695 def nodebookmarks(self, node):
696 696 marks = []
697 697 for bookmark, n in self._bookmarks.iteritems():
698 698 if n == node:
699 699 marks.append(bookmark)
700 700 return sorted(marks)
701 701
702 702 def branchmap(self):
703 703 '''returns a dictionary {branch: [branchheads]} with branchheads
704 704 ordered by increasing revision number'''
705 705 branchmap.updatecache(self)
706 706 return self._branchcaches[self.filtername]
707 707
708 708 def branchtip(self, branch):
709 709 '''return the tip node for a given branch'''
710 710 try:
711 711 return self.branchmap().branchtip(branch)
712 712 except KeyError:
713 713 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
714 714
715 715 def lookup(self, key):
716 716 return self[key].node()
717 717
718 718 def lookupbranch(self, key, remote=None):
719 719 repo = remote or self
720 720 if key in repo.branchmap():
721 721 return key
722 722
723 723 repo = (remote and remote.local()) and remote or self
724 724 return repo[key].branch()
725 725
726 726 def known(self, nodes):
727 727 nm = self.changelog.nodemap
728 728 pc = self._phasecache
729 729 result = []
730 730 for n in nodes:
731 731 r = nm.get(n)
732 732 resp = not (r is None or pc.phase(self, r) >= phases.secret)
733 733 result.append(resp)
734 734 return result
735 735
736 736 def local(self):
737 737 return self
738 738
739 739 def cancopy(self):
740 740 # so statichttprepo's override of local() works
741 741 if not self.local():
742 742 return False
743 743 if not self.ui.configbool('phases', 'publish', True):
744 744 return True
745 745 # if publishing we can't copy if there is filtered content
746 746 return not self.filtered('visible').changelog.filteredrevs
747 747
748 748 def join(self, f):
749 749 return os.path.join(self.path, f)
750 750
751 751 def wjoin(self, f):
752 752 return os.path.join(self.root, f)
753 753
754 754 def file(self, f):
755 755 if f[0] == '/':
756 756 f = f[1:]
757 757 return filelog.filelog(self.sopener, f)
758 758
759 759 def changectx(self, changeid):
760 760 return self[changeid]
761 761
762 762 def parents(self, changeid=None):
763 763 '''get list of changectxs for parents of changeid'''
764 764 return self[changeid].parents()
765 765
766 766 def setparents(self, p1, p2=nullid):
767 767 copies = self.dirstate.setparents(p1, p2)
768 768 pctx = self[p1]
769 769 if copies:
770 770             # Adjust copy records; the dirstate cannot do it, as it
771 771             # requires access to the parents' manifests. Preserve them
772 772             # only for entries added to the first parent.
773 773 for f in copies:
774 774 if f not in pctx and copies[f] in pctx:
775 775 self.dirstate.copy(copies[f], f)
776 776 if p2 == nullid:
777 777 for f, s in sorted(self.dirstate.copies().items()):
778 778 if f not in pctx and s not in pctx:
779 779 self.dirstate.copy(None, f)
780 780
781 781 def filectx(self, path, changeid=None, fileid=None):
782 782 """changeid can be a changeset revision, node, or tag.
783 783 fileid can be a file revision or node."""
784 784 return context.filectx(self, path, changeid, fileid)
785 785
786 786 def getcwd(self):
787 787 return self.dirstate.getcwd()
788 788
789 789 def pathto(self, f, cwd=None):
790 790 return self.dirstate.pathto(f, cwd)
791 791
792 792 def wfile(self, f, mode='r'):
793 793 return self.wopener(f, mode)
794 794
795 795 def _link(self, f):
796 796 return self.wvfs.islink(f)
797 797
798 798 def _loadfilter(self, filter):
799 799 if filter not in self.filterpats:
800 800 l = []
801 801 for pat, cmd in self.ui.configitems(filter):
802 802 if cmd == '!':
803 803 continue
804 804 mf = matchmod.match(self.root, '', [pat])
805 805 fn = None
806 806 params = cmd
807 807 for name, filterfn in self._datafilters.iteritems():
808 808 if cmd.startswith(name):
809 809 fn = filterfn
810 810 params = cmd[len(name):].lstrip()
811 811 break
812 812 if not fn:
813 813 fn = lambda s, c, **kwargs: util.filter(s, c)
814 814 # Wrap old filters not supporting keyword arguments
815 815 if not inspect.getargspec(fn)[2]:
816 816 oldfn = fn
817 817 fn = lambda s, c, **kwargs: oldfn(s, c)
818 818 l.append((mf, fn, params))
819 819 self.filterpats[filter] = l
820 820 return self.filterpats[filter]
821 821
822 822 def _filter(self, filterpats, filename, data):
823 823 for mf, fn, cmd in filterpats:
824 824 if mf(filename):
825 825 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
826 826 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
827 827 break
828 828
829 829 return data
830 830
831 831 @unfilteredpropertycache
832 832 def _encodefilterpats(self):
833 833 return self._loadfilter('encode')
834 834
835 835 @unfilteredpropertycache
836 836 def _decodefilterpats(self):
837 837 return self._loadfilter('decode')
838 838
839 839 def adddatafilter(self, name, filter):
840 840 self._datafilters[name] = filter
841 841
842 842 def wread(self, filename):
843 843 if self._link(filename):
844 844 data = self.wvfs.readlink(filename)
845 845 else:
846 846 data = self.wopener.read(filename)
847 847 return self._filter(self._encodefilterpats, filename, data)
848 848
849 849 def wwrite(self, filename, data, flags):
850 850 data = self._filter(self._decodefilterpats, filename, data)
851 851 if 'l' in flags:
852 852 self.wopener.symlink(data, filename)
853 853 else:
854 854 self.wopener.write(filename, data)
855 855 if 'x' in flags:
856 856 self.wvfs.setflags(filename, False, True)
857 857
858 858 def wwritedata(self, filename, data):
859 859 return self._filter(self._decodefilterpats, filename, data)
860 860
861 861 def transaction(self, desc, report=None):
862 862 tr = self._transref and self._transref() or None
863 863 if tr and tr.running():
864 864 return tr.nest()
865 865
866 866 # abort here if the journal already exists
867 867 if self.svfs.exists("journal"):
868 868 raise error.RepoError(
869 869 _("abandoned transaction found"),
870 870 hint=_("run 'hg recover' to clean up transaction"))
871 871
872 872 def onclose():
873 873 self.store.write(self._transref())
874 874
875 875 self._writejournal(desc)
876 876 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
877 877 rp = report and report or self.ui.warn
878 878 tr = transaction.transaction(rp, self.sopener,
879 879 "journal",
880 880 aftertrans(renames),
881 881 self.store.createmode,
882 882 onclose)
883 883 self._transref = weakref.ref(tr)
884 884 return tr
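
The usual caller-side idiom for the transaction above, as a hedged sketch
(the writes are placeholders): close() commits, while release() without a
prior close() aborts and replays the journal.

tr = repo.transaction('example')
try:
    # ... append to changelog/manifest/filelogs here ...
    tr.close()
finally:
    tr.release()
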
885 885
886 886 def _journalfiles(self):
887 887 return ((self.svfs, 'journal'),
888 888 (self.vfs, 'journal.dirstate'),
889 889 (self.vfs, 'journal.branch'),
890 890 (self.vfs, 'journal.desc'),
891 891 (self.vfs, 'journal.bookmarks'),
892 892 (self.svfs, 'journal.phaseroots'))
893 893
894 894 def undofiles(self):
895 895 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
896 896
897 897 def _writejournal(self, desc):
898 898 self.opener.write("journal.dirstate",
899 899 self.opener.tryread("dirstate"))
900 900 self.opener.write("journal.branch",
901 901 encoding.fromlocal(self.dirstate.branch()))
902 902 self.opener.write("journal.desc",
903 903 "%d\n%s\n" % (len(self), desc))
904 904 self.opener.write("journal.bookmarks",
905 905 self.opener.tryread("bookmarks"))
906 906 self.sopener.write("journal.phaseroots",
907 907 self.sopener.tryread("phaseroots"))
908 908
909 909 def recover(self):
910 910 lock = self.lock()
911 911 try:
912 912 if self.svfs.exists("journal"):
913 913 self.ui.status(_("rolling back interrupted transaction\n"))
914 914 transaction.rollback(self.sopener, "journal",
915 915 self.ui.warn)
916 916 self.invalidate()
917 917 return True
918 918 else:
919 919 self.ui.warn(_("no interrupted transaction available\n"))
920 920 return False
921 921 finally:
922 922 lock.release()
923 923
924 924 def rollback(self, dryrun=False, force=False):
925 925 wlock = lock = None
926 926 try:
927 927 wlock = self.wlock()
928 928 lock = self.lock()
929 929 if self.svfs.exists("undo"):
930 930 return self._rollback(dryrun, force)
931 931 else:
932 932 self.ui.warn(_("no rollback information available\n"))
933 933 return 1
934 934 finally:
935 935 release(lock, wlock)
936 936
937 937 @unfilteredmethod # Until we get smarter cache management
938 938 def _rollback(self, dryrun, force):
939 939 ui = self.ui
940 940 try:
941 941 args = self.opener.read('undo.desc').splitlines()
942 942 (oldlen, desc, detail) = (int(args[0]), args[1], None)
943 943 if len(args) >= 3:
944 944 detail = args[2]
945 945 oldtip = oldlen - 1
946 946
947 947 if detail and ui.verbose:
948 948 msg = (_('repository tip rolled back to revision %s'
949 949 ' (undo %s: %s)\n')
950 950 % (oldtip, desc, detail))
951 951 else:
952 952 msg = (_('repository tip rolled back to revision %s'
953 953 ' (undo %s)\n')
954 954 % (oldtip, desc))
955 955 except IOError:
956 956 msg = _('rolling back unknown transaction\n')
957 957 desc = None
958 958
959 959 if not force and self['.'] != self['tip'] and desc == 'commit':
960 960 raise util.Abort(
961 961 _('rollback of last commit while not checked out '
962 962 'may lose data'), hint=_('use -f to force'))
963 963
964 964 ui.status(msg)
965 965 if dryrun:
966 966 return 0
967 967
968 968 parents = self.dirstate.parents()
969 969 self.destroying()
970 970 transaction.rollback(self.sopener, 'undo', ui.warn)
971 971 if self.vfs.exists('undo.bookmarks'):
972 972 self.vfs.rename('undo.bookmarks', 'bookmarks')
973 973 if self.svfs.exists('undo.phaseroots'):
974 974 self.svfs.rename('undo.phaseroots', 'phaseroots')
975 975 self.invalidate()
976 976
977 977 parentgone = (parents[0] not in self.changelog.nodemap or
978 978 parents[1] not in self.changelog.nodemap)
979 979 if parentgone:
980 980 self.vfs.rename('undo.dirstate', 'dirstate')
981 981 try:
982 982 branch = self.opener.read('undo.branch')
983 983 self.dirstate.setbranch(encoding.tolocal(branch))
984 984 except IOError:
985 985 ui.warn(_('named branch could not be reset: '
986 986 'current branch is still \'%s\'\n')
987 987 % self.dirstate.branch())
988 988
989 989 self.dirstate.invalidate()
990 990 parents = tuple([p.rev() for p in self.parents()])
991 991 if len(parents) > 1:
992 992 ui.status(_('working directory now based on '
993 993 'revisions %d and %d\n') % parents)
994 994 else:
995 995 ui.status(_('working directory now based on '
996 996 'revision %d\n') % parents)
997 997 # TODO: if we know which new heads may result from this rollback, pass
998 998 # them to destroy(), which will prevent the branchhead cache from being
999 999 # invalidated.
1000 1000 self.destroyed()
1001 1001 return 0
1002 1002
1003 1003 def invalidatecaches(self):
1004 1004
1005 1005 if '_tagscache' in vars(self):
1006 1006 # can't use delattr on proxy
1007 1007 del self.__dict__['_tagscache']
1008 1008
1009 1009 self.unfiltered()._branchcaches.clear()
1010 1010 self.invalidatevolatilesets()
1011 1011
1012 1012 def invalidatevolatilesets(self):
1013 1013 self.filteredrevcache.clear()
1014 1014 obsolete.clearobscaches(self)
1015 1015
1016 1016 def invalidatedirstate(self):
1017 1017 '''Invalidates the dirstate, causing the next call to dirstate
1018 1018 to check if it was modified since the last time it was read,
1019 1019 rereading it if it has.
1020 1020
1021 1021 This is different from dirstate.invalidate() in that it doesn't
1022 1022 always reread the dirstate. Use dirstate.invalidate() if you want to
1023 1023 explicitly read the dirstate again (i.e. restoring it to a previous
1024 1024 known good state).'''
1025 1025 if hasunfilteredcache(self, 'dirstate'):
1026 1026 for k in self.dirstate._filecache:
1027 1027 try:
1028 1028 delattr(self.dirstate, k)
1029 1029 except AttributeError:
1030 1030 pass
1031 1031 delattr(self.unfiltered(), 'dirstate')
1032 1032
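invalidatedirstate leans on the filecache pattern: cached values live in the instance __dict__, so delattr simply drops them and the next attribute access recomputes (and re-stats) the value. A stripped-down sketch of that descriptor idea; this is an illustration, not Mercurial's actual filecache:

    # sketch: non-data descriptor whose cache is dropped via delattr
    class cachedattr(object):
        def __init__(self, func):
            self.func = func
        def __get__(self, obj, objtype=None):
            val = self.func(obj)
            obj.__dict__[self.func.__name__] = val  # cached in instance dict
            return val

    # delattr(repo, 'dirstate') removes the __dict__ entry, so the next
    # repo.dirstate access falls back to __get__ and reloads from disk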
1033 1033 def invalidate(self):
1034 1034 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1035 1035 for k in self._filecache:
1036 1036 # dirstate is invalidated separately in invalidatedirstate()
1037 1037 if k == 'dirstate':
1038 1038 continue
1039 1039
1040 1040 try:
1041 1041 delattr(unfiltered, k)
1042 1042 except AttributeError:
1043 1043 pass
1044 1044 self.invalidatecaches()
1045 1045 self.store.invalidatecaches()
1046 1046
1047 1047 def invalidateall(self):
1048 1048 '''Fully invalidates both store and non-store parts, causing the
1049 1049 subsequent operation to reread any outside changes.'''
1050 1050 # extension should hook this to invalidate its caches
1051 1051 self.invalidate()
1052 1052 self.invalidatedirstate()
1053 1053
1054 1054 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1055 1055 try:
1056 1056 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1057 1057 except error.LockHeld, inst:
1058 1058 if not wait:
1059 1059 raise
1060 1060 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1061 1061 (desc, inst.locker))
1062 1062 # default to 600 seconds timeout
1063 1063 l = lockmod.lock(vfs, lockname,
1064 1064 int(self.ui.config("ui", "timeout", "600")),
1065 1065 releasefn, desc=desc)
1066 1066 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1067 1067 if acquirefn:
1068 1068 acquirefn()
1069 1069 return l
1070 1070
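The LockHeld fallback above is a fail-fast-then-block policy: a non-blocking attempt first, then, only when wait is True, a blocking retry bounded by the ui.timeout configuration (600 seconds by default). The shape of that policy, sketched with stand-in names:

    class LockHeld(Exception):          # stand-in for error.LockHeld
        pass

    def acquire(trylock, wait, timeout=600):
        try:
            return trylock(0)           # timeout 0: fail at once if held
        except LockHeld:
            if not wait:
                raise                   # caller asked for fail-fast
            return trylock(timeout)     # block, up to ui.timeout seconds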
1071 1071 def _afterlock(self, callback):
1072 1072 """add a callback to the current repository lock.
1073 1073
1074 1074 The callback will be executed on lock release."""
1075 1075 l = self._lockref and self._lockref()
1076 1076 if l:
1077 1077 l.postrelease.append(callback)
1078 1078 else:
1079 1079 callback()
1080 1080
1081 1081 def lock(self, wait=True):
1082 1082 '''Lock the repository store (.hg/store) and return a weak reference
1083 1083 to the lock. Use this before modifying the store (e.g. committing or
1084 1084 stripping). If you are opening a transaction, get a lock as well.'''
1085 1085 l = self._lockref and self._lockref()
1086 1086 if l is not None and l.held:
1087 1087 l.lock()
1088 1088 return l
1089 1089
1090 1090 def unlock():
1091 1091 for k, ce in self._filecache.items():
1092 1092 if k == 'dirstate' or k not in self.__dict__:
1093 1093 continue
1094 1094 ce.refresh()
1095 1095
1096 1096 l = self._lock(self.svfs, "lock", wait, unlock,
1097 1097 self.invalidate, _('repository %s') % self.origroot)
1098 1098 self._lockref = weakref.ref(l)
1099 1099 return l
1100 1100
1101 1101 def wlock(self, wait=True):
1102 1102 '''Lock the non-store parts of the repository (everything under
1103 1103 .hg except .hg/store) and return a weak reference to the lock.
1104 1104 Use this before modifying files in .hg.'''
1105 1105 l = self._wlockref and self._wlockref()
1106 1106 if l is not None and l.held:
1107 1107 l.lock()
1108 1108 return l
1109 1109
1110 1110 def unlock():
1111 1111 self.dirstate.write()
1112 1112 self._filecache['dirstate'].refresh()
1113 1113
1114 1114 l = self._lock(self.vfs, "wlock", wait, unlock,
1115 1115 self.invalidatedirstate, _('working directory of %s') %
1116 1116 self.origroot)
1117 1117 self._wlockref = weakref.ref(l)
1118 1118 return l
1119 1119
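Together, lock() and wlock() imply an ordering discipline that rollback() above and commit() below both follow: take wlock before lock and release in reverse. A minimal sketch of that discipline, with mutate_repo as a hypothetical helper:

    # sketch: canonical lock ordering for operations touching both areas
    wlock = lock = None
    try:
        wlock = repo.wlock()   # non-store parts of .hg first
        lock = repo.lock()     # then the store
        mutate_repo(repo)      # hypothetical work needing both locks
    finally:
        release(lock, wlock)   # reverse order, as in rollback() above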
1120 1120 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1121 1121 """
1122 1122 commit an individual file as part of a larger transaction
1123 1123 """
1124 1124
1125 1125 fname = fctx.path()
1126 1126 text = fctx.data()
1127 1127 flog = self.file(fname)
1128 1128 fparent1 = manifest1.get(fname, nullid)
1129 1129 fparent2 = fparent2o = manifest2.get(fname, nullid)
1130 1130
1131 1131 meta = {}
1132 1132 copy = fctx.renamed()
1133 1133 if copy and copy[0] != fname:
1134 1134 # Mark the new revision of this file as a copy of another
1135 1135 # file. This copy data will effectively act as a parent
1136 1136 # of this new revision. If this is a merge, the first
1137 1137 # parent will be the nullid (meaning "look up the copy data")
1138 1138 # and the second one will be the other parent. For example:
1139 1139 #
1140 1140 # 0 --- 1 --- 3 rev1 changes file foo
1141 1141 # \ / rev2 renames foo to bar and changes it
1142 1142 # \- 2 -/ rev3 should have bar with all changes and
1143 1143 # should record that bar descends from
1144 1144 # bar in rev2 and foo in rev1
1145 1145 #
1146 1146 # this allows this merge to succeed:
1147 1147 #
1148 1148 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1149 1149 # \ / merging rev3 and rev4 should use bar@rev2
1150 1150 # \- 2 --- 4 as the merge base
1151 1151 #
1152 1152
1153 1153 cfname = copy[0]
1154 1154 crev = manifest1.get(cfname)
1155 1155 newfparent = fparent2
1156 1156
1157 1157 if manifest2: # branch merge
1158 1158 if fparent2 == nullid or crev is None: # copied on remote side
1159 1159 if cfname in manifest2:
1160 1160 crev = manifest2[cfname]
1161 1161 newfparent = fparent1
1162 1162
1163 1163 # find source in nearest ancestor if we've lost track
1164 1164 if not crev:
1165 1165 self.ui.debug(" %s: searching for copy revision for %s\n" %
1166 1166 (fname, cfname))
1167 1167 for ancestor in self[None].ancestors():
1168 1168 if cfname in ancestor:
1169 1169 crev = ancestor[cfname].filenode()
1170 1170 break
1171 1171
1172 1172 if crev:
1173 1173 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1174 1174 meta["copy"] = cfname
1175 1175 meta["copyrev"] = hex(crev)
1176 1176 fparent1, fparent2 = nullid, newfparent
1177 1177 else:
1178 1178 self.ui.warn(_("warning: can't find ancestor for '%s' "
1179 1179 "copied from '%s'!\n") % (fname, cfname))
1180 1180
1181 1181 elif fparent1 == nullid:
1182 1182 fparent1, fparent2 = fparent2, nullid
1183 1183 elif fparent2 != nullid:
1184 1184 # is one parent an ancestor of the other?
1185 1185 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1186 1186 if fparent1 in fparentancestors:
1187 1187 fparent1, fparent2 = fparent2, nullid
1188 1188 elif fparent2 in fparentancestors:
1189 1189 fparent2 = nullid
1190 1190
1191 1191 # is the file changed?
1192 1192 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1193 1193 changelist.append(fname)
1194 1194 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1195 1195
1196 1196 # are just the flags changed during merge?
1197 1197 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1198 1198 changelist.append(fname)
1199 1199
1200 1200 return fparent1
1201 1201
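The copy branch above ends up as filelog metadata: for a recorded rename, fparent1 is set to nullid ("look up the copy data") and the source travels in meta. A sketch of what meta holds for a hypothetical rename of foo to bar:

    # sketch: filelog metadata recorded when a copy/rename is detected
    meta = {
        'copy': 'foo',          # cfname: path the content was copied from
        'copyrev': '0' * 40,    # hex(crev): source filenode (placeholder hex)
    }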
1202 1202 @unfilteredmethod
1203 1203 def commit(self, text="", user=None, date=None, match=None, force=False,
1204 1204 editor=False, extra={}):
1205 1205 """Add a new revision to current repository.
1206 1206
1207 1207 Revision information is gathered from the working directory,
1208 1208 match can be used to filter the committed files. If editor is
1209 1209 supplied, it is called to get a commit message.
1210 1210 """
1211 1211
1212 1212 def fail(f, msg):
1213 1213 raise util.Abort('%s: %s' % (f, msg))
1214 1214
1215 1215 if not match:
1216 1216 match = matchmod.always(self.root, '')
1217 1217
1218 1218 if not force:
1219 1219 vdirs = []
1220 1220 match.explicitdir = vdirs.append
1221 1221 match.bad = fail
1222 1222
1223 1223 wlock = self.wlock()
1224 1224 try:
1225 1225 wctx = self[None]
1226 1226 merge = len(wctx.parents()) > 1
1227 1227
1228 1228 if (not force and merge and match and
1229 1229 (match.files() or match.anypats())):
1230 1230 raise util.Abort(_('cannot partially commit a merge '
1231 1231 '(do not specify files or patterns)'))
1232 1232
1233 1233 changes = self.status(match=match, clean=force)
1234 1234 if force:
1235 1235 changes[0].extend(changes[6]) # mq may commit unchanged files
1236 1236
1237 1237 # check subrepos
1238 1238 subs = []
1239 1239 commitsubs = set()
1240 1240 newstate = wctx.substate.copy()
1241 1241 # only manage subrepos and .hgsubstate if .hgsub is present
1242 1242 if '.hgsub' in wctx:
1243 1243 # we'll decide whether to track this ourselves, thanks
1244 1244 for c in changes[:3]:
1245 1245 if '.hgsubstate' in c:
1246 1246 c.remove('.hgsubstate')
1247 1247
1248 1248 # compare current state to last committed state
1249 1249 # build new substate based on last committed state
1250 1250 oldstate = wctx.p1().substate
1251 1251 for s in sorted(newstate.keys()):
1252 1252 if not match(s):
1253 1253 # ignore working copy, use old state if present
1254 1254 if s in oldstate:
1255 1255 newstate[s] = oldstate[s]
1256 1256 continue
1257 1257 if not force:
1258 1258 raise util.Abort(
1259 1259 _("commit with new subrepo %s excluded") % s)
1260 1260 if wctx.sub(s).dirty(True):
1261 1261 if not self.ui.configbool('ui', 'commitsubrepos'):
1262 1262 raise util.Abort(
1263 1263 _("uncommitted changes in subrepo %s") % s,
1264 1264 hint=_("use --subrepos for recursive commit"))
1265 1265 subs.append(s)
1266 1266 commitsubs.add(s)
1267 1267 else:
1268 1268 bs = wctx.sub(s).basestate()
1269 1269 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1270 1270 if oldstate.get(s, (None, None, None))[1] != bs:
1271 1271 subs.append(s)
1272 1272
1273 1273 # check for removed subrepos
1274 1274 for p in wctx.parents():
1275 1275 r = [s for s in p.substate if s not in newstate]
1276 1276 subs += [s for s in r if match(s)]
1277 1277 if subs:
1278 1278 if (not match('.hgsub') and
1279 1279 '.hgsub' in (wctx.modified() + wctx.added())):
1280 1280 raise util.Abort(
1281 1281 _("can't commit subrepos without .hgsub"))
1282 1282 changes[0].insert(0, '.hgsubstate')
1283 1283
1284 1284 elif '.hgsub' in changes[2]:
1285 1285 # clean up .hgsubstate when .hgsub is removed
1286 1286 if ('.hgsubstate' in wctx and
1287 1287 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1288 1288 changes[2].insert(0, '.hgsubstate')
1289 1289
1290 1290 # make sure all explicit patterns are matched
1291 1291 if not force and match.files():
1292 1292 matched = set(changes[0] + changes[1] + changes[2])
1293 1293
1294 1294 for f in match.files():
1295 1295 f = self.dirstate.normalize(f)
1296 1296 if f == '.' or f in matched or f in wctx.substate:
1297 1297 continue
1298 1298 if f in changes[3]: # missing
1299 1299 fail(f, _('file not found!'))
1300 1300 if f in vdirs: # visited directory
1301 1301 d = f + '/'
1302 1302 for mf in matched:
1303 1303 if mf.startswith(d):
1304 1304 break
1305 1305 else:
1306 1306 fail(f, _("no match under directory!"))
1307 1307 elif f not in self.dirstate:
1308 1308 fail(f, _("file not tracked!"))
1309 1309
1310 1310 cctx = context.workingctx(self, text, user, date, extra, changes)
1311 1311
1312 1312 if (not force and not extra.get("close") and not merge
1313 1313 and not cctx.files()
1314 1314 and wctx.branch() == wctx.p1().branch()):
1315 1315 return None
1316 1316
1317 1317 if merge and cctx.deleted():
1318 1318 raise util.Abort(_("cannot commit merge with missing files"))
1319 1319
1320 1320 ms = mergemod.mergestate(self)
1321 1321 for f in changes[0]:
1322 1322 if f in ms and ms[f] == 'u':
1323 1323 raise util.Abort(_("unresolved merge conflicts "
1324 1324 "(see hg help resolve)"))
1325 1325
1326 1326 if editor:
1327 1327 cctx._text = editor(self, cctx, subs)
1328 1328 edited = (text != cctx._text)
1329 1329
1330 1330 # Save commit message in case this transaction gets rolled back
1331 1331 # (e.g. by a pretxncommit hook). Leave the content alone on
1332 1332 # the assumption that the user will use the same editor again.
1333 1333 msgfn = self.savecommitmessage(cctx._text)
1334 1334
1335 1335 # commit subs and write new state
1336 1336 if subs:
1337 1337 for s in sorted(commitsubs):
1338 1338 sub = wctx.sub(s)
1339 1339 self.ui.status(_('committing subrepository %s\n') %
1340 1340 subrepo.subrelpath(sub))
1341 1341 sr = sub.commit(cctx._text, user, date)
1342 1342 newstate[s] = (newstate[s][0], sr)
1343 1343 subrepo.writestate(self, newstate)
1344 1344
1345 1345 p1, p2 = self.dirstate.parents()
1346 1346 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1347 1347 try:
1348 1348 self.hook("precommit", throw=True, parent1=hookp1,
1349 1349 parent2=hookp2)
1350 1350 ret = self.commitctx(cctx, True)
1351 1351 except: # re-raises
1352 1352 if edited:
1353 1353 self.ui.write(
1354 1354 _('note: commit message saved in %s\n') % msgfn)
1355 1355 raise
1356 1356
1357 1357 # update bookmarks, dirstate and mergestate
1358 1358 bookmarks.update(self, [p1, p2], ret)
1359 1359 cctx.markcommitted(ret)
1360 1360 ms.reset()
1361 1361 finally:
1362 1362 wlock.release()
1363 1363
1364 1364 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1365 1365 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1366 1366 self._afterlock(commithook)
1367 1367 return ret
1368 1368
1369 1369 @unfilteredmethod
1370 1370 def commitctx(self, ctx, error=False):
1371 1371 """Add a new revision to current repository.
1372 1372 Revision information is passed via the context argument.
1373 1373 """
1374 1374
1375 1375 tr = lock = None
1376 1376 removed = list(ctx.removed())
1377 1377 p1, p2 = ctx.p1(), ctx.p2()
1378 1378 user = ctx.user()
1379 1379
1380 1380 lock = self.lock()
1381 1381 try:
1382 1382 tr = self.transaction("commit")
1383 1383 trp = weakref.proxy(tr)
1384 1384
1385 1385 if ctx.files():
1386 1386 m1 = p1.manifest().copy()
1387 1387 m2 = p2.manifest()
1388 1388
1389 1389 # check in files
1390 1390 new = {}
1391 1391 changed = []
1392 1392 linkrev = len(self)
1393 1393 for f in sorted(ctx.modified() + ctx.added()):
1394 1394 self.ui.note(f + "\n")
1395 1395 try:
1396 1396 fctx = ctx[f]
1397 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1398 changed)
1399 m1.set(f, fctx.flags())
1397 if fctx is None:
1398 removed.append(f)
1399 else:
1400 new[f] = self._filecommit(fctx, m1, m2, linkrev,
1401 trp, changed)
1402 m1.set(f, fctx.flags())
1400 1403 except OSError, inst:
1401 1404 self.ui.warn(_("trouble committing %s!\n") % f)
1402 1405 raise
1403 1406 except IOError, inst:
1404 1407 errcode = getattr(inst, 'errno', errno.ENOENT)
1405 1408 if error or errcode and errcode != errno.ENOENT:
1406 1409 self.ui.warn(_("trouble committing %s!\n") % f)
1407 raise
1408 else:
1409 removed.append(f)
1410 raise
1410 1411
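This hunk is the substance of the changeset named in the commit message: a context can now make ctx[f] return None to say the file is missing, so commitctx records a removal explicitly, and IOError goes back to meaning a real read failure that always propagates. A hedged sketch of a source-side callback written against the new convention; the memctx-style (repo, memctx, path) callback shape is assumed, and deleted_files/make_filectx are hypothetical:

    # sketch: filectxfn following the new None-for-missing convention
    def getfilectx(repo, memctx, path):
        if path in deleted_files:     # hypothetical set of removed paths
            return None               # None = record the file as removed
        return make_filectx(path)     # hypothetical filectx constructor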
1411 1412 # update manifest
1412 1413 m1.update(new)
1413 1414 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1414 1415 drop = [f for f in removed if f in m1]
1415 1416 for f in drop:
1416 1417 del m1[f]
1417 1418 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1418 1419 p2.manifestnode(), (new, drop))
1419 1420 files = changed + removed
1420 1421 else:
1421 1422 mn = p1.manifestnode()
1422 1423 files = []
1423 1424
1424 1425 # update changelog
1425 1426 self.changelog.delayupdate()
1426 1427 n = self.changelog.add(mn, files, ctx.description(),
1427 1428 trp, p1.node(), p2.node(),
1428 1429 user, ctx.date(), ctx.extra().copy())
1429 1430 p = lambda: self.changelog.writepending() and self.root or ""
1430 1431 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1431 1432 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1432 1433 parent2=xp2, pending=p)
1433 1434 self.changelog.finalize(trp)
1434 1435 # set the new commit in its proper phase
1435 1436 targetphase = subrepo.newcommitphase(self.ui, ctx)
1436 1437 if targetphase:
1437 1438 # retracting the boundary does not alter the parent changeset.
1438 1439 # if a parent has a higher phase, the resulting phase will
1439 1440 # be compliant anyway
1440 1441 #
1441 1442 # if minimal phase was 0 we don't need to retract anything
1442 1443 phases.retractboundary(self, tr, targetphase, [n])
1443 1444 tr.close()
1444 1445 branchmap.updatecache(self.filtered('served'))
1445 1446 return n
1446 1447 finally:
1447 1448 if tr:
1448 1449 tr.release()
1449 1450 lock.release()
1450 1451
1451 1452 @unfilteredmethod
1452 1453 def destroying(self):
1453 1454 '''Inform the repository that nodes are about to be destroyed.
1454 1455 Intended for use by strip and rollback, so there's a common
1455 1456 place for anything that has to be done before destroying history.
1456 1457
1457 1458 This is mostly useful for saving state that is in memory and waiting
1458 1459 to be flushed when the current lock is released. Because a call to
1459 1460 destroyed is imminent, the repo will be invalidated causing those
1460 1461 changes to stay in memory (waiting for the next unlock), or vanish
1461 1462 completely.
1462 1463 '''
1463 1464 # When using the same lock to commit and strip, the phasecache is left
1464 1465 # dirty after committing. Then when we strip, the repo is invalidated,
1465 1466 # causing those changes to disappear.
1466 1467 if '_phasecache' in vars(self):
1467 1468 self._phasecache.write()
1468 1469
1469 1470 @unfilteredmethod
1470 1471 def destroyed(self):
1471 1472 '''Inform the repository that nodes have been destroyed.
1472 1473 Intended for use by strip and rollback, so there's a common
1473 1474 place for anything that has to be done after destroying history.
1474 1475 '''
1475 1476 # When one tries to:
1476 1477 # 1) destroy nodes thus calling this method (e.g. strip)
1477 1478 # 2) use phasecache somewhere (e.g. commit)
1478 1479 #
1479 1480 # then 2) will fail because the phasecache contains nodes that were
1480 1481 # removed. We can either remove phasecache from the filecache,
1481 1482 # causing it to reload next time it is accessed, or simply filter
1482 1483 # the removed nodes now and write the updated cache.
1483 1484 self._phasecache.filterunknown(self)
1484 1485 self._phasecache.write()
1485 1486
1486 1487 # update the 'served' branch cache to help read only server process
1487 1488 # Thanks to branchcache collaboration this is done from the nearest
1488 1489 # filtered subset and it is expected to be fast.
1489 1490 branchmap.updatecache(self.filtered('served'))
1490 1491
1491 1492 # Ensure the persistent tag cache is updated. Doing it now
1492 1493 # means that the tag cache only has to worry about destroyed
1493 1494 # heads immediately after a strip/rollback. That in turn
1494 1495 # guarantees that "cachetip == currenttip" (comparing both rev
1495 1496 # and node) always means no nodes have been added or destroyed.
1496 1497
1497 1498 # XXX this is suboptimal when qrefresh'ing: we strip the current
1498 1499 # head, refresh the tag cache, then immediately add a new head.
1499 1500 # But I think doing it this way is necessary for the "instant
1500 1501 # tag cache retrieval" case to work.
1501 1502 self.invalidate()
1502 1503
1503 1504 def walk(self, match, node=None):
1504 1505 '''
1505 1506 walk recursively through the directory tree or a given
1506 1507 changeset, finding all files matched by the match
1507 1508 function
1508 1509 '''
1509 1510 return self[node].walk(match)
1510 1511
1511 1512 def status(self, node1='.', node2=None, match=None,
1512 1513 ignored=False, clean=False, unknown=False,
1513 1514 listsubrepos=False):
1514 1515 '''a convenience method that calls node1.status(node2)'''
1515 1516 return self[node1].status(node2, match, ignored, clean, unknown,
1516 1517 listsubrepos)
1517 1518
1518 1519 def heads(self, start=None):
1519 1520 heads = self.changelog.heads(start)
1520 1521 # sort the output in rev descending order
1521 1522 return sorted(heads, key=self.changelog.rev, reverse=True)
1522 1523
1523 1524 def branchheads(self, branch=None, start=None, closed=False):
1524 1525 '''return a (possibly filtered) list of heads for the given branch
1525 1526
1526 1527 Heads are returned in topological order, from newest to oldest.
1527 1528 If branch is None, use the dirstate branch.
1528 1529 If start is not None, return only heads reachable from start.
1529 1530 If closed is True, return heads that are marked as closed as well.
1530 1531 '''
1531 1532 if branch is None:
1532 1533 branch = self[None].branch()
1533 1534 branches = self.branchmap()
1534 1535 if branch not in branches:
1535 1536 return []
1536 1537 # the cache returns heads ordered lowest to highest
1537 1538 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1538 1539 if start is not None:
1539 1540 # filter out the heads that cannot be reached from startrev
1540 1541 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1541 1542 bheads = [h for h in bheads if h in fbheads]
1542 1543 return bheads
1543 1544
1544 1545 def branches(self, nodes):
1545 1546 if not nodes:
1546 1547 nodes = [self.changelog.tip()]
1547 1548 b = []
1548 1549 for n in nodes:
1549 1550 t = n
1550 1551 while True:
1551 1552 p = self.changelog.parents(n)
1552 1553 if p[1] != nullid or p[0] == nullid:
1553 1554 b.append((t, n, p[0], p[1]))
1554 1555 break
1555 1556 n = p[0]
1556 1557 return b
1557 1558
1558 1559 def between(self, pairs):
1559 1560 r = []
1560 1561
1561 1562 for top, bottom in pairs:
1562 1563 n, l, i = top, [], 0
1563 1564 f = 1
1564 1565
1565 1566 while n != bottom and n != nullid:
1566 1567 p = self.changelog.parents(n)[0]
1567 1568 if i == f:
1568 1569 l.append(n)
1569 1570 f = f * 2
1570 1571 n = p
1571 1572 i += 1
1572 1573
1573 1574 r.append(l)
1574 1575
1575 1576 return r
1576 1577
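between() walks first parents from each top toward its bottom and records nodes at exponentially growing distances (1, 2, 4, 8, ...), so the reply stays small even on long chains. The same skeleton sketched over plain integers standing in for nodes:

    # sketch: between()'s power-of-two sampling on a linear chain
    def sample(top, bottom):
        n, l, i, f = top, [], 0, 1
        while n != bottom:
            if i == f:        # record at distances 1, 2, 4, 8, ...
                l.append(n)
                f *= 2
            n -= 1            # "first parent" on an integer chain
            i += 1
        return l

    sample(10, 0)             # -> [9, 8, 6, 2]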
1577 1578 def pull(self, remote, heads=None, force=False):
1578 1579 return exchange.pull(self, remote, heads, force)
1579 1580
1580 1581 def checkpush(self, pushop):
1581 1582 """Extensions can override this function if additional checks have
1582 1583 to be performed before pushing, or call it if they override push
1583 1584 command.
1584 1585 """
1585 1586 pass
1586 1587
1587 1588 @unfilteredpropertycache
1588 1589 def prepushoutgoinghooks(self):
1589 1590 """Return util.hooks consists of "(repo, remote, outgoing)"
1590 1591 functions, which are called before pushing changesets.
1591 1592 """
1592 1593 return util.hooks()
1593 1594
1594 1595 def push(self, remote, force=False, revs=None, newbranch=False):
1595 1596 return exchange.push(self, remote, force, revs, newbranch)
1596 1597
1597 1598 def stream_in(self, remote, requirements):
1598 1599 lock = self.lock()
1599 1600 try:
1600 1601 # Save remote branchmap. We will use it later
1601 1602 # to speed up branchcache creation
1602 1603 rbranchmap = None
1603 1604 if remote.capable("branchmap"):
1604 1605 rbranchmap = remote.branchmap()
1605 1606
1606 1607 fp = remote.stream_out()
1607 1608 l = fp.readline()
1608 1609 try:
1609 1610 resp = int(l)
1610 1611 except ValueError:
1611 1612 raise error.ResponseError(
1612 1613 _('unexpected response from remote server:'), l)
1613 1614 if resp == 1:
1614 1615 raise util.Abort(_('operation forbidden by server'))
1615 1616 elif resp == 2:
1616 1617 raise util.Abort(_('locking the remote repository failed'))
1617 1618 elif resp != 0:
1618 1619 raise util.Abort(_('the server sent an unknown error code'))
1619 1620 self.ui.status(_('streaming all changes\n'))
1620 1621 l = fp.readline()
1621 1622 try:
1622 1623 total_files, total_bytes = map(int, l.split(' ', 1))
1623 1624 except (ValueError, TypeError):
1624 1625 raise error.ResponseError(
1625 1626 _('unexpected response from remote server:'), l)
1626 1627 self.ui.status(_('%d files to transfer, %s of data\n') %
1627 1628 (total_files, util.bytecount(total_bytes)))
1628 1629 handled_bytes = 0
1629 1630 self.ui.progress(_('clone'), 0, total=total_bytes)
1630 1631 start = time.time()
1631 1632
1632 1633 tr = self.transaction(_('clone'))
1633 1634 try:
1634 1635 for i in xrange(total_files):
1635 1636 # XXX doesn't support '\n' or '\r' in filenames
1636 1637 l = fp.readline()
1637 1638 try:
1638 1639 name, size = l.split('\0', 1)
1639 1640 size = int(size)
1640 1641 except (ValueError, TypeError):
1641 1642 raise error.ResponseError(
1642 1643 _('unexpected response from remote server:'), l)
1643 1644 if self.ui.debugflag:
1644 1645 self.ui.debug('adding %s (%s)\n' %
1645 1646 (name, util.bytecount(size)))
1646 1647 # for backwards compat, name was partially encoded
1647 1648 ofp = self.sopener(store.decodedir(name), 'w')
1648 1649 for chunk in util.filechunkiter(fp, limit=size):
1649 1650 handled_bytes += len(chunk)
1650 1651 self.ui.progress(_('clone'), handled_bytes,
1651 1652 total=total_bytes)
1652 1653 ofp.write(chunk)
1653 1654 ofp.close()
1654 1655 tr.close()
1655 1656 finally:
1656 1657 tr.release()
1657 1658
1658 1659 # Writing straight to files circumvented the in-memory caches
1659 1660 self.invalidate()
1660 1661
1661 1662 elapsed = time.time() - start
1662 1663 if elapsed <= 0:
1663 1664 elapsed = 0.001
1664 1665 self.ui.progress(_('clone'), None)
1665 1666 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1666 1667 (util.bytecount(total_bytes), elapsed,
1667 1668 util.bytecount(total_bytes / elapsed)))
1668 1669
1669 1670 # new requirements = old non-format requirements +
1670 1671 # new format-related
1671 1672 # requirements from the streamed-in repository
1672 1673 requirements.update(set(self.requirements) - self.supportedformats)
1673 1674 self._applyrequirements(requirements)
1674 1675 self._writerequirements()
1675 1676
1676 1677 if rbranchmap:
1677 1678 rbheads = []
1678 1679 for bheads in rbranchmap.itervalues():
1679 1680 rbheads.extend(bheads)
1680 1681
1681 1682 if rbheads:
1682 1683 rtiprev = max((int(self.changelog.rev(node))
1683 1684 for node in rbheads))
1684 1685 cache = branchmap.branchcache(rbranchmap,
1685 1686 self[rtiprev].node(),
1686 1687 rtiprev)
1687 1688 # Try to stick it as low as possible
1688 1689 # filters above served are unlikely to be fetched from a clone
1689 1690 for candidate in ('base', 'immutable', 'served'):
1690 1691 rview = self.filtered(candidate)
1691 1692 if cache.validfor(rview):
1692 1693 self._branchcaches[candidate] = cache
1693 1694 cache.write(rview)
1694 1695 break
1695 1696 self.invalidate()
1696 1697 return len(self.heads()) + 1
1697 1698 finally:
1698 1699 lock.release()
1699 1700
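stream_in consumes a simple line-oriented wire format: a one-line status code, a 'total_files total_bytes' summary line, then per file a 'name\0size' header followed by exactly size raw bytes. A standalone sketch of that framing (it reads each file whole instead of chunking through util.filechunkiter):

    # sketch: the streaming-clone framing parsed by stream_in
    def parse_stream(fp):
        resp = int(fp.readline())                 # 0 ok, 1/2 server errors
        if resp != 0:
            raise ValueError('server refused stream: %d' % resp)
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        for _ in range(total_files):
            name, size = fp.readline().split('\0', 1)
            yield name, fp.read(int(size))        # exactly `size` bytes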
1700 1701 def clone(self, remote, heads=[], stream=False):
1701 1702 '''clone remote repository.
1702 1703
1703 1704 keyword arguments:
1704 1705 heads: list of revs to clone (forces use of pull)
1705 1706 stream: use streaming clone if possible'''
1706 1707
1707 1708 # now, all clients that can request uncompressed clones can
1708 1709 # read repo formats supported by all servers that can serve
1709 1710 # them.
1710 1711
1711 1712 # if revlog format changes, client will have to check version
1712 1713 # and format flags on "stream" capability, and use
1713 1714 # uncompressed only if compatible.
1714 1715
1715 1716 if not stream:
1716 1717 # if the server explicitly prefers to stream (for fast LANs)
1717 1718 stream = remote.capable('stream-preferred')
1718 1719
1719 1720 if stream and not heads:
1720 1721 # 'stream' means remote revlog format is revlogv1 only
1721 1722 if remote.capable('stream'):
1722 1723 return self.stream_in(remote, set(('revlogv1',)))
1723 1724 # otherwise, 'streamreqs' contains the remote revlog format
1724 1725 streamreqs = remote.capable('streamreqs')
1725 1726 if streamreqs:
1726 1727 streamreqs = set(streamreqs.split(','))
1727 1728 # if we support it, stream in and adjust our requirements
1728 1729 if not streamreqs - self.supportedformats:
1729 1730 return self.stream_in(remote, streamreqs)
1730 1731 return self.pull(remote, heads)
1731 1732
1732 1733 def pushkey(self, namespace, key, old, new):
1733 1734 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1734 1735 old=old, new=new)
1735 1736 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1736 1737 ret = pushkey.push(self, namespace, key, old, new)
1737 1738 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1738 1739 ret=ret)
1739 1740 return ret
1740 1741
1741 1742 def listkeys(self, namespace):
1742 1743 self.hook('prelistkeys', throw=True, namespace=namespace)
1743 1744 self.ui.debug('listing keys for "%s"\n' % namespace)
1744 1745 values = pushkey.list(self, namespace)
1745 1746 self.hook('listkeys', namespace=namespace, values=values)
1746 1747 return values
1747 1748
1748 1749 def debugwireargs(self, one, two, three=None, four=None, five=None):
1749 1750 '''used to test argument passing over the wire'''
1750 1751 return "%s %s %s %s %s" % (one, two, three, four, five)
1751 1752
1752 1753 def savecommitmessage(self, text):
1753 1754 fp = self.opener('last-message.txt', 'wb')
1754 1755 try:
1755 1756 fp.write(text)
1756 1757 finally:
1757 1758 fp.close()
1758 1759 return self.pathto(fp.name[len(self.root) + 1:])
1759 1760
1760 1761 # used to avoid circular references so destructors work
1761 1762 def aftertrans(files):
1762 1763 renamefiles = [tuple(t) for t in files]
1763 1764 def a():
1764 1765 for vfs, src, dest in renamefiles:
1765 1766 try:
1766 1767 vfs.rename(src, dest)
1767 1768 except OSError: # journal file does not yet exist
1768 1769 pass
1769 1770 return a
1770 1771
1771 1772 def undoname(fn):
1772 1773 base, name = os.path.split(fn)
1773 1774 assert name.startswith('journal')
1774 1775 return os.path.join(base, name.replace('journal', 'undo', 1))
1775 1776
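undoname is the naming half of the journal-to-undo handoff that aftertrans performs on a successful close; for example:

    # sketch: journal files map to their undo counterparts
    undoname('journal.dirstate')          # -> 'undo.dirstate'
    undoname('journal.branch')            # -> 'undo.branch'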
1776 1777 def instance(ui, path, create):
1777 1778 return localrepository(ui, util.urllocalpath(path), create)
1778 1779
1779 1780 def islocal(path):
1780 1781 return True