util: use built-in set instead of util.unique
Martin Geisler
r8151:12728188 default
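The change itself is mechanical: each call to Mercurial's old util.unique() helper is replaced with the built-in set type, deduplicating at the call site. A minimal before/after sketch, assuming util.unique was the dict-based helper of that era (reconstructed here, not quoted from the tree):

    def unique(g):
        # Assumed shape of the removed helper: distinct elements,
        # order not guaranteed.
        return dict.fromkeys(g).keys()

    changes = ['b', 'a', 'b', 'c', 'a']

    # Before: dedup via the helper, then sort the resulting list.
    old = sorted(unique(changes))

    # After: the built-in set dedups; sorted() accepts any iterable
    # and returns a list, so call sites that sort afterwards are safe.
    new = sorted(set(changes))

    assert old == new == ['a', 'b', 'c']

Both call sites touched below sort (or listify) the result immediately, so the unordered nature of set does not change behaviour.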
hgext/convert/gnuarch.py
@@ -1,335 +1,335 @@
1 1 # GNU Arch support for the convert extension
2 2
3 3 from common import NoRepo, commandline, commit, converter_source
4 4 from mercurial.i18n import _
5 5 from mercurial import util
6 6 import os, shutil, tempfile, stat, locale
7 7 from email.Parser import Parser
8 8
9 9 class gnuarch_source(converter_source, commandline):
10 10
11 11 class gnuarch_rev:
12 12 def __init__(self, rev):
13 13 self.rev = rev
14 14 self.summary = ''
15 15 self.date = None
16 16 self.author = ''
17 17 self.continuationof = None
18 18 self.add_files = []
19 19 self.mod_files = []
20 20 self.del_files = []
21 21 self.ren_files = {}
22 22 self.ren_dirs = {}
23 23
24 24 def __init__(self, ui, path, rev=None):
25 25 super(gnuarch_source, self).__init__(ui, path, rev=rev)
26 26
27 27 if not os.path.exists(os.path.join(path, '{arch}')):
28 28 raise NoRepo(_("%s does not look like a GNU Arch repo") % path)
29 29
30 30 # Could use checktool, but we want to check for baz or tla.
31 31 self.execmd = None
32 32 if util.find_exe('baz'):
33 33 self.execmd = 'baz'
34 34 else:
35 35 if util.find_exe('tla'):
36 36 self.execmd = 'tla'
37 37 else:
38 38 raise util.Abort(_('cannot find a GNU Arch tool'))
39 39
40 40 commandline.__init__(self, ui, self.execmd)
41 41
42 42 self.path = os.path.realpath(path)
43 43 self.tmppath = None
44 44
45 45 self.treeversion = None
46 46 self.lastrev = None
47 47 self.changes = {}
48 48 self.parents = {}
49 49 self.tags = {}
50 50 self.modecache = {}
51 51 self.catlogparser = Parser()
52 52 self.locale = locale.getpreferredencoding()
53 53 self.archives = []
54 54
55 55 def before(self):
56 56 # Get registered archives
57 57 self.archives = [i.rstrip('\n')
58 58 for i in self.runlines0('archives', '-n')]
59 59
60 60 if self.execmd == 'tla':
61 61 output = self.run0('tree-version', self.path)
62 62 else:
63 63 output = self.run0('tree-version', '-d', self.path)
64 64 self.treeversion = output.strip()
65 65
66 66 # Get name of temporary directory
67 67 version = self.treeversion.split('/')
68 68 self.tmppath = os.path.join(tempfile.gettempdir(),
69 69 'hg-%s' % version[1])
70 70
71 71 # Generate parents dictionary
72 72 self.parents[None] = []
73 73 treeversion = self.treeversion
74 74 child = None
75 75 while treeversion:
76 76 self.ui.status(_('analyzing tree version %s...\n') % treeversion)
77 77
78 78 archive = treeversion.split('/')[0]
79 79 if archive not in self.archives:
80 80 self.ui.status(_('tree analysis stopped because it points to an unregistered archive %s...\n') % archive)
81 81 break
82 82
83 83 # Get the complete list of revisions for that tree version
84 84 output, status = self.runlines('revisions', '-r', '-f', treeversion)
85 85 self.checkexit(status, 'failed retrieving revisions for %s' % treeversion)
86 86
87 87 # No new iteration unless a revision has a continuation-of header
88 88 treeversion = None
89 89
90 90 for l in output:
91 91 rev = l.strip()
92 92 self.changes[rev] = self.gnuarch_rev(rev)
93 93 self.parents[rev] = []
94 94
95 95 # Read author, date and summary
96 96 catlog, status = self.run('cat-log', '-d', self.path, rev)
97 97 if status:
98 98 catlog = self.run0('cat-archive-log', rev)
99 99 self._parsecatlog(catlog, rev)
100 100
101 101 # Populate the parents map
102 102 self.parents[child].append(rev)
103 103
104 104 # Keep track of the current revision as the child of the next
105 105 # revision scanned
106 106 child = rev
107 107
108 108 # Check if we have to follow the usual incremental history
109 109 # or if we have to 'jump' to a different treeversion given
110 110 # by the continuation-of header.
111 111 if self.changes[rev].continuationof:
112 112 treeversion = '--'.join(self.changes[rev].continuationof.split('--')[:-1])
113 113 break
114 114
115 115 # If we reached a base-0 revision w/o any continuation-of
116 116 # header, it means the tree history ends here.
117 117 if rev[-6:] == 'base-0':
118 118 break
119 119
120 120 def after(self):
121 121 self.ui.debug(_('cleaning up %s\n') % self.tmppath)
122 122 shutil.rmtree(self.tmppath, ignore_errors=True)
123 123
124 124 def getheads(self):
125 125 return self.parents[None]
126 126
127 127 def getfile(self, name, rev):
128 128 if rev != self.lastrev:
129 129 raise util.Abort(_('internal calling inconsistency'))
130 130
131 131 # Raise IOError if necessary (i.e. deleted files).
132 132 if not os.path.exists(os.path.join(self.tmppath, name)):
133 133 raise IOError
134 134
135 135 data, mode = self._getfile(name, rev)
136 136 self.modecache[(name, rev)] = mode
137 137
138 138 return data
139 139
140 140 def getmode(self, name, rev):
141 141 return self.modecache[(name, rev)]
142 142
143 143 def getchanges(self, rev):
144 144 self.modecache = {}
145 145 self._update(rev)
146 146 changes = []
147 147 copies = {}
148 148
149 149 for f in self.changes[rev].add_files:
150 150 changes.append((f, rev))
151 151
152 152 for f in self.changes[rev].mod_files:
153 153 changes.append((f, rev))
154 154
155 155 for f in self.changes[rev].del_files:
156 156 changes.append((f, rev))
157 157
158 158 for src in self.changes[rev].ren_files:
159 159 to = self.changes[rev].ren_files[src]
160 160 changes.append((src, rev))
161 161 changes.append((to, rev))
162 162 copies[to] = src
163 163
164 164 for src in self.changes[rev].ren_dirs:
165 165 to = self.changes[rev].ren_dirs[src]
166 166 chgs, cps = self._rendirchanges(src, to)
167 167 changes += [(f, rev) for f in chgs]
168 168 copies.update(cps)
169 169
170 170 self.lastrev = rev
171 return util.sort(util.unique(changes)), copies
171 return util.sort(set(changes)), copies
172 172
173 173 def getcommit(self, rev):
174 174 changes = self.changes[rev]
175 175 return commit(author = changes.author, date = changes.date,
176 176 desc = changes.summary, parents = self.parents[rev], rev=rev)
177 177
178 178 def gettags(self):
179 179 return self.tags
180 180
181 181 def _execute(self, cmd, *args, **kwargs):
182 182 cmdline = [self.execmd, cmd]
183 183 cmdline += args
184 184 cmdline = [util.shellquote(arg) for arg in cmdline]
185 185 cmdline += ['>', util.nulldev, '2>', util.nulldev]
186 186 cmdline = util.quotecommand(' '.join(cmdline))
187 187 self.ui.debug(cmdline, '\n')
188 188 return os.system(cmdline)
189 189
190 190 def _update(self, rev):
191 191 self.ui.debug(_('applying revision %s...\n') % rev)
192 192 changeset, status = self.runlines('replay', '-d', self.tmppath,
193 193 rev)
194 194 if status:
195 195 # Something went wrong while merging (baz or tla
196 196 # issue?), get latest revision and try from there
197 197 shutil.rmtree(self.tmppath, ignore_errors=True)
198 198 self._obtainrevision(rev)
199 199 else:
200 200 old_rev = self.parents[rev][0]
201 201 self.ui.debug(_('computing changeset between %s and %s...\n')
202 202 % (old_rev, rev))
203 203 self._parsechangeset(changeset, rev)
204 204
205 205 def _getfile(self, name, rev):
206 206 mode = os.lstat(os.path.join(self.tmppath, name)).st_mode
207 207 if stat.S_ISLNK(mode):
208 208 data = os.readlink(os.path.join(self.tmppath, name))
209 209 mode = mode and 'l' or ''
210 210 else:
211 211 data = open(os.path.join(self.tmppath, name), 'rb').read()
212 212 mode = (mode & 0111) and 'x' or ''
213 213 return data, mode
214 214
215 215 def _exclude(self, name):
216 216 exclude = [ '{arch}', '.arch-ids', '.arch-inventory' ]
217 217 for exc in exclude:
218 218 if exc in name:
219 219 return True
220 220 return False
221 221
222 222 def _readcontents(self, path):
223 223 files = []
224 224 contents = os.listdir(path)
225 225 while len(contents) > 0:
226 226 c = contents.pop()
227 227 p = os.path.join(path, c)
228 228 # os.walk could be used, but here we avoid internal GNU
229 229 # Arch files and directories, thus saving a lot of time.
230 230 if not self._exclude(p):
231 231 if os.path.isdir(p):
232 232 contents += [os.path.join(c, f) for f in os.listdir(p)]
233 233 else:
234 234 files.append(c)
235 235 return files
236 236
237 237 def _rendirchanges(self, src, dest):
238 238 changes = []
239 239 copies = {}
240 240 files = self._readcontents(os.path.join(self.tmppath, dest))
241 241 for f in files:
242 242 s = os.path.join(src, f)
243 243 d = os.path.join(dest, f)
244 244 changes.append(s)
245 245 changes.append(d)
246 246 copies[d] = s
247 247 return changes, copies
248 248
249 249 def _obtainrevision(self, rev):
250 250 self.ui.debug(_('obtaining revision %s...\n') % rev)
251 251 output = self._execute('get', rev, self.tmppath)
252 252 self.checkexit(output)
253 253 self.ui.debug(_('analyzing revision %s...\n') % rev)
254 254 files = self._readcontents(self.tmppath)
255 255 self.changes[rev].add_files += files
256 256
257 257 def _stripbasepath(self, path):
258 258 if path.startswith('./'):
259 259 return path[2:]
260 260 return path
261 261
262 262 def _parsecatlog(self, data, rev):
263 263 try:
264 264 catlog = self.catlogparser.parsestr(data)
265 265
266 266 # Commit date
267 267 self.changes[rev].date = util.datestr(
268 268 util.strdate(catlog['Standard-date'],
269 269 '%Y-%m-%d %H:%M:%S'))
270 270
271 271 # Commit author
272 272 self.changes[rev].author = self.recode(catlog['Creator'])
273 273
274 274 # Commit description
275 275 self.changes[rev].summary = '\n\n'.join((catlog['Summary'],
276 276 catlog.get_payload()))
277 277 self.changes[rev].summary = self.recode(self.changes[rev].summary)
278 278
279 279 # Commit revision origin when dealing with a branch or tag
280 280 if catlog.has_key('Continuation-of'):
281 281 self.changes[rev].continuationof = self.recode(catlog['Continuation-of'])
282 282 except Exception:
283 283 raise util.Abort(_('could not parse cat-log of %s') % rev)
284 284
285 285 def _parsechangeset(self, data, rev):
286 286 for l in data:
287 287 l = l.strip()
288 288 # Added file (ignore added directory)
289 289 if l.startswith('A') and not l.startswith('A/'):
290 290 file = self._stripbasepath(l[1:].strip())
291 291 if not self._exclude(file):
292 292 self.changes[rev].add_files.append(file)
293 293 # Deleted file (ignore deleted directory)
294 294 elif l.startswith('D') and not l.startswith('D/'):
295 295 file = self._stripbasepath(l[1:].strip())
296 296 if not self._exclude(file):
297 297 self.changes[rev].del_files.append(file)
298 298 # Modified binary file
299 299 elif l.startswith('Mb'):
300 300 file = self._stripbasepath(l[2:].strip())
301 301 if not self._exclude(file):
302 302 self.changes[rev].mod_files.append(file)
303 303 # Modified link
304 304 elif l.startswith('M->'):
305 305 file = self._stripbasepath(l[3:].strip())
306 306 if not self._exclude(file):
307 307 self.changes[rev].mod_files.append(file)
308 308 # Modified file
309 309 elif l.startswith('M'):
310 310 file = self._stripbasepath(l[1:].strip())
311 311 if not self._exclude(file):
312 312 self.changes[rev].mod_files.append(file)
313 313 # Renamed file (or link)
314 314 elif l.startswith('=>'):
315 315 files = l[2:].strip().split(' ')
316 316 if len(files) == 1:
317 317 files = l[2:].strip().split('\t')
318 318 src = self._stripbasepath(files[0])
319 319 dst = self._stripbasepath(files[1])
320 320 if not self._exclude(src) and not self._exclude(dst):
321 321 self.changes[rev].ren_files[src] = dst
322 322 # Conversion from file to link or from link to file (modified)
323 323 elif l.startswith('ch'):
324 324 file = self._stripbasepath(l[2:].strip())
325 325 if not self._exclude(file):
326 326 self.changes[rev].mod_files.append(file)
327 327 # Renamed directory
328 328 elif l.startswith('/>'):
329 329 dirs = l[2:].strip().split(' ')
330 330 if len(dirs) == 1:
331 331 dirs = l[2:].strip().split('\t')
332 332 src = self._stripbasepath(dirs[0])
333 333 dst = self._stripbasepath(dirs[1])
334 334 if not self._exclude(src) and not self._exclude(dst):
335 335 self.changes[rev].ren_dirs[src] = dst
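In getchanges() above, the deduplicated set is passed straight to util.sort. That only works if the helper copies its argument into a fresh list before sorting; a minimal sketch of the assumed util.sort behaviour (hypothetical reimplementation, not the real mercurial.util code):

    def sort(l):
        # Assumed: copy any iterable into a list, sort it, return it.
        l = list(l)
        l.sort()
        return l

    changes = set([('f', 'rev1'), ('g', 'rev1'), ('f', 'rev1')])
    assert sort(changes) == [('f', 'rev1'), ('g', 'rev1')]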
hgext/convert/subversion.py
@@ -1,1205 +1,1205 @@
1 1 # Subversion 1.4/1.5 Python API backend
2 2 #
3 3 # Copyright (C) 2007 Daniel Holth et al
4 4 #
5 5 # Configuration options:
6 6 #
7 7 # convert.svn.trunk
8 8 # Relative path to the trunk (default: "trunk")
9 9 # convert.svn.branches
10 10 # Relative path to tree of branches (default: "branches")
11 11 # convert.svn.tags
12 12 # Relative path to tree of tags (default: "tags")
13 13 #
14 14 # Set these in a hgrc, or on the command line as follows:
15 15 #
16 16 # hg convert --config convert.svn.trunk=wackoname [...]
17 17
18 18 import locale
19 19 import os
20 20 import re
21 21 import sys
22 22 import cPickle as pickle
23 23 import tempfile
24 24 import urllib
25 25
26 26 from mercurial import strutil, util
27 27 from mercurial.i18n import _
28 28
29 29 # Subversion stuff. Works best with very recent Python SVN bindings
30 30 # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
31 31 # these bindings.
32 32
33 33 from cStringIO import StringIO
34 34
35 35 from common import NoRepo, MissingTool, commit, encodeargs, decodeargs
36 36 from common import commandline, converter_source, converter_sink, mapfile
37 37
38 38 try:
39 39 from svn.core import SubversionException, Pool
40 40 import svn
41 41 import svn.client
42 42 import svn.core
43 43 import svn.ra
44 44 import svn.delta
45 45 import transport
46 46 except ImportError:
47 47 pass
48 48
49 49 class SvnPathNotFound(Exception):
50 50 pass
51 51
52 52 def geturl(path):
53 53 try:
54 54 return svn.client.url_from_path(svn.core.svn_path_canonicalize(path))
55 55 except SubversionException:
56 56 pass
57 57 if os.path.isdir(path):
58 58 path = os.path.normpath(os.path.abspath(path))
59 59 if os.name == 'nt':
60 60 path = '/' + util.normpath(path)
61 61 return 'file://%s' % urllib.quote(path)
62 62 return path
63 63
64 64 def optrev(number):
65 65 optrev = svn.core.svn_opt_revision_t()
66 66 optrev.kind = svn.core.svn_opt_revision_number
67 67 optrev.value.number = number
68 68 return optrev
69 69
70 70 class changedpath(object):
71 71 def __init__(self, p):
72 72 self.copyfrom_path = p.copyfrom_path
73 73 self.copyfrom_rev = p.copyfrom_rev
74 74 self.action = p.action
75 75
76 76 def get_log_child(fp, url, paths, start, end, limit=0, discover_changed_paths=True,
77 77 strict_node_history=False):
78 78 protocol = -1
79 79 def receiver(orig_paths, revnum, author, date, message, pool):
80 80 if orig_paths is not None:
81 81 for k, v in orig_paths.iteritems():
82 82 orig_paths[k] = changedpath(v)
83 83 pickle.dump((orig_paths, revnum, author, date, message),
84 84 fp, protocol)
85 85
86 86 try:
87 87 # Use an ra of our own so that our parent can consume
88 88 # our results without confusing the server.
89 89 t = transport.SvnRaTransport(url=url)
90 90 svn.ra.get_log(t.ra, paths, start, end, limit,
91 91 discover_changed_paths,
92 92 strict_node_history,
93 93 receiver)
94 94 except SubversionException, (inst, num):
95 95 pickle.dump(num, fp, protocol)
96 96 except IOError:
97 97 # Caller may interrupt the iteration
98 98 pickle.dump(None, fp, protocol)
99 99 else:
100 100 pickle.dump(None, fp, protocol)
101 101 fp.close()
102 102 # With a large history, the cleanup process goes crazy and suddenly
103 103 # consumes a *huge* amount of memory. Since the output file has been
104 104 # closed, there is no need for clean termination.
105 105 os._exit(0)
106 106
107 107 def debugsvnlog(ui, **opts):
108 108 """Fetch SVN log in a subprocess and channel them back to parent to
109 109 avoid memory collection issues.
110 110 """
111 111 util.set_binary(sys.stdin)
112 112 util.set_binary(sys.stdout)
113 113 args = decodeargs(sys.stdin.read())
114 114 get_log_child(sys.stdout, *args)
115 115
116 116 class logstream:
117 117 """Interruptible revision log iterator."""
118 118 def __init__(self, stdout):
119 119 self._stdout = stdout
120 120
121 121 def __iter__(self):
122 122 while True:
123 123 entry = pickle.load(self._stdout)
124 124 try:
125 125 orig_paths, revnum, author, date, message = entry
126 126 except:
127 127 if entry is None:
128 128 break
129 129 raise SubversionException("child raised exception", entry)
130 130 yield entry
131 131
132 132 def close(self):
133 133 if self._stdout:
134 134 self._stdout.close()
135 135 self._stdout = None
136 136
137 137
138 138 # Check to see if the given path is a local Subversion repo. Verify this by
139 139 # looking for several svn-specific files and directories in the given
140 140 # directory.
141 141 def filecheck(path, proto):
142 142 for x in ('locks', 'hooks', 'format', 'db', ):
143 143 if not os.path.exists(os.path.join(path, x)):
144 144 return False
145 145 return True
146 146
147 147 # Check to see if a given path is the root of an svn repo over http. We verify
148 148 # this by requesting a version-controlled URL we know can't exist and looking
149 149 # for the svn-specific "not found" XML.
150 150 def httpcheck(path, proto):
151 151 return ('<m:human-readable errcode="160013">' in
152 152 urllib.urlopen('%s://%s/!svn/ver/0/.svn' % (proto, path)).read())
153 153
154 154 protomap = {'http': httpcheck,
155 155 'https': httpcheck,
156 156 'file': filecheck,
157 157 }
158 158 def issvnurl(url):
159 159 if '://' not in url:
160 160 return False
161 161 proto, path = url.split('://', 1)
162 162 check = protomap.get(proto, lambda p, p2: False)
163 163 while '/' in path:
164 164 if check(path, proto):
165 165 return True
166 166 path = path.rsplit('/', 1)[0]
167 167 return False
168 168
169 169 # SVN conversion code stolen from bzr-svn and tailor
170 170 #
171 171 # Subversion looks like a versioned filesystem; branch structures
172 172 # are defined by convention and not enforced by the tool. First,
173 173 # we define the potential branches (modules) as the "trunk" and
174 174 # "branches" child directories. Revisions are then identified by
175 175 # their module and revision number (and a repository identifier).
176 176 #
177 177 # The revision graph is really a tree (or a forest). By default, a
178 178 # revision parent is the previous revision in the same module. If the
179 179 # module directory is copied/moved from another module then the
180 180 # revision is the module root and its parent the source revision in
181 181 # the parent module. A revision has at most one parent.
182 182 #
183 183 class svn_source(converter_source):
184 184 def __init__(self, ui, url, rev=None):
185 185 super(svn_source, self).__init__(ui, url, rev=rev)
186 186
187 187 if not (url.startswith('svn://') or url.startswith('svn+ssh://') or
188 188 (os.path.exists(url) and
189 189 os.path.exists(os.path.join(url, '.svn'))) or
190 190 issvnurl(url)):
191 191 raise NoRepo("%s does not look like a Subversion repo" % url)
192 192
193 193 try:
194 194 SubversionException
195 195 except NameError:
196 196 raise MissingTool(_('Subversion python bindings could not be loaded'))
197 197
198 198 try:
199 199 version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
200 200 if version < (1, 4):
201 201 raise MissingTool(_('Subversion python bindings %d.%d found, '
202 202 '1.4 or later required') % version)
203 203 except AttributeError:
204 204 raise MissingTool(_('Subversion python bindings are too old, 1.4 '
205 205 'or later required'))
206 206
207 207 self.encoding = locale.getpreferredencoding()
208 208 self.lastrevs = {}
209 209
210 210 latest = None
211 211 try:
212 212 # Support file://path@rev syntax. Useful e.g. to convert
213 213 # deleted branches.
214 214 at = url.rfind('@')
215 215 if at >= 0:
216 216 latest = int(url[at+1:])
217 217 url = url[:at]
218 218 except ValueError:
219 219 pass
220 220 self.url = geturl(url)
221 221 self.encoding = 'UTF-8' # Subversion is always nominally UTF-8
222 222 try:
223 223 self.transport = transport.SvnRaTransport(url=self.url)
224 224 self.ra = self.transport.ra
225 225 self.ctx = self.transport.client
226 226 self.baseurl = svn.ra.get_repos_root(self.ra)
227 227 # Module is either empty or a repository path starting with
228 228 # a slash and not ending with a slash.
229 229 self.module = urllib.unquote(self.url[len(self.baseurl):])
230 230 self.prevmodule = None
231 231 self.rootmodule = self.module
232 232 self.commits = {}
233 233 self.paths = {}
234 234 self.uuid = svn.ra.get_uuid(self.ra).decode(self.encoding)
235 235 except SubversionException:
236 236 ui.print_exc()
237 237 raise NoRepo("%s does not look like a Subversion repo" % self.url)
238 238
239 239 if rev:
240 240 try:
241 241 latest = int(rev)
242 242 except ValueError:
243 243 raise util.Abort(_('svn: revision %s is not an integer') % rev)
244 244
245 245 self.startrev = self.ui.config('convert', 'svn.startrev', default=0)
246 246 try:
247 247 self.startrev = int(self.startrev)
248 248 if self.startrev < 0:
249 249 self.startrev = 0
250 250 except ValueError:
251 251 raise util.Abort(_('svn: start revision %s is not an integer')
252 252 % self.startrev)
253 253
254 254 try:
255 255 self.get_blacklist()
256 256 except IOError:
257 257 pass
258 258
259 259 self.head = self.latest(self.module, latest)
260 260 if not self.head:
261 261 raise util.Abort(_('no revision found in module %s') %
262 262 self.module.encode(self.encoding))
263 263 self.last_changed = self.revnum(self.head)
264 264
265 265 self._changescache = None
266 266
267 267 if os.path.exists(os.path.join(url, '.svn/entries')):
268 268 self.wc = url
269 269 else:
270 270 self.wc = None
271 271 self.convertfp = None
272 272
273 273 def setrevmap(self, revmap):
274 274 lastrevs = {}
275 275 for revid in revmap.iterkeys():
276 276 uuid, module, revnum = self.revsplit(revid)
277 277 lastrevnum = lastrevs.setdefault(module, revnum)
278 278 if revnum > lastrevnum:
279 279 lastrevs[module] = revnum
280 280 self.lastrevs = lastrevs
281 281
282 282 def exists(self, path, optrev):
283 283 try:
284 284 svn.client.ls(self.url.rstrip('/') + '/' + urllib.quote(path),
285 285 optrev, False, self.ctx)
286 286 return True
287 287 except SubversionException:
288 288 return False
289 289
290 290 def getheads(self):
291 291
292 292 def isdir(path, revnum):
293 293 kind = self._checkpath(path, revnum)
294 294 return kind == svn.core.svn_node_dir
295 295
296 296 def getcfgpath(name, rev):
297 297 cfgpath = self.ui.config('convert', 'svn.' + name)
298 298 if cfgpath is not None and cfgpath.strip() == '':
299 299 return None
300 300 path = (cfgpath or name).strip('/')
301 301 if not self.exists(path, rev):
302 302 if cfgpath:
303 303 raise util.Abort(_('expected %s to be at %r, but not found')
304 304 % (name, path))
305 305 return None
306 306 self.ui.note(_('found %s at %r\n') % (name, path))
307 307 return path
308 308
309 309 rev = optrev(self.last_changed)
310 310 oldmodule = ''
311 311 trunk = getcfgpath('trunk', rev)
312 312 self.tags = getcfgpath('tags', rev)
313 313 branches = getcfgpath('branches', rev)
314 314
315 315 # If the project has a trunk or branches, we will extract heads
316 316 # from them. We keep the project root otherwise.
317 317 if trunk:
318 318 oldmodule = self.module or ''
319 319 self.module += '/' + trunk
320 320 self.head = self.latest(self.module, self.last_changed)
321 321 if not self.head:
322 322 raise util.Abort(_('no revision found in module %s') %
323 323 self.module.encode(self.encoding))
324 324
325 325 # First head in the list is the module's head
326 326 self.heads = [self.head]
327 327 if self.tags is not None:
328 328 self.tags = '%s/%s' % (oldmodule, (self.tags or 'tags'))
329 329
330 330 # Check if branches bring a few more heads to the list
331 331 if branches:
332 332 rpath = self.url.strip('/')
333 333 branchnames = svn.client.ls(rpath + '/' + urllib.quote(branches),
334 334 rev, False, self.ctx)
335 335 for branch in branchnames.keys():
336 336 module = '%s/%s/%s' % (oldmodule, branches, branch)
337 337 if not isdir(module, self.last_changed):
338 338 continue
339 339 brevid = self.latest(module, self.last_changed)
340 340 if not brevid:
341 341 self.ui.note(_('ignoring empty branch %s\n') %
342 342 branch.encode(self.encoding))
343 343 continue
344 344 self.ui.note(_('found branch %s at %d\n') %
345 345 (branch, self.revnum(brevid)))
346 346 self.heads.append(brevid)
347 347
348 348 if self.startrev and self.heads:
349 349 if len(self.heads) > 1:
350 350 raise util.Abort(_('svn: start revision is not supported '
351 351 'with more than one branch'))
352 352 revnum = self.revnum(self.heads[0])
353 353 if revnum < self.startrev:
354 354 raise util.Abort(_('svn: no revision found after start revision %d')
355 355 % self.startrev)
356 356
357 357 return self.heads
358 358
359 359 def getfile(self, file, rev):
360 360 data, mode = self._getfile(file, rev)
361 361 self.modecache[(file, rev)] = mode
362 362 return data
363 363
364 364 def getmode(self, file, rev):
365 365 return self.modecache[(file, rev)]
366 366
367 367 def getchanges(self, rev):
368 368 if self._changescache and self._changescache[0] == rev:
369 369 return self._changescache[1]
370 370 self._changescache = None
371 371 self.modecache = {}
372 372 (paths, parents) = self.paths[rev]
373 373 if parents:
374 374 files, copies = self.expandpaths(rev, paths, parents)
375 375 else:
376 376 # Perform a full checkout on roots
377 377 uuid, module, revnum = self.revsplit(rev)
378 378 entries = svn.client.ls(self.baseurl + urllib.quote(module),
379 379 optrev(revnum), True, self.ctx)
380 380 files = [n for n,e in entries.iteritems()
381 381 if e.kind == svn.core.svn_node_file]
382 382 copies = {}
383 383
384 384 files.sort()
385 385 files = zip(files, [rev] * len(files))
386 386
387 387 # caller caches the result, so free it here to release memory
388 388 del self.paths[rev]
389 389 return (files, copies)
390 390
391 391 def getchangedfiles(self, rev, i):
392 392 changes = self.getchanges(rev)
393 393 self._changescache = (rev, changes)
394 394 return [f[0] for f in changes[0]]
395 395
396 396 def getcommit(self, rev):
397 397 if rev not in self.commits:
398 398 uuid, module, revnum = self.revsplit(rev)
399 399 self.module = module
400 400 self.reparent(module)
401 401 # We assume that:
402 402 # - requests for revisions after "stop" come from the
403 403 # revision graph backward traversal. Cache all of them
404 404 # down to stop, they will be used eventually.
405 405 # - requests for revisions before "stop" come to get
406 406 # isolated branches parents. Just fetch what is needed.
407 407 stop = self.lastrevs.get(module, 0)
408 408 if revnum < stop:
409 409 stop = revnum + 1
410 410 self._fetch_revisions(revnum, stop)
411 411 commit = self.commits[rev]
412 412 # caller caches the result, so free it here to release memory
413 413 del self.commits[rev]
414 414 return commit
415 415
416 416 def gettags(self):
417 417 tags = {}
418 418 if self.tags is None:
419 419 return tags
420 420
421 421 # svn tags are just a convention, project branches left in a
422 422 # 'tags' directory. There is no other relationship than
423 423 # ancestry, which is expensive to discover and makes them hard
424 424 # to update incrementally. Worse, past revisions may be
425 425 # referenced by tags far away in the future, requiring a deep
426 426 # history traversal on every calculation. Current code
427 427 # performs a single backward traversal, tracking moves within
428 428 # the tags directory (tag renaming) and recording a new tag
429 429 # every time a project is copied from outside the tags
430 430 # directory. It also lists deleted tags; this behaviour may
431 431 # change in the future.
432 432 pendings = []
433 433 tagspath = self.tags
434 434 start = svn.ra.get_latest_revnum(self.ra)
435 435 try:
436 436 for entry in self._getlog([self.tags], start, self.startrev):
437 437 origpaths, revnum, author, date, message = entry
438 438 copies = [(e.copyfrom_path, e.copyfrom_rev, p) for p, e
439 439 in origpaths.iteritems() if e.copyfrom_path]
440 440 copies.sort()
441 441 # Apply moves/copies from more specific to general
442 442 copies.reverse()
443 443
444 444 srctagspath = tagspath
445 445 if copies and copies[-1][2] == tagspath:
446 446 # Track tags directory moves
447 447 srctagspath = copies.pop()[0]
448 448
449 449 for source, sourcerev, dest in copies:
450 450 if not dest.startswith(tagspath + '/'):
451 451 continue
452 452 for tag in pendings:
453 453 if tag[0].startswith(dest):
454 454 tagpath = source + tag[0][len(dest):]
455 455 tag[:2] = [tagpath, sourcerev]
456 456 break
457 457 else:
458 458 pendings.append([source, sourcerev, dest.split('/')[-1]])
459 459
460 460 # Tell tag renamings from tag creations
461 461 remainings = []
462 462 for source, sourcerev, tagname in pendings:
463 463 if source.startswith(srctagspath):
464 464 remainings.append([source, sourcerev, tagname])
465 465 continue
466 466 # From revision may be fake, get one with changes
467 467 try:
468 468 tagid = self.latest(source, sourcerev)
469 469 if tagid:
470 470 tags[tagname] = tagid
471 471 except SvnPathNotFound:
472 472 # It happens when we are following directories we assumed
473 473 # were copied with their parents but were really created
474 474 # in the tag directory.
475 475 pass
476 476 pendings = remainings
477 477 tagspath = srctagspath
478 478
479 479 except SubversionException:
480 480 self.ui.note(_('no tags found at revision %d\n') % start)
481 481 return tags
482 482
483 483 def converted(self, rev, destrev):
484 484 if not self.wc:
485 485 return
486 486 if self.convertfp is None:
487 487 self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'),
488 488 'a')
489 489 self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev)))
490 490 self.convertfp.flush()
491 491
492 492 # -- helper functions --
493 493
494 494 def revid(self, revnum, module=None):
495 495 if not module:
496 496 module = self.module
497 497 return u"svn:%s%s@%s" % (self.uuid, module.decode(self.encoding),
498 498 revnum)
499 499
500 500 def revnum(self, rev):
501 501 return int(rev.split('@')[-1])
502 502
503 503 def revsplit(self, rev):
504 504 url, revnum = strutil.rsplit(rev.encode(self.encoding), '@', 1)
505 505 revnum = int(revnum)
506 506 parts = url.split('/', 1)
507 507 uuid = parts.pop(0)[4:]
508 508 mod = ''
509 509 if parts:
510 510 mod = '/' + parts[0]
511 511 return uuid, mod, revnum
512 512
513 513 def latest(self, path, stop=0):
514 514 """Find the latest revid affecting path, up to stop. It may return
515 515 a revision in a different module, since a branch may be moved without
516 516 a change being reported. Return None if computed module does not
517 517 belong to rootmodule subtree.
518 518 """
519 519 if not path.startswith(self.rootmodule):
520 520 # Requests on foreign branches may be forbidden at server level
521 521 self.ui.debug(_('ignoring foreign branch %r\n') % path)
522 522 return None
523 523
524 524 if not stop:
525 525 stop = svn.ra.get_latest_revnum(self.ra)
526 526 try:
527 527 prevmodule = self.reparent('')
528 528 dirent = svn.ra.stat(self.ra, path.strip('/'), stop)
529 529 self.reparent(prevmodule)
530 530 except SubversionException:
531 531 dirent = None
532 532 if not dirent:
533 533 raise SvnPathNotFound(_('%s not found up to revision %d') % (path, stop))
534 534
535 535 # stat() gives us the previous revision on this line of development, but
536 536 # it might be in *another module*. Fetch the log and detect renames down
537 537 # to the latest revision.
538 538 stream = self._getlog([path], stop, dirent.created_rev)
539 539 try:
540 540 for entry in stream:
541 541 paths, revnum, author, date, message = entry
542 542 if revnum <= dirent.created_rev:
543 543 break
544 544
545 545 for p in paths:
546 546 if not path.startswith(p) or not paths[p].copyfrom_path:
547 547 continue
548 548 newpath = paths[p].copyfrom_path + path[len(p):]
549 549 self.ui.debug(_("branch renamed from %s to %s at %d\n") %
550 550 (path, newpath, revnum))
551 551 path = newpath
552 552 break
553 553 finally:
554 554 stream.close()
555 555
556 556 if not path.startswith(self.rootmodule):
557 557 self.ui.debug(_('ignoring foreign branch %r\n') % path)
558 558 return None
559 559 return self.revid(dirent.created_rev, path)
560 560
561 561 def get_blacklist(self):
562 562 """Avoid certain revision numbers.
563 563 It is not uncommon for two nearby revisions to cancel each other
564 564 out, e.g. 'I copied trunk into a subdirectory of itself instead
565 565 of making a branch'. The converted repository is significantly
566 566 smaller if we ignore such revisions."""
567 567 self.blacklist = set()
568 568 blacklist = self.blacklist
569 569 for line in file("blacklist.txt", "r"):
570 570 if not line.startswith("#"):
571 571 try:
572 572 svn_rev = int(line.strip())
573 573 blacklist.add(svn_rev)
574 574 except ValueError:
575 575 pass # not an integer or a comment
576 576
577 577 def is_blacklisted(self, svn_rev):
578 578 return svn_rev in self.blacklist
579 579
580 580 def reparent(self, module):
581 581 """Reparent the svn transport and return the previous parent."""
582 582 if self.prevmodule == module:
583 583 return module
584 584 svnurl = self.baseurl + urllib.quote(module)
585 585 prevmodule = self.prevmodule
586 586 if prevmodule is None:
587 587 prevmodule = ''
588 588 self.ui.debug(_("reparent to %s\n") % svnurl)
589 589 svn.ra.reparent(self.ra, svnurl)
590 590 self.prevmodule = module
591 591 return prevmodule
592 592
593 593 def expandpaths(self, rev, paths, parents):
594 594 entries = []
595 595 copyfrom = {} # Map of entrypath, revision for finding source of deleted revisions.
596 596 copies = {}
597 597
598 598 new_module, revnum = self.revsplit(rev)[1:]
599 599 if new_module != self.module:
600 600 self.module = new_module
601 601 self.reparent(self.module)
602 602
603 603 for path, ent in paths:
604 604 entrypath = self.getrelpath(path)
605 605 entry = entrypath.decode(self.encoding)
606 606
607 607 kind = self._checkpath(entrypath, revnum)
608 608 if kind == svn.core.svn_node_file:
609 609 entries.append(self.recode(entry))
610 610 if not ent.copyfrom_path or not parents:
611 611 continue
612 612 # Copy sources not in parent revisions cannot be represented,
613 613 # ignore their origin for now
614 614 pmodule, prevnum = self.revsplit(parents[0])[1:]
615 615 if ent.copyfrom_rev < prevnum:
616 616 continue
617 617 copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule)
618 618 if not copyfrom_path:
619 619 continue
620 620 self.ui.debug(_("copied to %s from %s@%s\n") %
621 621 (entrypath, copyfrom_path, ent.copyfrom_rev))
622 622 copies[self.recode(entry)] = self.recode(copyfrom_path)
623 623 elif kind == 0: # gone, but had better be a deleted *file*
624 624 self.ui.debug(_("gone from %s\n") % ent.copyfrom_rev)
625 625
626 626 # if a branch is created but entries are removed in the same
627 627 # changeset, get the right fromrev
628 628 # parents cannot be empty here, you cannot remove things from
629 629 # a root revision.
630 630 uuid, old_module, fromrev = self.revsplit(parents[0])
631 631
632 632 basepath = old_module + "/" + self.getrelpath(path)
633 633 entrypath = basepath
634 634
635 635 def lookup_parts(p):
636 636 rc = None
637 637 parts = p.split("/")
638 638 for i in range(len(parts)):
639 639 part = "/".join(parts[:i])
640 640 info = part, copyfrom.get(part, None)
641 641 if info[1] is not None:
642 642 self.ui.debug(_("found parent directory %s\n") % info[1])
643 643 rc = info
644 644 return rc
645 645
646 646 self.ui.debug(_("base, entry %s %s\n") % (basepath, entrypath))
647 647
648 648 frompath, froment = lookup_parts(entrypath) or (None, revnum - 1)
649 649
650 650 # need to remove fragment from lookup_parts and replace with copyfrom_path
651 651 if frompath is not None:
652 652 self.ui.debug(_("munge-o-matic\n"))
653 653 self.ui.debug(entrypath + '\n')
654 654 self.ui.debug(entrypath[len(frompath):] + '\n')
655 655 entrypath = froment.copyfrom_path + entrypath[len(frompath):]
656 656 fromrev = froment.copyfrom_rev
657 657 self.ui.debug(_("info: %s %s %s %s\n") % (frompath, froment, ent, entrypath))
658 658
659 659 # We can avoid the reparent calls if the module has not changed
660 660 # but it is probably not worth the pain.
661 661 prevmodule = self.reparent('')
662 662 fromkind = svn.ra.check_path(self.ra, entrypath.strip('/'), fromrev)
663 663 self.reparent(prevmodule)
664 664
665 665 if fromkind == svn.core.svn_node_file: # a deleted file
666 666 entries.append(self.recode(entry))
667 667 elif fromkind == svn.core.svn_node_dir:
668 668 # print "Deleted/moved non-file:", revnum, path, ent
669 669 # children = self._find_children(path, revnum - 1)
670 670 # print "find children %s@%d from %d action %s" % (path, revnum, ent.copyfrom_rev, ent.action)
671 671 # Sometimes this is tricky. For example: in
672 672 # The Subversion Repository revision 6940 a dir
673 673 # was copied and one of its files was deleted
674 674 # from the new location in the same commit. This
675 675 # code can't deal with that yet.
676 676 if ent.action == 'C':
677 677 children = self._find_children(path, fromrev)
678 678 else:
679 679 oroot = entrypath.strip('/')
680 680 nroot = path.strip('/')
681 681 children = self._find_children(oroot, fromrev)
682 682 children = [s.replace(oroot, nroot) for s in children]
683 683 # Mark all [files, not directories] as deleted.
684 684 for child in children:
685 685 # Can we move a child directory and its
686 686 # parent in the same commit? (probably can). Could
687 687 # cause problems if instead of revnum -1,
688 688 # we have to look in (copyfrom_path, revnum - 1)
689 689 entrypath = self.getrelpath("/" + child, module=old_module)
690 690 if entrypath:
691 691 entry = self.recode(entrypath.decode(self.encoding))
692 692 if entry in copies:
693 693 # deleted file within a copy
694 694 del copies[entry]
695 695 else:
696 696 entries.append(entry)
697 697 else:
698 698 self.ui.debug(_('unknown path in revision %d: %s\n') % \
699 699 (revnum, path))
700 700 elif kind == svn.core.svn_node_dir:
701 701 # Should probably synthesize normal file entries
702 702 # and handle as above to clean up copy/rename handling.
703 703
704 704 # If the directory just had a prop change,
705 705 # then we shouldn't need to look for its children.
706 706 if ent.action == 'M':
707 707 continue
708 708
709 709 # Also this could create duplicate entries. Not sure
710 710 # whether this will matter. Maybe should make entries a set.
711 711 # print "Changed directory", revnum, path, ent.action, ent.copyfrom_path, ent.copyfrom_rev
712 712 # This will fail if a directory was copied
713 713 # from another branch and then some of its files
714 714 # were deleted in the same transaction.
715 715 children = util.sort(self._find_children(path, revnum))
716 716 for child in children:
717 717 # Can we move a child directory and its
718 718 # parent in the same commit? (probably can). Could
719 719 # cause problems if instead of revnum -1,
720 720 # we have to look in (copyfrom_path, revnum - 1)
721 721 entrypath = self.getrelpath("/" + child)
722 722 # print child, self.module, entrypath
723 723 if entrypath:
724 724 # Need to filter out directories here...
725 725 kind = self._checkpath(entrypath, revnum)
726 726 if kind != svn.core.svn_node_dir:
727 727 entries.append(self.recode(entrypath))
728 728
729 729 # Copies here (must copy all from source)
730 730 # Probably not a real problem for us if
731 731 # source does not exist
732 732 if not ent.copyfrom_path or not parents:
733 733 continue
734 734 # Copy sources not in parent revisions cannot be represented,
735 735 # ignore their origin for now
736 736 pmodule, prevnum = self.revsplit(parents[0])[1:]
737 737 if ent.copyfrom_rev < prevnum:
738 738 continue
739 739 copyfrompath = ent.copyfrom_path.decode(self.encoding)
740 740 copyfrompath = self.getrelpath(copyfrompath, pmodule)
741 741 if not copyfrompath:
742 742 continue
743 743 copyfrom[path] = ent
744 744 self.ui.debug(_("mark %s came from %s:%d\n")
745 745 % (path, copyfrompath, ent.copyfrom_rev))
746 746 children = self._find_children(ent.copyfrom_path, ent.copyfrom_rev)
747 747 children.sort()
748 748 for child in children:
749 749 entrypath = self.getrelpath("/" + child, pmodule)
750 750 if not entrypath:
751 751 continue
752 752 entry = entrypath.decode(self.encoding)
753 753 copytopath = path + entry[len(copyfrompath):]
754 754 copytopath = self.getrelpath(copytopath)
755 755 copies[self.recode(copytopath)] = self.recode(entry, pmodule)
756 756
757 return (util.unique(entries), copies)
757 return (list(set(entries)), copies)
758 758
759 759 def _fetch_revisions(self, from_revnum, to_revnum):
760 760 if from_revnum < to_revnum:
761 761 from_revnum, to_revnum = to_revnum, from_revnum
762 762
763 763 self.child_cset = None
764 764
765 765 def parselogentry(orig_paths, revnum, author, date, message):
766 766 """Return the parsed commit object or None, and True if
767 767 the revision is a branch root.
768 768 """
769 769 self.ui.debug(_("parsing revision %d (%d changes)\n") %
770 770 (revnum, len(orig_paths)))
771 771
772 772 branched = False
773 773 rev = self.revid(revnum)
774 774 # branch log might return entries for a parent we already have
775 775
776 776 if rev in self.commits or revnum < to_revnum:
777 777 return None, branched
778 778
779 779 parents = []
780 780 # check whether this revision is the start of a branch or part
781 781 # of a branch renaming
782 782 orig_paths = util.sort(orig_paths.items())
783 783 root_paths = [(p, e) for p, e in orig_paths if self.module.startswith(p)]
784 784 if root_paths:
785 785 path, ent = root_paths[-1]
786 786 if ent.copyfrom_path:
787 787 branched = True
788 788 newpath = ent.copyfrom_path + self.module[len(path):]
789 789 # ent.copyfrom_rev may not be the actual last revision
790 790 previd = self.latest(newpath, ent.copyfrom_rev)
791 791 if previd is not None:
792 792 prevmodule, prevnum = self.revsplit(previd)[1:]
793 793 if prevnum >= self.startrev:
794 794 parents = [previd]
795 795 self.ui.note(_('found parent of branch %s at %d: %s\n') %
796 796 (self.module, prevnum, prevmodule))
797 797 else:
798 798 self.ui.debug(_("no copyfrom path, don't know what to do.\n"))
799 799
800 800 paths = []
801 801 # filter out unrelated paths
802 802 for path, ent in orig_paths:
803 803 if self.getrelpath(path) is None:
804 804 continue
805 805 paths.append((path, ent))
806 806
807 807 # Example SVN datetime. Includes microseconds.
808 808 # ISO-8601 conformant
809 809 # '2007-01-04T17:35:00.902377Z'
810 810 date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])
811 811
812 812 log = message and self.recode(message) or ''
813 813 author = author and self.recode(author) or ''
814 814 try:
815 815 branch = self.module.split("/")[-1]
816 816 if branch == 'trunk':
817 817 branch = ''
818 818 except IndexError:
819 819 branch = None
820 820
821 821 cset = commit(author=author,
822 822 date=util.datestr(date),
823 823 desc=log,
824 824 parents=parents,
825 825 branch=branch,
826 826 rev=rev.encode('utf-8'))
827 827
828 828 self.commits[rev] = cset
829 829 # The parents list is *shared* among self.paths and the
830 830 # commit object. Both will be updated below.
831 831 self.paths[rev] = (paths, cset.parents)
832 832 if self.child_cset and not self.child_cset.parents:
833 833 self.child_cset.parents[:] = [rev]
834 834 self.child_cset = cset
835 835 return cset, branched
836 836
837 837 self.ui.note(_('fetching revision log for "%s" from %d to %d\n') %
838 838 (self.module, from_revnum, to_revnum))
839 839
840 840 try:
841 841 firstcset = None
842 842 lastonbranch = False
843 843 stream = self._getlog([self.module], from_revnum, to_revnum)
844 844 try:
845 845 for entry in stream:
846 846 paths, revnum, author, date, message = entry
847 847 if revnum < self.startrev:
848 848 lastonbranch = True
849 849 break
850 850 if self.is_blacklisted(revnum):
851 851 self.ui.note(_('skipping blacklisted revision %d\n')
852 852 % revnum)
853 853 continue
854 854 if paths is None:
855 855 self.ui.debug(_('revision %d has no entries\n') % revnum)
856 856 continue
857 857 cset, lastonbranch = parselogentry(paths, revnum, author,
858 858 date, message)
859 859 if cset:
860 860 firstcset = cset
861 861 if lastonbranch:
862 862 break
863 863 finally:
864 864 stream.close()
865 865
866 866 if not lastonbranch and firstcset and not firstcset.parents:
867 867 # The first revision of the sequence (the last fetched one)
868 868 # has invalid parents if not a branch root. Find the parent
869 869 # revision now, if any.
870 870 try:
871 871 firstrevnum = self.revnum(firstcset.rev)
872 872 if firstrevnum > 1:
873 873 latest = self.latest(self.module, firstrevnum - 1)
874 874 if latest:
875 875 firstcset.parents.append(latest)
876 876 except SvnPathNotFound:
877 877 pass
878 878 except SubversionException, (inst, num):
879 879 if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION:
880 880 raise util.Abort(_('svn: branch has no revision %s') % to_revnum)
881 881 raise
882 882
883 883 def _getfile(self, file, rev):
884 884 # TODO: ra.get_file transmits the whole file instead of diffs.
885 885 mode = ''
886 886 try:
887 887 new_module, revnum = self.revsplit(rev)[1:]
888 888 if self.module != new_module:
889 889 self.module = new_module
890 890 self.reparent(self.module)
891 891 io = StringIO()
892 892 info = svn.ra.get_file(self.ra, file, revnum, io)
893 893 data = io.getvalue()
894 894 # ra.get_files() seems to keep a reference on the input buffer
895 895 # preventing collection. Release it explicitly.
896 896 io.close()
897 897 if isinstance(info, list):
898 898 info = info[-1]
899 899 mode = ("svn:executable" in info) and 'x' or ''
900 900 mode = ("svn:special" in info) and 'l' or mode
901 901 except SubversionException, e:
902 902 notfound = (svn.core.SVN_ERR_FS_NOT_FOUND,
903 903 svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND)
904 904 if e.apr_err in notfound: # File not found
905 905 raise IOError()
906 906 raise
907 907 if mode == 'l':
908 908 link_prefix = "link "
909 909 if data.startswith(link_prefix):
910 910 data = data[len(link_prefix):]
911 911 return data, mode
912 912
913 913 def _find_children(self, path, revnum):
914 914 path = path.strip('/')
915 915 pool = Pool()
916 916 rpath = '/'.join([self.baseurl, urllib.quote(path)]).strip('/')
917 917 return ['%s/%s' % (path, x) for x in
918 918 svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool).keys()]
919 919
920 920 def getrelpath(self, path, module=None):
921 921 if module is None:
922 922 module = self.module
923 923 # Given the repository url of this wc, say
924 924 # "http://server/plone/CMFPlone/branches/Plone-2_0-branch"
925 925 # extract the "entry" portion (a relative path) from what
926 926 # svn log --xml says, i.e.
927 927 # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py"
928 928 # that is to say "tests/PloneTestCase.py"
929 929 if path.startswith(module):
930 930 relative = path.rstrip('/')[len(module):]
931 931 if relative.startswith('/'):
932 932 return relative[1:]
933 933 elif relative == '':
934 934 return relative
935 935
936 936 # The path is outside our tracked tree...
937 937 self.ui.debug(_('%r is not under %r, ignoring\n') % (path, module))
938 938 return None
939 939
940 940 def _checkpath(self, path, revnum):
941 941 # ra.check_path does not like leading slashes very much; they lead
942 942 # to PROPFIND subversion errors
943 943 return svn.ra.check_path(self.ra, path.strip('/'), revnum)
944 944
945 945 def _getlog(self, paths, start, end, limit=0, discover_changed_paths=True,
946 946 strict_node_history=False):
947 947 # Normalize path names, svn >= 1.5 only wants paths relative to
948 948 # supplied URL
949 949 relpaths = []
950 950 for p in paths:
951 951 if not p.startswith('/'):
952 952 p = self.module + '/' + p
953 953 relpaths.append(p.strip('/'))
954 954 args = [self.baseurl, relpaths, start, end, limit, discover_changed_paths,
955 955 strict_node_history]
956 956 arg = encodeargs(args)
957 957 hgexe = util.hgexecutable()
958 958 cmd = '%s debugsvnlog' % util.shellquote(hgexe)
959 959 stdin, stdout = util.popen2(cmd, 'b')
960 960 stdin.write(arg)
961 961 stdin.close()
962 962 return logstream(stdout)
963 963
964 964 pre_revprop_change = '''#!/bin/sh
965 965
966 966 REPOS="$1"
967 967 REV="$2"
968 968 USER="$3"
969 969 PROPNAME="$4"
970 970 ACTION="$5"
971 971
972 972 if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
973 973 if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi
974 974 if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi
975 975
976 976 echo "Changing prohibited revision property" >&2
977 977 exit 1
978 978 '''
979 979
980 980 class svn_sink(converter_sink, commandline):
981 981 commit_re = re.compile(r'Committed revision (\d+).', re.M)
982 982
983 983 def prerun(self):
984 984 if self.wc:
985 985 os.chdir(self.wc)
986 986
987 987 def postrun(self):
988 988 if self.wc:
989 989 os.chdir(self.cwd)
990 990
991 991 def join(self, name):
992 992 return os.path.join(self.wc, '.svn', name)
993 993
994 994 def revmapfile(self):
995 995 return self.join('hg-shamap')
996 996
997 997 def authorfile(self):
998 998 return self.join('hg-authormap')
999 999
1000 1000 def __init__(self, ui, path):
1001 1001 converter_sink.__init__(self, ui, path)
1002 1002 commandline.__init__(self, ui, 'svn')
1003 1003 self.delete = []
1004 1004 self.setexec = []
1005 1005 self.delexec = []
1006 1006 self.copies = []
1007 1007 self.wc = None
1008 1008 self.cwd = os.getcwd()
1009 1009
1010 1010 path = os.path.realpath(path)
1011 1011
1012 1012 created = False
1013 1013 if os.path.isfile(os.path.join(path, '.svn', 'entries')):
1014 1014 self.wc = path
1015 1015 self.run0('update')
1016 1016 else:
1017 1017 wcpath = os.path.join(os.getcwd(), os.path.basename(path) + '-wc')
1018 1018
1019 1019 if os.path.isdir(os.path.dirname(path)):
1020 1020 if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
1021 1021 ui.status(_('initializing svn repo %r\n') %
1022 1022 os.path.basename(path))
1023 1023 commandline(ui, 'svnadmin').run0('create', path)
1024 1024 created = path
1025 1025 path = util.normpath(path)
1026 1026 if not path.startswith('/'):
1027 1027 path = '/' + path
1028 1028 path = 'file://' + path
1029 1029
1030 1030 ui.status(_('initializing svn wc %r\n') % os.path.basename(wcpath))
1031 1031 self.run0('checkout', path, wcpath)
1032 1032
1033 1033 self.wc = wcpath
1034 1034 self.opener = util.opener(self.wc)
1035 1035 self.wopener = util.opener(self.wc)
1036 1036 self.childmap = mapfile(ui, self.join('hg-childmap'))
1037 1037 self.is_exec = util.checkexec(self.wc) and util.is_exec or None
1038 1038
1039 1039 if created:
1040 1040 hook = os.path.join(created, 'hooks', 'pre-revprop-change')
1041 1041 fp = open(hook, 'w')
1042 1042 fp.write(pre_revprop_change)
1043 1043 fp.close()
1044 1044 util.set_flags(hook, False, True)
1045 1045
1046 1046 xport = transport.SvnRaTransport(url=geturl(path))
1047 1047 self.uuid = svn.ra.get_uuid(xport.ra)
1048 1048
1049 1049 def wjoin(self, *names):
1050 1050 return os.path.join(self.wc, *names)
1051 1051
1052 1052 def putfile(self, filename, flags, data):
1053 1053 if 'l' in flags:
1054 1054 self.wopener.symlink(data, filename)
1055 1055 else:
1056 1056 try:
1057 1057 if os.path.islink(self.wjoin(filename)):
1058 1058 os.unlink(filename)
1059 1059 except OSError:
1060 1060 pass
1061 1061 self.wopener(filename, 'w').write(data)
1062 1062
1063 1063 if self.is_exec:
1064 1064 was_exec = self.is_exec(self.wjoin(filename))
1065 1065 else:
1066 1066 # On filesystems not supporting the execute bit, there is no way
1067 1067 # to know if it is set except by asking Subversion. Setting it
1068 1068 # systematically is just as expensive and much simpler.
1069 1069 was_exec = 'x' not in flags
1070 1070
1071 1071 util.set_flags(self.wjoin(filename), False, 'x' in flags)
1072 1072 if was_exec:
1073 1073 if 'x' not in flags:
1074 1074 self.delexec.append(filename)
1075 1075 else:
1076 1076 if 'x' in flags:
1077 1077 self.setexec.append(filename)
1078 1078
1079 1079 def _copyfile(self, source, dest):
1080 1080 # SVN's copy command pukes if the destination file exists, but
1081 1081 # our copyfile method expects to record a copy that has
1082 1082 # already occurred. Cross the semantic gap.
1083 1083 wdest = self.wjoin(dest)
1084 1084 exists = os.path.exists(wdest)
1085 1085 if exists:
1086 1086 fd, tempname = tempfile.mkstemp(
1087 1087 prefix='hg-copy-', dir=os.path.dirname(wdest))
1088 1088 os.close(fd)
1089 1089 os.unlink(tempname)
1090 1090 os.rename(wdest, tempname)
1091 1091 try:
1092 1092 self.run0('copy', source, dest)
1093 1093 finally:
1094 1094 if exists:
1095 1095 try:
1096 1096 os.unlink(wdest)
1097 1097 except OSError:
1098 1098 pass
1099 1099 os.rename(tempname, wdest)
1100 1100
1101 1101 def dirs_of(self, files):
1102 1102 dirs = set()
1103 1103 for f in files:
1104 1104 if os.path.isdir(self.wjoin(f)):
1105 1105 dirs.add(f)
1106 1106 for i in strutil.rfindall(f, '/'):
1107 1107 dirs.add(f[:i])
1108 1108 return dirs
1109 1109
1110 1110 def add_dirs(self, files):
1111 1111 add_dirs = [d for d in util.sort(self.dirs_of(files))
1112 1112 if not os.path.exists(self.wjoin(d, '.svn', 'entries'))]
1113 1113 if add_dirs:
1114 1114 self.xargs(add_dirs, 'add', non_recursive=True, quiet=True)
1115 1115 return add_dirs
1116 1116
1117 1117 def add_files(self, files):
1118 1118 if files:
1119 1119 self.xargs(files, 'add', quiet=True)
1120 1120 return files
1121 1121
1122 1122 def tidy_dirs(self, names):
1123 1123 dirs = util.sort(self.dirs_of(names))
1124 1124 dirs.reverse()
1125 1125 deleted = []
1126 1126 for d in dirs:
1127 1127 wd = self.wjoin(d)
1128 1128 if os.listdir(wd) == ['.svn']: # os.listdir returns a list
1129 1129 self.run0('delete', d)
1130 1130 deleted.append(d)
1131 1131 return deleted
1132 1132
1133 1133 def addchild(self, parent, child):
1134 1134 self.childmap[parent] = child
1135 1135
1136 1136 def revid(self, rev):
1137 1137 return u"svn:%s@%s" % (self.uuid, rev)
1138 1138
1139 1139 def putcommit(self, files, copies, parents, commit, source):
1140 1140 # Apply changes to working copy
1141 1141 for f, v in files:
1142 1142 try:
1143 1143 data = source.getfile(f, v)
1144 1144 except IOError:
1145 1145 self.delete.append(f)
1146 1146 else:
1147 1147 e = source.getmode(f, v)
1148 1148 self.putfile(f, e, data)
1149 1149 if f in copies:
1150 1150 self.copies.append([copies[f], f])
1151 1151 files = [f[0] for f in files]
1152 1152
1153 1153 for parent in parents:
1154 1154 try:
1155 1155 return self.revid(self.childmap[parent])
1156 1156 except KeyError:
1157 1157 pass
1158 1158 entries = set(self.delete)
1159 1159 files = frozenset(files)
1160 1160 entries.update(self.add_dirs(files.difference(entries)))
1161 1161 if self.copies:
1162 1162 for s, d in self.copies:
1163 1163 self._copyfile(s, d)
1164 1164 self.copies = []
1165 1165 if self.delete:
1166 1166 self.xargs(self.delete, 'delete')
1167 1167 self.delete = []
1168 1168 entries.update(self.add_files(files.difference(entries)))
1169 1169 entries.update(self.tidy_dirs(entries))
1170 1170 if self.delexec:
1171 1171 self.xargs(self.delexec, 'propdel', 'svn:executable')
1172 1172 self.delexec = []
1173 1173 if self.setexec:
1174 1174 self.xargs(self.setexec, 'propset', 'svn:executable', '*')
1175 1175 self.setexec = []
1176 1176
1177 1177 fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
1178 1178 fp = os.fdopen(fd, 'w')
1179 1179 fp.write(commit.desc)
1180 1180 fp.close()
1181 1181 try:
1182 1182 output = self.run0('commit',
1183 1183 username=util.shortuser(commit.author),
1184 1184 file=messagefile,
1185 1185 encoding='utf-8')
1186 1186 try:
1187 1187 rev = self.commit_re.search(output).group(1)
1188 1188 except AttributeError:
1189 1189 self.ui.warn(_('unexpected svn output:\n'))
1190 1190 self.ui.warn(output)
1191 1191 raise util.Abort(_('unable to cope with svn output'))
1192 1192 if commit.rev:
1193 1193 self.run('propset', 'hg:convert-rev', commit.rev,
1194 1194 revprop=True, revision=rev)
1195 1195 if commit.branch and commit.branch != 'default':
1196 1196 self.run('propset', 'hg:convert-branch', commit.branch,
1197 1197 revprop=True, revision=rev)
1198 1198 for parent in parents:
1199 1199 self.addchild(parent, rev)
1200 1200 return self.revid(rev)
1201 1201 finally:
1202 1202 os.unlink(messagefile)
1203 1203
1204 1204 def puttags(self, tags):
1205 1205 self.ui.warn(_('XXX TAGS NOT IMPLEMENTED YET\n'))
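Note the asymmetry between the two converted call sites: expandpaths() wraps its set in list() because its caller, getchanges(), goes on to call files.sort() in place on the result, and sets have no sort() method; the GNU Arch source could pass a bare set because util.sort copies into a list itself. A small demonstration of why the wrapper matters:

    entries = set(['a', 'b', 'a'])

    # A bare set would break a caller that sorts in place:
    try:
        entries.sort()
    except AttributeError:
        pass  # sets have no sort() method

    # Wrapping in list() restores the interface the caller expects.
    files = list(set(['a', 'b', 'a']))
    files.sort()
    assert files == ['a', 'b']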
hgext/mq.py
@@ -1,2613 +1,2613 @@
1 1 # mq.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 '''patch management and development
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and
12 12 applied patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches
15 15 directory. Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use "hg help command" for more details):
18 18
19 19 prepare repository to work with patches qinit
20 20 create new patch qnew
21 21 import existing patch qimport
22 22
23 23 print patch series qseries
24 24 print applied patches qapplied
25 25 print name of top applied patch qtop
26 26
27 27 add known patch to applied stack qpush
28 28 remove patch from applied stack qpop
29 29 refresh contents of top applied patch qrefresh
30 30 '''
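# Editor's sketch of a typical round trip with the commands listed in
# the docstring above (the patch name is hypothetical):
#
#   hg qinit                 # prepare the repository
#   hg qnew fix-crash.patch  # start a new patch
#   (edit some files)
#   hg qrefresh              # capture the edits into the patch
#   hg qpop                  # unapply it; hg qpush reapplies it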
31 31
32 32 from mercurial.i18n import _
33 33 from mercurial.node import bin, hex, short, nullid, nullrev
34 34 from mercurial.lock import release
35 35 from mercurial import commands, cmdutil, hg, patch, util
36 36 from mercurial import repair, extensions, url, error
37 37 import os, sys, re, errno
38 38
39 39 commands.norepo += " qclone"
40 40
41 41 # Patch names look like unix file names.
42 42 # They must be joinable with the queue directory and result in the patch path.
43 43 normname = util.normpath
44 44
45 45 class statusentry:
46 46 def __init__(self, rev, name=None):
47 47 if not name:
48 48 fields = rev.split(':', 1)
49 49 if len(fields) == 2:
50 50 self.rev, self.name = fields
51 51 else:
52 52 self.rev, self.name = None, None
53 53 else:
54 54 self.rev, self.name = rev, name
55 55
56 56 def __str__(self):
57 57 return self.rev + ':' + self.name
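# Editor's note: a minimal sketch of how statusentry round-trips one
# line of the status file (hash shortened, patch name hypothetical):
#
#   >>> e = statusentry('0123456789ab:fix-crash.patch')
#   >>> e.rev, e.name
#   ('0123456789ab', 'fix-crash.patch')
#   >>> str(e)
#   '0123456789ab:fix-crash.patch'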
58 58
59 59 class patchheader(object):
60 60 def __init__(self, message, comments, user, date, haspatch):
61 61 self.message = message
62 62 self.comments = comments
63 63 self.user = user
64 64 self.date = date
65 65 self.haspatch = haspatch
66 66
67 67 def setuser(self, user):
68 68 if not self.setheader(['From: ', '# User '], user):
69 69 try:
70 70 patchheaderat = self.comments.index('# HG changeset patch')
71 71 self.comments.insert(patchheaderat + 1, '# User ' + user)
72 72 except ValueError:
73 73 self.comments = ['From: ' + user, ''] + self.comments
74 74 self.user = user
75 75
76 76 def setdate(self, date):
77 77 if self.setheader(['# Date '], date):
78 78 self.date = date
79 79
80 80 def setmessage(self, message):
81 81 if self.comments:
82 82 self._delmsg()
83 83 self.message = [message]
84 84 self.comments += self.message
85 85
86 86 def setheader(self, prefixes, new):
87 87 '''Update all references to a field in the patch header.
88 88 If none found, add it email style.'''
89 89 res = False
90 90 for prefix in prefixes:
91 91 for i in xrange(len(self.comments)):
92 92 if self.comments[i].startswith(prefix):
93 93 self.comments[i] = prefix + new
94 94 res = True
95 95 break
96 96 return res
97 97
98 98 def __str__(self):
99 99 if not self.comments:
100 100 return ''
101 101 return '\n'.join(self.comments) + '\n\n'
102 102
103 103 def _delmsg(self):
104 104 '''Remove the existing message, keeping the rest of the comments fields.
105 105 If the comments contain a 'subject: ' line, the message is assumed to
106 106 start with that subject followed by a blank line, and both are removed.'''
107 107 if self.message:
108 108 subj = 'subject: ' + self.message[0].lower()
109 109 for i in xrange(len(self.comments)):
110 110 if subj == self.comments[i].lower():
111 111 del self.comments[i]
112 112 self.message = self.message[2:]
113 113 break
114 114 ci = 0
115 115 for mi in xrange(len(self.message)):
116 116 while self.message[mi] != self.comments[ci]:
117 117 ci += 1
118 118 del self.comments[ci]
119 119
120 120 class queue:
121 121 def __init__(self, ui, path, patchdir=None):
122 122 self.basepath = path
123 123 self.path = patchdir or os.path.join(path, "patches")
124 124 self.opener = util.opener(self.path)
125 125 self.ui = ui
126 126 self.applied = []
127 127 self.full_series = []
128 128 self.applied_dirty = 0
129 129 self.series_dirty = 0
130 130 self.series_path = "series"
131 131 self.status_path = "status"
132 132 self.guards_path = "guards"
133 133 self.active_guards = None
134 134 self.guards_dirty = False
135 135 self._diffopts = None
136 136
137 137 if os.path.exists(self.join(self.series_path)):
138 138 self.full_series = self.opener(self.series_path).read().splitlines()
139 139 self.parse_series()
140 140
141 141 if os.path.exists(self.join(self.status_path)):
142 142 lines = self.opener(self.status_path).read().splitlines()
143 143 self.applied = [statusentry(l) for l in lines]
144 144
145 145 def diffopts(self):
146 146 if self._diffopts is None:
147 147 self._diffopts = patch.diffopts(self.ui)
148 148 return self._diffopts
149 149
150 150 def join(self, *p):
151 151 return os.path.join(self.path, *p)
152 152
153 153 def find_series(self, patch):
154 154 pre = re.compile(r"(\s*)([^#]+)")
155 155 index = 0
156 156 for l in self.full_series:
157 157 m = pre.match(l)
158 158 if m:
159 159 s = m.group(2)
160 160 s = s.rstrip()
161 161 if s == patch:
162 162 return index
163 163 index += 1
164 164 return None
165 165
166 166 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
167 167
168 168 def parse_series(self):
169 169 self.series = []
170 170 self.series_guards = []
171 171 for l in self.full_series:
172 172 h = l.find('#')
173 173 if h == -1:
174 174 patch = l
175 175 comment = ''
176 176 elif h == 0:
177 177 continue
178 178 else:
179 179 patch = l[:h]
180 180 comment = l[h:]
181 181 patch = patch.strip()
182 182 if patch:
183 183 if patch in self.series:
184 184 raise util.Abort(_('%s appears more than once in %s') %
185 185 (patch, self.join(self.series_path)))
186 186 self.series.append(patch)
187 187 self.series_guards.append(self.guard_re.findall(comment))
188 188
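# Editor's sketch of the series-file syntax parse_series accepts
# (patch names hypothetical): one patch per line, '#' introduces a
# comment, and '#+name'/'#-name' tokens in the comment are guards:
#
#   fix-crash.patch
#   experimental.patch  #+devel #-stable
#   # a line starting with '#' is skipped entirely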
189 189 def check_guard(self, guard):
190 190 if not guard:
191 191 return _('guard cannot be an empty string')
192 192 bad_chars = '# \t\r\n\f'
193 193 first = guard[0]
194 194 for c in '-+':
195 195 if first == c:
196 196 return (_('guard %r starts with invalid character: %r') %
197 197 (guard, c))
198 198 for c in bad_chars:
199 199 if c in guard:
200 200 return _('invalid character in guard %r: %r') % (guard, c)
201 201
202 202 def set_active(self, guards):
203 203 for guard in guards:
204 204 bad = self.check_guard(guard)
205 205 if bad:
206 206 raise util.Abort(bad)
207 guards = util.sort(util.unique(guards))
207 guards = util.sort(set(guards))
208 208 self.ui.debug(_('active guards: %s\n') % ' '.join(guards))
209 209 self.active_guards = guards
210 210 self.guards_dirty = True
211 211
212 212 def active(self):
213 213 if self.active_guards is None:
214 214 self.active_guards = []
215 215 try:
216 216 guards = self.opener(self.guards_path).read().split()
217 217 except IOError, err:
218 218 if err.errno != errno.ENOENT: raise
219 219 guards = []
220 220 for i, guard in enumerate(guards):
221 221 bad = self.check_guard(guard)
222 222 if bad:
223 223 self.ui.warn('%s:%d: %s\n' %
224 224 (self.join(self.guards_path), i + 1, bad))
225 225 else:
226 226 self.active_guards.append(guard)
227 227 return self.active_guards
228 228
229 229 def set_guards(self, idx, guards):
230 230 for g in guards:
231 231 if len(g) < 2:
232 232 raise util.Abort(_('guard %r too short') % g)
233 233 if g[0] not in '-+':
234 234 raise util.Abort(_('guard %r starts with invalid char') % g)
235 235 bad = self.check_guard(g[1:])
236 236 if bad:
237 237 raise util.Abort(bad)
238 238 drop = self.guard_re.sub('', self.full_series[idx])
239 239 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
240 240 self.parse_series()
241 241 self.series_dirty = True
242 242
243 243 def pushable(self, idx):
244 244 if isinstance(idx, str):
245 245 idx = self.series.index(idx)
246 246 patchguards = self.series_guards[idx]
247 247 if not patchguards:
248 248 return True, None
249 249 guards = self.active()
250 250 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
251 251 if exactneg:
252 252 return False, exactneg[0]
253 253 pos = [g for g in patchguards if g[0] == '+']
254 254 exactpos = [g for g in pos if g[1:] in guards]
255 255 if pos:
256 256 if exactpos:
257 257 return True, exactpos[0]
258 258 return False, pos
259 259 return True, ''
260 260
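# Editor's summary of the guard semantics implemented above, assuming
# the active guard set is {'stable'}:
#
#   patch guards   pushable?
#   (none)         yes - no guards attached
#   -stable        no  - matching negative guard
#   -devel         yes - negative guard does not match
#   +stable        yes - matching positive guard
#   +devel         no  - positive guards exist but none match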
261 261 def explain_pushable(self, idx, all_patches=False):
262 262 write = all_patches and self.ui.write or self.ui.warn
263 263 if all_patches or self.ui.verbose:
264 264 if isinstance(idx, str):
265 265 idx = self.series.index(idx)
266 266 pushable, why = self.pushable(idx)
267 267 if all_patches and pushable:
268 268 if why is None:
269 269 write(_('allowing %s - no guards in effect\n') %
270 270 self.series[idx])
271 271 else:
272 272 if not why:
273 273 write(_('allowing %s - no matching negative guards\n') %
274 274 self.series[idx])
275 275 else:
276 276 write(_('allowing %s - guarded by %r\n') %
277 277 (self.series[idx], why))
278 278 if not pushable:
279 279 if why:
280 280 write(_('skipping %s - guarded by %r\n') %
281 281 (self.series[idx], why))
282 282 else:
283 283 write(_('skipping %s - no matching guards\n') %
284 284 self.series[idx])
285 285
286 286 def save_dirty(self):
287 287 def write_list(items, path):
288 288 fp = self.opener(path, 'w')
289 289 for i in items:
290 290 fp.write("%s\n" % i)
291 291 fp.close()
292 292 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
293 293 if self.series_dirty: write_list(self.full_series, self.series_path)
294 294 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
295 295
296 296 def readheaders(self, patch):
297 297 def eatdiff(lines):
298 298 while lines:
299 299 l = lines[-1]
300 300 if (l.startswith("diff -") or
301 301 l.startswith("Index:") or
302 302 l.startswith("===========")):
303 303 del lines[-1]
304 304 else:
305 305 break
306 306 def eatempty(lines):
307 307 while lines:
308 308 l = lines[-1]
309 309 if re.match(r'\s*$', l):
310 310 del lines[-1]
311 311 else:
312 312 break
313 313
314 314 pf = self.join(patch)
315 315 message = []
316 316 comments = []
317 317 user = None
318 318 date = None
319 319 format = None
320 320 subject = None
321 321 diffstart = 0
322 322
323 323 for line in file(pf):
324 324 line = line.rstrip()
325 325 if line.startswith('diff --git'):
326 326 diffstart = 2
327 327 break
328 328 if diffstart:
329 329 if line.startswith('+++ '):
330 330 diffstart = 2
331 331 break
332 332 if line.startswith("--- "):
333 333 diffstart = 1
334 334 continue
335 335 elif format == "hgpatch":
336 336 # parse values when importing the result of an hg export
337 337 if line.startswith("# User "):
338 338 user = line[7:]
339 339 elif line.startswith("# Date "):
340 340 date = line[7:]
341 341 elif not line.startswith("# ") and line:
342 342 message.append(line)
343 343 format = None
344 344 elif line == '# HG changeset patch':
345 345 format = "hgpatch"
346 346 elif (format != "tagdone" and (line.startswith("Subject: ") or
347 347 line.startswith("subject: "))):
348 348 subject = line[9:]
349 349 format = "tag"
350 350 elif (format != "tagdone" and (line.startswith("From: ") or
351 351 line.startswith("from: "))):
352 352 user = line[6:]
353 353 format = "tag"
354 354 elif format == "tag" and line == "":
355 355 # when looking for tags (subject: from: etc) they
356 356 # end once you find a blank line in the source
357 357 format = "tagdone"
358 358 elif message or line:
359 359 message.append(line)
360 360 comments.append(line)
361 361
362 362 eatdiff(message)
363 363 eatdiff(comments)
364 364 eatempty(message)
365 365 eatempty(comments)
366 366
367 367 # make sure message isn't empty
368 368 if format and format.startswith("tag") and subject:
369 369 message.insert(0, "")
370 370 message.insert(0, subject)
371 371 return patchheader(message, comments, user, date, diffstart > 1)
372 372
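# Editor's note: a minimal example of the hg-export header format the
# parser above recognizes (values hypothetical); 'From:'/'Subject:'
# e-mail style headers are accepted as well:
#
#   # HG changeset patch
#   # User Jane Doe <jane@example.com>
#   # Date 1234567890 0
#   fix a crash when the series file is empty
#
#   diff -r 0123456789ab -r ba9876543210 hgext/mq.py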
373 373 def removeundo(self, repo):
374 374 undo = repo.sjoin('undo')
375 375 if not os.path.exists(undo):
376 376 return
377 377 try:
378 378 os.unlink(undo)
379 379 except OSError, inst:
380 380 self.ui.warn(_('error removing undo: %s\n') % str(inst))
381 381
382 382 def printdiff(self, repo, node1, node2=None, files=None,
383 383 fp=None, changes=None, opts={}):
384 384 m = cmdutil.match(repo, files, opts)
385 385 chunks = patch.diff(repo, node1, node2, m, changes, self.diffopts())
386 386 write = fp is None and repo.ui.write or fp.write
387 387 for chunk in chunks:
388 388 write(chunk)
389 389
390 390 def mergeone(self, repo, mergeq, head, patch, rev):
391 391 # first try just applying the patch
392 392 (err, n) = self.apply(repo, [ patch ], update_status=False,
393 393 strict=True, merge=rev)
394 394
395 395 if err == 0:
396 396 return (err, n)
397 397
398 398 if n is None:
399 399 raise util.Abort(_("apply failed for patch %s") % patch)
400 400
401 401 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
402 402
403 403 # apply failed, strip away that rev and merge.
404 404 hg.clean(repo, head)
405 405 self.strip(repo, n, update=False, backup='strip')
406 406
407 407 ctx = repo[rev]
408 408 ret = hg.merge(repo, rev)
409 409 if ret:
410 410 raise util.Abort(_("update returned %d") % ret)
411 411 n = repo.commit(None, ctx.description(), ctx.user(), force=1)
412 412 if n is None:
413 413 raise util.Abort(_("repo commit failed"))
414 414 try:
415 415 ph = mergeq.readheaders(patch)
416 416 except:
417 417 raise util.Abort(_("unable to read %s") % patch)
418 418
419 419 patchf = self.opener(patch, "w")
420 420 comments = str(ph)
421 421 if comments:
422 422 patchf.write(comments)
423 423 self.printdiff(repo, head, n, fp=patchf)
424 424 patchf.close()
425 425 self.removeundo(repo)
426 426 return (0, n)
427 427
428 428 def qparents(self, repo, rev=None):
429 429 if rev is None:
430 430 (p1, p2) = repo.dirstate.parents()
431 431 if p2 == nullid:
432 432 return p1
433 433 if len(self.applied) == 0:
434 434 return None
435 435 return bin(self.applied[-1].rev)
436 436 pp = repo.changelog.parents(rev)
437 437 if pp[1] != nullid:
438 438 arevs = [ x.rev for x in self.applied ]
439 439 p0 = hex(pp[0])
440 440 p1 = hex(pp[1])
441 441 if p0 in arevs:
442 442 return pp[0]
443 443 if p1 in arevs:
444 444 return pp[1]
445 445 return pp[0]
446 446
447 447 def mergepatch(self, repo, mergeq, series):
448 448 if len(self.applied) == 0:
449 449 # each of the patches merged in will have two parents. This
450 450 # can confuse the qrefresh, qdiff, and strip code because it
451 451 # needs to know which parent is actually in the patch queue.
452 452 # so, we insert a merge marker with only one parent. This way
453 453 # the first patch in the queue is never a merge patch
454 454 #
455 455 pname = ".hg.patches.merge.marker"
456 456 n = repo.commit(None, '[mq]: merge marker', user=None, force=1)
457 457 self.removeundo(repo)
458 458 self.applied.append(statusentry(hex(n), pname))
459 459 self.applied_dirty = 1
460 460
461 461 head = self.qparents(repo)
462 462
463 463 for patch in series:
464 464 patch = mergeq.lookup(patch, strict=True)
465 465 if not patch:
466 466 self.ui.warn(_("patch %s does not exist\n") % patch)
467 467 return (1, None)
468 468 pushable, reason = self.pushable(patch)
469 469 if not pushable:
470 470 self.explain_pushable(patch, all_patches=True)
471 471 continue
472 472 info = mergeq.isapplied(patch)
473 473 if not info:
474 474 self.ui.warn(_("patch %s is not applied\n") % patch)
475 475 return (1, None)
476 476 rev = bin(info[1])
477 477 (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
478 478 if head:
479 479 self.applied.append(statusentry(hex(head), patch))
480 480 self.applied_dirty = 1
481 481 if err:
482 482 return (err, head)
483 483 self.save_dirty()
484 484 return (0, head)
485 485
486 486 def patch(self, repo, patchfile):
487 487 '''Apply patchfile to the working directory.
488 488 patchfile: file name of patch'''
489 489 files = {}
490 490 try:
491 491 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
492 492 files=files)
493 493 except Exception, inst:
494 494 self.ui.note(str(inst) + '\n')
495 495 if not self.ui.verbose:
496 496 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
497 497 return (False, files, False)
498 498
499 499 return (True, files, fuzz)
500 500
501 501 def apply(self, repo, series, list=False, update_status=True,
502 502 strict=False, patchdir=None, merge=None, all_files=None):
503 503 wlock = lock = tr = None
504 504 try:
505 505 wlock = repo.wlock()
506 506 lock = repo.lock()
507 507 tr = repo.transaction()
508 508 try:
509 509 ret = self._apply(repo, series, list, update_status,
510 510 strict, patchdir, merge, all_files=all_files)
511 511 tr.close()
512 512 self.save_dirty()
513 513 return ret
514 514 except:
515 515 try:
516 516 tr.abort()
517 517 finally:
518 518 repo.invalidate()
519 519 repo.dirstate.invalidate()
520 520 raise
521 521 finally:
522 522 del tr
523 523 release(lock, wlock)
524 524 self.removeundo(repo)
525 525
526 526 def _apply(self, repo, series, list=False, update_status=True,
527 527 strict=False, patchdir=None, merge=None, all_files=None):
if all_files is None: # avoid mutating a shared default argument
all_files = {}
528 528 # TODO unify with commands.py
529 529 if not patchdir:
530 530 patchdir = self.path
531 531 err = 0
532 532 n = None
533 533 for patchname in series:
534 534 pushable, reason = self.pushable(patchname)
535 535 if not pushable:
536 536 self.explain_pushable(patchname, all_patches=True)
537 537 continue
538 538 self.ui.warn(_("applying %s\n") % patchname)
539 539 pf = os.path.join(patchdir, patchname)
540 540
541 541 try:
542 542 ph = self.readheaders(patchname)
543 543 except:
544 544 self.ui.warn(_("unable to read %s\n") % patchname)
545 545 err = 1
546 546 break
547 547
548 548 message = ph.message
549 549 if not message:
550 550 message = _("imported patch %s\n") % patchname
551 551 else:
552 552 if list:
553 553 message.append(_("\nimported patch %s") % patchname)
554 554 message = '\n'.join(message)
555 555
556 556 if ph.haspatch:
557 557 (patcherr, files, fuzz) = self.patch(repo, pf)
558 558 all_files.update(files)
559 559 patcherr = not patcherr
560 560 else:
561 561 self.ui.warn(_("patch %s is empty\n") % patchname)
562 562 patcherr, files, fuzz = 0, [], 0
563 563
564 564 if merge and files:
565 565 # Mark as removed/merged and update dirstate parent info
566 566 removed = []
567 567 merged = []
568 568 for f in files:
569 569 if os.path.exists(repo.wjoin(f)):
570 570 merged.append(f)
571 571 else:
572 572 removed.append(f)
573 573 for f in removed:
574 574 repo.dirstate.remove(f)
575 575 for f in merged:
576 576 repo.dirstate.merge(f)
577 577 p1, p2 = repo.dirstate.parents()
578 578 repo.dirstate.setparents(p1, merge)
579 579
580 580 files = patch.updatedir(self.ui, repo, files)
581 581 match = cmdutil.matchfiles(repo, files or [])
582 582 n = repo.commit(files, message, ph.user, ph.date, match=match,
583 583 force=True)
584 584
585 585 if n is None:
586 586 raise util.Abort(_("repo commit failed"))
587 587
588 588 if update_status:
589 589 self.applied.append(statusentry(hex(n), patchname))
590 590
591 591 if patcherr:
592 592 self.ui.warn(_("patch failed, rejects left in working dir\n"))
593 593 err = 1
594 594 break
595 595
596 596 if fuzz and strict:
597 597 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
598 598 err = 1
599 599 break
600 600 return (err, n)
601 601
602 602 def _clean_series(self, patches):
603 603 indices = util.sort([self.find_series(p) for p in patches])
604 604 for i in indices[-1::-1]:
605 605 del self.full_series[i]
606 606 self.parse_series()
607 607 self.series_dirty = 1
608 608
609 609 def finish(self, repo, revs):
610 610 revs.sort()
611 611 firstrev = repo[self.applied[0].rev].rev()
612 612 appliedbase = 0
613 613 patches = []
614 614 for rev in util.sort(revs):
615 615 if rev < firstrev:
616 616 raise util.Abort(_('revision %d is not managed') % rev)
617 617 base = bin(self.applied[appliedbase].rev)
618 618 node = repo.changelog.node(rev)
619 619 if node != base:
620 620 raise util.Abort(_('cannot delete revision %d above '
621 621 'applied patches') % rev)
622 622 patches.append(self.applied[appliedbase].name)
623 623 appliedbase += 1
624 624
625 625 r = self.qrepo()
626 626 if r:
627 627 r.remove(patches, True)
628 628 else:
629 629 for p in patches:
630 630 os.unlink(self.join(p))
631 631
632 632 del self.applied[:appliedbase]
633 633 self.applied_dirty = 1
634 634 self._clean_series(patches)
635 635
636 636 def delete(self, repo, patches, opts):
637 637 if not patches and not opts.get('rev'):
638 638 raise util.Abort(_('qdelete requires at least one revision or '
639 639 'patch name'))
640 640
641 641 realpatches = []
642 642 for patch in patches:
643 643 patch = self.lookup(patch, strict=True)
644 644 info = self.isapplied(patch)
645 645 if info:
646 646 raise util.Abort(_("cannot delete applied patch %s") % patch)
647 647 if patch not in self.series:
648 648 raise util.Abort(_("patch %s not in series file") % patch)
649 649 realpatches.append(patch)
650 650
651 651 appliedbase = 0
652 652 if opts.get('rev'):
653 653 if not self.applied:
654 654 raise util.Abort(_('no patches applied'))
655 655 revs = cmdutil.revrange(repo, opts['rev'])
656 656 if len(revs) > 1 and revs[0] > revs[1]:
657 657 revs.reverse()
658 658 for rev in revs:
659 659 if appliedbase >= len(self.applied):
660 660 raise util.Abort(_("revision %d is not managed") % rev)
661 661
662 662 base = bin(self.applied[appliedbase].rev)
663 663 node = repo.changelog.node(rev)
664 664 if node != base:
665 665 raise util.Abort(_("cannot delete revision %d above "
666 666 "applied patches") % rev)
667 667 realpatches.append(self.applied[appliedbase].name)
668 668 appliedbase += 1
669 669
670 670 if not opts.get('keep'):
671 671 r = self.qrepo()
672 672 if r:
673 673 r.remove(realpatches, True)
674 674 else:
675 675 for p in realpatches:
676 676 os.unlink(self.join(p))
677 677
678 678 if appliedbase:
679 679 del self.applied[:appliedbase]
680 680 self.applied_dirty = 1
681 681 self._clean_series(realpatches)
682 682
683 683 def check_toppatch(self, repo):
684 684 if len(self.applied) > 0:
685 685 top = bin(self.applied[-1].rev)
686 686 pp = repo.dirstate.parents()
687 687 if top not in pp:
688 688 raise util.Abort(_("working directory revision is not qtip"))
689 689 return top
690 690 return None
691 691 def check_localchanges(self, repo, force=False, refresh=True):
692 692 m, a, r, d = repo.status()[:4]
693 693 if m or a or r or d:
694 694 if not force:
695 695 if refresh:
696 696 raise util.Abort(_("local changes found, refresh first"))
697 697 else:
698 698 raise util.Abort(_("local changes found"))
699 699 return m, a, r, d
700 700
701 701 _reserved = ('series', 'status', 'guards')
702 702 def check_reserved_name(self, name):
703 703 if (name in self._reserved or name.startswith('.hg')
704 704 or name.startswith('.mq')):
705 705 raise util.Abort(_('"%s" cannot be used as the name of a patch')
706 706 % name)
707 707
708 708 def new(self, repo, patchfn, *pats, **opts):
709 709 """options:
710 710 msg: a string or a no-argument function returning a string
711 711 """
712 712 msg = opts.get('msg')
713 713 force = opts.get('force')
714 714 user = opts.get('user')
715 715 date = opts.get('date')
716 716 if date:
717 717 date = util.parsedate(date)
718 718 self.check_reserved_name(patchfn)
719 719 if os.path.exists(self.join(patchfn)):
720 720 raise util.Abort(_('patch "%s" already exists') % patchfn)
721 721 if opts.get('include') or opts.get('exclude') or pats:
722 722 match = cmdutil.match(repo, pats, opts)
723 723 # detect missing files in pats
724 724 def badfn(f, msg):
725 725 raise util.Abort('%s: %s' % (f, msg))
726 726 match.bad = badfn
727 727 m, a, r, d = repo.status(match=match)[:4]
728 728 else:
729 729 m, a, r, d = self.check_localchanges(repo, force)
730 730 match = cmdutil.matchfiles(repo, m + a + r)
731 731 commitfiles = m + a + r
732 732 self.check_toppatch(repo)
733 733 insert = self.full_series_end()
734 734 wlock = repo.wlock()
735 735 try:
736 736 # if patch file write fails, abort early
737 737 p = self.opener(patchfn, "w")
738 738 try:
739 739 if date:
740 740 p.write("# HG changeset patch\n")
741 741 if user:
742 742 p.write("# User " + user + "\n")
743 743 p.write("# Date %d %d\n\n" % date)
744 744 elif user:
745 745 p.write("From: " + user + "\n\n")
746 746
747 747 if callable(msg):
748 748 msg = msg()
749 749 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
750 750 n = repo.commit(commitfiles, commitmsg, user, date, match=match, force=True)
751 751 if n is None:
752 752 raise util.Abort(_("repo commit failed"))
753 753 try:
754 754 self.full_series[insert:insert] = [patchfn]
755 755 self.applied.append(statusentry(hex(n), patchfn))
756 756 self.parse_series()
757 757 self.series_dirty = 1
758 758 self.applied_dirty = 1
759 759 if msg:
760 760 msg = msg + "\n\n"
761 761 p.write(msg)
762 762 if commitfiles:
763 763 diffopts = self.diffopts()
764 764 if opts.get('git'): diffopts.git = True
765 765 parent = self.qparents(repo, n)
766 766 chunks = patch.diff(repo, node1=parent, node2=n,
767 767 match=match, opts=diffopts)
768 768 for chunk in chunks:
769 769 p.write(chunk)
770 770 p.close()
771 771 wlock.release()
772 772 wlock = None
773 773 r = self.qrepo()
774 774 if r: r.add([patchfn])
775 775 except:
776 776 repo.rollback()
777 777 raise
778 778 except Exception:
779 779 patchpath = self.join(patchfn)
780 780 try:
781 781 os.unlink(patchpath)
782 782 except:
783 783 self.ui.warn(_('error unlinking %s\n') % patchpath)
784 784 raise
785 785 self.removeundo(repo)
786 786 finally:
787 787 release(wlock)
788 788
789 789 def strip(self, repo, rev, update=True, backup="all", force=None):
790 790 wlock = lock = None
791 791 try:
792 792 wlock = repo.wlock()
793 793 lock = repo.lock()
794 794
795 795 if update:
796 796 self.check_localchanges(repo, force=force, refresh=False)
797 797 urev = self.qparents(repo, rev)
798 798 hg.clean(repo, urev)
799 799 repo.dirstate.write()
800 800
801 801 self.removeundo(repo)
802 802 repair.strip(self.ui, repo, rev, backup)
803 803 # strip may have unbundled a set of backed up revisions after
804 804 # the actual strip
805 805 self.removeundo(repo)
806 806 finally:
807 807 release(lock, wlock)
808 808
809 809 def isapplied(self, patch):
810 810 """returns (index, rev, patch)"""
811 811 for i in xrange(len(self.applied)):
812 812 a = self.applied[i]
813 813 if a.name == patch:
814 814 return (i, a.rev, a.name)
815 815 return None
816 816
817 817 # if the exact patch name does not exist, we try a few
818 818 # variations. If strict is passed, we try only #1
819 819 #
820 820 # 1) a number to indicate an offset in the series file
821 821 # 2) a unique substring of the patch name was given
822 822 # 3) patchname[-+]num to indicate an offset in the series file
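# Editor's sketch of those variations, assuming series is
# ['bugfix', 'feature', 'cleanup'] with 'bugfix' and 'feature' applied:
#   lookup('1')         -> 'feature'  (offset into the series file)
#   lookup('clean')     -> 'cleanup'  (unique substring of the name)
#   lookup('cleanup-1') -> 'feature'  (name minus an offset)
#   lookup('qtip')      -> 'feature'; lookup('qbase') -> 'bugfix'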
823 823 def lookup(self, patch, strict=False):
824 824 patch = patch and str(patch)
825 825
826 826 def partial_name(s):
827 827 if s in self.series:
828 828 return s
829 829 matches = [x for x in self.series if s in x]
830 830 if len(matches) > 1:
831 831 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
832 832 for m in matches:
833 833 self.ui.warn(' %s\n' % m)
834 834 return None
835 835 if matches:
836 836 return matches[0]
837 837 if len(self.series) > 0 and len(self.applied) > 0:
838 838 if s == 'qtip':
839 839 return self.series[self.series_end(True)-1]
840 840 if s == 'qbase':
841 841 return self.series[0]
842 842 return None
843 843
844 844 if patch is None:
845 845 return None
846 846 if patch in self.series:
847 847 return patch
848 848
849 849 if not os.path.isfile(self.join(patch)):
850 850 try:
851 851 sno = int(patch)
852 852 except (ValueError, OverflowError):
853 853 pass
854 854 else:
855 855 if -len(self.series) <= sno < len(self.series):
856 856 return self.series[sno]
857 857
858 858 if not strict:
859 859 res = partial_name(patch)
860 860 if res:
861 861 return res
862 862 minus = patch.rfind('-')
863 863 if minus >= 0:
864 864 res = partial_name(patch[:minus])
865 865 if res:
866 866 i = self.series.index(res)
867 867 try:
868 868 off = int(patch[minus+1:] or 1)
869 869 except (ValueError, OverflowError):
870 870 pass
871 871 else:
872 872 if i - off >= 0:
873 873 return self.series[i - off]
874 874 plus = patch.rfind('+')
875 875 if plus >= 0:
876 876 res = partial_name(patch[:plus])
877 877 if res:
878 878 i = self.series.index(res)
879 879 try:
880 880 off = int(patch[plus+1:] or 1)
881 881 except (ValueError, OverflowError):
882 882 pass
883 883 else:
884 884 if i + off < len(self.series):
885 885 return self.series[i + off]
886 886 raise util.Abort(_("patch %s not in series") % patch)
887 887
888 888 def push(self, repo, patch=None, force=False, list=False,
889 889 mergeq=None, all=False):
890 890 wlock = repo.wlock()
891 891 if repo.dirstate.parents()[0] != repo.changelog.tip():
892 892 self.ui.status(_("(working directory not at tip)\n"))
893 893
894 894 if not self.series:
895 895 self.ui.warn(_('no patches in series\n'))
896 896 return 0
897 897
898 898 try:
899 899 patch = self.lookup(patch)
900 900 # Suppose our series file is: A B C and the current 'top'
901 901 # patch is B. qpush C should be performed (moving forward);
902 902 # qpush B is a no-op (no change); qpush A is an error (can't
903 903 # go backwards with qpush).
904 904 if patch:
905 905 info = self.isapplied(patch)
906 906 if info:
907 907 if info[0] < len(self.applied) - 1:
908 908 raise util.Abort(
909 909 _("cannot push to a previous patch: %s") % patch)
910 910 self.ui.warn(
911 911 _('qpush: %s is already at the top\n') % patch)
912 912 return
913 913 pushable, reason = self.pushable(patch)
914 914 if not pushable:
915 915 if reason:
916 916 reason = _('guarded by %r') % reason
917 917 else:
918 918 reason = _('no matching guards')
919 919 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
920 920 return 1
921 921 elif all:
922 922 patch = self.series[-1]
923 923 if self.isapplied(patch):
924 924 self.ui.warn(_('all patches are currently applied\n'))
925 925 return 0
926 926
927 927 # Following the above example, starting at 'top' of B:
928 928 # qpush should be performed (pushes C), but a subsequent
929 929 # qpush without an argument is an error (nothing to
930 930 # apply). This allows a loop of "...while hg qpush..." to
931 931 # work as it detects an error when done
932 932 start = self.series_end()
933 933 if start == len(self.series):
934 934 self.ui.warn(_('patch series already fully applied\n'))
935 935 return 1
936 936 if not force:
937 937 self.check_localchanges(repo)
938 938
939 939 self.applied_dirty = 1
940 940 if start > 0:
941 941 self.check_toppatch(repo)
942 942 if not patch:
943 943 patch = self.series[start]
944 944 end = start + 1
945 945 else:
946 946 end = self.series.index(patch, start) + 1
947 947 s = self.series[start:end]
948 948 all_files = {}
949 949 try:
950 950 if mergeq:
951 951 ret = self.mergepatch(repo, mergeq, s)
952 952 else:
953 953 ret = self.apply(repo, s, list, all_files=all_files)
954 954 except:
955 955 self.ui.warn(_('cleaning up working directory...'))
956 956 node = repo.dirstate.parents()[0]
957 957 hg.revert(repo, node, None)
958 958 unknown = repo.status(unknown=True)[4]
959 959 # only remove unknown files that we know we touched or
960 960 # created while patching
961 961 for f in unknown:
962 962 if f in all_files:
963 963 util.unlink(repo.wjoin(f))
964 964 self.ui.warn(_('done\n'))
965 965 raise
966 966 top = self.applied[-1].name
967 967 if ret[0]:
968 968 self.ui.write(_("errors during apply, please fix and "
969 969 "refresh %s\n") % top)
970 970 else:
971 971 self.ui.write(_("now at: %s\n") % top)
972 972 return ret[0]
973 973 finally:
974 974 wlock.release()
975 975
976 976 def pop(self, repo, patch=None, force=False, update=True, all=False):
977 977 def getfile(f, rev, flags):
978 978 t = repo.file(f).read(rev)
979 979 repo.wwrite(f, t, flags)
980 980
981 981 wlock = repo.wlock()
982 982 try:
983 983 if patch:
984 984 # index, rev, patch
985 985 info = self.isapplied(patch)
986 986 if not info:
987 987 patch = self.lookup(patch)
988 988 info = self.isapplied(patch)
989 989 if not info:
990 990 raise util.Abort(_("patch %s is not applied") % patch)
991 991
992 992 if len(self.applied) == 0:
993 993 # Allow qpop -a to work repeatedly,
994 994 # but not qpop without an argument
995 995 self.ui.warn(_("no patches applied\n"))
996 996 return not all
997 997
998 998 if all:
999 999 start = 0
1000 1000 elif patch:
1001 1001 start = info[0] + 1
1002 1002 else:
1003 1003 start = len(self.applied) - 1
1004 1004
1005 1005 if start >= len(self.applied):
1006 1006 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1007 1007 return
1008 1008
1009 1009 if not update:
1010 1010 parents = repo.dirstate.parents()
1011 1011 rr = [ bin(x.rev) for x in self.applied ]
1012 1012 for p in parents:
1013 1013 if p in rr:
1014 1014 self.ui.warn(_("qpop: forcing dirstate update\n"))
1015 1015 update = True
1016 1016 else:
1017 1017 parents = [p.hex() for p in repo[None].parents()]
1018 1018 needupdate = False
1019 1019 for entry in self.applied[start:]:
1020 1020 if entry.rev in parents:
1021 1021 needupdate = True
1022 1022 break
1023 1023 update = needupdate
1024 1024
1025 1025 if not force and update:
1026 1026 self.check_localchanges(repo)
1027 1027
1028 1028 self.applied_dirty = 1
1029 1029 end = len(self.applied)
1030 1030 rev = bin(self.applied[start].rev)
1031 1031 if update:
1032 1032 top = self.check_toppatch(repo)
1033 1033
1034 1034 try:
1035 1035 heads = repo.changelog.heads(rev)
1036 1036 except error.LookupError:
1037 1037 node = short(rev)
1038 1038 raise util.Abort(_('trying to pop unknown node %s') % node)
1039 1039
1040 1040 if heads != [bin(self.applied[-1].rev)]:
1041 1041 raise util.Abort(_("popping would remove a revision not "
1042 1042 "managed by this patch queue"))
1043 1043
1044 1044 # we know there are no local changes, so we can make a simplified
1045 1045 # form of hg.update.
1046 1046 if update:
1047 1047 qp = self.qparents(repo, rev)
1048 1048 changes = repo.changelog.read(qp)
1049 1049 mmap = repo.manifest.read(changes[0])
1050 1050 m, a, r, d = repo.status(qp, top)[:4]
1051 1051 if d:
1052 1052 raise util.Abort(_("deletions found between repo revs"))
1053 1053 for f in m:
1054 1054 getfile(f, mmap[f], mmap.flags(f))
1055 1055 for f in r:
1056 1056 getfile(f, mmap[f], mmap.flags(f))
1057 1057 for f in m + r:
1058 1058 repo.dirstate.normal(f)
1059 1059 for f in a:
1060 1060 try:
1061 1061 os.unlink(repo.wjoin(f))
1062 1062 except OSError, e:
1063 1063 if e.errno != errno.ENOENT:
1064 1064 raise
1065 1065 try: os.removedirs(os.path.dirname(repo.wjoin(f)))
1066 1066 except: pass
1067 1067 repo.dirstate.forget(f)
1068 1068 repo.dirstate.setparents(qp, nullid)
1069 1069 del self.applied[start:end]
1070 1070 self.strip(repo, rev, update=False, backup='strip')
1071 1071 if len(self.applied):
1072 1072 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1073 1073 else:
1074 1074 self.ui.write(_("patch queue now empty\n"))
1075 1075 finally:
1076 1076 wlock.release()
1077 1077
1078 1078 def diff(self, repo, pats, opts):
1079 1079 top = self.check_toppatch(repo)
1080 1080 if not top:
1081 1081 self.ui.write(_("no patches applied\n"))
1082 1082 return
1083 1083 qp = self.qparents(repo, top)
1084 1084 self._diffopts = patch.diffopts(self.ui, opts)
1085 1085 self.printdiff(repo, qp, files=pats, opts=opts)
1086 1086
1087 1087 def refresh(self, repo, pats=None, **opts):
1088 1088 if len(self.applied) == 0:
1089 1089 self.ui.write(_("no patches applied\n"))
1090 1090 return 1
1091 1091 msg = opts.get('msg', '').rstrip()
1092 1092 newuser = opts.get('user')
1093 1093 newdate = opts.get('date')
1094 1094 if newdate:
1095 1095 newdate = '%d %d' % util.parsedate(newdate)
1096 1096 wlock = repo.wlock()
1097 1097 try:
1098 1098 self.check_toppatch(repo)
1099 1099 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
1100 1100 top = bin(top)
1101 1101 if repo.changelog.heads(top) != [top]:
1102 1102 raise util.Abort(_("cannot refresh a revision with children"))
1103 1103 cparents = repo.changelog.parents(top)
1104 1104 patchparent = self.qparents(repo, top)
1105 1105 ph = self.readheaders(patchfn)
1106 1106
1107 1107 patchf = self.opener(patchfn, 'r')
1108 1108
1109 1109 # if the patch was a git patch, refresh it as a git patch
1110 1110 for line in patchf:
1111 1111 if line.startswith('diff --git'):
1112 1112 self.diffopts().git = True
1113 1113 break
1114 1114
1115 1115 if msg:
1116 1116 ph.setmessage(msg)
1117 1117 if newuser:
1118 1118 ph.setuser(newuser)
1119 1119 if newdate:
1120 1120 ph.setdate(newdate)
1121 1121
1122 1122 # only commit new patch when write is complete
1123 1123 patchf = self.opener(patchfn, 'w', atomictemp=True)
1124 1124
1125 1125 patchf.seek(0)
1126 1126 patchf.truncate()
1127 1127
1128 1128 comments = str(ph)
1129 1129 if comments:
1130 1130 patchf.write(comments)
1131 1131
1132 1132 if opts.get('git'):
1133 1133 self.diffopts().git = True
1134 1134 tip = repo.changelog.tip()
1135 1135 if top == tip:
1136 1136 # if the top of our patch queue is also the tip, there is an
1137 1137 # optimization here. We update the dirstate in place and strip
1138 1138 # off the tip commit. Then just commit the current directory
1139 1139 # tree. We can also send repo.commit the list of files
1140 1140 # changed to speed up the diff
1141 1141 #
1142 1142 # in short mode, we only diff the files included in the
1143 1143 # patch already plus specified files
1144 1144 #
1145 1145 # this should really read:
1146 1146 # mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
1147 1147 # but we do it backwards to take advantage of manifest/chlog
1148 1148 # caching against the next repo.status call
1149 1149 #
1150 1150 mm, aa, dd, aa2 = repo.status(patchparent, tip)[:4]
1151 1151 changes = repo.changelog.read(tip)
1152 1152 man = repo.manifest.read(changes[0])
1153 1153 aaa = aa[:]
1154 1154 matchfn = cmdutil.match(repo, pats, opts)
1155 1155 if opts.get('short'):
1156 1156 # if amending a patch, we start with existing
1157 1157 # files plus specified files - unfiltered
1158 1158 match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1159 1159 # filter with include/exclude options
1160 1160 matchfn = cmdutil.match(repo, opts=opts)
1161 1161 else:
1162 1162 match = cmdutil.matchall(repo)
1163 1163 m, a, r, d = repo.status(match=match)[:4]
1164 1164
1165 1165 # we might end up with files that were added between
1166 1166 # tip and the dirstate parent, but then changed in the
1167 1167 # local dirstate. in this case, we want them to only
1168 1168 # show up in the added section
1169 1169 for x in m:
1170 1170 if x not in aa:
1171 1171 mm.append(x)
1172 1172 # we might end up with files added by the local dirstate that
1173 1173 # were deleted by the patch. In this case, they should only
1174 1174 # show up in the changed section.
1175 1175 for x in a:
1176 1176 if x in dd:
1177 1177 del dd[dd.index(x)]
1178 1178 mm.append(x)
1179 1179 else:
1180 1180 aa.append(x)
1181 1181 # make sure any files deleted in the local dirstate
1182 1182 # are not in the add or change column of the patch
1183 1183 forget = []
1184 1184 for x in d + r:
1185 1185 if x in aa:
1186 1186 del aa[aa.index(x)]
1187 1187 forget.append(x)
1188 1188 continue
1189 1189 elif x in mm:
1190 1190 del mm[mm.index(x)]
1191 1191 dd.append(x)
1192 1192
1193 m = util.unique(mm)
1194 r = util.unique(dd)
1195 a = util.unique(aa)
1193 m = list(set(mm))
1194 r = list(set(dd))
1195 a = list(set(aa))
1196 1196 c = [filter(matchfn, l) for l in (m, a, r)]
1197 match = cmdutil.matchfiles(repo, util.unique(c[0] + c[1] + c[2]))
1197 match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
1198 1198 chunks = patch.diff(repo, patchparent, match=match,
1199 1199 changes=c, opts=self.diffopts())
1200 1200 for chunk in chunks:
1201 1201 patchf.write(chunk)
1202 1202
1203 1203 try:
1204 1204 if self.diffopts().git:
1205 1205 copies = {}
1206 1206 for dst in a:
1207 1207 src = repo.dirstate.copied(dst)
1208 1208 # during qfold, the source file for copies may
1209 1209 # be removed. Treat this as a simple add.
1210 1210 if src is not None and src in repo.dirstate:
1211 1211 copies.setdefault(src, []).append(dst)
1212 1212 repo.dirstate.add(dst)
1213 1213 # remember the copies between patchparent and tip
1214 1214 for dst in aaa:
1215 1215 f = repo.file(dst)
1216 1216 src = f.renamed(man[dst])
1217 1217 if src:
1218 1218 copies.setdefault(src[0], []).extend(copies.get(dst, []))
1219 1219 if dst in a:
1220 1220 copies[src[0]].append(dst)
1221 1221 # we can't copy a file created by the patch itself
1222 1222 if dst in copies:
1223 1223 del copies[dst]
1224 1224 for src, dsts in copies.iteritems():
1225 1225 for dst in dsts:
1226 1226 repo.dirstate.copy(src, dst)
1227 1227 else:
1228 1228 for dst in a:
1229 1229 repo.dirstate.add(dst)
1230 1230 # Drop useless copy information
1231 1231 for f in list(repo.dirstate.copies()):
1232 1232 repo.dirstate.copy(None, f)
1233 1233 for f in r:
1234 1234 repo.dirstate.remove(f)
1235 1235 # if the patch excludes a modified file, mark that
1236 1236 # file with mtime=0 so status can see it.
1237 1237 mm = []
1238 1238 for i in xrange(len(m)-1, -1, -1):
1239 1239 if not matchfn(m[i]):
1240 1240 mm.append(m[i])
1241 1241 del m[i]
1242 1242 for f in m:
1243 1243 repo.dirstate.normal(f)
1244 1244 for f in mm:
1245 1245 repo.dirstate.normallookup(f)
1246 1246 for f in forget:
1247 1247 repo.dirstate.forget(f)
1248 1248
1249 1249 if not msg:
1250 1250 if not ph.message:
1251 1251 message = "[mq]: %s\n" % patchfn
1252 1252 else:
1253 1253 message = "\n".join(ph.message)
1254 1254 else:
1255 1255 message = msg
1256 1256
1257 1257 user = ph.user or changes[1]
1258 1258
1259 1259 # assumes strip can roll itself back if interrupted
1260 1260 repo.dirstate.setparents(*cparents)
1261 1261 self.applied.pop()
1262 1262 self.applied_dirty = 1
1263 1263 self.strip(repo, top, update=False,
1264 1264 backup='strip')
1265 1265 except:
1266 1266 repo.dirstate.invalidate()
1267 1267 raise
1268 1268
1269 1269 try:
1270 1270 # might be nice to attempt to roll back strip after this
1271 1271 patchf.rename()
1272 1272 n = repo.commit(match.files(), message, user, ph.date,
1273 1273 match=match, force=1)
1274 1274 self.applied.append(statusentry(hex(n), patchfn))
1275 1275 except:
1276 1276 ctx = repo[cparents[0]]
1277 1277 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1278 1278 self.save_dirty()
1279 1279 self.ui.warn(_('refresh interrupted while patch was popped! '
1280 1280 '(revert --all, qpush to recover)\n'))
1281 1281 raise
1282 1282 else:
1283 1283 self.printdiff(repo, patchparent, fp=patchf)
1284 1284 patchf.rename()
1285 1285 added = repo.status()[1]
1286 1286 for a in added:
1287 1287 f = repo.wjoin(a)
1288 1288 try:
1289 1289 os.unlink(f)
1290 1290 except OSError, e:
1291 1291 if e.errno != errno.ENOENT:
1292 1292 raise
1293 1293 try: os.removedirs(os.path.dirname(f))
1294 1294 except: pass
1295 1295 # forget the file copies in the dirstate
1296 1296 # push should readd the files later on
1297 1297 repo.dirstate.forget(a)
1298 1298 self.pop(repo, force=True)
1299 1299 self.push(repo, force=True)
1300 1300 finally:
1301 1301 wlock.release()
1302 1302 self.removeundo(repo)
1303 1303
1304 1304 def init(self, repo, create=False):
1305 1305 if not create and os.path.isdir(self.path):
1306 1306 raise util.Abort(_("patch queue directory already exists"))
1307 1307 try:
1308 1308 os.mkdir(self.path)
1309 1309 except OSError, inst:
1310 1310 if inst.errno != errno.EEXIST or not create:
1311 1311 raise
1312 1312 if create:
1313 1313 return self.qrepo(create=True)
1314 1314
1315 1315 def unapplied(self, repo, patch=None):
1316 1316 if patch and patch not in self.series:
1317 1317 raise util.Abort(_("patch %s is not in series file") % patch)
1318 1318 if not patch:
1319 1319 start = self.series_end()
1320 1320 else:
1321 1321 start = self.series.index(patch) + 1
1322 1322 unapplied = []
1323 1323 for i in xrange(start, len(self.series)):
1324 1324 pushable, reason = self.pushable(i)
1325 1325 if pushable:
1326 1326 unapplied.append((i, self.series[i]))
1327 1327 self.explain_pushable(i)
1328 1328 return unapplied
1329 1329
1330 1330 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1331 1331 summary=False):
1332 1332 def displayname(patchname):
1333 1333 if summary:
1334 1334 ph = self.readheaders(patchname)
1335 1335 msg = ph.message
1336 1336 msg = msg and ': ' + msg[0] or ': '
1337 1337 else:
1338 1338 msg = ''
1339 1339 return '%s%s' % (patchname, msg)
1340 1340
1341 1341 applied = dict.fromkeys([p.name for p in self.applied])
1342 1342 if length is None:
1343 1343 length = len(self.series) - start
1344 1344 if not missing:
1345 1345 for i in xrange(start, start+length):
1346 1346 patch = self.series[i]
1347 1347 if patch in applied:
1348 1348 stat = 'A'
1349 1349 elif self.pushable(i)[0]:
1350 1350 stat = 'U'
1351 1351 else:
1352 1352 stat = 'G'
1353 1353 pfx = ''
1354 1354 if self.ui.verbose:
1355 1355 pfx = '%d %s ' % (i, stat)
1356 1356 elif status and status != stat:
1357 1357 continue
1358 1358 self.ui.write('%s%s\n' % (pfx, displayname(patch)))
1359 1359 else:
1360 1360 msng_list = []
1361 1361 for root, dirs, files in os.walk(self.path):
1362 1362 d = root[len(self.path) + 1:]
1363 1363 for f in files:
1364 1364 fl = os.path.join(d, f)
1365 1365 if (fl not in self.series and
1366 1366 fl not in (self.status_path, self.series_path,
1367 1367 self.guards_path)
1368 1368 and not fl.startswith('.')):
1369 1369 msng_list.append(fl)
1370 1370 for x in util.sort(msng_list):
1371 1371 pfx = self.ui.verbose and ('D ') or ''
1372 1372 self.ui.write("%s%s\n" % (pfx, displayname(x)))
1373 1373
1374 1374 def issaveline(self, l):
1375 1375 if l.name == '.hg.patches.save.line':
1376 1376 return True
1377 1377
1378 1378 def qrepo(self, create=False):
1379 1379 if create or os.path.isdir(self.join(".hg")):
1380 1380 return hg.repository(self.ui, path=self.path, create=create)
1381 1381
1382 1382 def restore(self, repo, rev, delete=None, qupdate=None):
1383 1383 c = repo.changelog.read(rev)
1384 1384 desc = c[4].strip()
1385 1385 lines = desc.splitlines()
1386 1386 i = 0
1387 1387 datastart = None
1388 1388 series = []
1389 1389 applied = []
1390 1390 qpp = None
1391 1391 for i in xrange(0, len(lines)):
1392 1392 if lines[i] == 'Patch Data:':
1393 1393 datastart = i + 1
1394 1394 elif lines[i].startswith('Dirstate:'):
1395 1395 l = lines[i].rstrip()
1396 1396 l = l[10:].split(' ')
1397 1397 qpp = [ bin(x) for x in l ]
1398 1398 elif datastart is not None:
1399 1399 l = lines[i].rstrip()
1400 1400 se = statusentry(l)
1401 1401 file_ = se.name
1402 1402 if se.rev:
1403 1403 applied.append(se)
1404 1404 else:
1405 1405 series.append(file_)
1406 1406 if datastart is None:
1407 1407 self.ui.warn(_("no saved patch data found\n"))
1408 1408 return 1
1409 1409 self.ui.warn(_("restoring status: %s\n") % lines[0])
1410 1410 self.full_series = series
1411 1411 self.applied = applied
1412 1412 self.parse_series()
1413 1413 self.series_dirty = 1
1414 1414 self.applied_dirty = 1
1415 1415 heads = repo.changelog.heads()
1416 1416 if delete:
1417 1417 if rev not in heads:
1418 1418 self.ui.warn(_("save entry has children, leaving it alone\n"))
1419 1419 else:
1420 1420 self.ui.warn(_("removing save entry %s\n") % short(rev))
1421 1421 pp = repo.dirstate.parents()
1422 1422 if rev in pp:
1423 1423 update = True
1424 1424 else:
1425 1425 update = False
1426 1426 self.strip(repo, rev, update=update, backup='strip')
1427 1427 if qpp:
1428 1428 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1429 1429 (short(qpp[0]), short(qpp[1])))
1430 1430 if qupdate:
1431 1431 self.ui.status(_("queue directory updating\n"))
1432 1432 r = self.qrepo()
1433 1433 if not r:
1434 1434 self.ui.warn(_("unable to load queue repository\n"))
1435 1435 return 1
1436 1436 hg.clean(r, qpp[0])
1437 1437
1438 1438 def save(self, repo, msg=None):
1439 1439 if len(self.applied) == 0:
1440 1440 self.ui.warn(_("save: no patches applied, exiting\n"))
1441 1441 return 1
1442 1442 if self.issaveline(self.applied[-1]):
1443 1443 self.ui.warn(_("status is already saved\n"))
1444 1444 return 1
1445 1445
1446 1446 ar = [ ':' + x for x in self.full_series ]
1447 1447 if not msg:
1448 1448 msg = _("hg patches saved state")
1449 1449 else:
1450 1450 msg = "hg patches: " + msg.rstrip('\r\n')
1451 1451 r = self.qrepo()
1452 1452 if r:
1453 1453 pp = r.dirstate.parents()
1454 1454 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1455 1455 msg += "\n\nPatch Data:\n"
1456 1456 text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
1457 1457 "\n".join(ar) + '\n' or "")
1458 1458 n = repo.commit(None, text, user=None, force=1)
1459 1459 if not n:
1460 1460 self.ui.warn(_("repo commit failed\n"))
1461 1461 return 1
1462 1462 self.applied.append(statusentry(hex(n),'.hg.patches.save.line'))
1463 1463 self.applied_dirty = 1
1464 1464 self.removeundo(repo)
1465 1465
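# Editor's sketch of the description save() builds (placeholders stand
# in for 40-char hex hashes; patch names hypothetical), which restore()
# parses back apart:
#
#   hg patches saved state
#   Dirstate: <hex of parent1> <hex of parent2>
#
#   Patch Data:
#   <hex of node>:applied.patch
#   :unapplied.patch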
1466 1466 def full_series_end(self):
1467 1467 if len(self.applied) > 0:
1468 1468 p = self.applied[-1].name
1469 1469 end = self.find_series(p)
1470 1470 if end is None:
1471 1471 return len(self.full_series)
1472 1472 return end + 1
1473 1473 return 0
1474 1474
1475 1475 def series_end(self, all_patches=False):
1476 1476 """If all_patches is False, return the index of the next pushable patch
1477 1477 in the series, or the series length. If all_patches is True, return the
1478 1478 index of the first patch past the last applied one.
1479 1479 """
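# Editor's sketch: with series ['a', 'b', 'c'], 'b' the last applied
# patch, and 'c' currently guarded off, series_end() returns 3 (nothing
# left to push) while series_end(all_patches=True) returns 2.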
1480 1480 end = 0
1481 1481 def next(start):
1482 1482 if all_patches:
1483 1483 return start
1484 1484 i = start
1485 1485 while i < len(self.series):
1486 1486 p, reason = self.pushable(i)
1487 1487 if p:
1488 1488 break
1489 1489 self.explain_pushable(i)
1490 1490 i += 1
1491 1491 return i
1492 1492 if len(self.applied) > 0:
1493 1493 p = self.applied[-1].name
1494 1494 try:
1495 1495 end = self.series.index(p)
1496 1496 except ValueError:
1497 1497 return 0
1498 1498 return next(end + 1)
1499 1499 return next(end)
1500 1500
1501 1501 def appliedname(self, index):
1502 1502 pname = self.applied[index].name
1503 1503 if not self.ui.verbose:
1504 1504 p = pname
1505 1505 else:
1506 1506 p = str(self.series.index(pname)) + " " + pname
1507 1507 return p
1508 1508
1509 1509 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1510 1510 force=None, git=False):
1511 1511 def checkseries(patchname):
1512 1512 if patchname in self.series:
1513 1513 raise util.Abort(_('patch %s is already in the series file')
1514 1514 % patchname)
1515 1515 def checkfile(patchname):
1516 1516 if not force and os.path.exists(self.join(patchname)):
1517 1517 raise util.Abort(_('patch "%s" already exists')
1518 1518 % patchname)
1519 1519
1520 1520 if rev:
1521 1521 if files:
1522 1522 raise util.Abort(_('option "-r" not valid when importing '
1523 1523 'files'))
1524 1524 rev = cmdutil.revrange(repo, rev)
1525 1525 rev.sort(lambda x, y: cmp(y, x))
1526 1526 if (len(files) > 1 or len(rev) > 1) and patchname:
1527 1527 raise util.Abort(_('option "-n" not valid when importing multiple '
1528 1528 'patches'))
1529 1529 i = 0
1530 1530 added = []
1531 1531 if rev:
1532 1532 # If mq patches are applied, we can only import revisions
1533 1533 # that form a linear path to qbase.
1534 1534 # Otherwise, they should form a linear path to a head.
1535 1535 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1536 1536 if len(heads) > 1:
1537 1537 raise util.Abort(_('revision %d is the root of more than one '
1538 1538 'branch') % rev[-1])
1539 1539 if self.applied:
1540 1540 base = hex(repo.changelog.node(rev[0]))
1541 1541 if base in [n.rev for n in self.applied]:
1542 1542 raise util.Abort(_('revision %d is already managed')
1543 1543 % rev[0])
1544 1544 if heads != [bin(self.applied[-1].rev)]:
1545 1545 raise util.Abort(_('revision %d is not the parent of '
1546 1546 'the queue') % rev[0])
1547 1547 base = repo.changelog.rev(bin(self.applied[0].rev))
1548 1548 lastparent = repo.changelog.parentrevs(base)[0]
1549 1549 else:
1550 1550 if heads != [repo.changelog.node(rev[0])]:
1551 1551 raise util.Abort(_('revision %d has unmanaged children')
1552 1552 % rev[0])
1553 1553 lastparent = None
1554 1554
1555 1555 if git:
1556 1556 self.diffopts().git = True
1557 1557
1558 1558 for r in rev:
1559 1559 p1, p2 = repo.changelog.parentrevs(r)
1560 1560 n = repo.changelog.node(r)
1561 1561 if p2 != nullrev:
1562 1562 raise util.Abort(_('cannot import merge revision %d') % r)
1563 1563 if lastparent and lastparent != r:
1564 1564 raise util.Abort(_('revision %d is not the parent of %d')
1565 1565 % (r, lastparent))
1566 1566 lastparent = p1
1567 1567
1568 1568 if not patchname:
1569 1569 patchname = normname('%d.diff' % r)
1570 1570 self.check_reserved_name(patchname)
1571 1571 checkseries(patchname)
1572 1572 checkfile(patchname)
1573 1573 self.full_series.insert(0, patchname)
1574 1574
1575 1575 patchf = self.opener(patchname, "w")
1576 1576 patch.export(repo, [n], fp=patchf, opts=self.diffopts())
1577 1577 patchf.close()
1578 1578
1579 1579 se = statusentry(hex(n), patchname)
1580 1580 self.applied.insert(0, se)
1581 1581
1582 1582 added.append(patchname)
1583 1583 patchname = None
1584 1584 self.parse_series()
1585 1585 self.applied_dirty = 1
1586 1586
1587 1587 for filename in files:
1588 1588 if existing:
1589 1589 if filename == '-':
1590 1590 raise util.Abort(_('-e is incompatible with import from -'))
1591 1591 if not patchname:
1592 1592 patchname = normname(filename)
1593 1593 self.check_reserved_name(patchname)
1594 1594 if not os.path.isfile(self.join(patchname)):
1595 1595 raise util.Abort(_("patch %s does not exist") % patchname)
1596 1596 else:
1597 1597 try:
1598 1598 if filename == '-':
1599 1599 if not patchname:
1600 1600 raise util.Abort(_('need --name to import a patch from -'))
1601 1601 text = sys.stdin.read()
1602 1602 else:
1603 1603 text = url.open(self.ui, filename).read()
1604 1604 except (OSError, IOError):
1605 1605 raise util.Abort(_("unable to read %s") % filename)
1606 1606 if not patchname:
1607 1607 patchname = normname(os.path.basename(filename))
1608 1608 self.check_reserved_name(patchname)
1609 1609 checkfile(patchname)
1610 1610 patchf = self.opener(patchname, "w")
1611 1611 patchf.write(text)
1612 1612 if not force:
1613 1613 checkseries(patchname)
1614 1614 if patchname not in self.series:
1615 1615 index = self.full_series_end() + i
1616 1616 self.full_series[index:index] = [patchname]
1617 1617 self.parse_series()
1618 1618 self.ui.warn(_("adding %s to series file\n") % patchname)
1619 1619 i += 1
1620 1620 added.append(patchname)
1621 1621 patchname = None
1622 1622 self.series_dirty = 1
1623 1623 qrepo = self.qrepo()
1624 1624 if qrepo:
1625 1625 qrepo.add(added)
1626 1626
1627 1627 def delete(ui, repo, *patches, **opts):
1628 1628 """remove patches from queue
1629 1629
1630 1630 The patches must not be applied, unless they are arguments to the
1631 1631 -r/--rev parameter. At least one patch or revision is required.
1632 1632
1633 1633 With --rev, mq will stop managing the named revisions (converting
1634 1634 them to regular mercurial changesets). The qfinish command should
1635 1635 be used as an alternative for qdelete -r, as the latter option is
1636 1636 deprecated.
1637 1637
1638 1638 With -k/--keep, the patch files are preserved in the patch
1639 1639 directory."""
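# Editor's usage sketch (patch names hypothetical):
#   hg qdelete obsolete.patch    # drop an unapplied patch
#   hg qdelete -k old.patch      # forget the patch, keep its file
#   hg qdelete -r qbase:qtip     # stop managing revisions (deprecated,
#                                # prefer qfinish)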
1640 1640 q = repo.mq
1641 1641 q.delete(repo, patches, opts)
1642 1642 q.save_dirty()
1643 1643 return 0
1644 1644
1645 1645 def applied(ui, repo, patch=None, **opts):
1646 1646 """print the patches already applied"""
1647 1647 q = repo.mq
1648 1648 if patch:
1649 1649 if patch not in q.series:
1650 1650 raise util.Abort(_("patch %s is not in series file") % patch)
1651 1651 end = q.series.index(patch) + 1
1652 1652 else:
1653 1653 end = q.series_end(True)
1654 1654 return q.qseries(repo, length=end, status='A', summary=opts.get('summary'))
1655 1655
1656 1656 def unapplied(ui, repo, patch=None, **opts):
1657 1657 """print the patches not yet applied"""
1658 1658 q = repo.mq
1659 1659 if patch:
1660 1660 if patch not in q.series:
1661 1661 raise util.Abort(_("patch %s is not in series file") % patch)
1662 1662 start = q.series.index(patch) + 1
1663 1663 else:
1664 1664 start = q.series_end(True)
1665 1665 q.qseries(repo, start=start, status='U', summary=opts.get('summary'))
1666 1666
1667 1667 def qimport(ui, repo, *filename, **opts):
1668 1668 """import a patch
1669 1669
1670 1670 The patch is inserted into the series after the last applied
1671 1671 patch. If no patches have been applied, qimport prepends the patch
1672 1672 to the series.
1673 1673
1674 1674 The patch will have the same name as its source file unless you
1675 1675 give it a new one with -n/--name.
1676 1676
1677 1677 You can register an existing patch inside the patch directory with
1678 1678 the -e/--existing flag.
1679 1679
1680 1680 With -f/--force, an existing patch of the same name will be
1681 1681 overwritten.
1682 1682
1683 1683 An existing changeset may be placed under mq control with -r/--rev
1684 1684 (e.g. qimport --rev tip -n patch will place tip under mq control).
1685 1685 With -g/--git, patches imported with --rev will use the git diff
1686 1686 format. See the diffs help topic for information on why this is
1687 1687 important for preserving rename/copy information and permission
1688 1688 changes.
1689 1689
1690 1690 To import a patch from standard input, pass - as the patch file.
1691 1691 When importing from standard input, a patch name must be specified
1692 1692 using the --name flag.
1693 1693 """
1694 1694 q = repo.mq
1695 1695 q.qimport(repo, filename, patchname=opts['name'],
1696 1696 existing=opts['existing'], force=opts['force'], rev=opts['rev'],
1697 1697 git=opts['git'])
1698 1698 q.save_dirty()
1699 1699 return 0
1700 1700
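# Editorial note, not part of the original file: hedged usage sketches for
# qimport; file and patch names are hypothetical.
#   hg qimport fix-crash.diff                 # insert after last applied patch
#   hg qimport -e existing.patch              # register a file already in .hg/patches
#   cat fix.diff | hg qimport -n fix.patch -  # stdin requires an explicit --name
#   hg qimport -g -r tip -n tip.patch         # place tip under mq control, git format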
1701 1701 def init(ui, repo, **opts):
1702 1702 """init a new queue repository
1703 1703
1704 1704 The queue repository is unversioned by default. If
1705 1705 -c/--create-repo is specified, qinit will create a separate nested
1706 1706 repository for patches (qinit -c may also be run later to convert
1707 1707 an unversioned patch repository into a versioned one). You can use
1708 1708 qcommit to commit changes to this queue repository."""
1709 1709 q = repo.mq
1710 1710 r = q.init(repo, create=opts['create_repo'])
1711 1711 q.save_dirty()
1712 1712 if r:
1713 1713 if not os.path.exists(r.wjoin('.hgignore')):
1714 1714 fp = r.wopener('.hgignore', 'w')
1715 1715 fp.write('^\\.hg\n')
1716 1716 fp.write('^\\.mq\n')
1717 1717 fp.write('syntax: glob\n')
1718 1718 fp.write('status\n')
1719 1719 fp.write('guards\n')
1720 1720 fp.close()
1721 1721 if not os.path.exists(r.wjoin('series')):
1722 1722 r.wopener('series', 'w').close()
1723 1723 r.add(['.hgignore', 'series'])
1724 1724 commands.add(ui, r)
1725 1725 return 0
1726 1726
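# Editorial note, not part of the original file: qinit is typically run once
# per repository; a hedged sketch:
#   hg qinit       # unversioned patch queue in .hg/patches
#   hg qinit -c    # versioned queue: creates the nested repo plus the
#                  # .hgignore and series files written by init() above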
1727 1727 def clone(ui, source, dest=None, **opts):
1728 1728 '''clone main and patch repository at same time
1729 1729
1730 1730 If the source is local, the destination will have no patches
1731 1731 applied. If the source is remote, this command cannot check
1732 1732 whether patches are applied in the source, so it cannot
1733 1733 guarantee that patches are not applied in the destination. If
1734 1734 you clone a remote repository, first make sure it has no patches applied.
1735 1735
1736 1736 The source patch repository is looked for in <src>/.hg/patches
1737 1737 by default. Use -p <url> to change it.
1738 1738
1739 1739 The patch directory must be a nested mercurial repository, as
1740 1740 would be created by qinit -c.
1741 1741 '''
1742 1742 def patchdir(repo):
1743 1743 url = repo.url()
1744 1744 if url.endswith('/'):
1745 1745 url = url[:-1]
1746 1746 return url + '/.hg/patches'
1747 1747 cmdutil.setremoteconfig(ui, opts)
1748 1748 if dest is None:
1749 1749 dest = hg.defaultdest(source)
1750 1750 sr = hg.repository(ui, ui.expandpath(source))
1751 1751 if opts['patches']:
1752 1752 patchespath = ui.expandpath(opts['patches'])
1753 1753 else:
1754 1754 patchespath = patchdir(sr)
1755 1755 try:
1756 1756 hg.repository(ui, patchespath)
1757 1757 except error.RepoError:
1758 1758 raise util.Abort(_('versioned patch repository not found'
1759 1759 ' (see qinit -c)'))
1760 1760 qbase, destrev = None, None
1761 1761 if sr.local():
1762 1762 if sr.mq.applied:
1763 1763 qbase = bin(sr.mq.applied[0].rev)
1764 1764 if not hg.islocal(dest):
1765 1765 heads = dict.fromkeys(sr.heads())
1766 1766 for h in sr.heads(qbase):
1767 1767 del heads[h]
1768 1768 destrev = heads.keys()
1769 1769 destrev.append(sr.changelog.parents(qbase)[0])
1770 1770 elif sr.capable('lookup'):
1771 1771 try:
1772 1772 qbase = sr.lookup('qbase')
1773 1773 except error.RepoError:
1774 1774 pass
1775 1775 ui.note(_('cloning main repository\n'))
1776 1776 sr, dr = hg.clone(ui, sr.url(), dest,
1777 1777 pull=opts['pull'],
1778 1778 rev=destrev,
1779 1779 update=False,
1780 1780 stream=opts['uncompressed'])
1781 1781 ui.note(_('cloning patch repository\n'))
1782 1782 hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
1783 1783 pull=opts['pull'], update=not opts['noupdate'],
1784 1784 stream=opts['uncompressed'])
1785 1785 if dr.local():
1786 1786 if qbase:
1787 1787 ui.note(_('stripping applied patches from destination '
1788 1788 'repository\n'))
1789 1789 dr.mq.strip(dr, qbase, update=False, backup=None)
1790 1790 if not opts['noupdate']:
1791 1791 ui.note(_('updating destination repository\n'))
1792 1792 hg.update(dr, dr.changelog.tip())
1793 1793
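# Editorial note, not part of the original file: hedged usage sketches for
# qclone; the URLs are hypothetical.
#   hg qclone http://example.com/repo              # also clones repo/.hg/patches
#   hg qclone -p http://example.com/patches repo   # explicit patch repo location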
1794 1794 def commit(ui, repo, *pats, **opts):
1795 1795 """commit changes in the queue repository"""
1796 1796 q = repo.mq
1797 1797 r = q.qrepo()
1798 1798 if not r: raise util.Abort(_('no queue repository'))
1799 1799 commands.commit(r.ui, r, *pats, **opts)
1800 1800
1801 1801 def series(ui, repo, **opts):
1802 1802 """print the entire series file"""
1803 1803 repo.mq.qseries(repo, missing=opts['missing'], summary=opts['summary'])
1804 1804 return 0
1805 1805
1806 1806 def top(ui, repo, **opts):
1807 1807 """print the name of the current patch"""
1808 1808 q = repo.mq
1809 1809 t = q.applied and q.series_end(True) or 0
1810 1810 if t:
1811 1811 return q.qseries(repo, start=t-1, length=1, status='A',
1812 1812 summary=opts.get('summary'))
1813 1813 else:
1814 1814 ui.write(_("no patches applied\n"))
1815 1815 return 1
1816 1816
1817 1817 def next(ui, repo, **opts):
1818 1818 """print the name of the next patch"""
1819 1819 q = repo.mq
1820 1820 end = q.series_end()
1821 1821 if end == len(q.series):
1822 1822 ui.write(_("all patches applied\n"))
1823 1823 return 1
1824 1824 return q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
1825 1825
1826 1826 def prev(ui, repo, **opts):
1827 1827 """print the name of the previous patch"""
1828 1828 q = repo.mq
1829 1829 l = len(q.applied)
1830 1830 if l == 1:
1831 1831 ui.write(_("only one patch applied\n"))
1832 1832 return 1
1833 1833 if not l:
1834 1834 ui.write(_("no patches applied\n"))
1835 1835 return 1
1836 1836 return q.qseries(repo, start=l-2, length=1, status='A',
1837 1837 summary=opts.get('summary'))
1838 1838
1839 1839 def setupheaderopts(ui, opts):
1840 1840 def do(opt,val):
1841 1841 if not opts[opt] and opts['current' + opt]:
1842 1842 opts[opt] = val
1843 1843 do('user', ui.username())
1844 1844 do('date', "%d %d" % util.makedate())
1845 1845
1846 1846 def new(ui, repo, patch, *args, **opts):
1847 1847 """create a new patch
1848 1848
1849 1849 qnew creates a new patch on top of the currently-applied patch (if
1850 1850 any). It will refuse to run if there are any outstanding changes
1851 1851 unless -f/--force is specified, in which case the patch will be
1852 1852 initialized with them. You may also use -I/--include,
1853 1853 -X/--exclude, and/or a list of files after the patch name to add
1854 1854 only changes to matching files to the new patch, leaving the rest
1855 1855 as uncommitted modifications.
1856 1856
1857 1857 -u/--user and -d/--date can be used to set the (given) user and
1858 1858 date, respectively. -U/--currentuser and -D/--currentdate set user
1859 1859 to current user and date to current date.
1860 1860
1861 1861 -e/--edit, -m/--message or -l/--logfile set the patch header as
1862 1862 well as the commit message. If none is specified, the header is
1863 1863 empty and the commit message is '[mq]: PATCH'.
1864 1864
1865 1865 Use the -g/--git option to keep the patch in the git extended diff
1866 1866 format. Read the diffs help topic for more information on why this
1867 1867 is important for preserving permission changes and copy/rename
1868 1868 information.
1869 1869 """
1870 1870 msg = cmdutil.logmessage(opts)
1871 1871 def getmsg(): return ui.edit(msg, ui.username())
1872 1872 q = repo.mq
1873 1873 opts['msg'] = msg
1874 1874 if opts.get('edit'):
1875 1875 opts['msg'] = getmsg
1876 1876 else:
1877 1877 opts['msg'] = msg
1878 1878 setupheaderopts(ui, opts)
1879 1879 q.new(repo, patch, *args, **opts)
1880 1880 q.save_dirty()
1881 1881 return 0
1882 1882
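# Editorial note, not part of the original file: hedged usage sketches for
# qnew; patch names and messages are hypothetical.
#   hg qnew bugfix.patch                   # empty patch, message '[mq]: bugfix.patch'
#   hg qnew -f -m 'fix parser' fix.patch   # start the patch from outstanding changes
#   hg qnew -U -D -g fix.patch             # stamp current user/date, git diff format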
1883 1883 def refresh(ui, repo, *pats, **opts):
1884 1884 """update the current patch
1885 1885
1886 1886 If any file patterns are provided, the refreshed patch will
1887 1887 contain only the modifications that match those patterns; the
1888 1888 remaining modifications will remain in the working directory.
1889 1889
1890 1890 If -s/--short is specified, files currently included in the patch
1891 1891 will be refreshed just like matched files and remain in the patch.
1892 1892
1893 1893 hg add/remove/copy/rename work as usual, though you might want to
1894 1894 use git-style patches (-g/--git or [diff] git=1) to track copies
1895 1895 and renames. See the diffs help topic for more information on the
1896 1896 git diff format.
1897 1897 """
1898 1898 q = repo.mq
1899 1899 message = cmdutil.logmessage(opts)
1900 1900 if opts['edit']:
1901 1901 if not q.applied:
1902 1902 ui.write(_("no patches applied\n"))
1903 1903 return 1
1904 1904 if message:
1905 1905 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
1906 1906 patch = q.applied[-1].name
1907 1907 ph = q.readheaders(patch)
1908 1908 message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
1909 1909 setupheaderopts(ui, opts)
1910 1910 ret = q.refresh(repo, pats, msg=message, **opts)
1911 1911 q.save_dirty()
1912 1912 return ret
1913 1913
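# Editorial note, not part of the original file: hedged usage sketches for
# qrefresh; the file name is hypothetical.
#   hg qrefresh              # absorb working directory changes into the top patch
#   hg qrefresh -e           # also edit the patch header/commit message
#   hg qrefresh -s file.c    # only files already in the patch, plus file.c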
1914 1914 def diff(ui, repo, *pats, **opts):
1915 1915 """diff of the current patch and subsequent modifications
1916 1916
1917 1917 Shows a diff which includes the current patch as well as any
1918 1918 changes which have been made in the working directory since the
1919 1919 last refresh (thus showing what the current patch would become
1920 1920 after a qrefresh).
1921 1921
1922 1922 Use 'hg diff' if you only want to see the changes made since the
1923 1923 last qrefresh, or 'hg export qtip' if you want to see changes made
1924 1924 by the current patch without including changes made since the
1925 1925 qrefresh.
1926 1926 """
1927 1927 repo.mq.diff(repo, pats, opts)
1928 1928 return 0
1929 1929
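# Editorial note, not part of the original file: how qdiff relates to the
# neighbouring commands, per the docstring above:
#   hg qdiff          # current patch plus changes since the last qrefresh
#   hg diff           # only the changes made since the last qrefresh
#   hg export qtip    # only the current patch, as of the last qrefresh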
1930 1930 def fold(ui, repo, *files, **opts):
1931 1931 """fold the named patches into the current patch
1932 1932
1933 1933 Patches must not yet be applied. Each patch will be successively
1934 1934 applied to the current patch in the order given. If all the
1935 1935 patches apply successfully, the current patch will be refreshed
1936 1936 with the new cumulative patch, and the folded patches will be
1937 1937 deleted. With -k/--keep, the folded patch files will not be
1938 1938 removed afterwards.
1939 1939
1940 1940 The header for each folded patch will be concatenated with the
1941 1941 current patch header, separated by a line of '* * *'."""
1942 1942
1943 1943 q = repo.mq
1944 1944
1945 1945 if not files:
1946 1946 raise util.Abort(_('qfold requires at least one patch name'))
1947 1947 if not q.check_toppatch(repo):
1948 1948 raise util.Abort(_('No patches applied'))
1949 1949
1950 1950 message = cmdutil.logmessage(opts)
1951 1951 if opts['edit']:
1952 1952 if message:
1953 1953 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
1954 1954
1955 1955 parent = q.lookup('qtip')
1956 1956 patches = []
1957 1957 messages = []
1958 1958 for f in files:
1959 1959 p = q.lookup(f)
1960 1960 if p in patches or p == parent:
1961 1961 ui.warn(_('Skipping already folded patch %s\n') % p)
1962 1962 if q.isapplied(p):
1963 1963 raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
1964 1964 patches.append(p)
1965 1965
1966 1966 for p in patches:
1967 1967 if not message:
1968 1968 ph = q.readheaders(p)
1969 1969 if ph.message:
1970 1970 messages.append(ph.message)
1971 1971 pf = q.join(p)
1972 1972 (patchsuccess, files, fuzz) = q.patch(repo, pf)
1973 1973 if not patchsuccess:
1974 1974 raise util.Abort(_('Error folding patch %s') % p)
1975 1975 patch.updatedir(ui, repo, files)
1976 1976
1977 1977 if not message:
1978 1978 ph = q.readheaders(parent)
1979 1979 message, user = ph.message, ph.user
1980 1980 for msg in messages:
1981 1981 message.append('* * *')
1982 1982 message.extend(msg)
1983 1983 message = '\n'.join(message)
1984 1984
1985 1985 if opts['edit']:
1986 1986 message = ui.edit(message, user or ui.username())
1987 1987
1988 1988 q.refresh(repo, msg=message)
1989 1989 q.delete(repo, patches, opts)
1990 1990 q.save_dirty()
1991 1991
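# Editorial note, not part of the original file: a hedged usage sketch for
# qfold; patch names are hypothetical.
#   hg qfold followup.patch cleanup.patch    # apply both onto the current patch,
#                                            # refresh it, delete the folded patches
#   hg qfold -k followup.patch               # same, but keep the folded patch file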
1992 1992 def goto(ui, repo, patch, **opts):
1993 1993 '''push or pop patches until named patch is at top of stack'''
1994 1994 q = repo.mq
1995 1995 patch = q.lookup(patch)
1996 1996 if q.isapplied(patch):
1997 1997 ret = q.pop(repo, patch, force=opts['force'])
1998 1998 else:
1999 1999 ret = q.push(repo, patch, force=opts['force'])
2000 2000 q.save_dirty()
2001 2001 return ret
2002 2002
2003 2003 def guard(ui, repo, *args, **opts):
2004 2004 '''set or print guards for a patch
2005 2005
2006 2006 Guards control whether a patch can be pushed. A patch with no
2007 2007 guards is always pushed. A patch with a positive guard ("+foo") is
2008 2008 pushed only if the qselect command has activated it. A patch with
2009 2009 a negative guard ("-foo") is never pushed if the qselect command
2010 2010 has activated it.
2011 2011
2012 2012 With no arguments, print the currently active guards.
2013 2013 With arguments, set guards for the named patch.
2014 2014 NOTE: Specifying negative guards now requires '--'.
2015 2015
2016 2016 To set guards on another patch:
2017 2017 hg qguard -- other.patch +2.6.17 -stable
2018 2018 '''
2019 2019 def status(idx):
2020 2020 guards = q.series_guards[idx] or ['unguarded']
2021 2021 ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
2022 2022 q = repo.mq
2023 2023 patch = None
2024 2024 args = list(args)
2025 2025 if opts['list']:
2026 2026 if args or opts['none']:
2027 2027 raise util.Abort(_('cannot mix -l/--list with options or arguments'))
2028 2028 for i in xrange(len(q.series)):
2029 2029 status(i)
2030 2030 return
2031 2031 if not args or args[0][0:1] in '-+':
2032 2032 if not q.applied:
2033 2033 raise util.Abort(_('no patches applied'))
2034 2034 patch = q.applied[-1].name
2035 2035 if patch is None and args[0][0:1] not in '-+':
2036 2036 patch = args.pop(0)
2037 2037 if patch is None:
2038 2038 raise util.Abort(_('no patch to work with'))
2039 2039 if args or opts['none']:
2040 2040 idx = q.find_series(patch)
2041 2041 if idx is None:
2042 2042 raise util.Abort(_('no patch named %s') % patch)
2043 2043 q.set_guards(idx, args)
2044 2044 q.save_dirty()
2045 2045 else:
2046 2046 status(q.series.index(q.lookup(patch)))
2047 2047
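# Editorial note, not part of the original file: further hedged qguard
# sketches; patch and guard names are hypothetical.
#   hg qguard                            # show guards of the topmost patch
#   hg qguard -l                         # list guards for the whole series
#   hg qguard -- fix.patch +exp -stable  # '--' is required for negative guards
#   hg qguard -n fix.patch               # drop all guards from fix.patch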
2048 2048 def header(ui, repo, patch=None):
2049 2049 """print the header of the topmost or specified patch"""
2050 2050 q = repo.mq
2051 2051
2052 2052 if patch:
2053 2053 patch = q.lookup(patch)
2054 2054 else:
2055 2055 if not q.applied:
2056 2056 ui.write('no patches applied\n')
2057 2057 return 1
2058 2058 patch = q.lookup('qtip')
2059 2059 ph = repo.mq.readheaders(patch)
2060 2060
2061 2061 ui.write('\n'.join(ph.message) + '\n')
2062 2062
2063 2063 def lastsavename(path):
2064 2064 (directory, base) = os.path.split(path)
2065 2065 names = os.listdir(directory)
2066 2066 namere = re.compile("%s.([0-9]+)" % base)
2067 2067 maxindex = None
2068 2068 maxname = None
2069 2069 for f in names:
2070 2070 m = namere.match(f)
2071 2071 if m:
2072 2072 index = int(m.group(1))
2073 2073 if maxindex is None or index > maxindex:
2074 2074 maxindex = index
2075 2075 maxname = f
2076 2076 if maxname:
2077 2077 return (os.path.join(directory, maxname), maxindex)
2078 2078 return (None, None)
2079 2079
2080 2080 def savename(path):
2081 2081 (last, index) = lastsavename(path)
2082 2082 if last is None:
2083 2083 index = 0
2084 2084 newpath = path + ".%d" % (index + 1)
2085 2085 return newpath
2086 2086
2087 2087 def push(ui, repo, patch=None, **opts):
2088 2088 """push the next patch onto the stack
2089 2089
2090 2090 When -f/--force is applied, all local changes in patched files
2091 2091 will be lost.
2092 2092 """
2093 2093 q = repo.mq
2094 2094 mergeq = None
2095 2095
2096 2096 if opts['merge']:
2097 2097 if opts['name']:
2098 2098 newpath = repo.join(opts['name'])
2099 2099 else:
2100 2100 newpath, i = lastsavename(q.path)
2101 2101 if not newpath:
2102 2102 ui.warn(_("no saved queues found, please use -n\n"))
2103 2103 return 1
2104 2104 mergeq = queue(ui, repo.join(""), newpath)
2105 2105 ui.warn(_("merging with queue at: %s\n") % mergeq.path)
2106 2106 ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
2107 2107 mergeq=mergeq, all=opts.get('all'))
2108 2108 return ret
2109 2109
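# Editorial note, not part of the original file: hedged usage sketches for
# qpush:
#   hg qpush       # apply the next patch in the series
#   hg qpush -a    # apply every remaining patch
#   hg qpush -f    # apply even with rejects; local changes in patched
#                  # files are lost, as the docstring above warns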
2110 2110 def pop(ui, repo, patch=None, **opts):
2111 2111 """pop the current patch off the stack
2112 2112
2113 2113 By default, pops off the top of the patch stack. If given a patch
2114 2114 name, keeps popping off patches until the named patch is at the
2115 2115 top of the stack.
2116 2116 """
2117 2117 localupdate = True
2118 2118 if opts['name']:
2119 2119 q = queue(ui, repo.join(""), repo.join(opts['name']))
2120 2120 ui.warn(_('using patch queue: %s\n') % q.path)
2121 2121 localupdate = False
2122 2122 else:
2123 2123 q = repo.mq
2124 2124 ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
2125 2125 all=opts['all'])
2126 2126 q.save_dirty()
2127 2127 return ret
2128 2128
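# Editorial note, not part of the original file: hedged usage sketches for
# qpop; the patch name is hypothetical.
#   hg qpop                  # unapply the topmost patch
#   hg qpop -a               # unapply all patches
#   hg qpop earlier.patch    # keep popping until earlier.patch is on top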
2129 2129 def rename(ui, repo, patch, name=None, **opts):
2130 2130 """rename a patch
2131 2131
2132 2132 With one argument, renames the current patch to PATCH1.
2133 2133 With two arguments, renames PATCH1 to PATCH2."""
2134 2134
2135 2135 q = repo.mq
2136 2136
2137 2137 if not name:
2138 2138 name = patch
2139 2139 patch = None
2140 2140
2141 2141 if patch:
2142 2142 patch = q.lookup(patch)
2143 2143 else:
2144 2144 if not q.applied:
2145 2145 ui.write(_('no patches applied\n'))
2146 2146 return
2147 2147 patch = q.lookup('qtip')
2148 2148 absdest = q.join(name)
2149 2149 if os.path.isdir(absdest):
2150 2150 name = normname(os.path.join(name, os.path.basename(patch)))
2151 2151 absdest = q.join(name)
2152 2152 if os.path.exists(absdest):
2153 2153 raise util.Abort(_('%s already exists') % absdest)
2154 2154
2155 2155 if name in q.series:
2156 2156 raise util.Abort(_('A patch named %s already exists in the series file') % name)
2157 2157
2158 2158 if ui.verbose:
2159 2159 ui.write('renaming %s to %s\n' % (patch, name))
2160 2160 i = q.find_series(patch)
2161 2161 guards = q.guard_re.findall(q.full_series[i])
2162 2162 q.full_series[i] = name + ''.join([' #' + g for g in guards])
2163 2163 q.parse_series()
2164 2164 q.series_dirty = 1
2165 2165
2166 2166 info = q.isapplied(patch)
2167 2167 if info:
2168 2168 q.applied[info[0]] = statusentry(info[1], name)
2169 2169 q.applied_dirty = 1
2170 2170
2171 2171 util.rename(q.join(patch), absdest)
2172 2172 r = q.qrepo()
2173 2173 if r:
2174 2174 wlock = r.wlock()
2175 2175 try:
2176 2176 if r.dirstate[patch] == 'a':
2177 2177 r.dirstate.forget(patch)
2178 2178 r.dirstate.add(name)
2179 2179 else:
2180 2180 if r.dirstate[name] == 'r':
2181 2181 r.undelete([name])
2182 2182 r.copy(patch, name)
2183 2183 r.remove([patch], False)
2184 2184 finally:
2185 2185 wlock.release()
2186 2186
2187 2187 q.save_dirty()
2188 2188
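# Editorial note, not part of the original file: hedged usage sketches for
# qrename; patch names are hypothetical.
#   hg qrename better-name.patch    # rename the current (topmost) patch
#   hg qrename old.patch new.patch  # rename a specific patch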
2189 2189 def restore(ui, repo, rev, **opts):
2190 2190 """restore the queue state saved by a revision"""
2191 2191 rev = repo.lookup(rev)
2192 2192 q = repo.mq
2193 2193 q.restore(repo, rev, delete=opts['delete'],
2194 2194 qupdate=opts['update'])
2195 2195 q.save_dirty()
2196 2196 return 0
2197 2197
2198 2198 def save(ui, repo, **opts):
2199 2199 """save current queue state"""
2200 2200 q = repo.mq
2201 2201 message = cmdutil.logmessage(opts)
2202 2202 ret = q.save(repo, msg=message)
2203 2203 if ret:
2204 2204 return ret
2205 2205 q.save_dirty()
2206 2206 if opts['copy']:
2207 2207 path = q.path
2208 2208 if opts['name']:
2209 2209 newpath = os.path.join(q.basepath, opts['name'])
2210 2210 if os.path.exists(newpath):
2211 2211 if not os.path.isdir(newpath):
2212 2212 raise util.Abort(_('destination %s exists and is not '
2213 2213 'a directory') % newpath)
2214 2214 if not opts['force']:
2215 2215 raise util.Abort(_('destination %s exists, '
2216 2216 'use -f to force') % newpath)
2217 2217 else:
2218 2218 newpath = savename(path)
2219 2219 ui.warn(_("copy %s to %s\n") % (path, newpath))
2220 2220 util.copyfiles(path, newpath)
2221 2221 if opts['empty']:
2222 2222 try:
2223 2223 os.unlink(q.join(q.status_path))
2224 2224 except OSError:
2225 2225 pass
2226 2226 return 0
2227 2227
2228 2228 def strip(ui, repo, rev, **opts):
2229 2229 """strip a revision and all its descendants from the repository
2230 2230
2231 2231 If one of the working directory's parent revisions is stripped, the
2232 2232 working directory will be updated to the parent of the stripped
2233 2233 revision.
2234 2234 """
2235 2235 backup = 'all'
2236 2236 if opts['backup']:
2237 2237 backup = 'strip'
2238 2238 elif opts['nobackup']:
2239 2239 backup = 'none'
2240 2240
2241 2241 rev = repo.lookup(rev)
2242 2242 p = repo.dirstate.parents()
2243 2243 cl = repo.changelog
2244 2244 update = True
2245 2245 if p[0] == nullid:
2246 2246 update = False
2247 2247 elif p[1] == nullid and rev != cl.ancestor(p[0], rev):
2248 2248 update = False
2249 2249 elif rev not in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev)):
2250 2250 update = False
2251 2251
2252 2252 repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
2253 2253 return 0
2254 2254
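# Editorial note, not part of the original file: hedged usage sketches for
# strip; by default the stripped changesets are saved to a backup bundle:
#   hg strip 42      # remove revision 42 and all of its descendants
#   hg strip -n 42   # the same, without writing a backup bundle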
2255 2255 def select(ui, repo, *args, **opts):
2256 2256 '''set or print guarded patches to push
2257 2257
2258 2258 Use the qguard command to set or print guards on patch, then use
2259 2259 qselect to tell mq which guards to use. A patch will be pushed if
2260 2260 it has no guards or any positive guards match the currently
2261 2261 selected guard, but will not be pushed if any negative guards
2262 2262 match the current guard. For example:
2263 2263
2264 2264 qguard foo.patch -stable (negative guard)
2265 2265 qguard bar.patch +stable (positive guard)
2266 2266 qselect stable
2267 2267
2268 2268 This activates the "stable" guard. mq will skip foo.patch (because
2269 2269 it has a negative match) but push bar.patch (because it has a
2270 2270 positive match).
2271 2271
2272 2272 With no arguments, prints the currently active guards.
2273 2273 With one argument, sets the active guard.
2274 2274
2275 2275 Use -n/--none to deactivate guards (no other arguments needed).
2276 2276 When no guards are active, patches with positive guards are
2277 2277 skipped and patches with negative guards are pushed.
2278 2278
2279 2279 qselect can change the guards on applied patches. It does not pop
2280 2280 guarded patches by default. Use --pop to pop back to the last
2281 2281 applied patch that is not guarded. Use --reapply (which implies
2282 2282 --pop) to push back to the current patch afterwards, but skip
2283 2283 guarded patches.
2284 2284
2285 2285 Use -s/--series to print a list of all guards in the series file
2286 2286 (no other arguments needed). Use -v for more information.'''
2287 2287
2288 2288 q = repo.mq
2289 2289 guards = q.active()
2290 2290 if args or opts['none']:
2291 2291 old_unapplied = q.unapplied(repo)
2292 2292 old_guarded = [i for i in xrange(len(q.applied)) if
2293 2293 not q.pushable(i)[0]]
2294 2294 q.set_active(args)
2295 2295 q.save_dirty()
2296 2296 if not args:
2297 2297 ui.status(_('guards deactivated\n'))
2298 2298 if not opts['pop'] and not opts['reapply']:
2299 2299 unapplied = q.unapplied(repo)
2300 2300 guarded = [i for i in xrange(len(q.applied))
2301 2301 if not q.pushable(i)[0]]
2302 2302 if len(unapplied) != len(old_unapplied):
2303 2303 ui.status(_('number of unguarded, unapplied patches has '
2304 2304 'changed from %d to %d\n') %
2305 2305 (len(old_unapplied), len(unapplied)))
2306 2306 if len(guarded) != len(old_guarded):
2307 2307 ui.status(_('number of guarded, applied patches has changed '
2308 2308 'from %d to %d\n') %
2309 2309 (len(old_guarded), len(guarded)))
2310 2310 elif opts['series']:
2311 2311 guards = {}
2312 2312 noguards = 0
2313 2313 for gs in q.series_guards:
2314 2314 if not gs:
2315 2315 noguards += 1
2316 2316 for g in gs:
2317 2317 guards.setdefault(g, 0)
2318 2318 guards[g] += 1
2319 2319 if ui.verbose:
2320 2320 guards['NONE'] = noguards
2321 2321 guards = guards.items()
2322 2322 guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
2323 2323 if guards:
2324 2324 ui.note(_('guards in series file:\n'))
2325 2325 for guard, count in guards:
2326 2326 ui.note('%2d ' % count)
2327 2327 ui.write(guard, '\n')
2328 2328 else:
2329 2329 ui.note(_('no guards in series file\n'))
2330 2330 else:
2331 2331 if guards:
2332 2332 ui.note(_('active guards:\n'))
2333 2333 for g in guards:
2334 2334 ui.write(g, '\n')
2335 2335 else:
2336 2336 ui.write(_('no active guards\n'))
2337 2337 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2338 2338 popped = False
2339 2339 if opts['pop'] or opts['reapply']:
2340 2340 for i in xrange(len(q.applied)):
2341 2341 pushable, reason = q.pushable(i)
2342 2342 if not pushable:
2343 2343 ui.status(_('popping guarded patches\n'))
2344 2344 popped = True
2345 2345 if i == 0:
2346 2346 q.pop(repo, all=True)
2347 2347 else:
2348 2348 q.pop(repo, i-1)
2349 2349 break
2350 2350 if popped:
2351 2351 try:
2352 2352 if reapply:
2353 2353 ui.status(_('reapplying unguarded patches\n'))
2354 2354 q.push(repo, reapply)
2355 2355 finally:
2356 2356 q.save_dirty()
2357 2357
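# Editorial note, not part of the original file: a hedged guard workflow
# combining qguard and qselect; names are hypothetical.
#   hg qguard -- exp.patch +experimental   # guard a patch (see qguard above)
#   hg qselect experimental                # activate the guard: exp.patch is pushable
#   hg qselect --pop                       # pop back to the last unguarded applied patch
#   hg qselect -s                          # list every guard in the series file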
2358 2358 def finish(ui, repo, *revrange, **opts):
2359 2359 """move applied patches into repository history
2360 2360
2361 2361 Finishes the specified revisions (corresponding to applied
2362 2362 patches) by moving them out of mq control into regular repository
2363 2363 history.
2364 2364
2365 2365 Accepts a revision range or the -a/--applied option. If --applied
2366 2366 is specified, all applied mq revisions are removed from mq
2367 2367 control. Otherwise, the given revisions must be at the base of the
2368 2368 stack of applied patches.
2369 2369
2370 2370 This can be especially useful if your changes have been applied to
2371 2371 an upstream repository, or if you are about to push your changes
2372 2372 to upstream.
2373 2373 """
2374 2374 if not opts['applied'] and not revrange:
2375 2375 raise util.Abort(_('no revisions specified'))
2376 2376 elif opts['applied']:
2377 2377 revrange = ('qbase:qtip',) + revrange
2378 2378
2379 2379 q = repo.mq
2380 2380 if not q.applied:
2381 2381 ui.status(_('no patches applied\n'))
2382 2382 return 0
2383 2383
2384 2384 revs = cmdutil.revrange(repo, revrange)
2385 2385 q.finish(repo, revs)
2386 2386 q.save_dirty()
2387 2387 return 0
2388 2388
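# Editorial note, not part of the original file: hedged usage sketches for
# qfinish:
#   hg qfinish -a            # move every applied mq patch into regular history
#   hg qfinish qbase:qtip    # the same, spelled as an explicit revision range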
2389 2389 def reposetup(ui, repo):
2390 2390 class mqrepo(repo.__class__):
2391 2391 def abort_if_wdir_patched(self, errmsg, force=False):
2392 2392 if self.mq.applied and not force:
2393 2393 parent = hex(self.dirstate.parents()[0])
2394 2394 if parent in [s.rev for s in self.mq.applied]:
2395 2395 raise util.Abort(errmsg)
2396 2396
2397 2397 def commit(self, *args, **opts):
2398 2398 if len(args) >= 6:
2399 2399 force = args[5]
2400 2400 else:
2401 2401 force = opts.get('force')
2402 2402 self.abort_if_wdir_patched(
2403 2403 _('cannot commit over an applied mq patch'),
2404 2404 force)
2405 2405
2406 2406 return super(mqrepo, self).commit(*args, **opts)
2407 2407
2408 2408 def push(self, remote, force=False, revs=None):
2409 2409 if self.mq.applied and not force and not revs:
2410 2410 raise util.Abort(_('source has mq patches applied'))
2411 2411 return super(mqrepo, self).push(remote, force, revs)
2412 2412
2413 2413 def tags(self):
2414 2414 if self.tagscache:
2415 2415 return self.tagscache
2416 2416
2417 2417 tagscache = super(mqrepo, self).tags()
2418 2418
2419 2419 q = self.mq
2420 2420 if not q.applied:
2421 2421 return tagscache
2422 2422
2423 2423 mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]
2424 2424
2425 2425 if mqtags[-1][0] not in self.changelog.nodemap:
2426 2426 self.ui.warn(_('mq status file refers to unknown node %s\n')
2427 2427 % short(mqtags[-1][0]))
2428 2428 return tagscache
2429 2429
2430 2430 mqtags.append((mqtags[-1][0], 'qtip'))
2431 2431 mqtags.append((mqtags[0][0], 'qbase'))
2432 2432 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2433 2433 for patch in mqtags:
2434 2434 if patch[1] in tagscache:
2435 2435 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2436 2436 % patch[1])
2437 2437 else:
2438 2438 tagscache[patch[1]] = patch[0]
2439 2439
2440 2440 return tagscache
2441 2441
2442 2442 def _branchtags(self, partial, lrev):
2443 2443 q = self.mq
2444 2444 if not q.applied:
2445 2445 return super(mqrepo, self)._branchtags(partial, lrev)
2446 2446
2447 2447 cl = self.changelog
2448 2448 qbasenode = bin(q.applied[0].rev)
2449 2449 if qbasenode not in cl.nodemap:
2450 2450 self.ui.warn(_('mq status file refers to unknown node %s\n')
2451 2451 % short(qbasenode))
2452 2452 return super(mqrepo, self)._branchtags(partial, lrev)
2453 2453
2454 2454 qbase = cl.rev(qbasenode)
2455 2455 start = lrev + 1
2456 2456 if start < qbase:
2457 2457 # update the cache (excluding the patches) and save it
2458 2458 self._updatebranchcache(partial, lrev+1, qbase)
2459 2459 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2460 2460 start = qbase
2461 2461 # if start = qbase, the cache is as updated as it should be.
2462 2462 # if start > qbase, the cache includes (part of) the patches.
2463 2463 # we might as well use it, but we won't save it.
2464 2464
2465 2465 # update the cache up to the tip
2466 2466 self._updatebranchcache(partial, start, len(cl))
2467 2467
2468 2468 return partial
2469 2469
2470 2470 if repo.local():
2471 2471 repo.__class__ = mqrepo
2472 2472 repo.mq = queue(ui, repo.join(""))
2473 2473
2474 2474 def mqimport(orig, ui, repo, *args, **kwargs):
2475 2475 if hasattr(repo, 'abort_if_wdir_patched'):
2476 2476 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2477 2477 kwargs.get('force'))
2478 2478 return orig(ui, repo, *args, **kwargs)
2479 2479
2480 2480 def uisetup(ui):
2481 2481 extensions.wrapcommand(commands.table, 'import', mqimport)
2482 2482
2483 2483 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2484 2484
2485 2485 cmdtable = {
2486 2486 "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
2487 2487 "qclone":
2488 2488 (clone,
2489 2489 [('', 'pull', None, _('use pull protocol to copy metadata')),
2490 2490 ('U', 'noupdate', None, _('do not update the new working directories')),
2491 2491 ('', 'uncompressed', None,
2492 2492 _('use uncompressed transfer (fast over LAN)')),
2493 2493 ('p', 'patches', '', _('location of source patch repository')),
2494 2494 ] + commands.remoteopts,
2495 2495 _('hg qclone [OPTION]... SOURCE [DEST]')),
2496 2496 "qcommit|qci":
2497 2497 (commit,
2498 2498 commands.table["^commit|ci"][1],
2499 2499 _('hg qcommit [OPTION]... [FILE]...')),
2500 2500 "^qdiff":
2501 2501 (diff,
2502 2502 commands.diffopts + commands.diffopts2 + commands.walkopts,
2503 2503 _('hg qdiff [OPTION]... [FILE]...')),
2504 2504 "qdelete|qremove|qrm":
2505 2505 (delete,
2506 2506 [('k', 'keep', None, _('keep patch file')),
2507 2507 ('r', 'rev', [], _('stop managing a revision'))],
2508 2508 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2509 2509 'qfold':
2510 2510 (fold,
2511 2511 [('e', 'edit', None, _('edit patch header')),
2512 2512 ('k', 'keep', None, _('keep folded patch files')),
2513 2513 ] + commands.commitopts,
2514 2514 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2515 2515 'qgoto':
2516 2516 (goto,
2517 2517 [('f', 'force', None, _('overwrite any local changes'))],
2518 2518 _('hg qgoto [OPTION]... PATCH')),
2519 2519 'qguard':
2520 2520 (guard,
2521 2521 [('l', 'list', None, _('list all patches and guards')),
2522 2522 ('n', 'none', None, _('drop all guards'))],
2523 2523 _('hg qguard [-l] [-n] -- [PATCH] [+GUARD]... [-GUARD]...')),
2524 2524 'qheader': (header, [], _('hg qheader [PATCH]')),
2525 2525 "^qimport":
2526 2526 (qimport,
2527 2527 [('e', 'existing', None, _('import file in patch directory')),
2528 2528 ('n', 'name', '', _('patch file name')),
2529 2529 ('f', 'force', None, _('overwrite existing files')),
2530 2530 ('r', 'rev', [], _('place existing revisions under mq control')),
2531 2531 ('g', 'git', None, _('use git extended diff format'))],
2532 2532 _('hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...')),
2533 2533 "^qinit":
2534 2534 (init,
2535 2535 [('c', 'create-repo', None, _('create queue repository'))],
2536 2536 _('hg qinit [-c]')),
2537 2537 "qnew":
2538 2538 (new,
2539 2539 [('e', 'edit', None, _('edit commit message')),
2540 2540 ('f', 'force', None, _('import uncommitted changes into patch')),
2541 2541 ('g', 'git', None, _('use git extended diff format')),
2542 2542 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2543 2543 ('u', 'user', '', _('add "From: <given user>" to patch')),
2544 2544 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2545 2545 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2546 2546 ] + commands.walkopts + commands.commitopts,
2547 2547 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2548 2548 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2549 2549 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2550 2550 "^qpop":
2551 2551 (pop,
2552 2552 [('a', 'all', None, _('pop all patches')),
2553 2553 ('n', 'name', '', _('queue name to pop')),
2554 2554 ('f', 'force', None, _('forget any local changes'))],
2555 2555 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2556 2556 "^qpush":
2557 2557 (push,
2558 2558 [('f', 'force', None, _('apply if the patch has rejects')),
2559 2559 ('l', 'list', None, _('list patch name in commit text')),
2560 2560 ('a', 'all', None, _('apply all patches')),
2561 2561 ('m', 'merge', None, _('merge from another queue')),
2562 2562 ('n', 'name', '', _('merge queue name'))],
2563 2563 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2564 2564 "^qrefresh":
2565 2565 (refresh,
2566 2566 [('e', 'edit', None, _('edit commit message')),
2567 2567 ('g', 'git', None, _('use git extended diff format')),
2568 2568 ('s', 'short', None, _('refresh only files already in the patch and specified files')),
2569 2569 ('U', 'currentuser', None, _('add/update "From: <current user>" in patch')),
2570 2570 ('u', 'user', '', _('add/update "From: <given user>" in patch')),
2571 2571 ('D', 'currentdate', None, _('update "Date: <current date>" in patch (if present)')),
2572 2572 ('d', 'date', '', _('update "Date: <given date>" in patch (if present)'))
2573 2573 ] + commands.walkopts + commands.commitopts,
2574 2574 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2575 2575 'qrename|qmv':
2576 2576 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2577 2577 "qrestore":
2578 2578 (restore,
2579 2579 [('d', 'delete', None, _('delete save entry')),
2580 2580 ('u', 'update', None, _('update queue working directory'))],
2581 2581 _('hg qrestore [-d] [-u] REV')),
2582 2582 "qsave":
2583 2583 (save,
2584 2584 [('c', 'copy', None, _('copy patch directory')),
2585 2585 ('n', 'name', '', _('copy directory name')),
2586 2586 ('e', 'empty', None, _('clear queue status file')),
2587 2587 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2588 2588 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2589 2589 "qselect":
2590 2590 (select,
2591 2591 [('n', 'none', None, _('disable all guards')),
2592 2592 ('s', 'series', None, _('list all guards in series file')),
2593 2593 ('', 'pop', None, _('pop to before first guarded applied patch')),
2594 2594 ('', 'reapply', None, _('pop, then reapply patches'))],
2595 2595 _('hg qselect [OPTION]... [GUARD]...')),
2596 2596 "qseries":
2597 2597 (series,
2598 2598 [('m', 'missing', None, _('print patches not in series')),
2599 2599 ] + seriesopts,
2600 2600 _('hg qseries [-ms]')),
2601 2601 "^strip":
2602 2602 (strip,
2603 2603 [('f', 'force', None, _('force removal with local changes')),
2604 2604 ('b', 'backup', None, _('bundle unrelated changesets')),
2605 2605 ('n', 'nobackup', None, _('no backups'))],
2606 2606 _('hg strip [-f] [-b] [-n] REV')),
2607 2607 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2608 2608 "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
2609 2609 "qfinish":
2610 2610 (finish,
2611 2611 [('a', 'applied', None, _('finish all applied changesets'))],
2612 2612 _('hg qfinish [-a] [REV...]')),
2613 2613 }
@@ -1,806 +1,806 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import nullid, nullrev, short, hex
9 9 from i18n import _
10 10 import ancestor, bdiff, error, util, os, errno
11 11
12 12 class propertycache(object):
13 13 def __init__(self, func):
14 14 self.func = func
15 15 self.name = func.__name__
16 16 def __get__(self, obj, type=None):
17 17 result = self.func(obj)
18 18 setattr(obj, self.name, result)
19 19 return result
20 20
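# Editorial note, not part of the original file: a minimal sketch of how the
# propertycache descriptor above behaves; the Example class is hypothetical.
#   class Example(object):
#       def expensive(self):
#           print 'computed'    # runs only once per instance
#           return 42
#       expensive = propertycache(expensive)
#
#   e = Example()
#   e.expensive   # prints 'computed' and returns 42; __get__ stores the result
#   e.expensive   # returns 42 directly: the instance attribute set by __get__
#                 # now shadows the (non-data) descriptor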
21 21 class changectx(object):
22 22 """A changecontext object makes access to data related to a particular
23 23 changeset convenient."""
24 24 def __init__(self, repo, changeid=''):
25 25 """changeid is a revision number, node, or tag"""
26 26 if changeid == '':
27 27 changeid = '.'
28 28 self._repo = repo
29 29 if isinstance(changeid, (long, int)):
30 30 self._rev = changeid
31 31 self._node = self._repo.changelog.node(changeid)
32 32 else:
33 33 self._node = self._repo.lookup(changeid)
34 34 self._rev = self._repo.changelog.rev(self._node)
35 35
36 36 def __str__(self):
37 37 return short(self.node())
38 38
39 39 def __int__(self):
40 40 return self.rev()
41 41
42 42 def __repr__(self):
43 43 return "<changectx %s>" % str(self)
44 44
45 45 def __hash__(self):
46 46 try:
47 47 return hash(self._rev)
48 48 except AttributeError:
49 49 return id(self)
50 50
51 51 def __eq__(self, other):
52 52 try:
53 53 return self._rev == other._rev
54 54 except AttributeError:
55 55 return False
56 56
57 57 def __ne__(self, other):
58 58 return not (self == other)
59 59
60 60 def __nonzero__(self):
61 61 return self._rev != nullrev
62 62
63 63 def _changeset(self):
64 64 return self._repo.changelog.read(self.node())
65 65 _changeset = propertycache(_changeset)
66 66
67 67 def _manifest(self):
68 68 return self._repo.manifest.read(self._changeset[0])
69 69 _manifest = propertycache(_manifest)
70 70
71 71 def _manifestdelta(self):
72 72 return self._repo.manifest.readdelta(self._changeset[0])
73 73 _manifestdelta = propertycache(_manifestdelta)
74 74
75 75 def _parents(self):
76 76 p = self._repo.changelog.parentrevs(self._rev)
77 77 if p[1] == nullrev:
78 78 p = p[:-1]
79 79 return [changectx(self._repo, x) for x in p]
80 80 _parents = propertycache(_parents)
81 81
82 82 def __contains__(self, key):
83 83 return key in self._manifest
84 84
85 85 def __getitem__(self, key):
86 86 return self.filectx(key)
87 87
88 88 def __iter__(self):
89 89 for f in util.sort(self._manifest):
90 90 yield f
91 91
92 92 def changeset(self): return self._changeset
93 93 def manifest(self): return self._manifest
94 94
95 95 def rev(self): return self._rev
96 96 def node(self): return self._node
97 97 def hex(self): return hex(self._node)
98 98 def user(self): return self._changeset[1]
99 99 def date(self): return self._changeset[2]
100 100 def files(self): return self._changeset[3]
101 101 def description(self): return self._changeset[4]
102 102 def branch(self): return self._changeset[5].get("branch")
103 103 def extra(self): return self._changeset[5]
104 104 def tags(self): return self._repo.nodetags(self._node)
105 105
106 106 def parents(self):
107 107 """return contexts for each parent changeset"""
108 108 return self._parents
109 109
110 110 def children(self):
111 111 """return contexts for each child changeset"""
112 112 c = self._repo.changelog.children(self._node)
113 113 return [changectx(self._repo, x) for x in c]
114 114
115 115 def ancestors(self):
116 116 for a in self._repo.changelog.ancestors(self._rev):
117 117 yield changectx(self._repo, a)
118 118
119 119 def descendants(self):
120 120 for d in self._repo.changelog.descendants(self._rev):
121 121 yield changectx(self._repo, d)
122 122
123 123 def _fileinfo(self, path):
124 124 if '_manifest' in self.__dict__:
125 125 try:
126 126 return self._manifest[path], self._manifest.flags(path)
127 127 except KeyError:
128 128 raise error.LookupError(self._node, path,
129 129 _('not found in manifest'))
130 130 if '_manifestdelta' in self.__dict__ or path in self.files():
131 131 if path in self._manifestdelta:
132 132 return self._manifestdelta[path], self._manifestdelta.flags(path)
133 133 node, flag = self._repo.manifest.find(self._changeset[0], path)
134 134 if not node:
135 135 raise error.LookupError(self._node, path,
136 136 _('not found in manifest'))
137 137
138 138 return node, flag
139 139
140 140 def filenode(self, path):
141 141 return self._fileinfo(path)[0]
142 142
143 143 def flags(self, path):
144 144 try:
145 145 return self._fileinfo(path)[1]
146 146 except error.LookupError:
147 147 return ''
148 148
149 149 def filectx(self, path, fileid=None, filelog=None):
150 150 """get a file context from this changeset"""
151 151 if fileid is None:
152 152 fileid = self.filenode(path)
153 153 return filectx(self._repo, path, fileid=fileid,
154 154 changectx=self, filelog=filelog)
155 155
156 156 def ancestor(self, c2):
157 157 """
158 158 return the ancestor context of self and c2
159 159 """
160 160 n = self._repo.changelog.ancestor(self._node, c2._node)
161 161 return changectx(self._repo, n)
162 162
163 163 def walk(self, match):
164 164 fdict = dict.fromkeys(match.files())
165 165 # for dirstate.walk, files=['.'] means "walk the whole tree".
166 166 # follow that here, too
167 167 fdict.pop('.', None)
168 168 for fn in self:
169 169 for ffn in fdict:
170 170 # match if the file is the exact name or a directory
171 171 if ffn == fn or fn.startswith("%s/" % ffn):
172 172 del fdict[ffn]
173 173 break
174 174 if match(fn):
175 175 yield fn
176 176 for fn in util.sort(fdict):
177 177 if match.bad(fn, 'No such file in rev ' + str(self)) and match(fn):
178 178 yield fn
179 179
180 180 class filectx(object):
181 181 """A filecontext object makes access to data related to a particular
182 182 filerevision convenient."""
183 183 def __init__(self, repo, path, changeid=None, fileid=None,
184 184 filelog=None, changectx=None):
185 185 """changeid can be a changeset revision, node, or tag.
186 186 fileid can be a file revision or node."""
187 187 self._repo = repo
188 188 self._path = path
189 189
190 190 assert (changeid is not None
191 191 or fileid is not None
192 192 or changectx is not None)
193 193
194 194 if filelog:
195 195 self._filelog = filelog
196 196
197 197 if changeid is not None:
198 198 self._changeid = changeid
199 199 if changectx is not None:
200 200 self._changectx = changectx
201 201 if fileid is not None:
202 202 self._fileid = fileid
203 203
204 204 def _changectx(self):
205 205 return changectx(self._repo, self._changeid)
206 206 _changectx = propertycache(_changectx)
207 207
208 208 def _filelog(self):
209 209 return self._repo.file(self._path)
210 210 _filelog = propertycache(_filelog)
211 211
212 212 def _changeid(self):
213 213 if '_changectx' in self.__dict__:
214 214 return self._changectx.rev()
215 215 else:
216 216 return self._filelog.linkrev(self._filerev)
217 217 _changeid = propertycache(_changeid)
218 218
219 219 def _filenode(self):
220 220 if '_fileid' in self.__dict__:
221 221 return self._filelog.lookup(self._fileid)
222 222 else:
223 223 return self._changectx.filenode(self._path)
224 224 _filenode = propertycache(_filenode)
225 225
226 226 def _filerev(self):
227 227 return self._filelog.rev(self._filenode)
228 228 _filerev = propertycache(_filerev)
229 229
230 230 def _repopath(self):
231 231 return self._path
232 232 _repopath = propertycache(_repopath)
233 233
234 234 def __nonzero__(self):
235 235 try:
236 236 self._filenode
237 237 return True
238 238 except error.LookupError:
239 239 # file is missing
240 240 return False
241 241
242 242 def __str__(self):
243 243 return "%s@%s" % (self.path(), short(self.node()))
244 244
245 245 def __repr__(self):
246 246 return "<filectx %s>" % str(self)
247 247
248 248 def __hash__(self):
249 249 try:
250 250 return hash((self._path, self._fileid))
251 251 except AttributeError:
252 252 return id(self)
253 253
254 254 def __eq__(self, other):
255 255 try:
256 256 return (self._path == other._path
257 257 and self._fileid == other._fileid)
258 258 except AttributeError:
259 259 return False
260 260
261 261 def __ne__(self, other):
262 262 return not (self == other)
263 263
264 264 def filectx(self, fileid):
265 265 '''opens an arbitrary revision of the file without
266 266 opening a new filelog'''
267 267 return filectx(self._repo, self._path, fileid=fileid,
268 268 filelog=self._filelog)
269 269
270 270 def filerev(self): return self._filerev
271 271 def filenode(self): return self._filenode
272 272 def flags(self): return self._changectx.flags(self._path)
273 273 def filelog(self): return self._filelog
274 274
275 275 def rev(self):
276 276 if '_changectx' in self.__dict__:
277 277 return self._changectx.rev()
278 278 if '_changeid' in self.__dict__:
279 279 return self._changectx.rev()
280 280 return self._filelog.linkrev(self._filerev)
281 281
282 282 def linkrev(self): return self._filelog.linkrev(self._filerev)
283 283 def node(self): return self._changectx.node()
284 284 def user(self): return self._changectx.user()
285 285 def date(self): return self._changectx.date()
286 286 def files(self): return self._changectx.files()
287 287 def description(self): return self._changectx.description()
288 288 def branch(self): return self._changectx.branch()
289 289 def manifest(self): return self._changectx.manifest()
290 290 def changectx(self): return self._changectx
291 291
292 292 def data(self): return self._filelog.read(self._filenode)
293 293 def path(self): return self._path
294 294 def size(self): return self._filelog.size(self._filerev)
295 295
296 296 def cmp(self, text): return self._filelog.cmp(self._filenode, text)
297 297
298 298 def renamed(self):
299 299 """check if file was actually renamed in this changeset revision
300 300
301 301 If a rename is logged in the file revision, we report the copy
302 302 for the changeset only if the file revision's linkrev points back
303 303 to the changeset in question or both parents contain different file revisions.
304 304 """
305 305
306 306 renamed = self._filelog.renamed(self._filenode)
307 307 if not renamed:
308 308 return renamed
309 309
310 310 if self.rev() == self.linkrev():
311 311 return renamed
312 312
313 313 name = self.path()
314 314 fnode = self._filenode
315 315 for p in self._changectx.parents():
316 316 try:
317 317 if fnode == p.filenode(name):
318 318 return None
319 319 except error.LookupError:
320 320 pass
321 321 return renamed
322 322
323 323 def parents(self):
324 324 p = self._path
325 325 fl = self._filelog
326 326 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
327 327
328 328 r = self._filelog.renamed(self._filenode)
329 329 if r:
330 330 pl[0] = (r[0], r[1], None)
331 331
332 332 return [filectx(self._repo, p, fileid=n, filelog=l)
333 333 for p,n,l in pl if n != nullid]
334 334
335 335 def children(self):
336 336 # hard for renames
337 337 c = self._filelog.children(self._filenode)
338 338 return [filectx(self._repo, self._path, fileid=x,
339 339 filelog=self._filelog) for x in c]
340 340
341 341 def annotate(self, follow=False, linenumber=None):
342 342 '''returns a list of tuples of (ctx, line) for each line
343 343 in the file, where ctx is the filectx of the node where
344 344 that line was last changed.
345 345 If the "linenumber" parameter is not None, this instead
346 346 returns tuples of ((ctx, linenumber), line) for each line,
347 347 where linenumber is the line's number at its first
348 348 appearance in the managed file.
349 349 To reduce annotation cost, a fixed value (False) is used
350 350 as the linenumber when the "linenumber" parameter is
351 351 False.'''
352 352
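# Editorial note, not part of the original file: sketches of the return
# shapes documented above, for a hypothetical two-line filectx fctx:
#   fctx.annotate()                 -> [(c1, 'one\n'), (c2, 'two\n')]
#   fctx.annotate(linenumber=True)  -> [((c1, 1), 'one\n'), ((c2, 2), 'two\n')]
#   fctx.annotate(linenumber=False) -> [((c1, False), 'one\n'), ((c2, False), 'two\n')]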
353 353 def decorate_compat(text, rev):
354 354 return ([rev] * len(text.splitlines()), text)
355 355
356 356 def without_linenumber(text, rev):
357 357 return ([(rev, False)] * len(text.splitlines()), text)
358 358
359 359 def with_linenumber(text, rev):
360 360 size = len(text.splitlines())
361 361 return ([(rev, i) for i in xrange(1, size + 1)], text)
362 362
363 363 decorate = (((linenumber is None) and decorate_compat) or
364 364 (linenumber and with_linenumber) or
365 365 without_linenumber)
366 366
367 367 def pair(parent, child):
368 368 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
369 369 child[0][b1:b2] = parent[0][a1:a2]
370 370 return child
371 371
372 372 getlog = util.cachefunc(lambda x: self._repo.file(x))
373 373 def getctx(path, fileid):
374 374 log = path == self._path and self._filelog or getlog(path)
375 375 return filectx(self._repo, path, fileid=fileid, filelog=log)
376 376 getctx = util.cachefunc(getctx)
377 377
378 378 def parents(f):
379 379 # we want to reuse filectx objects as much as possible
380 380 p = f._path
381 381 if f._filerev is None: # working dir
382 382 pl = [(n.path(), n.filerev()) for n in f.parents()]
383 383 else:
384 384 pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
385 385
386 386 if follow:
387 387 r = f.renamed()
388 388 if r:
389 389 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
390 390
391 391 return [getctx(p, n) for p, n in pl if n != nullrev]
392 392
393 393 # use linkrev to find the first changeset where self appeared
394 394 if self.rev() != self.linkrev():
395 395 base = self.filectx(self.filerev())
396 396 else:
397 397 base = self
398 398
399 399 # find all ancestors
400 400 needed = {base: 1}
401 401 visit = [base]
402 402 files = [base._path]
403 403 while visit:
404 404 f = visit.pop(0)
405 405 for p in parents(f):
406 406 if p not in needed:
407 407 needed[p] = 1
408 408 visit.append(p)
409 409 if p._path not in files:
410 410 files.append(p._path)
411 411 else:
412 412 # count how many times we'll use this
413 413 needed[p] += 1
414 414
415 415 # sort by revision (per file) which is a topological order
416 416 visit = []
417 417 for f in files:
418 418 fn = [(n.rev(), n) for n in needed if n._path == f]
419 419 visit.extend(fn)
420 420
421 421 hist = {}
422 422 for r, f in util.sort(visit):
423 423 curr = decorate(f.data(), f)
424 424 for p in parents(f):
425 425 if p != nullid:
426 426 curr = pair(hist[p], curr)
427 427 # trim the history of unneeded revs
428 428 needed[p] -= 1
429 429 if not needed[p]:
430 430 del hist[p]
431 431 hist[f] = curr
432 432
433 433 return zip(hist[f][0], hist[f][1].splitlines(1))
434 434
435 435 def ancestor(self, fc2):
436 436 """
437 437 find the common ancestor file context, if any, of self, and fc2
438 438 """
439 439
440 440 acache = {}
441 441
442 442 # prime the ancestor cache for the working directory
443 443 for c in (self, fc2):
444 444 if c._filerev is None:
445 445 pl = [(n.path(), n.filenode()) for n in c.parents()]
446 446 acache[(c._path, None)] = pl
447 447
448 448 flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
449 449 def parents(vertex):
450 450 if vertex in acache:
451 451 return acache[vertex]
452 452 f, n = vertex
453 453 if f not in flcache:
454 454 flcache[f] = self._repo.file(f)
455 455 fl = flcache[f]
456 456 pl = [(f, p) for p in fl.parents(n) if p != nullid]
457 457 re = fl.renamed(n)
458 458 if re:
459 459 pl.append(re)
460 460 acache[vertex] = pl
461 461 return pl
462 462
463 463 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
464 464 v = ancestor.ancestor(a, b, parents)
465 465 if v:
466 466 f, n = v
467 467 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
468 468
469 469 return None
470 470
471 471 class workingctx(changectx):
472 472 """A workingctx object makes access to data related to
473 473 the current working directory convenient.
474 474 parents - a pair of parent nodeids, or None to use the dirstate.
475 475 date - any valid date string or (unixtime, offset), or None.
476 476 user - username string, or None.
477 477 extra - a dictionary of extra values, or None.
478 478 changes - a list of file lists as returned by localrepo.status()
479 479 or None to use the repository status.
480 480 """
481 481 def __init__(self, repo, parents=None, text="", user=None, date=None,
482 482 extra=None, changes=None):
483 483 self._repo = repo
484 484 self._rev = None
485 485 self._node = None
486 486 self._text = text
487 487 if date:
488 488 self._date = util.parsedate(date)
489 489 if user:
490 490 self._user = user
491 491 if parents:
492 492 self._parents = [changectx(self._repo, p) for p in parents]
493 493 if changes:
494 494 self._status = list(changes)
495 495
496 496 self._extra = {}
497 497 if extra:
498 498 self._extra = extra.copy()
499 499 if 'branch' not in self._extra:
500 500 branch = self._repo.dirstate.branch()
501 501 try:
502 502 branch = branch.decode('UTF-8').encode('UTF-8')
503 503 except UnicodeDecodeError:
504 504 raise util.Abort(_('branch name not in UTF-8!'))
505 505 self._extra['branch'] = branch
506 506 if self._extra['branch'] == '':
507 507 self._extra['branch'] = 'default'
508 508
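# Editorial note, not part of the original file: a hedged construction sketch
# for workingctx using the parameters documented above; values are hypothetical.
#   ctx = workingctx(repo)                      # mirror dirstate and status
#   ctx = workingctx(repo, text='WIP', user='alice@example.com',
#                    date='2008-12-31 12:00')   # override commit metadata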
509 509 def __str__(self):
510 510 return str(self._parents[0]) + "+"
511 511
512 512 def __nonzero__(self):
513 513 return True
514 514
515 515 def __contains__(self, key):
516 516 return self._repo.dirstate[key] not in "?r"
517 517
518 518 def _manifest(self):
519 519 """generate a manifest corresponding to the working directory"""
520 520
521 521 man = self._parents[0].manifest().copy()
522 522 copied = self._repo.dirstate.copies()
523 523 cf = lambda x: man.flags(copied.get(x, x))
524 524 ff = self._repo.dirstate.flagfunc(cf)
525 525 modified, added, removed, deleted, unknown = self._status[:5]
526 526 for i, l in (("a", added), ("m", modified), ("u", unknown)):
527 527 for f in l:
528 528 man[f] = man.get(copied.get(f, f), nullid) + i
529 529 try:
530 530 man.set(f, ff(f))
531 531 except OSError:
532 532 pass
533 533
534 534 for f in deleted + removed:
535 535 if f in man:
536 536 del man[f]
537 537
538 538 return man
539 539 _manifest = propertycache(_manifest)
540 540
541 541 def _status(self):
542 542 return self._repo.status(unknown=True)
543 543 _status = propertycache(_status)
544 544
545 545 def _user(self):
546 546 return self._repo.ui.username()
547 547 _user = propertycache(_user)
548 548
549 549 def _date(self):
550 550 return util.makedate()
551 551 _date = propertycache(_date)
552 552
553 553 def _parents(self):
554 554 p = self._repo.dirstate.parents()
555 555 if p[1] == nullid:
556 556 p = p[:-1]
557 557 self._parents = [changectx(self._repo, x) for x in p]
558 558 return self._parents
559 559 _parents = propertycache(_parents)
560 560
561 561 def manifest(self): return self._manifest
562 562
563 563 def user(self): return self._user or self._repo.ui.username()
564 564 def date(self): return self._date
565 565 def description(self): return self._text
566 566 def files(self):
567 567 return util.sort(self._status[0] + self._status[1] + self._status[2])
568 568
569 569 def modified(self): return self._status[0]
570 570 def added(self): return self._status[1]
571 571 def removed(self): return self._status[2]
572 572 def deleted(self): return self._status[3]
573 573 def unknown(self): return self._status[4]
574 574 def clean(self): return self._status[5]
575 575 def branch(self): return self._extra['branch']
576 576 def extra(self): return self._extra
577 577
578 578 def tags(self):
579 579 t = []
580 580 [t.extend(p.tags()) for p in self.parents()]
581 581 return t
582 582
583 583 def children(self):
584 584 return []
585 585
586 586 def flags(self, path):
587 587 if '_manifest' in self.__dict__:
588 588 try:
589 589 return self._manifest.flags(path)
590 590 except KeyError:
591 591 return ''
592 592
593 593 pnode = self._parents[0].changeset()[0]
594 594 orig = self._repo.dirstate.copies().get(path, path)
595 595 node, flag = self._repo.manifest.find(pnode, orig)
596 596 try:
597 597 ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
598 598 return ff(path)
599 599 except OSError:
600 600 pass
601 601
602 602 if not node or path in self.deleted() or path in self.removed():
603 603 return ''
604 604 return flag
605 605
606 606 def filectx(self, path, filelog=None):
607 607 """get a file context from the working directory"""
608 608 return workingfilectx(self._repo, path, workingctx=self,
609 609 filelog=filelog)
610 610
611 611 def ancestor(self, c2):
612 612 """return the ancestor context of self and c2"""
613 613 return self._parents[0].ancestor(c2) # punt on two parents for now
614 614
615 615 def walk(self, match):
616 616 return util.sort(self._repo.dirstate.walk(match, True, False).keys())
617 617
618 618 class workingfilectx(filectx):
619 619 """A workingfilectx object makes access to data related to a particular
620 620 file in the working directory convenient."""
621 621 def __init__(self, repo, path, filelog=None, workingctx=None):
622 622         """path is the file path relative to the repository root;
623 623         filelog and workingctx may be passed in to avoid recomputation."""
624 624 self._repo = repo
625 625 self._path = path
626 626 self._changeid = None
627 627 self._filerev = self._filenode = None
628 628
629 629 if filelog:
630 630 self._filelog = filelog
631 631 if workingctx:
632 632 self._changectx = workingctx
633 633
634 634 def _changectx(self):
635 635 return workingctx(self._repo)
636 636 _changectx = propertycache(_changectx)
637 637
638 638 def _repopath(self):
639 639 return self._repo.dirstate.copied(self._path) or self._path
640 640 _repopath = propertycache(_repopath)
641 641
642 642 def _filelog(self):
643 643 return self._repo.file(self._repopath)
644 644 _filelog = propertycache(_filelog)
645 645
646 646 def __nonzero__(self):
647 647 return True
648 648
649 649 def __str__(self):
650 650 return "%s@%s" % (self.path(), self._changectx)
651 651
652 652 def filectx(self, fileid):
653 653 '''opens an arbitrary revision of the file without
654 654 opening a new filelog'''
655 655 return filectx(self._repo, self._repopath, fileid=fileid,
656 656 filelog=self._filelog)
657 657
658 658 def rev(self):
659 659 if '_changectx' in self.__dict__:
660 660 return self._changectx.rev()
661 661 return self._filelog.linkrev(self._filerev)
662 662
663 663 def data(self): return self._repo.wread(self._path)
664 664 def renamed(self):
665 665 rp = self._repopath
666 666 if rp == self._path:
667 667 return None
668 668 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
669 669
670 670 def parents(self):
671 671 '''return parent filectxs, following copies if necessary'''
672 672 p = self._path
673 673 rp = self._repopath
674 674 pcl = self._changectx._parents
675 675 fl = self._filelog
676 676 pl = [(rp, pcl[0]._manifest.get(rp, nullid), fl)]
677 677 if len(pcl) > 1:
678 678 if rp != p:
679 679 fl = None
680 680 pl.append((p, pcl[1]._manifest.get(p, nullid), fl))
681 681
682 682 return [filectx(self._repo, p, fileid=n, filelog=l)
683 683 for p,n,l in pl if n != nullid]
684 684
685 685 def children(self):
686 686 return []
687 687
688 688 def size(self): return os.stat(self._repo.wjoin(self._path)).st_size
689 689 def date(self):
690 690 t, tz = self._changectx.date()
691 691 try:
692 692 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
693 693 except OSError, err:
694 694 if err.errno != errno.ENOENT: raise
695 695 return (t, tz)
696 696
697 697     def cmp(self, text): return self._repo.wread(self._path) == text # True when contents match
698 698
699 699 class memctx(object):
700 700 """Use memctx to perform in-memory commits via localrepo.commitctx().
701 701
702 702     Revision information is supplied at initialization time, while
703 703     related file data is made available through a callback
704 704 mechanism. 'repo' is the current localrepo, 'parents' is a
705 705 sequence of two parent revisions identifiers (pass None for every
706 706 missing parent), 'text' is the commit message and 'files' lists
707 707 names of files touched by the revision (normalized and relative to
708 708 repository root).
709 709
710 710 filectxfn(repo, memctx, path) is a callable receiving the
711 711 repository, the current memctx object and the normalized path of
712 712 requested file, relative to repository root. It is fired by the
713 713 commit function for every file in 'files', but calls order is
714 714 undefined. If the file is available in the revision being
715 715 committed (updated or added), filectxfn returns a memfilectx
716 716 object. If the file was removed, filectxfn raises an
717 717 IOError. Moved files are represented by marking the source file
718 718 removed and the new file added with copy information (see
719 719 memfilectx).
720 720
721 721     'user' is the committer name, defaulting to the current
722 722     repository username; 'date' is the commit date, in any format
723 723     supported by util.parsedate(), defaulting to the current date;
724 724     'extra' is a dictionary of metadata, or is left empty.
725 725 """
726 726 def __init__(self, repo, parents, text, files, filectxfn, user=None,
727 727 date=None, extra=None):
728 728 self._repo = repo
729 729 self._rev = None
730 730 self._node = None
731 731 self._text = text
732 732 self._date = date and util.parsedate(date) or util.makedate()
733 733 self._user = user
734 734 parents = [(p or nullid) for p in parents]
735 735 p1, p2 = parents
736 736 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
737 files = util.sort(util.unique(files))
737 files = util.sort(set(files))
738 738         self._status = [files, [], [], [], [], []] # modified, added, removed, deleted, unknown, clean
739 739 self._filectxfn = filectxfn
740 740
741 741 self._extra = extra and extra.copy() or {}
742 742 if 'branch' not in self._extra:
743 743 self._extra['branch'] = 'default'
744 744 elif self._extra.get('branch') == '':
745 745 self._extra['branch'] = 'default'
746 746
747 747 def __str__(self):
748 748 return str(self._parents[0]) + "+"
749 749
750 750 def __int__(self):
751 751 return self._rev
752 752
753 753 def __nonzero__(self):
754 754 return True
755 755
756 756 def user(self): return self._user or self._repo.ui.username()
757 757 def date(self): return self._date
758 758 def description(self): return self._text
759 759 def files(self): return self.modified()
760 760 def modified(self): return self._status[0]
761 761 def added(self): return self._status[1]
762 762 def removed(self): return self._status[2]
763 763 def deleted(self): return self._status[3]
764 764 def unknown(self): return self._status[4]
765 765 def clean(self): return self._status[5]
766 766 def branch(self): return self._extra['branch']
767 767 def extra(self): return self._extra
768 768 def flags(self, f): return self[f].flags()
769 769
770 770 def parents(self):
771 771 """return contexts for each parent changeset"""
772 772 return self._parents
773 773
774 774 def filectx(self, path, filelog=None):
775 775 """get a file context from the working directory"""
776 776 return self._filectxfn(self._repo, self, path)
777 777
778 778 class memfilectx(object):
779 779 """memfilectx represents an in-memory file to commit.
780 780
781 781 See memctx for more details.
782 782 """
783 783 def __init__(self, path, data, islink, isexec, copied):
784 784 """
785 785 path is the normalized file path relative to repository root.
786 786 data is the file content as a string.
787 787 islink is True if the file is a symbolic link.
788 788 isexec is True if the file is executable.
789 789 copied is the source file path if current file was copied in the
790 790 revision being committed, or None."""
791 791 self._path = path
792 792 self._data = data
793 793 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
794 794 self._copied = None
795 795 if copied:
796 796 self._copied = (copied, nullid)
797 797
798 798 def __nonzero__(self): return True
799 799 def __str__(self): return "%s@%s" % (self.path(), self._changectx)
800 800 def path(self): return self._path
801 801 def data(self): return self._data
802 802 def flags(self): return self._flags
803 803 def isexec(self): return 'x' in self._flags
804 804 def islink(self): return 'l' in self._flags
805 805 def renamed(self): return self._copied
806 806
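
memctx and memfilectx together form the in-memory commit API described in the
class docstrings above. The following is a minimal sketch of one way to drive
it: the repository path, file name and contents are hypothetical, and the
hg/ui setup is assumed from the standard API of this era; only memctx,
memfilectx and localrepo.commitctx() come from the code shown here.

    # A sketch, not part of the original source (Python 2, as in the listing).
    from mercurial import hg, ui as uimod, context
    from mercurial.node import hex

    myui = uimod.ui()
    repo = hg.repository(myui, '/path/to/repo')  # hypothetical repository

    files = ['hello.txt']

    def filectxfn(repo, memctx, path):
        # Return a memfilectx for each file present in the revision;
        # per the memctx docstring, raise IOError for removed files.
        if path not in files:
            raise IOError
        return context.memfilectx(path, 'hello, world\n',
                                  islink=False, isexec=False, copied=None)

    parent = repo['tip'].node()
    ctx = context.memctx(repo, (parent, None), 'an in-memory commit',
                         files, filectxfn, user='example <ex@example.com>')
    node = repo.commitctx(ctx)
    print hex(node)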
@@ -1,585 +1,585 b''
1 1 """
2 2 dirstate.py - working directory tracking for mercurial
3 3
4 4 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5
6 6 This software may be used and distributed according to the terms
7 7 of the GNU General Public License, incorporated herein by reference.
8 8 """
9 9
10 10 from node import nullid
11 11 from i18n import _
12 12 import struct, os, stat, util, errno, ignore
13 13 import cStringIO, osutil, sys, parsers
14 14
15 15 _unknown = ('?', 0, 0, 0)
16 16 _format = ">cllll"
17 17
18 18 def _finddirs(path):
19 19 pos = path.rfind('/')
20 20 while pos != -1:
21 21 yield path[:pos]
22 22 pos = path.rfind('/', 0, pos)
23 23
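# Illustration, not part of the original source: _finddirs yields the
# parent directories of a path, deepest first, e.g.
#   list(_finddirs('a/b/c'))  ==  ['a/b', 'a']
# _incdirs and _decdirs below use it to maintain a per-directory count
# of the tracked files beneath each directory.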
24 24 def _incdirs(dirs, path):
25 25 for base in _finddirs(path):
26 26 if base in dirs:
27 27 dirs[base] += 1
28 28 return
29 29 dirs[base] = 1
30 30
31 31 def _decdirs(dirs, path):
32 32 for base in _finddirs(path):
33 33 if dirs[base] > 1:
34 34 dirs[base] -= 1
35 35 return
36 36 del dirs[base]
37 37
38 38 class dirstate(object):
39 39
40 40 def __init__(self, opener, ui, root):
41 41 self._opener = opener
42 42 self._root = root
43 43 self._rootdir = os.path.join(root, '')
44 44 self._dirty = False
45 45 self._dirtypl = False
46 46 self._ui = ui
47 47
48 48 def __getattr__(self, name):
49 49 if name == '_map':
50 50 self._read()
51 51 return self._map
52 52 elif name == '_copymap':
53 53 self._read()
54 54 return self._copymap
55 55 elif name == '_foldmap':
56 56 _foldmap = {}
57 57 for name in self._map:
58 58 norm = os.path.normcase(name)
59 59 _foldmap[norm] = name
60 60 self._foldmap = _foldmap
61 61 return self._foldmap
62 62 elif name == '_branch':
63 63 try:
64 64 self._branch = (self._opener("branch").read().strip()
65 65 or "default")
66 66 except IOError:
67 67 self._branch = "default"
68 68 return self._branch
69 69 elif name == '_pl':
70 70 self._pl = [nullid, nullid]
71 71 try:
72 72 st = self._opener("dirstate").read(40)
73 73 if len(st) == 40:
74 74 self._pl = st[:20], st[20:40]
75 75 except IOError, err:
76 76 if err.errno != errno.ENOENT: raise
77 77 return self._pl
78 78 elif name == '_dirs':
79 79 dirs = {}
80 80 for f,s in self._map.iteritems():
81 81 if s[0] != 'r':
82 82 _incdirs(dirs, f)
83 83 self._dirs = dirs
84 84 return self._dirs
85 85 elif name == '_ignore':
86 86 files = [self._join('.hgignore')]
87 87 for name, path in self._ui.configitems("ui"):
88 88 if name == 'ignore' or name.startswith('ignore.'):
89 89 files.append(os.path.expanduser(path))
90 90 self._ignore = ignore.ignore(self._root, files, self._ui.warn)
91 91 return self._ignore
92 92 elif name == '_slash':
93 93 self._slash = self._ui.configbool('ui', 'slash') and os.sep != '/'
94 94 return self._slash
95 95 elif name == '_checklink':
96 96 self._checklink = util.checklink(self._root)
97 97 return self._checklink
98 98 elif name == '_checkexec':
99 99 self._checkexec = util.checkexec(self._root)
100 100 return self._checkexec
101 101 elif name == '_checkcase':
102 102 self._checkcase = not util.checkcase(self._join('.hg'))
103 103 return self._checkcase
104 104 elif name == 'normalize':
105 105 if self._checkcase:
106 106 self.normalize = self._normalize
107 107 else:
108 108 self.normalize = lambda x, y=False: x
109 109 return self.normalize
110 110 else:
111 111 raise AttributeError(name)
112 112
113 113 def _join(self, f):
114 114 # much faster than os.path.join()
115 115 # it's safe because f is always a relative path
116 116 return self._rootdir + f
117 117
118 118 def flagfunc(self, fallback):
119 119 if self._checklink:
120 120 if self._checkexec:
121 121 def f(x):
122 122 p = self._join(x)
123 123 if os.path.islink(p):
124 124 return 'l'
125 125 if util.is_exec(p):
126 126 return 'x'
127 127 return ''
128 128 return f
129 129 def f(x):
130 130 if os.path.islink(self._join(x)):
131 131 return 'l'
132 132 if 'x' in fallback(x):
133 133 return 'x'
134 134 return ''
135 135 return f
136 136 if self._checkexec:
137 137 def f(x):
138 138 if 'l' in fallback(x):
139 139 return 'l'
140 140 if util.is_exec(self._join(x)):
141 141 return 'x'
142 142 return ''
143 143 return f
144 144 return fallback
145 145
146 146 def getcwd(self):
147 147 cwd = os.getcwd()
148 148 if cwd == self._root: return ''
149 149 # self._root ends with a path separator if self._root is '/' or 'C:\'
150 150 rootsep = self._root
151 151 if not util.endswithsep(rootsep):
152 152 rootsep += os.sep
153 153 if cwd.startswith(rootsep):
154 154 return cwd[len(rootsep):]
155 155 else:
156 156 # we're outside the repo. return an absolute path.
157 157 return cwd
158 158
159 159 def pathto(self, f, cwd=None):
160 160 if cwd is None:
161 161 cwd = self.getcwd()
162 162 path = util.pathto(self._root, cwd, f)
163 163 if self._slash:
164 164 return util.normpath(path)
165 165 return path
166 166
167 167 def __getitem__(self, key):
168 168 ''' current states:
169 169 n normal
170 170 m needs merging
171 171 r marked for removal
172 172 a marked for addition
173 173 ? not tracked'''
174 174 return self._map.get(key, ("?",))[0]
175 175
176 176 def __contains__(self, key):
177 177 return key in self._map
178 178
179 179 def __iter__(self):
180 180 for x in util.sort(self._map):
181 181 yield x
182 182
183 183 def parents(self):
184 184 return self._pl
185 185
186 186 def branch(self):
187 187 return self._branch
188 188
189 189 def setparents(self, p1, p2=nullid):
190 190 self._dirty = self._dirtypl = True
191 191 self._pl = p1, p2
192 192
193 193 def setbranch(self, branch):
194 194 self._branch = branch
195 195 self._opener("branch", "w").write(branch + '\n')
196 196
197 197 def _read(self):
198 198 self._map = {}
199 199 self._copymap = {}
200 200 try:
201 201 st = self._opener("dirstate").read()
202 202 except IOError, err:
203 203 if err.errno != errno.ENOENT: raise
204 204 return
205 205 if not st:
206 206 return
207 207
208 208 p = parsers.parse_dirstate(self._map, self._copymap, st)
209 209 if not self._dirtypl:
210 210 self._pl = p
211 211
212 212 def invalidate(self):
213 213 for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
214 214 if a in self.__dict__:
215 215 delattr(self, a)
216 216 self._dirty = False
217 217
218 218 def copy(self, source, dest):
219 219 """Mark dest as a copy of source. Unmark dest if source is None.
220 220 """
221 221 if source == dest:
222 222 return
223 223 self._dirty = True
224 224 if source is not None:
225 225 self._copymap[dest] = source
226 226 elif dest in self._copymap:
227 227 del self._copymap[dest]
228 228
229 229 def copied(self, file):
230 230 return self._copymap.get(file, None)
231 231
232 232 def copies(self):
233 233 return self._copymap
234 234
235 235 def _droppath(self, f):
236 236 if self[f] not in "?r" and "_dirs" in self.__dict__:
237 237 _decdirs(self._dirs, f)
238 238
239 239 def _addpath(self, f, check=False):
240 240 oldstate = self[f]
241 241 if check or oldstate == "r":
242 242 if '\r' in f or '\n' in f:
243 243 raise util.Abort(
244 244 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
245 245 if f in self._dirs:
246 246 raise util.Abort(_('directory %r already in dirstate') % f)
247 247 # shadows
248 248 for d in _finddirs(f):
249 249 if d in self._dirs:
250 250 break
251 251 if d in self._map and self[d] != 'r':
252 252 raise util.Abort(
253 253 _('file %r in dirstate clashes with %r') % (d, f))
254 254 if oldstate in "?r" and "_dirs" in self.__dict__:
255 255 _incdirs(self._dirs, f)
256 256
257 257 def normal(self, f):
258 258 'mark a file normal and clean'
259 259 self._dirty = True
260 260 self._addpath(f)
261 261 s = os.lstat(self._join(f))
262 262 self._map[f] = ('n', s.st_mode, s.st_size, int(s.st_mtime))
263 263 if f in self._copymap:
264 264 del self._copymap[f]
265 265
266 266 def normallookup(self, f):
267 267 'mark a file normal, but possibly dirty'
268 268 if self._pl[1] != nullid and f in self._map:
269 269 # if there is a merge going on and the file was either
270 270 # in state 'm' or dirty before being removed, restore that state.
271 271 entry = self._map[f]
272 272 if entry[0] == 'r' and entry[2] in (-1, -2):
273 273 source = self._copymap.get(f)
274 274 if entry[2] == -1:
275 275 self.merge(f)
276 276 elif entry[2] == -2:
277 277 self.normaldirty(f)
278 278 if source:
279 279 self.copy(source, f)
280 280 return
281 281             if entry[0] == 'm' or (entry[0] == 'n' and entry[2] == -2):
282 282 return
283 283 self._dirty = True
284 284 self._addpath(f)
285 285 self._map[f] = ('n', 0, -1, -1)
286 286 if f in self._copymap:
287 287 del self._copymap[f]
288 288
289 289 def normaldirty(self, f):
290 290 'mark a file normal, but dirty'
291 291 self._dirty = True
292 292 self._addpath(f)
293 293 self._map[f] = ('n', 0, -2, -1)
294 294 if f in self._copymap:
295 295 del self._copymap[f]
296 296
297 297 def add(self, f):
298 298 'mark a file added'
299 299 self._dirty = True
300 300 self._addpath(f, True)
301 301 self._map[f] = ('a', 0, -1, -1)
302 302 if f in self._copymap:
303 303 del self._copymap[f]
304 304
305 305 def remove(self, f):
306 306 'mark a file removed'
307 307 self._dirty = True
308 308 self._droppath(f)
309 309 size = 0
310 310 if self._pl[1] != nullid and f in self._map:
311 311 entry = self._map[f]
312 312 if entry[0] == 'm':
313 313 size = -1
314 314 elif entry[0] == 'n' and entry[2] == -2:
315 315 size = -2
316 316 self._map[f] = ('r', 0, size, 0)
317 317 if size == 0 and f in self._copymap:
318 318 del self._copymap[f]
319 319
320 320 def merge(self, f):
321 321 'mark a file merged'
322 322 self._dirty = True
323 323 s = os.lstat(self._join(f))
324 324 self._addpath(f)
325 325 self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
326 326 if f in self._copymap:
327 327 del self._copymap[f]
328 328
329 329 def forget(self, f):
330 330 'forget a file'
331 331 self._dirty = True
332 332 try:
333 333 self._droppath(f)
334 334 del self._map[f]
335 335 except KeyError:
336 336 self._ui.warn(_("not in dirstate: %s\n") % f)
337 337
338 338 def _normalize(self, path, knownpath=False):
339 339 norm_path = os.path.normcase(path)
340 340 fold_path = self._foldmap.get(norm_path, None)
341 341 if fold_path is None:
342 342 if knownpath or not os.path.exists(os.path.join(self._root, path)):
343 343 fold_path = path
344 344 else:
345 345 fold_path = self._foldmap.setdefault(norm_path,
346 346 util.fspath(path, self._root))
347 347 return fold_path
348 348
349 349 def clear(self):
350 350 self._map = {}
351 351 if "_dirs" in self.__dict__:
352 352             delattr(self, "_dirs")
353 353 self._copymap = {}
354 354 self._pl = [nullid, nullid]
355 355 self._dirty = True
356 356
357 357 def rebuild(self, parent, files):
358 358 self.clear()
359 359 for f in files:
360 360 if 'x' in files.flags(f):
361 361 self._map[f] = ('n', 0777, -1, 0)
362 362 else:
363 363 self._map[f] = ('n', 0666, -1, 0)
364 364 self._pl = (parent, nullid)
365 365 self._dirty = True
366 366
367 367 def write(self):
368 368 if not self._dirty:
369 369 return
370 370 st = self._opener("dirstate", "w", atomictemp=True)
371 371
372 372 try:
373 373 gran = int(self._ui.config('dirstate', 'granularity', 1))
374 374 except ValueError:
375 375 gran = 1
376 376 limit = sys.maxint
377 377 if gran > 0:
378 378 limit = util.fstat(st).st_mtime - gran
379 379
380 380 cs = cStringIO.StringIO()
381 381 copymap = self._copymap
382 382 pack = struct.pack
383 383 write = cs.write
384 384 write("".join(self._pl))
385 385 for f, e in self._map.iteritems():
386 386 if f in copymap:
387 387 f = "%s\0%s" % (f, copymap[f])
388 388 if e[3] > limit and e[0] == 'n':
389 389 e = (e[0], 0, -1, -1)
390 390 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
391 391 write(e)
392 392 write(f)
393 393 st.write(cs.getvalue())
394 394 st.rename()
395 395 self._dirty = self._dirtypl = False
396 396
397 397 def _dirignore(self, f):
398 398 if f == '.':
399 399 return False
400 400 if self._ignore(f):
401 401 return True
402 402 for p in _finddirs(f):
403 403 if self._ignore(p):
404 404 return True
405 405 return False
406 406
407 407 def walk(self, match, unknown, ignored):
408 408 '''
409 409 walk recursively through the directory tree, finding all files
410 410 matched by the match function
411 411
412 412         results are returned as a dictionary mapping filename to
413 413         stat result, or to None when no usable stat is available.
414 414 '''
415 415
416 416 def fwarn(f, msg):
417 417 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
418 418 return False
419 419 badfn = fwarn
420 420 if hasattr(match, 'bad'):
421 421 badfn = match.bad
422 422
423 423 def badtype(f, mode):
424 424 kind = 'unknown'
425 425 if stat.S_ISCHR(mode): kind = _('character device')
426 426 elif stat.S_ISBLK(mode): kind = _('block device')
427 427 elif stat.S_ISFIFO(mode): kind = _('fifo')
428 428 elif stat.S_ISSOCK(mode): kind = _('socket')
429 429 elif stat.S_ISDIR(mode): kind = _('directory')
430 430 self._ui.warn(_('%s: unsupported file type (type is %s)\n')
431 431 % (self.pathto(f), kind))
432 432
433 433 ignore = self._ignore
434 434 dirignore = self._dirignore
435 435 if ignored:
436 436 ignore = util.never
437 437 dirignore = util.never
438 438 elif not unknown:
439 439 # if unknown and ignored are False, skip step 2
440 440 ignore = util.always
441 441 dirignore = util.always
442 442
443 443 matchfn = match.matchfn
444 444 dmap = self._map
445 445 normpath = util.normpath
446 446 normalize = self.normalize
447 447 listdir = osutil.listdir
448 448 lstat = os.lstat
449 449 getkind = stat.S_IFMT
450 450 dirkind = stat.S_IFDIR
451 451 regkind = stat.S_IFREG
452 452 lnkkind = stat.S_IFLNK
453 453 join = self._join
454 454 work = []
455 455 wadd = work.append
456 456
457 files = util.unique(match.files())
457 files = set(match.files())
458 458 if not files or '.' in files:
459 459 files = ['']
460 460 results = {'.hg': None}
461 461
462 462 # step 1: find all explicit files
463 463 for ff in util.sort(files):
464 464 nf = normalize(normpath(ff))
465 465 if nf in results:
466 466 continue
467 467
468 468 try:
469 469 st = lstat(join(nf))
470 470 kind = getkind(st.st_mode)
471 471 if kind == dirkind:
472 472 if not dirignore(nf):
473 473 wadd(nf)
474 474 elif kind == regkind or kind == lnkkind:
475 475 results[nf] = st
476 476 else:
477 477 badtype(ff, kind)
478 478 if nf in dmap:
479 479 results[nf] = None
480 480 except OSError, inst:
481 481 keep = False
482 482 prefix = nf + "/"
483 483 for fn in dmap:
484 484 if nf == fn or fn.startswith(prefix):
485 485 keep = True
486 486 break
487 487 if not keep:
488 488 if inst.errno != errno.ENOENT:
489 489 fwarn(ff, inst.strerror)
490 490 elif badfn(ff, inst.strerror):
491 491 if (nf in dmap or not ignore(nf)) and matchfn(nf):
492 492 results[nf] = None
493 493
494 494 # step 2: visit subdirectories
495 495 while work:
496 496 nd = work.pop()
497 497 if hasattr(match, 'dir'):
498 498 match.dir(nd)
499 499 skip = None
500 500 if nd == '.':
501 501 nd = ''
502 502 else:
503 503 skip = '.hg'
504 504 try:
505 505 entries = listdir(join(nd), stat=True, skip=skip)
506 506 except OSError, inst:
507 507 if inst.errno == errno.EACCES:
508 508 fwarn(nd, inst.strerror)
509 509 continue
510 510 raise
511 511 for f, kind, st in entries:
512 512 nf = normalize(nd and (nd + "/" + f) or f, True)
513 513 if nf not in results:
514 514 if kind == dirkind:
515 515 if not ignore(nf):
516 516 wadd(nf)
517 517 if nf in dmap and matchfn(nf):
518 518 results[nf] = None
519 519 elif kind == regkind or kind == lnkkind:
520 520 if nf in dmap:
521 521 if matchfn(nf):
522 522 results[nf] = st
523 523 elif matchfn(nf) and not ignore(nf):
524 524 results[nf] = st
525 525 elif nf in dmap and matchfn(nf):
526 526 results[nf] = None
527 527
528 528 # step 3: report unseen items in the dmap hash
529 529 visit = util.sort([f for f in dmap if f not in results and match(f)])
530 530 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
531 531             if st is not None and getkind(st.st_mode) not in (regkind, lnkkind):
532 532 st = None
533 533 results[nf] = st
534 534
535 535 del results['.hg']
536 536 return results
537 537
538 538 def status(self, match, ignored, clean, unknown):
539 539 listignored, listclean, listunknown = ignored, clean, unknown
540 540 lookup, modified, added, unknown, ignored = [], [], [], [], []
541 541 removed, deleted, clean = [], [], []
542 542
543 543 dmap = self._map
544 544 ladd = lookup.append
545 545 madd = modified.append
546 546 aadd = added.append
547 547 uadd = unknown.append
548 548 iadd = ignored.append
549 549 radd = removed.append
550 550 dadd = deleted.append
551 551 cadd = clean.append
552 552
553 553 for fn, st in self.walk(match, listunknown, listignored).iteritems():
554 554 if fn not in dmap:
555 555 if (listignored or match.exact(fn)) and self._dirignore(fn):
556 556 if listignored:
557 557 iadd(fn)
558 558 elif listunknown:
559 559 uadd(fn)
560 560 continue
561 561
562 562 state, mode, size, time = dmap[fn]
563 563
564 564 if not st and state in "nma":
565 565 dadd(fn)
566 566 elif state == 'n':
567 567 if (size >= 0 and
568 568 (size != st.st_size
569 569 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
570 570 or size == -2
571 571 or fn in self._copymap):
572 572 madd(fn)
573 573 elif time != int(st.st_mtime):
574 574 ladd(fn)
575 575 elif listclean:
576 576 cadd(fn)
577 577 elif state == 'm':
578 578 madd(fn)
579 579 elif state == 'a':
580 580 aadd(fn)
581 581 elif state == 'r':
582 582 radd(fn)
583 583
584 584 return (lookup, modified, added, removed, deleted, unknown, ignored,
585 585 clean)
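
The dirstate above is, at bottom, a lazily loaded map from tracked filename
to a (state, mode, size, mtime) tuple, using the one-character states
documented in __getitem__. A brief sketch of reading it from the outside,
assuming the standard hg/ui/match entry points of this era; __iter__,
__getitem__ and status() are the methods shown above.

    # A sketch, not part of the original source.
    from mercurial import hg, ui as uimod
    from mercurial import match as match_

    repo = hg.repository(uimod.ui(), '.')
    ds = repo.dirstate

    # One state character per tracked file: 'n' normal, 'm' needs
    # merging, 'r' marked for removal, 'a' marked for addition
    # ('?' is returned for untracked paths).
    for f in ds:
        print '%s %s' % (ds[f], f)

    # status() combines walk() results with the dirstate entries into
    # the (lookup, modified, added, removed, deleted, unknown, ignored,
    # clean) tuple returned above.
    m = match_.always(repo.root, repo.getcwd())
    st = ds.status(m, ignored=False, clean=True, unknown=True)
    lookup, modified, added, removed, deleted, unknown, ignored, clean = st
    print 'modified:', modified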
@@ -1,2174 +1,2174 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import lock, transaction, stat, errno, ui, store, encoding
13 13 import os, time, util, extensions, hook, inspect, error
14 14 import match as match_
15 15 import merge as merge_
16 16
17 17 from lock import release
18 18
19 19 class localrepository(repo.repository):
20 20 capabilities = set(('lookup', 'changegroupsubset'))
21 21 supported = ('revlogv1', 'store', 'fncache')
22 22
23 23 def __init__(self, parentui, path=None, create=0):
24 24 repo.repository.__init__(self)
25 25 self.root = os.path.realpath(path)
26 26 self.path = os.path.join(self.root, ".hg")
27 27 self.origroot = path
28 28 self.opener = util.opener(self.path)
29 29 self.wopener = util.opener(self.root)
30 30
31 31 if not os.path.isdir(self.path):
32 32 if create:
33 33 if not os.path.exists(path):
34 34 os.mkdir(path)
35 35 os.mkdir(self.path)
36 36 requirements = ["revlogv1"]
37 37 if parentui.configbool('format', 'usestore', True):
38 38 os.mkdir(os.path.join(self.path, "store"))
39 39 requirements.append("store")
40 40 if parentui.configbool('format', 'usefncache', True):
41 41 requirements.append("fncache")
42 42 # create an invalid changelog
43 43 self.opener("00changelog.i", "a").write(
44 44 '\0\0\0\2' # represents revlogv2
45 45 ' dummy changelog to prevent using the old repo layout'
46 46 )
47 47 reqfile = self.opener("requires", "w")
48 48 for r in requirements:
49 49 reqfile.write("%s\n" % r)
50 50 reqfile.close()
51 51 else:
52 52 raise error.RepoError(_("repository %s not found") % path)
53 53 elif create:
54 54 raise error.RepoError(_("repository %s already exists") % path)
55 55 else:
56 56 # find requirements
57 57 requirements = []
58 58 try:
59 59 requirements = self.opener("requires").read().splitlines()
60 60 for r in requirements:
61 61 if r not in self.supported:
62 62 raise error.RepoError(_("requirement '%s' not supported") % r)
63 63 except IOError, inst:
64 64 if inst.errno != errno.ENOENT:
65 65 raise
66 66
67 67 self.store = store.store(requirements, self.path, util.opener)
68 68 self.spath = self.store.path
69 69 self.sopener = self.store.opener
70 70 self.sjoin = self.store.join
71 71 self.opener.createmode = self.store.createmode
72 72
73 73 self.ui = ui.ui(parentui=parentui)
74 74 try:
75 75 self.ui.readconfig(self.join("hgrc"), self.root)
76 76 extensions.loadall(self.ui)
77 77 except IOError:
78 78 pass
79 79
80 80 self.tagscache = None
81 81 self._tagstypecache = None
82 82 self.branchcache = None
83 83 self._ubranchcache = None # UTF-8 version of branchcache
84 84 self._branchcachetip = None
85 85 self.nodetagscache = None
86 86 self.filterpats = {}
87 87 self._datafilters = {}
88 88 self._transref = self._lockref = self._wlockref = None
89 89
90 90 def __getattr__(self, name):
91 91 if name == 'changelog':
92 92 self.changelog = changelog.changelog(self.sopener)
93 93 if 'HG_PENDING' in os.environ:
94 94 p = os.environ['HG_PENDING']
95 95 if p.startswith(self.root):
96 96 self.changelog.readpending('00changelog.i.a')
97 97 self.sopener.defversion = self.changelog.version
98 98 return self.changelog
99 99 if name == 'manifest':
100 100 self.changelog
101 101 self.manifest = manifest.manifest(self.sopener)
102 102 return self.manifest
103 103 if name == 'dirstate':
104 104 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
105 105 return self.dirstate
106 106 else:
107 107 raise AttributeError(name)
108 108
109 109 def __getitem__(self, changeid):
110 110 if changeid == None:
111 111 return context.workingctx(self)
112 112 return context.changectx(self, changeid)
113 113
114 114 def __nonzero__(self):
115 115 return True
116 116
117 117 def __len__(self):
118 118 return len(self.changelog)
119 119
120 120 def __iter__(self):
121 121 for i in xrange(len(self)):
122 122 yield i
123 123
124 124 def url(self):
125 125 return 'file:' + self.root
126 126
127 127 def hook(self, name, throw=False, **args):
128 128 return hook.hook(self.ui, self, name, throw, **args)
129 129
130 130 tag_disallowed = ':\r\n'
131 131
132 132 def _tag(self, names, node, message, local, user, date, parent=None,
133 133 extra={}):
134 134 use_dirstate = parent is None
135 135
136 136 if isinstance(names, str):
137 137 allchars = names
138 138 names = (names,)
139 139 else:
140 140 allchars = ''.join(names)
141 141 for c in self.tag_disallowed:
142 142 if c in allchars:
143 143 raise util.Abort(_('%r cannot be used in a tag name') % c)
144 144
145 145 for name in names:
146 146 self.hook('pretag', throw=True, node=hex(node), tag=name,
147 147 local=local)
148 148
149 149 def writetags(fp, names, munge, prevtags):
150 150 fp.seek(0, 2)
151 151 if prevtags and prevtags[-1] != '\n':
152 152 fp.write('\n')
153 153 for name in names:
154 154 m = munge and munge(name) or name
155 155 if self._tagstypecache and name in self._tagstypecache:
156 156 old = self.tagscache.get(name, nullid)
157 157 fp.write('%s %s\n' % (hex(old), m))
158 158 fp.write('%s %s\n' % (hex(node), m))
159 159 fp.close()
160 160
161 161 prevtags = ''
162 162 if local:
163 163 try:
164 164 fp = self.opener('localtags', 'r+')
165 165 except IOError:
166 166 fp = self.opener('localtags', 'a')
167 167 else:
168 168 prevtags = fp.read()
169 169
170 170 # local tags are stored in the current charset
171 171 writetags(fp, names, None, prevtags)
172 172 for name in names:
173 173 self.hook('tag', node=hex(node), tag=name, local=local)
174 174 return
175 175
176 176 if use_dirstate:
177 177 try:
178 178 fp = self.wfile('.hgtags', 'rb+')
179 179 except IOError:
180 180 fp = self.wfile('.hgtags', 'ab')
181 181 else:
182 182 prevtags = fp.read()
183 183 else:
184 184 try:
185 185 prevtags = self.filectx('.hgtags', parent).data()
186 186 except error.LookupError:
187 187 pass
188 188 fp = self.wfile('.hgtags', 'wb')
189 189 if prevtags:
190 190 fp.write(prevtags)
191 191
192 192 # committed tags are stored in UTF-8
193 193 writetags(fp, names, encoding.fromlocal, prevtags)
194 194
195 195 if use_dirstate and '.hgtags' not in self.dirstate:
196 196 self.add(['.hgtags'])
197 197
198 198 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
199 199 extra=extra)
200 200
201 201 for name in names:
202 202 self.hook('tag', node=hex(node), tag=name, local=local)
203 203
204 204 return tagnode
205 205
206 206 def tag(self, names, node, message, local, user, date):
207 207 '''tag a revision with one or more symbolic names.
208 208
209 209 names is a list of strings or, when adding a single tag, names may be a
210 210 string.
211 211
212 212 if local is True, the tags are stored in a per-repository file.
213 213 otherwise, they are stored in the .hgtags file, and a new
214 214 changeset is committed with the change.
215 215
216 216 keyword arguments:
217 217
218 218 local: whether to store tags in non-version-controlled file
219 219 (default False)
220 220
221 221 message: commit message to use if committing
222 222
223 223 user: name of user to use if committing
224 224
225 225 date: date tuple to use if committing'''
226 226
227 227 for x in self.status()[:5]:
228 228 if '.hgtags' in x:
229 229 raise util.Abort(_('working copy of .hgtags is changed '
230 230 '(please commit .hgtags manually)'))
231 231
232 232 self.tags() # instantiate the cache
233 233 self._tag(names, node, message, local, user, date)
234 234
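    # Illustration, not part of the original source: with the signature
    # above, tagging the tip globally looks like
    #   repo.tag('v1.0', repo.changelog.tip(), 'Added tag v1.0',
    #            local=False, user=None, date=None)
    # which commits a .hgtags change; with local=True the tag is written
    # to the uncommitted 'localtags' file instead.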
235 235 def tags(self):
236 236 '''return a mapping of tag to node'''
237 237 if self.tagscache:
238 238 return self.tagscache
239 239
240 240 globaltags = {}
241 241 tagtypes = {}
242 242
243 243 def readtags(lines, fn, tagtype):
244 244 filetags = {}
245 245 count = 0
246 246
247 247 def warn(msg):
248 248 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
249 249
250 250 for l in lines:
251 251 count += 1
252 252 if not l:
253 253 continue
254 254 s = l.split(" ", 1)
255 255 if len(s) != 2:
256 256 warn(_("cannot parse entry"))
257 257 continue
258 258 node, key = s
259 259 key = encoding.tolocal(key.strip()) # stored in UTF-8
260 260 try:
261 261 bin_n = bin(node)
262 262 except TypeError:
263 263 warn(_("node '%s' is not well formed") % node)
264 264 continue
265 265 if bin_n not in self.changelog.nodemap:
266 266 warn(_("tag '%s' refers to unknown node") % key)
267 267 continue
268 268
269 269 h = []
270 270 if key in filetags:
271 271 n, h = filetags[key]
272 272 h.append(n)
273 273 filetags[key] = (bin_n, h)
274 274
275 275 for k, nh in filetags.iteritems():
276 276 if k not in globaltags:
277 277 globaltags[k] = nh
278 278 tagtypes[k] = tagtype
279 279 continue
280 280
281 281 # we prefer the global tag if:
282 282             #  it supersedes us OR
283 283             #  mutual supersedes and it has a higher rank
284 284 # otherwise we win because we're tip-most
285 285 an, ah = nh
286 286 bn, bh = globaltags[k]
287 287 if (bn != an and an in bh and
288 288 (bn not in ah or len(bh) > len(ah))):
289 289 an = bn
290 290 ah.extend([n for n in bh if n not in ah])
291 291 globaltags[k] = an, ah
292 292 tagtypes[k] = tagtype
293 293
294 294 # read the tags file from each head, ending with the tip
295 295 f = None
296 296 for rev, node, fnode in self._hgtagsnodes():
297 297 f = (f and f.filectx(fnode) or
298 298 self.filectx('.hgtags', fileid=fnode))
299 299 readtags(f.data().splitlines(), f, "global")
300 300
301 301 try:
302 302 data = encoding.fromlocal(self.opener("localtags").read())
303 303 # localtags are stored in the local character set
304 304 # while the internal tag table is stored in UTF-8
305 305 readtags(data.splitlines(), "localtags", "local")
306 306 except IOError:
307 307 pass
308 308
309 309 self.tagscache = {}
310 310 self._tagstypecache = {}
311 311 for k, nh in globaltags.iteritems():
312 312 n = nh[0]
313 313 if n != nullid:
314 314 self.tagscache[k] = n
315 315 self._tagstypecache[k] = tagtypes[k]
316 316 self.tagscache['tip'] = self.changelog.tip()
317 317 return self.tagscache
318 318
319 319 def tagtype(self, tagname):
320 320 '''
321 321 return the type of the given tag. result can be:
322 322
323 323 'local' : a local tag
324 324 'global' : a global tag
325 325 None : tag does not exist
326 326 '''
327 327
328 328 self.tags()
329 329
330 330 return self._tagstypecache.get(tagname)
331 331
332 332 def _hgtagsnodes(self):
333 333 heads = self.heads()
334 334 heads.reverse()
335 335 last = {}
336 336 ret = []
337 337 for node in heads:
338 338 c = self[node]
339 339 rev = c.rev()
340 340 try:
341 341 fnode = c.filenode('.hgtags')
342 342 except error.LookupError:
343 343 continue
344 344 ret.append((rev, node, fnode))
345 345 if fnode in last:
346 346 ret[last[fnode]] = None
347 347 last[fnode] = len(ret) - 1
348 348 return [item for item in ret if item]
349 349
350 350 def tagslist(self):
351 351 '''return a list of tags ordered by revision'''
352 352 l = []
353 353 for t, n in self.tags().iteritems():
354 354 try:
355 355 r = self.changelog.rev(n)
356 356 except:
357 357 r = -2 # sort to the beginning of the list if unknown
358 358 l.append((r, t, n))
359 359 return [(t, n) for r, t, n in util.sort(l)]
360 360
361 361 def nodetags(self, node):
362 362 '''return the tags associated with a node'''
363 363 if not self.nodetagscache:
364 364 self.nodetagscache = {}
365 365 for t, n in self.tags().iteritems():
366 366 self.nodetagscache.setdefault(n, []).append(t)
367 367 return self.nodetagscache.get(node, [])
368 368
369 369 def _branchtags(self, partial, lrev):
370 370 # TODO: rename this function?
371 371 tiprev = len(self) - 1
372 372 if lrev != tiprev:
373 373 self._updatebranchcache(partial, lrev+1, tiprev+1)
374 374 self._writebranchcache(partial, self.changelog.tip(), tiprev)
375 375
376 376 return partial
377 377
378 378 def _branchheads(self):
379 379 tip = self.changelog.tip()
380 380 if self.branchcache is not None and self._branchcachetip == tip:
381 381 return self.branchcache
382 382
383 383 oldtip = self._branchcachetip
384 384 self._branchcachetip = tip
385 385 if self.branchcache is None:
386 386 self.branchcache = {} # avoid recursion in changectx
387 387 else:
388 388 self.branchcache.clear() # keep using the same dict
389 389 if oldtip is None or oldtip not in self.changelog.nodemap:
390 390 partial, last, lrev = self._readbranchcache()
391 391 else:
392 392 lrev = self.changelog.rev(oldtip)
393 393 partial = self._ubranchcache
394 394
395 395 self._branchtags(partial, lrev)
396 396 # this private cache holds all heads (not just tips)
397 397 self._ubranchcache = partial
398 398
399 399 # the branch cache is stored on disk as UTF-8, but in the local
400 400 # charset internally
401 401 for k, v in partial.iteritems():
402 402 self.branchcache[encoding.tolocal(k)] = v
403 403 return self.branchcache
404 404
405 405
406 406 def branchtags(self):
407 407 '''return a dict where branch names map to the tipmost head of
408 408 the branch, open heads come before closed'''
409 409 bt = {}
410 410 for bn, heads in self._branchheads().iteritems():
411 411 head = None
412 412 for i in range(len(heads)-1, -1, -1):
413 413 h = heads[i]
414 414 if 'close' not in self.changelog.read(h)[5]:
415 415 head = h
416 416 break
417 417 # no open heads were found
418 418 if head is None:
419 419 head = heads[-1]
420 420 bt[bn] = head
421 421 return bt
422 422
423 423
424 424 def _readbranchcache(self):
425 425 partial = {}
426 426 try:
427 427 f = self.opener("branchheads.cache")
428 428 lines = f.read().split('\n')
429 429 f.close()
430 430 except (IOError, OSError):
431 431 return {}, nullid, nullrev
432 432
433 433 try:
434 434 last, lrev = lines.pop(0).split(" ", 1)
435 435 last, lrev = bin(last), int(lrev)
436 436 if lrev >= len(self) or self[lrev].node() != last:
437 437 # invalidate the cache
438 438 raise ValueError('invalidating branch cache (tip differs)')
439 439 for l in lines:
440 440 if not l: continue
441 441 node, label = l.split(" ", 1)
442 442 partial.setdefault(label.strip(), []).append(bin(node))
443 443 except KeyboardInterrupt:
444 444 raise
445 445 except Exception, inst:
446 446 if self.ui.debugflag:
447 447 self.ui.warn(str(inst), '\n')
448 448 partial, last, lrev = {}, nullid, nullrev
449 449 return partial, last, lrev
450 450
451 451 def _writebranchcache(self, branches, tip, tiprev):
452 452 try:
453 453 f = self.opener("branchheads.cache", "w", atomictemp=True)
454 454 f.write("%s %s\n" % (hex(tip), tiprev))
455 455 for label, nodes in branches.iteritems():
456 456 for node in nodes:
457 457 f.write("%s %s\n" % (hex(node), label))
458 458 f.rename()
459 459 except (IOError, OSError):
460 460 pass
461 461
462 462 def _updatebranchcache(self, partial, start, end):
463 463 for r in xrange(start, end):
464 464 c = self[r]
465 465 b = c.branch()
466 466 bheads = partial.setdefault(b, [])
467 467 bheads.append(c.node())
468 468 for p in c.parents():
469 469 pn = p.node()
470 470 if pn in bheads:
471 471 bheads.remove(pn)
472 472
473 473 def lookup(self, key):
474 474 if isinstance(key, int):
475 475 return self.changelog.node(key)
476 476 elif key == '.':
477 477 return self.dirstate.parents()[0]
478 478 elif key == 'null':
479 479 return nullid
480 480 elif key == 'tip':
481 481 return self.changelog.tip()
482 482 n = self.changelog._match(key)
483 483 if n:
484 484 return n
485 485 if key in self.tags():
486 486 return self.tags()[key]
487 487 if key in self.branchtags():
488 488 return self.branchtags()[key]
489 489 n = self.changelog._partialmatch(key)
490 490 if n:
491 491 return n
492 492 try:
493 493 if len(key) == 20:
494 494 key = hex(key)
495 495 except:
496 496 pass
497 497 raise error.RepoError(_("unknown revision '%s'") % key)
498 498
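    # Illustration, not part of the original source: lookup() tries, in
    # order, an integer revision, the symbolic names '.', 'null' and
    # 'tip', an exact changelog match, tag names, branch names, and
    # finally an unambiguous node-prefix match, so e.g.
    #   repo.lookup('tip') == repo.changelog.tip()
    #   repo.lookup('.')   == repo.dirstate.parents()[0]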
499 499 def local(self):
500 500 return True
501 501
502 502 def join(self, f):
503 503 return os.path.join(self.path, f)
504 504
505 505 def wjoin(self, f):
506 506 return os.path.join(self.root, f)
507 507
508 508 def rjoin(self, f):
509 509 return os.path.join(self.root, util.pconvert(f))
510 510
511 511 def file(self, f):
512 512 if f[0] == '/':
513 513 f = f[1:]
514 514 return filelog.filelog(self.sopener, f)
515 515
516 516 def changectx(self, changeid):
517 517 return self[changeid]
518 518
519 519 def parents(self, changeid=None):
520 520 '''get list of changectxs for parents of changeid'''
521 521 return self[changeid].parents()
522 522
523 523 def filectx(self, path, changeid=None, fileid=None):
524 524 """changeid can be a changeset revision, node, or tag.
525 525 fileid can be a file revision or node."""
526 526 return context.filectx(self, path, changeid, fileid)
527 527
528 528 def getcwd(self):
529 529 return self.dirstate.getcwd()
530 530
531 531 def pathto(self, f, cwd=None):
532 532 return self.dirstate.pathto(f, cwd)
533 533
534 534 def wfile(self, f, mode='r'):
535 535 return self.wopener(f, mode)
536 536
537 537 def _link(self, f):
538 538 return os.path.islink(self.wjoin(f))
539 539
540 540 def _filter(self, filter, filename, data):
541 541 if filter not in self.filterpats:
542 542 l = []
543 543 for pat, cmd in self.ui.configitems(filter):
544 544 if cmd == '!':
545 545 continue
546 546 mf = util.matcher(self.root, "", [pat], [], [])[1]
547 547 fn = None
548 548 params = cmd
549 549 for name, filterfn in self._datafilters.iteritems():
550 550 if cmd.startswith(name):
551 551 fn = filterfn
552 552 params = cmd[len(name):].lstrip()
553 553 break
554 554 if not fn:
555 555 fn = lambda s, c, **kwargs: util.filter(s, c)
556 556 # Wrap old filters not supporting keyword arguments
557 557 if not inspect.getargspec(fn)[2]:
558 558 oldfn = fn
559 559 fn = lambda s, c, **kwargs: oldfn(s, c)
560 560 l.append((mf, fn, params))
561 561 self.filterpats[filter] = l
562 562
563 563 for mf, fn, cmd in self.filterpats[filter]:
564 564 if mf(filename):
565 565 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
566 566 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
567 567 break
568 568
569 569 return data
570 570
571 571 def adddatafilter(self, name, filter):
572 572 self._datafilters[name] = filter
573 573
574 574 def wread(self, filename):
575 575 if self._link(filename):
576 576 data = os.readlink(self.wjoin(filename))
577 577 else:
578 578 data = self.wopener(filename, 'r').read()
579 579 return self._filter("encode", filename, data)
580 580
581 581 def wwrite(self, filename, data, flags):
582 582 data = self._filter("decode", filename, data)
583 583 try:
584 584 os.unlink(self.wjoin(filename))
585 585 except OSError:
586 586 pass
587 587 if 'l' in flags:
588 588 self.wopener.symlink(data, filename)
589 589 else:
590 590 self.wopener(filename, 'w').write(data)
591 591 if 'x' in flags:
592 592 util.set_flags(self.wjoin(filename), False, True)
593 593
594 594 def wwritedata(self, filename, data):
595 595 return self._filter("decode", filename, data)
596 596
597 597 def transaction(self):
598 598 tr = self._transref and self._transref() or None
599 599 if tr and tr.running():
600 600 return tr.nest()
601 601
602 602 # abort here if the journal already exists
603 603 if os.path.exists(self.sjoin("journal")):
604 604 raise error.RepoError(_("journal already exists - run hg recover"))
605 605
606 606 # save dirstate for rollback
607 607 try:
608 608 ds = self.opener("dirstate").read()
609 609 except IOError:
610 610 ds = ""
611 611 self.opener("journal.dirstate", "w").write(ds)
612 612 self.opener("journal.branch", "w").write(self.dirstate.branch())
613 613
614 614 renames = [(self.sjoin("journal"), self.sjoin("undo")),
615 615 (self.join("journal.dirstate"), self.join("undo.dirstate")),
616 616 (self.join("journal.branch"), self.join("undo.branch"))]
617 617 tr = transaction.transaction(self.ui.warn, self.sopener,
618 618 self.sjoin("journal"),
619 619 aftertrans(renames),
620 620 self.store.createmode)
621 621 self._transref = weakref.ref(tr)
622 622 return tr
623 623
624 624 def recover(self):
625 625 lock = self.lock()
626 626 try:
627 627 if os.path.exists(self.sjoin("journal")):
628 628 self.ui.status(_("rolling back interrupted transaction\n"))
629 629 transaction.rollback(self.sopener, self.sjoin("journal"))
630 630 self.invalidate()
631 631 return True
632 632 else:
633 633 self.ui.warn(_("no interrupted transaction available\n"))
634 634 return False
635 635 finally:
636 636 lock.release()
637 637
638 638 def rollback(self):
639 639 wlock = lock = None
640 640 try:
641 641 wlock = self.wlock()
642 642 lock = self.lock()
643 643 if os.path.exists(self.sjoin("undo")):
644 644 self.ui.status(_("rolling back last transaction\n"))
645 645 transaction.rollback(self.sopener, self.sjoin("undo"))
646 646 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
647 647 try:
648 648 branch = self.opener("undo.branch").read()
649 649 self.dirstate.setbranch(branch)
650 650 except IOError:
651 651 self.ui.warn(_("Named branch could not be reset, "
652 652 "current branch still is: %s\n")
653 653 % encoding.tolocal(self.dirstate.branch()))
654 654 self.invalidate()
655 655 self.dirstate.invalidate()
656 656 else:
657 657 self.ui.warn(_("no rollback information available\n"))
658 658 finally:
659 659 release(lock, wlock)
660 660
661 661 def invalidate(self):
662 662 for a in "changelog manifest".split():
663 663 if a in self.__dict__:
664 664 delattr(self, a)
665 665 self.tagscache = None
666 666 self._tagstypecache = None
667 667 self.nodetagscache = None
668 668 self.branchcache = None
669 669 self._ubranchcache = None
670 670 self._branchcachetip = None
671 671
672 672 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
673 673 try:
674 674 l = lock.lock(lockname, 0, releasefn, desc=desc)
675 675 except error.LockHeld, inst:
676 676 if not wait:
677 677 raise
678 678 self.ui.warn(_("waiting for lock on %s held by %r\n") %
679 679 (desc, inst.locker))
680 680 # default to 600 seconds timeout
681 681 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
682 682 releasefn, desc=desc)
683 683 if acquirefn:
684 684 acquirefn()
685 685 return l
686 686
687 687 def lock(self, wait=True):
688 688 l = self._lockref and self._lockref()
689 689 if l is not None and l.held:
690 690 l.lock()
691 691 return l
692 692
693 693 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
694 694 _('repository %s') % self.origroot)
695 695 self._lockref = weakref.ref(l)
696 696 return l
697 697
698 698 def wlock(self, wait=True):
699 699 l = self._wlockref and self._wlockref()
700 700 if l is not None and l.held:
701 701 l.lock()
702 702 return l
703 703
704 704 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
705 705 self.dirstate.invalidate, _('working directory of %s') %
706 706 self.origroot)
707 707 self._wlockref = weakref.ref(l)
708 708 return l
709 709
710 710 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
711 711 """
712 712 commit an individual file as part of a larger transaction
713 713 """
714 714
715 715 fn = fctx.path()
716 716 t = fctx.data()
717 717 fl = self.file(fn)
718 718 fp1 = manifest1.get(fn, nullid)
719 719 fp2 = manifest2.get(fn, nullid)
720 720
721 721 meta = {}
722 722 cp = fctx.renamed()
723 723 if cp and cp[0] != fn:
724 724 # Mark the new revision of this file as a copy of another
725 725 # file. This copy data will effectively act as a parent
726 726 # of this new revision. If this is a merge, the first
727 727 # parent will be the nullid (meaning "look up the copy data")
728 728 # and the second one will be the other parent. For example:
729 729 #
730 730 # 0 --- 1 --- 3 rev1 changes file foo
731 731 # \ / rev2 renames foo to bar and changes it
732 732 # \- 2 -/ rev3 should have bar with all changes and
733 733 # should record that bar descends from
734 734 # bar in rev2 and foo in rev1
735 735 #
736 736 # this allows this merge to succeed:
737 737 #
738 738 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
739 739 # \ / merging rev3 and rev4 should use bar@rev2
740 740 # \- 2 --- 4 as the merge base
741 741 #
742 742
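            # (Illustration, not part of the original source: concretely,
            # the branch below records meta['copy'] = the copy source and
            # meta['copyrev'] = its filenode, and sets the filelog parents
            # to (nullid, other parent) -- the "look up the copy data"
            # convention described above.)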
743 743 cf = cp[0]
744 744 cr = manifest1.get(cf)
745 745 nfp = fp2
746 746
747 747 if manifest2: # branch merge
748 748 if fp2 == nullid or cr is None: # copied on remote side
749 749 if cf in manifest2:
750 750 cr = manifest2[cf]
751 751 nfp = fp1
752 752
753 753 # find source in nearest ancestor if we've lost track
754 754 if not cr:
755 755 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
756 756 (fn, cf))
757 757 for a in self['.'].ancestors():
758 758 if cf in a:
759 759 cr = a[cf].filenode()
760 760 break
761 761
762 762 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
763 763 meta["copy"] = cf
764 764 meta["copyrev"] = hex(cr)
765 765 fp1, fp2 = nullid, nfp
766 766 elif fp2 != nullid:
767 767 # is one parent an ancestor of the other?
768 768 fpa = fl.ancestor(fp1, fp2)
769 769 if fpa == fp1:
770 770 fp1, fp2 = fp2, nullid
771 771 elif fpa == fp2:
772 772 fp2 = nullid
773 773
774 774 # is the file unmodified from the parent? report existing entry
775 775 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
776 776 return fp1
777 777
778 778 changelist.append(fn)
779 779 return fl.add(t, meta, tr, linkrev, fp1, fp2)
780 780
781 781 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
782 782 if p1 is None:
783 783 p1, p2 = self.dirstate.parents()
784 784 return self.commit(files=files, text=text, user=user, date=date,
785 785 p1=p1, p2=p2, extra=extra, empty_ok=True)
786 786
787 787 def commit(self, files=None, text="", user=None, date=None,
788 788 match=None, force=False, force_editor=False,
789 789 p1=None, p2=None, extra={}, empty_ok=False):
790 790 wlock = lock = None
791 791 if extra.get("close"):
792 792 force = True
793 793 if files:
794 files = util.unique(files)
794 files = list(set(files))
795 795 try:
796 796 wlock = self.wlock()
797 797 lock = self.lock()
798 798 use_dirstate = (p1 is None) # not rawcommit
799 799
800 800 if use_dirstate:
801 801 p1, p2 = self.dirstate.parents()
802 802 update_dirstate = True
803 803
804 804 if (not force and p2 != nullid and
805 805 (match and (match.files() or match.anypats()))):
806 806 raise util.Abort(_('cannot partially commit a merge '
807 807 '(do not specify files or patterns)'))
808 808
809 809 if files:
810 810 modified, removed = [], []
811 811 for f in files:
812 812 s = self.dirstate[f]
813 813 if s in 'nma':
814 814 modified.append(f)
815 815 elif s == 'r':
816 816 removed.append(f)
817 817 else:
818 818 self.ui.warn(_("%s not tracked!\n") % f)
819 819 changes = [modified, [], removed, [], []]
820 820 else:
821 821 changes = self.status(match=match)
822 822 else:
823 823 p1, p2 = p1, p2 or nullid
824 824 update_dirstate = (self.dirstate.parents()[0] == p1)
825 825 changes = [files, [], [], [], []]
826 826
827 827 ms = merge_.mergestate(self)
828 828 for f in changes[0]:
829 829 if f in ms and ms[f] == 'u':
830 830 raise util.Abort(_("unresolved merge conflicts "
831 831 "(see hg resolve)"))
832 832 wctx = context.workingctx(self, (p1, p2), text, user, date,
833 833 extra, changes)
834 834 r = self._commitctx(wctx, force, force_editor, empty_ok,
835 835 use_dirstate, update_dirstate)
836 836 ms.reset()
837 837 return r
838 838
839 839 finally:
840 840 release(lock, wlock)
841 841
842 842 def commitctx(self, ctx):
843 843 """Add a new revision to current repository.
844 844
845 845 Revision information is passed in the context.memctx argument.
846 846 commitctx() does not touch the working directory.
847 847 """
848 848 wlock = lock = None
849 849 try:
850 850 wlock = self.wlock()
851 851 lock = self.lock()
852 852 return self._commitctx(ctx, force=True, force_editor=False,
853 853 empty_ok=True, use_dirstate=False,
854 854 update_dirstate=False)
855 855 finally:
856 856 release(lock, wlock)
857 857
858 858 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
859 859 use_dirstate=True, update_dirstate=True):
860 860 tr = None
861 861 valid = 0 # don't save the dirstate if this isn't set
862 862 try:
863 863 commit = util.sort(wctx.modified() + wctx.added())
864 864 remove = wctx.removed()
865 865 extra = wctx.extra().copy()
866 866 branchname = extra['branch']
867 867 user = wctx.user()
868 868 text = wctx.description()
869 869
870 870 p1, p2 = [p.node() for p in wctx.parents()]
871 871 c1 = self.changelog.read(p1)
872 872 c2 = self.changelog.read(p2)
873 873 m1 = self.manifest.read(c1[0]).copy()
874 874 m2 = self.manifest.read(c2[0])
875 875
876 876 if use_dirstate:
877 877 oldname = c1[5].get("branch") # stored in UTF-8
878 878 if (not commit and not remove and not force and p2 == nullid
879 879 and branchname == oldname):
880 880 self.ui.status(_("nothing changed\n"))
881 881 return None
882 882
883 883 xp1 = hex(p1)
884 884 if p2 == nullid: xp2 = ''
885 885 else: xp2 = hex(p2)
886 886
887 887 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
888 888
889 889 tr = self.transaction()
890 890 trp = weakref.proxy(tr)
891 891
892 892 # check in files
893 893 new = {}
894 894 changed = []
895 895 linkrev = len(self)
896 896 for f in commit:
897 897 self.ui.note(f + "\n")
898 898 try:
899 899 fctx = wctx.filectx(f)
900 900 newflags = fctx.flags()
901 901 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
902 902 if ((not changed or changed[-1] != f) and
903 903 m2.get(f) != new[f]):
904 904 # mention the file in the changelog if some
905 905 # flag changed, even if there was no content
906 906 # change.
907 907 if m1.flags(f) != newflags:
908 908 changed.append(f)
909 909 m1.set(f, newflags)
910 910 if use_dirstate:
911 911 self.dirstate.normal(f)
912 912
913 913 except (OSError, IOError):
914 914 if use_dirstate:
915 915 self.ui.warn(_("trouble committing %s!\n") % f)
916 916 raise
917 917 else:
918 918 remove.append(f)
919 919
920 920 updated, added = [], []
921 921 for f in util.sort(changed):
922 922 if f in m1 or f in m2:
923 923 updated.append(f)
924 924 else:
925 925 added.append(f)
926 926
927 927 # update manifest
928 928 m1.update(new)
929 929 removed = [f for f in util.sort(remove) if f in m1 or f in m2]
930 930 removed1 = []
931 931
932 932 for f in removed:
933 933 if f in m1:
934 934 del m1[f]
935 935 removed1.append(f)
936 936 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
937 937 (new, removed1))
938 938
939 939 # add changeset
940 940 if (not empty_ok and not text) or force_editor:
941 941 edittext = []
942 942 if text:
943 943 edittext.append(text)
944 944 edittext.append("")
945 945 edittext.append("") # Empty line between message and comments.
946 946 edittext.append(_("HG: Enter commit message."
947 947 " Lines beginning with 'HG:' are removed."))
948 948 edittext.append("HG: --")
949 949 edittext.append("HG: user: %s" % user)
950 950 if p2 != nullid:
951 951 edittext.append("HG: branch merge")
952 952 if branchname:
953 953 edittext.append("HG: branch '%s'"
954 954 % encoding.tolocal(branchname))
955 955 edittext.extend(["HG: added %s" % f for f in added])
956 956 edittext.extend(["HG: changed %s" % f for f in updated])
957 957 edittext.extend(["HG: removed %s" % f for f in removed])
958 958 if not added and not updated and not removed:
959 959 edittext.append("HG: no files changed")
960 960 edittext.append("")
961 961 # run editor in the repository root
962 962 olddir = os.getcwd()
963 963 os.chdir(self.root)
964 964 text = self.ui.edit("\n".join(edittext), user)
965 965 os.chdir(olddir)
966 966
967 967 lines = [line.rstrip() for line in text.rstrip().splitlines()]
968 968 while lines and not lines[0]:
969 969 del lines[0]
970 970 if not lines and use_dirstate:
971 971 raise util.Abort(_("empty commit message"))
972 972 text = '\n'.join(lines)
973 973
974 974 self.changelog.delayupdate()
975 975 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
976 976 user, wctx.date(), extra)
977 977 p = lambda: self.changelog.writepending() and self.root or ""
978 978 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
979 979 parent2=xp2, pending=p)
980 980 self.changelog.finalize(trp)
981 981 tr.close()
982 982
983 983 if self.branchcache:
984 984 self.branchtags()
985 985
986 986 if use_dirstate or update_dirstate:
987 987 self.dirstate.setparents(n)
988 988 if use_dirstate:
989 989 for f in removed:
990 990 self.dirstate.forget(f)
991 991 valid = 1 # our dirstate updates are complete
992 992
993 993 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
994 994 return n
995 995 finally:
996 996 if not valid: # don't save our updated dirstate
997 997 self.dirstate.invalidate()
998 998 del tr
999 999
1000 1000 def walk(self, match, node=None):
1001 1001 '''
1002 1002 walk recursively through the directory tree or a given
1003 1003 changeset, finding all files matched by the match
1004 1004 function
1005 1005 '''
1006 1006 return self[node].walk(match)
1007 1007
1008 1008 def status(self, node1='.', node2=None, match=None,
1009 1009 ignored=False, clean=False, unknown=False):
1010 1010 """return status of files between two nodes or node and working directory
1011 1011
1012 1012 If node1 is None, use the first dirstate parent instead.
1013 1013 If node2 is None, compare node1 with working directory.
1014 1014 """
1015 1015
1016 1016 def mfmatches(ctx):
1017 1017 mf = ctx.manifest().copy()
1018 1018 for fn in mf.keys():
1019 1019 if not match(fn):
1020 1020 del mf[fn]
1021 1021 return mf
1022 1022
1023 1023 if isinstance(node1, context.changectx):
1024 1024 ctx1 = node1
1025 1025 else:
1026 1026 ctx1 = self[node1]
1027 1027 if isinstance(node2, context.changectx):
1028 1028 ctx2 = node2
1029 1029 else:
1030 1030 ctx2 = self[node2]
1031 1031
1032 1032 working = ctx2.rev() is None
1033 1033 parentworking = working and ctx1 == self['.']
1034 1034 match = match or match_.always(self.root, self.getcwd())
1035 1035 listignored, listclean, listunknown = ignored, clean, unknown
1036 1036
1037 1037 # load earliest manifest first for caching reasons
1038 1038 if not working and ctx2.rev() < ctx1.rev():
1039 1039 ctx2.manifest()
1040 1040
1041 1041 if not parentworking:
1042 1042 def bad(f, msg):
1043 1043 if f not in ctx1:
1044 1044 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1045 1045 return False
1046 1046 match.bad = bad
1047 1047
1048 1048 if working: # we need to scan the working dir
1049 1049 s = self.dirstate.status(match, listignored, listclean, listunknown)
1050 1050 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1051 1051
1052 1052 # check for any possibly clean files
1053 1053 if parentworking and cmp:
1054 1054 fixup = []
1055 1055 # do a full compare of any files that might have changed
1056 1056 for f in cmp:
1057 1057 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1058 1058 or ctx1[f].cmp(ctx2[f].data())):
1059 1059 modified.append(f)
1060 1060 else:
1061 1061 fixup.append(f)
1062 1062
1063 1063 if listclean:
1064 1064 clean += fixup
1065 1065
1066 1066 # update dirstate for files that are actually clean
1067 1067 if fixup:
1068 1068 wlock = None
1069 1069 try:
1070 1070 try:
1071 1071 # updating the dirstate is optional
1072 1072 # so we don't wait on the lock
1073 1073 wlock = self.wlock(False)
1074 1074 for f in fixup:
1075 1075 self.dirstate.normal(f)
1076 1076 except error.LockError:
1077 1077 pass
1078 1078 finally:
1079 1079 release(wlock)
1080 1080
1081 1081 if not parentworking:
1082 1082 mf1 = mfmatches(ctx1)
1083 1083 if working:
1084 1084 # we are comparing working dir against non-parent
1085 1085 # generate a pseudo-manifest for the working dir
1086 1086 mf2 = mfmatches(self['.'])
1087 1087 for f in cmp + modified + added:
1088 1088 mf2[f] = None
1089 1089 mf2.set(f, ctx2.flags(f))
1090 1090 for f in removed:
1091 1091 if f in mf2:
1092 1092 del mf2[f]
1093 1093 else:
1094 1094 # we are comparing two revisions
1095 1095 deleted, unknown, ignored = [], [], []
1096 1096 mf2 = mfmatches(ctx2)
1097 1097
1098 1098 modified, added, clean = [], [], []
1099 1099 for fn in mf2:
1100 1100 if fn in mf1:
1101 1101 if (mf1.flags(fn) != mf2.flags(fn) or
1102 1102 (mf1[fn] != mf2[fn] and
1103 1103 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1104 1104 modified.append(fn)
1105 1105 elif listclean:
1106 1106 clean.append(fn)
1107 1107 del mf1[fn]
1108 1108 else:
1109 1109 added.append(fn)
1110 1110 removed = mf1.keys()
1111 1111
1112 1112 r = modified, added, removed, deleted, unknown, ignored, clean
1113 1113 [l.sort() for l in r]
1114 1114 return r
1115 1115
1116 1116 def add(self, list):
1117 1117 wlock = self.wlock()
1118 1118 try:
1119 1119 rejected = []
1120 1120 for f in list:
1121 1121 p = self.wjoin(f)
1122 1122 try:
1123 1123 st = os.lstat(p)
1124 1124 except:
1125 1125 self.ui.warn(_("%s does not exist!\n") % f)
1126 1126 rejected.append(f)
1127 1127 continue
1128 1128 if st.st_size > 10000000:
1129 1129 self.ui.warn(_("%s: files over 10MB may cause memory and"
1130 1130 " performance problems\n"
1131 1131 "(use 'hg revert %s' to unadd the file)\n")
1132 1132 % (f, f))
1133 1133 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1134 1134 self.ui.warn(_("%s not added: only files and symlinks "
1135 1135 "supported currently\n") % f)
1136 1136 rejected.append(p)
1137 1137 elif self.dirstate[f] in 'amn':
1138 1138 self.ui.warn(_("%s already tracked!\n") % f)
1139 1139 elif self.dirstate[f] == 'r':
1140 1140 self.dirstate.normallookup(f)
1141 1141 else:
1142 1142 self.dirstate.add(f)
1143 1143 return rejected
1144 1144 finally:
1145 1145 wlock.release()
1146 1146
1147 1147 def forget(self, list):
1148 1148 wlock = self.wlock()
1149 1149 try:
1150 1150 for f in list:
1151 1151 if self.dirstate[f] != 'a':
1152 1152 self.ui.warn(_("%s not added!\n") % f)
1153 1153 else:
1154 1154 self.dirstate.forget(f)
1155 1155 finally:
1156 1156 wlock.release()
1157 1157
1158 1158 def remove(self, list, unlink=False):
1159 1159 wlock = None
1160 1160 try:
1161 1161 if unlink:
1162 1162 for f in list:
1163 1163 try:
1164 1164 util.unlink(self.wjoin(f))
1165 1165 except OSError, inst:
1166 1166 if inst.errno != errno.ENOENT:
1167 1167 raise
1168 1168 wlock = self.wlock()
1169 1169 for f in list:
1170 1170 if unlink and os.path.exists(self.wjoin(f)):
1171 1171 self.ui.warn(_("%s still exists!\n") % f)
1172 1172 elif self.dirstate[f] == 'a':
1173 1173 self.dirstate.forget(f)
1174 1174 elif f not in self.dirstate:
1175 1175 self.ui.warn(_("%s not tracked!\n") % f)
1176 1176 else:
1177 1177 self.dirstate.remove(f)
1178 1178 finally:
1179 1179 release(wlock)
1180 1180
1181 1181 def undelete(self, list):
1182 1182 manifests = [self.manifest.read(self.changelog.read(p)[0])
1183 1183 for p in self.dirstate.parents() if p != nullid]
1184 1184 wlock = self.wlock()
1185 1185 try:
1186 1186 for f in list:
1187 1187 if self.dirstate[f] != 'r':
1188 1188 self.ui.warn(_("%s not removed!\n") % f)
1189 1189 else:
1190 1190 m = f in manifests[0] and manifests[0] or manifests[1]
1191 1191 t = self.file(f).read(m[f])
1192 1192 self.wwrite(f, t, m.flags(f))
1193 1193 self.dirstate.normal(f)
1194 1194 finally:
1195 1195 wlock.release()
1196 1196
1197 1197 def copy(self, source, dest):
1198 1198 p = self.wjoin(dest)
1199 1199 if not (os.path.exists(p) or os.path.islink(p)):
1200 1200 self.ui.warn(_("%s does not exist!\n") % dest)
1201 1201 elif not (os.path.isfile(p) or os.path.islink(p)):
1202 1202 self.ui.warn(_("copy failed: %s is not a file or a "
1203 1203 "symbolic link\n") % dest)
1204 1204 else:
1205 1205 wlock = self.wlock()
1206 1206 try:
1207 1207 if self.dirstate[dest] in '?r':
1208 1208 self.dirstate.add(dest)
1209 1209 self.dirstate.copy(source, dest)
1210 1210 finally:
1211 1211 wlock.release()
1212 1212
1213 1213 def heads(self, start=None, closed=True):
1214 1214 heads = self.changelog.heads(start)
1215 1215 def display(head):
1216 1216 if closed:
1217 1217 return True
1218 1218 extras = self.changelog.read(head)[5]
1219 1219 return ('close' not in extras)
1220 1220 # sort the output in rev descending order
1221 1221 heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
1222 1222 return [n for (r, n) in util.sort(heads)]
1223 1223
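# A minimal standalone sketch of the decorate-sort trick used above:
# negating the revision number makes a plain ascending sort yield heads
# newest-first. The node names and rev numbers below are made up.
heads = [('n1', 3), ('n2', 7), ('n3', 5)]          # (node, rev) pairs
ordered = [n for (negrev, n) in sorted((-rev, n) for (n, rev) in heads)]
assert ordered == ['n2', 'n3', 'n1']               # highest rev first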
1224 1224 def branchheads(self, branch=None, start=None, closed=True):
1225 1225 if branch is None:
1226 1226 branch = self[None].branch()
1227 1227 branches = self._branchheads()
1228 1228 if branch not in branches:
1229 1229 return []
1230 1230 bheads = branches[branch]
1231 1231 # the cache returns heads ordered lowest to highest
1232 1232 bheads.reverse()
1233 1233 if start is not None:
1234 1234 # filter out the heads that cannot be reached from startrev
1235 1235 bheads = self.changelog.nodesbetween([start], bheads)[2]
1236 1236 if not closed:
1237 1237 bheads = [h for h in bheads if
1238 1238 ('close' not in self.changelog.read(h)[5])]
1239 1239 return bheads
1240 1240
1241 1241 def branches(self, nodes):
1242 1242 if not nodes:
1243 1243 nodes = [self.changelog.tip()]
1244 1244 b = []
1245 1245 for n in nodes:
1246 1246 t = n
1247 1247 while 1:
1248 1248 p = self.changelog.parents(n)
1249 1249 if p[1] != nullid or p[0] == nullid:
1250 1250 b.append((t, n, p[0], p[1]))
1251 1251 break
1252 1252 n = p[0]
1253 1253 return b
1254 1254
1255 1255 def between(self, pairs):
1256 1256 r = []
1257 1257
1258 1258 for top, bottom in pairs:
1259 1259 n, l, i = top, [], 0
1260 1260 f = 1
1261 1261
1262 1262 while n != bottom and n != nullid:
1263 1263 p = self.changelog.parents(n)[0]
1264 1264 if i == f:
1265 1265 l.append(n)
1266 1266 f = f * 2
1267 1267 n = p
1268 1268 i += 1
1269 1269
1270 1270 r.append(l)
1271 1271
1272 1272 return r
1273 1273
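# A standalone sketch of the sampling between() performs on one segment:
# it keeps the nodes at distances 1, 2, 4, 8, ... below the top, which is
# what makes the later narrowing step of discovery logarithmic. Plain
# integers on a linear chain stand in for real nodes; parent of k is k - 1.
def between_one(top, bottom):
    n, l, i, f = top, [], 0, 1
    while n != bottom:
        if i == f:
            l.append(n)
            f *= 2
        n -= 1
        i += 1
    return l

print(between_one(20, 0))     # [19, 18, 16, 12, 4]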
1274 1274 def findincoming(self, remote, base=None, heads=None, force=False):
1275 1275 """Return list of roots of the subsets of missing nodes from remote
1276 1276
1277 1277 If base dict is specified, assume that these nodes and their parents
1278 1278 exist on the remote side and that no child of a node of base exists
1279 1279 in both remote and self.
1280 1280 Furthermore, base will be updated to include the nodes that exist
1281 1281 in both self and remote but whose children do not.
1282 1282 If a list of heads is specified, return only nodes which are heads
1283 1283 or ancestors of these heads.
1284 1284
1285 1285 All the ancestors of base are in self and in remote.
1286 1286 All the descendants of the list returned are missing in self.
1287 1287 (and so we know that the rest of the nodes are missing in remote, see
1288 1288 outgoing)
1289 1289 """
1290 1290 return self.findcommonincoming(remote, base, heads, force)[1]
1291 1291
1292 1292 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1293 1293 """Return a tuple (common, missing roots, heads) used to identify
1294 1294 missing nodes from remote.
1295 1295
1296 1296 If base dict is specified, assume that these nodes and their parents
1297 1297 exist on the remote side and that no child of a node of base exists
1298 1298 in both remote and self.
1299 1299 Furthermore, base will be updated to include the nodes that exist
1300 1300 in both self and remote but whose children do not.
1301 1301 If a list of heads is specified, return only nodes which are heads
1302 1302 or ancestors of these heads.
1303 1303
1304 1304 All the ancestors of base are in self and in remote.
1305 1305 """
1306 1306 m = self.changelog.nodemap
1307 1307 search = []
1308 1308 fetch = {}
1309 1309 seen = {}
1310 1310 seenbranch = {}
1311 1311 if base is None:
1312 1312 base = {}
1313 1313
1314 1314 if not heads:
1315 1315 heads = remote.heads()
1316 1316
1317 1317 if self.changelog.tip() == nullid:
1318 1318 base[nullid] = 1
1319 1319 if heads != [nullid]:
1320 1320 return [nullid], [nullid], list(heads)
1321 1321 return [nullid], [], []
1322 1322
1323 1323 # assume we're closer to the tip than the root
1324 1324 # and start by examining the heads
1325 1325 self.ui.status(_("searching for changes\n"))
1326 1326
1327 1327 unknown = []
1328 1328 for h in heads:
1329 1329 if h not in m:
1330 1330 unknown.append(h)
1331 1331 else:
1332 1332 base[h] = 1
1333 1333
1334 1334 heads = unknown
1335 1335 if not unknown:
1336 1336 return base.keys(), [], []
1337 1337
1338 1338 req = dict.fromkeys(unknown)
1339 1339 reqcnt = 0
1340 1340
1341 1341 # search through remote branches
1342 1342 # a 'branch' here is a linear segment of history, with four parts:
1343 1343 # head, root, first parent, second parent
1344 1344 # (a branch always has two parents (or none) by definition)
1345 1345 unknown = remote.branches(unknown)
1346 1346 while unknown:
1347 1347 r = []
1348 1348 while unknown:
1349 1349 n = unknown.pop(0)
1350 1350 if n[0] in seen:
1351 1351 continue
1352 1352
1353 1353 self.ui.debug(_("examining %s:%s\n")
1354 1354 % (short(n[0]), short(n[1])))
1355 1355 if n[0] == nullid: # found the end of the branch
1356 1356 pass
1357 1357 elif n in seenbranch:
1358 1358 self.ui.debug(_("branch already found\n"))
1359 1359 continue
1360 1360 elif n[1] and n[1] in m: # do we know the base?
1361 1361 self.ui.debug(_("found incomplete branch %s:%s\n")
1362 1362 % (short(n[0]), short(n[1])))
1363 1363 search.append(n[0:2]) # schedule branch range for scanning
1364 1364 seenbranch[n] = 1
1365 1365 else:
1366 1366 if n[1] not in seen and n[1] not in fetch:
1367 1367 if n[2] in m and n[3] in m:
1368 1368 self.ui.debug(_("found new changeset %s\n") %
1369 1369 short(n[1]))
1370 1370 fetch[n[1]] = 1 # earliest unknown
1371 1371 for p in n[2:4]:
1372 1372 if p in m:
1373 1373 base[p] = 1 # latest known
1374 1374
1375 1375 for p in n[2:4]:
1376 1376 if p not in req and p not in m:
1377 1377 r.append(p)
1378 1378 req[p] = 1
1379 1379 seen[n[0]] = 1
1380 1380
1381 1381 if r:
1382 1382 reqcnt += 1
1383 1383 self.ui.debug(_("request %d: %s\n") %
1384 1384 (reqcnt, " ".join(map(short, r))))
1385 1385 for p in xrange(0, len(r), 10):
1386 1386 for b in remote.branches(r[p:p+10]):
1387 1387 self.ui.debug(_("received %s:%s\n") %
1388 1388 (short(b[0]), short(b[1])))
1389 1389 unknown.append(b)
1390 1390
1391 1391 # do binary search on the branches we found
1392 1392 while search:
1393 1393 newsearch = []
1394 1394 reqcnt += 1
1395 1395 for n, l in zip(search, remote.between(search)):
1396 1396 l.append(n[1])
1397 1397 p = n[0]
1398 1398 f = 1
1399 1399 for i in l:
1400 1400 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1401 1401 if i in m:
1402 1402 if f <= 2:
1403 1403 self.ui.debug(_("found new branch changeset %s\n") %
1404 1404 short(p))
1405 1405 fetch[p] = 1
1406 1406 base[i] = 1
1407 1407 else:
1408 1408 self.ui.debug(_("narrowed branch search to %s:%s\n")
1409 1409 % (short(p), short(i)))
1410 1410 newsearch.append((p, i))
1411 1411 break
1412 1412 p, f = i, f * 2
1413 1413 search = newsearch
1414 1414
1415 1415 # sanity check our fetch list
1416 1416 for f in fetch.keys():
1417 1417 if f in m:
1418 1418 raise error.RepoError(_("already have changeset ")
1419 1419 + short(f))
1420 1420
1421 1421 if base.keys() == [nullid]:
1422 1422 if force:
1423 1423 self.ui.warn(_("warning: repository is unrelated\n"))
1424 1424 else:
1425 1425 raise util.Abort(_("repository is unrelated"))
1426 1426
1427 1427 self.ui.debug(_("found new changesets starting at ") +
1428 1428 " ".join([short(f) for f in fetch]) + "\n")
1429 1429
1430 1430 self.ui.debug(_("%d total queries\n") % reqcnt)
1431 1431
1432 1432 return base.keys(), fetch.keys(), heads
1433 1433
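# A standalone sketch of the binary-search narrowing above: sample each
# (unknown head, known base) segment at exponentially growing distances,
# recurse into the window ending at the first locally-known sample, and
# once the window is small (f <= 2) the earliest unknown node is pinned.
# Plain integers on a linear chain stand in for real changeset hashes.
def narrow(head, base, known):
    def samples(top, bottom):      # what remote.between() would return
        n, l, i, f = top, [], 0, 1
        while n != bottom:
            if i == f:
                l.append(n)
                f *= 2
            n -= 1
            i += 1
        return l
    search, fetch = [(head, base)], set()
    while search:
        newsearch = []
        for top, bottom in search:
            p, f = top, 1
            for i in samples(top, bottom) + [bottom]:
                if i in known:
                    if f <= 2:
                        fetch.add(p)             # earliest unknown node
                    else:
                        newsearch.append((p, i))
                    break
                p, f = i, f * 2
        search = newsearch
    return fetch

print(narrow(20, 0, set(range(8))))   # set([8]): rev 8 is the first missing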
1434 1434 def findoutgoing(self, remote, base=None, heads=None, force=False):
1435 1435 """Return list of nodes that are roots of subsets not in remote
1436 1436
1437 1437 If base dict is specified, assume that these nodes and their parents
1438 1438 exist on the remote side.
1439 1439 If a list of heads is specified, return only nodes which are heads
1440 1440 or ancestors of these heads, and return a second element which
1441 1441 contains all remote heads which get new children.
1442 1442 """
1443 1443 if base is None:
1444 1444 base = {}
1445 1445 self.findincoming(remote, base, heads, force=force)
1446 1446
1447 1447 self.ui.debug(_("common changesets up to ")
1448 1448 + " ".join(map(short, base.keys())) + "\n")
1449 1449
1450 1450 remain = dict.fromkeys(self.changelog.nodemap)
1451 1451
1452 1452 # prune everything remote has from the tree
1453 1453 del remain[nullid]
1454 1454 remove = base.keys()
1455 1455 while remove:
1456 1456 n = remove.pop(0)
1457 1457 if n in remain:
1458 1458 del remain[n]
1459 1459 for p in self.changelog.parents(n):
1460 1460 remove.append(p)
1461 1461
1462 1462 # find every node whose parents have been pruned
1463 1463 subset = []
1464 1464 # find every remote head that will get new children
1465 1465 updated_heads = {}
1466 1466 for n in remain:
1467 1467 p1, p2 = self.changelog.parents(n)
1468 1468 if p1 not in remain and p2 not in remain:
1469 1469 subset.append(n)
1470 1470 if heads:
1471 1471 if p1 in heads:
1472 1472 updated_heads[p1] = True
1473 1473 if p2 in heads:
1474 1474 updated_heads[p2] = True
1475 1475
1476 1476 # this is the set of all roots we have to push
1477 1477 if heads:
1478 1478 return subset, updated_heads.keys()
1479 1479 else:
1480 1480 return subset
1481 1481
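# A standalone sketch of the pruning above: drop everything the remote
# side has (the base nodes and all their ancestors), then the outgoing
# roots are the remaining nodes none of whose parents survived. The
# tiny integer DAG below is made up; None plays the role of nullid.
parents = {1: (None, None), 2: (1, None), 3: (2, None), 4: (2, None)}
base = [2]                                   # remote already has rev 2

remain = set(parents)
remove = list(base)
while remove:                                # prune base and its ancestors
    n = remove.pop()
    if n in remain:
        remain.discard(n)
        remove.extend(p for p in parents[n] if p is not None)

roots = [n for n in sorted(remain)
         if not any(p in remain for p in parents[n] if p is not None)]
print(roots)                                 # [3, 4]: roots we must push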
1482 1482 def pull(self, remote, heads=None, force=False):
1483 1483 lock = self.lock()
1484 1484 try:
1485 1485 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1486 1486 force=force)
1487 1487 if fetch == [nullid]:
1488 1488 self.ui.status(_("requesting all changes\n"))
1489 1489
1490 1490 if not fetch:
1491 1491 self.ui.status(_("no changes found\n"))
1492 1492 return 0
1493 1493
1494 1494 if heads is None and remote.capable('changegroupsubset'):
1495 1495 heads = rheads
1496 1496
1497 1497 if heads is None:
1498 1498 cg = remote.changegroup(fetch, 'pull')
1499 1499 else:
1500 1500 if not remote.capable('changegroupsubset'):
1501 1501 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1502 1502 cg = remote.changegroupsubset(fetch, heads, 'pull')
1503 1503 return self.addchangegroup(cg, 'pull', remote.url())
1504 1504 finally:
1505 1505 lock.release()
1506 1506
1507 1507 def push(self, remote, force=False, revs=None):
1508 1508 # there are two ways to push to remote repo:
1509 1509 #
1510 1510 # addchangegroup assumes local user can lock remote
1511 1511 # repo (local filesystem, old ssh servers).
1512 1512 #
1513 1513 # unbundle assumes local user cannot lock remote repo (new ssh
1514 1514 # servers, http servers).
1515 1515
1516 1516 if remote.capable('unbundle'):
1517 1517 return self.push_unbundle(remote, force, revs)
1518 1518 return self.push_addchangegroup(remote, force, revs)
1519 1519
1520 1520 def prepush(self, remote, force, revs):
1521 1521 common = {}
1522 1522 remote_heads = remote.heads()
1523 1523 inc = self.findincoming(remote, common, remote_heads, force=force)
1524 1524
1525 1525 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1526 1526 if revs is not None:
1527 1527 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1528 1528 else:
1529 1529 bases, heads = update, self.changelog.heads()
1530 1530
1531 1531 if not bases:
1532 1532 self.ui.status(_("no changes found\n"))
1533 1533 return None, 1
1534 1534 elif not force:
1535 1535 # check if we're creating new remote heads
1536 1536 # to be a remote head after push, node must be either
1537 1537 # - unknown locally
1538 1538 # - a local outgoing head descended from update
1539 1539 # - a remote head that's known locally and not
1540 1540 # ancestral to an outgoing head
1541 1541
1542 1542 warn = 0
1543 1543
1544 1544 if remote_heads == [nullid]:
1545 1545 warn = 0
1546 1546 elif not revs and len(heads) > len(remote_heads):
1547 1547 warn = 1
1548 1548 else:
1549 1549 newheads = list(heads)
1550 1550 for r in remote_heads:
1551 1551 if r in self.changelog.nodemap:
1552 1552 desc = self.changelog.heads(r, heads)
1553 1553 l = [h for h in heads if h in desc]
1554 1554 if not l:
1555 1555 newheads.append(r)
1556 1556 else:
1557 1557 newheads.append(r)
1558 1558 if len(newheads) > len(remote_heads):
1559 1559 warn = 1
1560 1560
1561 1561 if warn:
1562 1562 self.ui.warn(_("abort: push creates new remote heads!\n"))
1563 1563 self.ui.status(_("(did you forget to merge?"
1564 1564 " use push -f to force)\n"))
1565 1565 return None, 0
1566 1566 elif inc:
1567 1567 self.ui.warn(_("note: unsynced remote changes!\n"))
1568 1568
1569 1569
1570 1570 if revs is None:
1571 1571 # use the fast path, no race possible on push
1572 1572 cg = self._changegroup(common.keys(), 'push')
1573 1573 else:
1574 1574 cg = self.changegroupsubset(update, revs, 'push')
1575 1575 return cg, remote_heads
1576 1576
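# A standalone sketch of the new-remote-head check above: a remote head
# stays a head after the push unless some outgoing head descends from it,
# so the push is flagged when the resulting head count grows. The
# descendant relation is supplied directly here instead of via changelog.
def creates_new_heads(local_heads, remote_heads, descends):
    newheads = list(local_heads)
    for r in remote_heads:
        if not [h for h in local_heads if descends(h, r)]:
            newheads.append(r)               # r keeps being a head
    return len(newheads) > len(remote_heads)

# 'b2' descends from remote head 'b1'; 'c' is an unrelated new head
descends = lambda h, r: (h, r) == ('b2', 'b1')
print(creates_new_heads(['b2', 'c'], ['b1'], descends))   # True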
1577 1577 def push_addchangegroup(self, remote, force, revs):
1578 1578 lock = remote.lock()
1579 1579 try:
1580 1580 ret = self.prepush(remote, force, revs)
1581 1581 if ret[0] is not None:
1582 1582 cg, remote_heads = ret
1583 1583 return remote.addchangegroup(cg, 'push', self.url())
1584 1584 return ret[1]
1585 1585 finally:
1586 1586 lock.release()
1587 1587
1588 1588 def push_unbundle(self, remote, force, revs):
1589 1589 # local repo finds heads on server, finds out what revs it
1590 1590 # must push. once revs transferred, if server finds it has
1591 1591 # different heads (someone else won commit/push race), server
1592 1592 # aborts.
1593 1593
1594 1594 ret = self.prepush(remote, force, revs)
1595 1595 if ret[0] is not None:
1596 1596 cg, remote_heads = ret
1597 1597 if force: remote_heads = ['force']
1598 1598 return remote.unbundle(cg, remote_heads, 'push')
1599 1599 return ret[1]
1600 1600
1601 1601 def changegroupinfo(self, nodes, source):
1602 1602 if self.ui.verbose or source == 'bundle':
1603 1603 self.ui.status(_("%d changesets found\n") % len(nodes))
1604 1604 if self.ui.debugflag:
1605 1605 self.ui.debug(_("list of changesets:\n"))
1606 1606 for node in nodes:
1607 1607 self.ui.debug("%s\n" % hex(node))
1608 1608
1609 1609 def changegroupsubset(self, bases, heads, source, extranodes=None):
1610 1610 """This function generates a changegroup consisting of all the nodes
1611 1611 that are descendants of any of the bases, and ancestors of any of
1612 1612 the heads.
1613 1613
1614 1614 It is fairly complex as determining which filenodes and which
1615 1615 manifest nodes need to be included for the changeset to be complete
1616 1616 is non-trivial.
1617 1617
1618 1618 Another wrinkle is doing the reverse, figuring out which changeset in
1619 1619 the changegroup a particular filenode or manifestnode belongs to.
1620 1620
1621 1621 The caller can specify some nodes that must be included in the
1622 1622 changegroup using the extranodes argument. It should be a dict
1623 1623 where the keys are the filenames (or 1 for the manifest), and the
1624 1624 values are lists of (node, linknode) tuples, where node is a wanted
1625 1625 node and linknode is the changelog node that should be transmitted as
1626 1626 the linkrev.
1627 1627 """
1628 1628
1629 1629 if extranodes is None:
1630 1630 # can we go through the fast path ?
1631 1631 heads.sort()
1632 1632 allheads = self.heads()
1633 1633 allheads.sort()
1634 1634 if heads == allheads:
1635 1635 common = []
1636 1636 # parents of bases are known from both sides
1637 1637 for n in bases:
1638 1638 for p in self.changelog.parents(n):
1639 1639 if p != nullid:
1640 1640 common.append(p)
1641 1641 return self._changegroup(common, source)
1642 1642
1643 1643 self.hook('preoutgoing', throw=True, source=source)
1644 1644
1645 1645 # Set up some initial variables
1646 1646 # Make it easy to refer to self.changelog
1647 1647 cl = self.changelog
1648 1648 # msng is short for missing - compute the list of changesets in this
1649 1649 # changegroup.
1650 1650 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1651 1651 self.changegroupinfo(msng_cl_lst, source)
1652 1652 # Some bases may turn out to be superfluous, and some heads may be
1653 1653 # too. nodesbetween will return the minimal set of bases and heads
1654 1654 # necessary to re-create the changegroup.
1655 1655
1656 1656 # Known heads are the list of heads that it is assumed the recipient
1657 1657 # of this changegroup will know about.
1658 1658 knownheads = {}
1659 1659 # We assume that all parents of bases are known heads.
1660 1660 for n in bases:
1661 1661 for p in cl.parents(n):
1662 1662 if p != nullid:
1663 1663 knownheads[p] = 1
1664 1664 knownheads = knownheads.keys()
1665 1665 if knownheads:
1666 1666 # Now that we know what heads are known, we can compute which
1667 1667 # changesets are known. The recipient must know about all
1668 1668 # changesets required to reach the known heads from the null
1669 1669 # changeset.
1670 1670 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1671 1671 junk = None
1672 1672 # Transform the list into an ersatz set.
1673 1673 has_cl_set = dict.fromkeys(has_cl_set)
1674 1674 else:
1675 1675 # If there were no known heads, the recipient cannot be assumed to
1676 1676 # know about any changesets.
1677 1677 has_cl_set = {}
1678 1678
1679 1679 # Make it easy to refer to self.manifest
1680 1680 mnfst = self.manifest
1681 1681 # We don't know which manifests are missing yet
1682 1682 msng_mnfst_set = {}
1683 1683 # Nor do we know which filenodes are missing.
1684 1684 msng_filenode_set = {}
1685 1685
1686 1686 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1687 1687 junk = None
1688 1688
1689 1689 # A changeset always belongs to itself, so the changenode lookup
1690 1690 # function for a changenode is identity.
1691 1691 def identity(x):
1692 1692 return x
1693 1693
1694 1694 # A function generating function. Sets up an environment for the
1695 1695 # inner function.
1696 1696 def cmp_by_rev_func(revlog):
1697 1697 # Compare two nodes by their revision number in the environment's
1698 1698 # revision history. Since the revision number both represents the
1699 1699 # most efficient order to read the nodes in, and represents a
1700 1700 # topological sorting of the nodes, this function is often useful.
1701 1701 def cmp_by_rev(a, b):
1702 1702 return cmp(revlog.rev(a), revlog.rev(b))
1703 1703 return cmp_by_rev
1704 1704
1705 1705 # If we determine that a particular file or manifest node must be a
1706 1706 # node that the recipient of the changegroup will already have, we can
1707 1707 # also assume the recipient will have all the parents. This function
1708 1708 # prunes them from the set of missing nodes.
1709 1709 def prune_parents(revlog, hasset, msngset):
1710 1710 haslst = hasset.keys()
1711 1711 haslst.sort(cmp_by_rev_func(revlog))
1712 1712 for node in haslst:
1713 1713 parentlst = [p for p in revlog.parents(node) if p != nullid]
1714 1714 while parentlst:
1715 1715 n = parentlst.pop()
1716 1716 if n not in hasset:
1717 1717 hasset[n] = 1
1718 1718 p = [p for p in revlog.parents(n) if p != nullid]
1719 1719 parentlst.extend(p)
1720 1720 for n in hasset:
1721 1721 msngset.pop(n, None)
1722 1722
1723 1723 # This is a function generating function used to set up an environment
1724 1724 # for the inner function to execute in.
1725 1725 def manifest_and_file_collector(changedfileset):
1726 1726 # This is an information gathering function that gathers
1727 1727 # information from each changeset node that goes out as part of
1728 1728 # the changegroup. The information gathered is a list of which
1729 1729 # manifest nodes are potentially required (the recipient may
1730 1730 # already have them) and a total list of all files which were
1731 1731 # changed in any changeset in the changegroup.
1732 1732 #
1733 1733 # We also remember the first changenode each manifest was
1734 1734 # referenced by, so we can later determine which changenode 'owns'
1735 1735 # the manifest.
1736 1736 def collect_manifests_and_files(clnode):
1737 1737 c = cl.read(clnode)
1738 1738 for f in c[3]:
1739 1739 # This is to make sure we only have one instance of each
1740 1740 # filename string for each filename.
1741 1741 changedfileset.setdefault(f, f)
1742 1742 msng_mnfst_set.setdefault(c[0], clnode)
1743 1743 return collect_manifests_and_files
1744 1744
1745 1745 # Figure out which manifest nodes (of the ones we think might be part
1746 1746 # of the changegroup) the recipient must know about and remove them
1747 1747 # from the changegroup.
1748 1748 def prune_manifests():
1749 1749 has_mnfst_set = {}
1750 1750 for n in msng_mnfst_set:
1751 1751 # If a 'missing' manifest thinks it belongs to a changenode
1752 1752 # the recipient is assumed to have, obviously the recipient
1753 1753 # must have that manifest.
1754 1754 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1755 1755 if linknode in has_cl_set:
1756 1756 has_mnfst_set[n] = 1
1757 1757 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1758 1758
1759 1759 # Use the information collected in collect_manifests_and_files to say
1760 1760 # which changenode any manifestnode belongs to.
1761 1761 def lookup_manifest_link(mnfstnode):
1762 1762 return msng_mnfst_set[mnfstnode]
1763 1763
1764 1764 # A function generating function that sets up the initial environment for
1765 1765 # the inner function.
1766 1766 def filenode_collector(changedfiles):
1767 1767 next_rev = [0]
1768 1768 # This gathers information from each manifestnode included in the
1769 1769 # changegroup about which filenodes the manifest node references
1770 1770 # so we can include those in the changegroup too.
1771 1771 #
1772 1772 # It also remembers which changenode each filenode belongs to. It
1773 1773 # does this by assuming that a filenode belongs to the changenode that
1774 1774 # the first manifest referencing it belongs to.
1775 1775 def collect_msng_filenodes(mnfstnode):
1776 1776 r = mnfst.rev(mnfstnode)
1777 1777 if r == next_rev[0]:
1778 1778 # If the last rev we looked at was the one just previous,
1779 1779 # we only need to see a diff.
1780 1780 deltamf = mnfst.readdelta(mnfstnode)
1781 1781 # For each line in the delta
1782 1782 for f, fnode in deltamf.iteritems():
1783 1783 f = changedfiles.get(f, None)
1784 1784 # And if the file is in the list of files we care
1785 1785 # about.
1786 1786 if f is not None:
1787 1787 # Get the changenode this manifest belongs to
1788 1788 clnode = msng_mnfst_set[mnfstnode]
1789 1789 # Create the set of filenodes for the file if
1790 1790 # there isn't one already.
1791 1791 ndset = msng_filenode_set.setdefault(f, {})
1792 1792 # And set the filenode's changelog node to the
1793 1793 # manifest's if it hasn't been set already.
1794 1794 ndset.setdefault(fnode, clnode)
1795 1795 else:
1796 1796 # Otherwise we need a full manifest.
1797 1797 m = mnfst.read(mnfstnode)
1798 1798 # For every file we care about.
1799 1799 for f in changedfiles:
1800 1800 fnode = m.get(f, None)
1801 1801 # If it's in the manifest
1802 1802 if fnode is not None:
1803 1803 # See comments above.
1804 1804 clnode = msng_mnfst_set[mnfstnode]
1805 1805 ndset = msng_filenode_set.setdefault(f, {})
1806 1806 ndset.setdefault(fnode, clnode)
1807 1807 # Remember the revision we hope to see next.
1808 1808 next_rev[0] = r + 1
1809 1809 return collect_msng_filenodes
1810 1810
1811 1811 # We have a list of filenodes we think we need for a file, let's remove
1812 1812 # all those we know the recipient must have.
1813 1813 def prune_filenodes(f, filerevlog):
1814 1814 msngset = msng_filenode_set[f]
1815 1815 hasset = {}
1816 1816 # If a 'missing' filenode thinks it belongs to a changenode we
1817 1817 # assume the recipient must have, then the recipient must have
1818 1818 # that filenode.
1819 1819 for n in msngset:
1820 1820 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1821 1821 if clnode in has_cl_set:
1822 1822 hasset[n] = 1
1823 1823 prune_parents(filerevlog, hasset, msngset)
1824 1824
1825 1825 # A function generating function that sets up a context for the
1826 1826 # inner function.
1827 1827 def lookup_filenode_link_func(fname):
1828 1828 msngset = msng_filenode_set[fname]
1829 1829 # Lookup the changenode the filenode belongs to.
1830 1830 def lookup_filenode_link(fnode):
1831 1831 return msngset[fnode]
1832 1832 return lookup_filenode_link
1833 1833
1834 1834 # Add the nodes that were explicitly requested.
1835 1835 def add_extra_nodes(name, nodes):
1836 1836 if not extranodes or name not in extranodes:
1837 1837 return
1838 1838
1839 1839 for node, linknode in extranodes[name]:
1840 1840 if node not in nodes:
1841 1841 nodes[node] = linknode
1842 1842
1843 1843 # Now that we have all these utility functions to help out and
1844 1844 # logically divide up the task, generate the group.
1845 1845 def gengroup():
1846 1846 # The set of changed files starts empty.
1847 1847 changedfiles = {}
1848 1848 # Create a changenode group generator that will call our functions
1849 1849 # back to lookup the owning changenode and collect information.
1850 1850 group = cl.group(msng_cl_lst, identity,
1851 1851 manifest_and_file_collector(changedfiles))
1852 1852 for chnk in group:
1853 1853 yield chnk
1854 1854
1855 1855 # The list of manifests has been collected by the generator
1856 1856 # calling our functions back.
1857 1857 prune_manifests()
1858 1858 add_extra_nodes(1, msng_mnfst_set)
1859 1859 msng_mnfst_lst = msng_mnfst_set.keys()
1860 1860 # Sort the manifestnodes by revision number.
1861 1861 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1862 1862 # Create a generator for the manifestnodes that calls our lookup
1863 1863 # and data collection functions back.
1864 1864 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1865 1865 filenode_collector(changedfiles))
1866 1866 for chnk in group:
1867 1867 yield chnk
1868 1868
1869 1869 # These are no longer needed, dereference and toss the memory for
1870 1870 # them.
1871 1871 msng_mnfst_lst = None
1872 1872 msng_mnfst_set.clear()
1873 1873
1874 1874 if extranodes:
1875 1875 for fname in extranodes:
1876 1876 if isinstance(fname, int):
1877 1877 continue
1878 1878 msng_filenode_set.setdefault(fname, {})
1879 1879 changedfiles[fname] = 1
1880 1880 # Go through all our files in order sorted by name.
1881 1881 for fname in util.sort(changedfiles):
1882 1882 filerevlog = self.file(fname)
1883 1883 if not len(filerevlog):
1884 1884 raise util.Abort(_("empty or missing revlog for %s") % fname)
1885 1885 # Toss out the filenodes that the recipient isn't really
1886 1886 # missing.
1887 1887 if fname in msng_filenode_set:
1888 1888 prune_filenodes(fname, filerevlog)
1889 1889 add_extra_nodes(fname, msng_filenode_set[fname])
1890 1890 msng_filenode_lst = msng_filenode_set[fname].keys()
1891 1891 else:
1892 1892 msng_filenode_lst = []
1893 1893 # If any filenodes are left, generate the group for them,
1894 1894 # otherwise don't bother.
1895 1895 if len(msng_filenode_lst) > 0:
1896 1896 yield changegroup.chunkheader(len(fname))
1897 1897 yield fname
1898 1898 # Sort the filenodes by their revision #
1899 1899 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1900 1900 # Create a group generator and only pass in a changenode
1901 1901 # lookup function as we need to collect no information
1902 1902 # from filenodes.
1903 1903 group = filerevlog.group(msng_filenode_lst,
1904 1904 lookup_filenode_link_func(fname))
1905 1905 for chnk in group:
1906 1906 yield chnk
1907 1907 if fname in msng_filenode_set:
1908 1908 # Don't need this anymore, toss it to free memory.
1909 1909 del msng_filenode_set[fname]
1910 1910 # Signal that no more groups are left.
1911 1911 yield changegroup.closechunk()
1912 1912
1913 1913 if msng_cl_lst:
1914 1914 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1915 1915
1916 1916 return util.chunkbuffer(gengroup())
1917 1917
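# A hypothetical extranodes argument for changegroupsubset(), shaped as
# the docstring above describes: keys are filenames (or 1 for the
# manifest), values are (node, linknode) pairs. The 20-byte node values
# below are fake placeholders.
extranodes = {
    1: [('\xaa' * 20, '\xcc' * 20)],        # an extra manifest node
    'foo.c': [('\xbb' * 20, '\xcc' * 20)],  # an extra filenode for foo.c
}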
1918 1918 def changegroup(self, basenodes, source):
1919 1919 # to avoid a race we use changegroupsubset() (issue1320)
1920 1920 return self.changegroupsubset(basenodes, self.heads(), source)
1921 1921
1922 1922 def _changegroup(self, common, source):
1923 1923 """Generate a changegroup of all nodes that we have that a recipient
1924 1924 doesn't.
1925 1925
1926 1926 This is much easier than the previous function as we can assume that
1927 1927 the recipient has any changenode we aren't sending them.
1928 1928
1929 1929 common is the set of common nodes between remote and self"""
1930 1930
1931 1931 self.hook('preoutgoing', throw=True, source=source)
1932 1932
1933 1933 cl = self.changelog
1934 1934 nodes = cl.findmissing(common)
1935 1935 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1936 1936 self.changegroupinfo(nodes, source)
1937 1937
1938 1938 def identity(x):
1939 1939 return x
1940 1940
1941 1941 def gennodelst(log):
1942 1942 for r in log:
1943 1943 if log.linkrev(r) in revset:
1944 1944 yield log.node(r)
1945 1945
1946 1946 def changed_file_collector(changedfileset):
1947 1947 def collect_changed_files(clnode):
1948 1948 c = cl.read(clnode)
1949 1949 for fname in c[3]:
1950 1950 changedfileset[fname] = 1
1951 1951 return collect_changed_files
1952 1952
1953 1953 def lookuprevlink_func(revlog):
1954 1954 def lookuprevlink(n):
1955 1955 return cl.node(revlog.linkrev(revlog.rev(n)))
1956 1956 return lookuprevlink
1957 1957
1958 1958 def gengroup():
1959 1959 # construct a list of all changed files
1960 1960 changedfiles = {}
1961 1961
1962 1962 for chnk in cl.group(nodes, identity,
1963 1963 changed_file_collector(changedfiles)):
1964 1964 yield chnk
1965 1965
1966 1966 mnfst = self.manifest
1967 1967 nodeiter = gennodelst(mnfst)
1968 1968 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1969 1969 yield chnk
1970 1970
1971 1971 for fname in util.sort(changedfiles):
1972 1972 filerevlog = self.file(fname)
1973 1973 if not len(filerevlog):
1974 1974 raise util.Abort(_("empty or missing revlog for %s") % fname)
1975 1975 nodeiter = gennodelst(filerevlog)
1976 1976 nodeiter = list(nodeiter)
1977 1977 if nodeiter:
1978 1978 yield changegroup.chunkheader(len(fname))
1979 1979 yield fname
1980 1980 lookup = lookuprevlink_func(filerevlog)
1981 1981 for chnk in filerevlog.group(nodeiter, lookup):
1982 1982 yield chnk
1983 1983
1984 1984 yield changegroup.closechunk()
1985 1985
1986 1986 if nodes:
1987 1987 self.hook('outgoing', node=hex(nodes[0]), source=source)
1988 1988
1989 1989 return util.chunkbuffer(gengroup())
1990 1990
1991 1991 def addchangegroup(self, source, srctype, url, emptyok=False):
1992 1992 """add changegroup to repo.
1993 1993
1994 1994 return values:
1995 1995 - nothing changed or no source: 0
1996 1996 - more heads than before: 1+added heads (2..n)
1997 1997 - fewer heads than before: -1-removed heads (-2..-n)
1998 1998 - number of heads stays the same: 1
1999 1999 """
2000 2000 def csmap(x):
2001 2001 self.ui.debug(_("add changeset %s\n") % short(x))
2002 2002 return len(cl)
2003 2003
2004 2004 def revmap(x):
2005 2005 return cl.rev(x)
2006 2006
2007 2007 if not source:
2008 2008 return 0
2009 2009
2010 2010 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2011 2011
2012 2012 changesets = files = revisions = 0
2013 2013
2014 2014 # write changelog data to temp files so concurrent readers will not see
2015 2015 # an inconsistent view
2016 2016 cl = self.changelog
2017 2017 cl.delayupdate()
2018 2018 oldheads = len(cl.heads())
2019 2019
2020 2020 tr = self.transaction()
2021 2021 try:
2022 2022 trp = weakref.proxy(tr)
2023 2023 # pull off the changeset group
2024 2024 self.ui.status(_("adding changesets\n"))
2025 2025 cor = len(cl) - 1
2026 2026 chunkiter = changegroup.chunkiter(source)
2027 2027 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2028 2028 raise util.Abort(_("received changelog group is empty"))
2029 2029 cnr = len(cl) - 1
2030 2030 changesets = cnr - cor
2031 2031
2032 2032 # pull off the manifest group
2033 2033 self.ui.status(_("adding manifests\n"))
2034 2034 chunkiter = changegroup.chunkiter(source)
2035 2035 # no need to check for empty manifest group here:
2036 2036 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2037 2037 # no new manifest will be created and the manifest group will
2038 2038 # be empty during the pull
2039 2039 self.manifest.addgroup(chunkiter, revmap, trp)
2040 2040
2041 2041 # process the files
2042 2042 self.ui.status(_("adding file changes\n"))
2043 2043 while 1:
2044 2044 f = changegroup.getchunk(source)
2045 2045 if not f:
2046 2046 break
2047 2047 self.ui.debug(_("adding %s revisions\n") % f)
2048 2048 fl = self.file(f)
2049 2049 o = len(fl)
2050 2050 chunkiter = changegroup.chunkiter(source)
2051 2051 if fl.addgroup(chunkiter, revmap, trp) is None:
2052 2052 raise util.Abort(_("received file revlog group is empty"))
2053 2053 revisions += len(fl) - o
2054 2054 files += 1
2055 2055
2056 2056 newheads = len(self.changelog.heads())
2057 2057 heads = ""
2058 2058 if oldheads and newheads != oldheads:
2059 2059 heads = _(" (%+d heads)") % (newheads - oldheads)
2060 2060
2061 2061 self.ui.status(_("added %d changesets"
2062 2062 " with %d changes to %d files%s\n")
2063 2063 % (changesets, revisions, files, heads))
2064 2064
2065 2065 if changesets > 0:
2066 2066 p = lambda: self.changelog.writepending() and self.root or ""
2067 2067 self.hook('pretxnchangegroup', throw=True,
2068 2068 node=hex(self.changelog.node(cor+1)), source=srctype,
2069 2069 url=url, pending=p)
2070 2070
2071 2071 # make changelog see real files again
2072 2072 cl.finalize(trp)
2073 2073
2074 2074 tr.close()
2075 2075 finally:
2076 2076 del tr
2077 2077
2078 2078 if changesets > 0:
2079 2079 # forcefully update the on-disk branch cache
2080 2080 self.ui.debug(_("updating the branch cache\n"))
2081 2081 self.branchtags()
2082 2082 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2083 2083 source=srctype, url=url)
2084 2084
2085 2085 for i in xrange(cor + 1, cnr + 1):
2086 2086 self.hook("incoming", node=hex(self.changelog.node(i)),
2087 2087 source=srctype, url=url)
2088 2088
2089 2089 # never return 0 here:
2090 2090 if newheads < oldheads:
2091 2091 return newheads - oldheads - 1
2092 2092 else:
2093 2093 return newheads - oldheads + 1
2094 2094
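# A small sketch of how a caller can decode the return value described
# in the addchangegroup docstring above: the head delta is shifted away
# from zero so that 0 keeps meaning "nothing changed at all".
def decode_addchangegroup(ret):
    if ret == 0:
        return 'no changes'
    if ret < 0:
        return '%d head(s) removed' % (-ret - 1)
    return '%d head(s) added' % (ret - 1)

print(decode_addchangegroup(1))    # '0 head(s) added' (head count kept)
print(decode_addchangegroup(3))    # '2 head(s) added'
print(decode_addchangegroup(-2))   # '1 head(s) removed'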
2095 2095
2096 2096 def stream_in(self, remote):
2097 2097 fp = remote.stream_out()
2098 2098 l = fp.readline()
2099 2099 try:
2100 2100 resp = int(l)
2101 2101 except ValueError:
2102 2102 raise error.ResponseError(
2103 2103 _('Unexpected response from remote server:'), l)
2104 2104 if resp == 1:
2105 2105 raise util.Abort(_('operation forbidden by server'))
2106 2106 elif resp == 2:
2107 2107 raise util.Abort(_('locking the remote repository failed'))
2108 2108 elif resp != 0:
2109 2109 raise util.Abort(_('the server sent an unknown error code'))
2110 2110 self.ui.status(_('streaming all changes\n'))
2111 2111 l = fp.readline()
2112 2112 try:
2113 2113 total_files, total_bytes = map(int, l.split(' ', 1))
2114 2114 except (ValueError, TypeError):
2115 2115 raise error.ResponseError(
2116 2116 _('Unexpected response from remote server:'), l)
2117 2117 self.ui.status(_('%d files to transfer, %s of data\n') %
2118 2118 (total_files, util.bytecount(total_bytes)))
2119 2119 start = time.time()
2120 2120 for i in xrange(total_files):
2121 2121 # XXX doesn't support '\n' or '\r' in filenames
2122 2122 l = fp.readline()
2123 2123 try:
2124 2124 name, size = l.split('\0', 1)
2125 2125 size = int(size)
2126 2126 except (ValueError, TypeError):
2127 2127 raise error.ResponseError(
2128 2128 _('Unexpected response from remote server:'), l)
2129 2129 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2130 2130 ofp = self.sopener(name, 'w')
2131 2131 for chunk in util.filechunkiter(fp, limit=size):
2132 2132 ofp.write(chunk)
2133 2133 ofp.close()
2134 2134 elapsed = time.time() - start
2135 2135 if elapsed <= 0:
2136 2136 elapsed = 0.001
2137 2137 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2138 2138 (util.bytecount(total_bytes), elapsed,
2139 2139 util.bytecount(total_bytes / elapsed)))
2140 2140 self.invalidate()
2141 2141 return len(self.heads()) + 1
2142 2142
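# A standalone sketch of the stream_out wire format parsed above: a
# status line, a "<files> <bytes>" line, then for each file a
# "<name>\0<size>" header followed by exactly <size> bytes of data.
# The payload below is made up for illustration.
from StringIO import StringIO          # io.StringIO on Python 3

fp = StringIO('0\n2 13\ndata/a.i\x005\nAAAAAdata/b.i\x008\nBBBBBBBB')
assert int(fp.readline()) == 0                   # 0 means "ok"
total_files, total_bytes = map(int, fp.readline().split(' ', 1))
for _ in range(total_files):
    name, size = fp.readline().split('\0', 1)
    data = fp.read(int(size))
    print('%s: %d bytes' % (name, len(data)))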
2143 2143 def clone(self, remote, heads=[], stream=False):
2144 2144 '''clone remote repository.
2145 2145
2146 2146 keyword arguments:
2147 2147 heads: list of revs to clone (forces use of pull)
2148 2148 stream: use streaming clone if possible'''
2149 2149
2150 2150 # now, all clients that can request uncompressed clones can
2151 2151 # read repo formats supported by all servers that can serve
2152 2152 # them.
2153 2153
2154 2154 # if revlog format changes, client will have to check version
2155 2155 # and format flags on "stream" capability, and use
2156 2156 # uncompressed only if compatible.
2157 2157
2158 2158 if stream and not heads and remote.capable('stream'):
2159 2159 return self.stream_in(remote)
2160 2160 return self.pull(remote, heads)
2161 2161
2162 2162 # used to avoid circular references so destructors work
2163 2163 def aftertrans(files):
2164 2164 renamefiles = [tuple(t) for t in files]
2165 2165 def a():
2166 2166 for src, dest in renamefiles:
2167 2167 util.rename(src, dest)
2168 2168 return a
2169 2169
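# A small usage sketch for aftertrans(): the callback closes over plain
# (src, dest) tuples rather than the transaction object itself, which
# keeps the transaction free of circular references. File names are fake.
onclose = aftertrans([('journal', 'undo'), ('journal.branch', 'undo.branch')])
# calling onclose() would now rename journal -> undo, etc.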
2170 2170 def instance(ui, path, create):
2171 2171 return localrepository(ui, util.drop_scheme('file', path), create)
2172 2172
2173 2173 def islocal(path):
2174 2174 return True
@@ -1,1495 +1,1491 b''
1 1 """
2 2 util.py - Mercurial utility functions and platform specific implementations
3 3
4 4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5 5 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
7 7
8 8 This software may be used and distributed according to the terms
9 9 of the GNU General Public License, incorporated herein by reference.
10 10
11 11 This contains helper routines that are independent of the SCM core and hide
12 12 platform-specific details from the core.
13 13 """
14 14
15 15 from i18n import _
16 16 import cStringIO, errno, re, shutil, sys, tempfile, traceback, error
17 17 import os, stat, threading, time, calendar, ConfigParser, glob, osutil
18 18 import imp
19 19
20 20 # Python compatibility
21 21
22 22 _md5 = None
23 23 def md5(s):
24 24 global _md5
25 25 if _md5 is None:
26 26 try:
27 27 import hashlib
28 28 _md5 = hashlib.md5
29 29 except ImportError:
30 30 import md5
31 31 _md5 = md5.md5
32 32 return _md5(s)
33 33
34 34 _sha1 = None
35 35 def sha1(s):
36 36 global _sha1
37 37 if _sha1 is None:
38 38 try:
39 39 import hashlib
40 40 _sha1 = hashlib.sha1
41 41 except ImportError:
42 42 import sha
43 43 _sha1 = sha.sha
44 44 return _sha1(s)
45 45
46 46 try:
47 47 import subprocess
48 48 subprocess.Popen # trigger ImportError early
49 49 closefds = os.name == 'posix'
50 50 def popen2(cmd, mode='t', bufsize=-1):
51 51 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
52 52 close_fds=closefds,
53 53 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
54 54 return p.stdin, p.stdout
55 55 def popen3(cmd, mode='t', bufsize=-1):
56 56 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
57 57 close_fds=closefds,
58 58 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
59 59 stderr=subprocess.PIPE)
60 60 return p.stdin, p.stdout, p.stderr
61 61 def Popen3(cmd, capturestderr=False, bufsize=-1):
62 62 stderr = capturestderr and subprocess.PIPE or None
63 63 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
64 64 close_fds=closefds,
65 65 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
66 66 stderr=stderr)
67 67 p.fromchild = p.stdout
68 68 p.tochild = p.stdin
69 69 p.childerr = p.stderr
70 70 return p
71 71 except ImportError:
72 72 subprocess = None
73 73 from popen2 import Popen3
74 74 popen2 = os.popen2
75 75 popen3 = os.popen3
76 76
77 77
78 78 def version():
79 79 """Return version information if available."""
80 80 try:
81 81 import __version__
82 82 return __version__.version
83 83 except ImportError:
84 84 return 'unknown'
85 85
86 86 # used by parsedate
87 87 defaultdateformats = (
88 88 '%Y-%m-%d %H:%M:%S',
89 89 '%Y-%m-%d %I:%M:%S%p',
90 90 '%Y-%m-%d %H:%M',
91 91 '%Y-%m-%d %I:%M%p',
92 92 '%Y-%m-%d',
93 93 '%m-%d',
94 94 '%m/%d',
95 95 '%m/%d/%y',
96 96 '%m/%d/%Y',
97 97 '%a %b %d %H:%M:%S %Y',
98 98 '%a %b %d %I:%M:%S%p %Y',
99 99 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
100 100 '%b %d %H:%M:%S %Y',
101 101 '%b %d %I:%M:%S%p %Y',
102 102 '%b %d %H:%M:%S',
103 103 '%b %d %I:%M:%S%p',
104 104 '%b %d %H:%M',
105 105 '%b %d %I:%M%p',
106 106 '%b %d %Y',
107 107 '%b %d',
108 108 '%H:%M:%S',
109 109 '%I:%M:%S%p',
110 110 '%H:%M',
111 111 '%I:%M%p',
112 112 )
113 113
114 114 extendeddateformats = defaultdateformats + (
115 115 "%Y",
116 116 "%Y-%m",
117 117 "%b",
118 118 "%b %Y",
119 119 )
120 120
121 121 # differences from SafeConfigParser:
122 122 # - case-sensitive keys
123 123 # - allows values that are not strings (this means that you may not
124 124 # be able to save the configuration to a file)
125 125 class configparser(ConfigParser.SafeConfigParser):
126 126 def optionxform(self, optionstr):
127 127 return optionstr
128 128
129 129 def set(self, section, option, value):
130 130 return ConfigParser.ConfigParser.set(self, section, option, value)
131 131
132 132 def _interpolate(self, section, option, rawval, vars):
133 133 if not isinstance(rawval, basestring):
134 134 return rawval
135 135 return ConfigParser.SafeConfigParser._interpolate(self, section,
136 136 option, rawval, vars)
137 137
138 138 def cachefunc(func):
139 139 '''cache the result of function calls'''
140 140 # XXX doesn't handle keyword args
141 141 cache = {}
142 142 if func.func_code.co_argcount == 1:
143 143 # we gain a small amount of time because
144 144 # we don't need to pack/unpack the list
145 145 def f(arg):
146 146 if arg not in cache:
147 147 cache[arg] = func(arg)
148 148 return cache[arg]
149 149 else:
150 150 def f(*args):
151 151 if args not in cache:
152 152 cache[args] = func(*args)
153 153 return cache[args]
154 154
155 155 return f
156 156
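# A small usage sketch for cachefunc(): repeated calls with the same
# argument hit the cache instead of recomputing.
calls = []
def slow_double(x):
    calls.append(x)
    return x * 2

fast_double = cachefunc(slow_double)
assert fast_double(21) == 42
assert fast_double(21) == 42
assert calls == [21]          # the underlying function ran only once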
157 157 def pipefilter(s, cmd):
158 158 '''filter string S through command CMD, returning its output'''
159 159 (pin, pout) = popen2(cmd, 'b')
160 160 def writer():
161 161 try:
162 162 pin.write(s)
163 163 pin.close()
164 164 except IOError, inst:
165 165 if inst.errno != errno.EPIPE:
166 166 raise
167 167
168 168 # we should use select instead on UNIX, but this will work on most
169 169 # systems, including Windows
170 170 w = threading.Thread(target=writer)
171 171 w.start()
172 172 f = pout.read()
173 173 pout.close()
174 174 w.join()
175 175 return f
176 176
177 177 def tempfilter(s, cmd):
178 178 '''filter string S through a pair of temporary files with CMD.
179 179 CMD is used as a template to create the real command to be run,
180 180 with the strings INFILE and OUTFILE replaced by the real names of
181 181 the temporary files generated.'''
182 182 inname, outname = None, None
183 183 try:
184 184 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
185 185 fp = os.fdopen(infd, 'wb')
186 186 fp.write(s)
187 187 fp.close()
188 188 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
189 189 os.close(outfd)
190 190 cmd = cmd.replace('INFILE', inname)
191 191 cmd = cmd.replace('OUTFILE', outname)
192 192 code = os.system(cmd)
193 193 if sys.platform == 'OpenVMS' and code & 1:
194 194 code = 0
195 195 if code: raise Abort(_("command '%s' failed: %s") %
196 196 (cmd, explain_exit(code)))
197 197 return open(outname, 'rb').read()
198 198 finally:
199 199 try:
200 200 if inname: os.unlink(inname)
201 201 except: pass
202 202 try:
203 203 if outname: os.unlink(outname)
204 204 except: pass
205 205
206 206 filtertable = {
207 207 'tempfile:': tempfilter,
208 208 'pipe:': pipefilter,
209 209 }
210 210
211 211 def filter(s, cmd):
212 212 "filter a string through a command that transforms its input to its output"
213 213 for name, fn in filtertable.iteritems():
214 214 if cmd.startswith(name):
215 215 return fn(s, cmd[len(name):].lstrip())
216 216 return pipefilter(s, cmd)
217 217
218 218 def binary(s):
219 219 """return true if a string is binary data"""
220 220 return bool(s and '\0' in s)
221 221
222 def unique(g):
223 """return the uniq elements of iterable g"""
224 return dict.fromkeys(g).keys()
225
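# With this changeset, callers use the built-in set in place of the
# removed unique() above; a minimal before/after sketch with made-up data:
tags = ['tip', 'v1.0', 'tip']
assert sorted(set(tags)) == ['tip', 'v1.0']   # was: sorted(unique(tags))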
226 222 def sort(l):
227 223 if not isinstance(l, list):
228 224 l = list(l)
229 225 l.sort()
230 226 return l
231 227
232 228 def increasingchunks(source, min=1024, max=65536):
233 229 '''return no less than min bytes per chunk while data remains,
234 230 doubling min after each chunk until it reaches max'''
235 231 def log2(x):
236 232 if not x:
237 233 return 0
238 234 i = 0
239 235 while x:
240 236 x >>= 1
241 237 i += 1
242 238 return i - 1
243 239
244 240 buf = []
245 241 blen = 0
246 242 for chunk in source:
247 243 buf.append(chunk)
248 244 blen += len(chunk)
249 245 if blen >= min:
250 246 if min < max:
251 247 min = min << 1
252 248 nmin = 1 << log2(blen)
253 249 if nmin > min:
254 250 min = nmin
255 251 if min > max:
256 252 min = max
257 253 yield ''.join(buf)
258 254 blen = 0
259 255 buf = []
260 256 if buf:
261 257 yield ''.join(buf)
262 258
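# A small usage sketch for increasingchunks(): many small input chunks
# are coalesced into output chunks whose minimum size doubles as it goes.
data = ['x' * 256] * 64                  # 64 chunks of 256 bytes each
sizes = [len(c) for c in increasingchunks(data, min=1024, max=4096)]
assert sizes == [1024, 2048, 4096, 4096, 4096, 1024]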
263 259 Abort = error.Abort
264 260
265 261 def always(fn): return True
266 262 def never(fn): return False
267 263
268 264 def patkind(name, default):
269 265 """Split a string into an optional pattern kind prefix and the
270 266 actual pattern."""
271 267 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
272 268 if name.startswith(prefix + ':'): return name.split(':', 1)
273 269 return default, name
274 270
275 271 def globre(pat, head='^', tail='$'):
276 272 "convert a glob pattern into a regexp"
277 273 i, n = 0, len(pat)
278 274 res = ''
279 275 group = 0
280 276 def peek(): return i < n and pat[i]
281 277 while i < n:
282 278 c = pat[i]
283 279 i = i+1
284 280 if c == '*':
285 281 if peek() == '*':
286 282 i += 1
287 283 res += '.*'
288 284 else:
289 285 res += '[^/]*'
290 286 elif c == '?':
291 287 res += '.'
292 288 elif c == '[':
293 289 j = i
294 290 if j < n and pat[j] in '!]':
295 291 j += 1
296 292 while j < n and pat[j] != ']':
297 293 j += 1
298 294 if j >= n:
299 295 res += '\\['
300 296 else:
301 297 stuff = pat[i:j].replace('\\','\\\\')
302 298 i = j + 1
303 299 if stuff[0] == '!':
304 300 stuff = '^' + stuff[1:]
305 301 elif stuff[0] == '^':
306 302 stuff = '\\' + stuff
307 303 res = '%s[%s]' % (res, stuff)
308 304 elif c == '{':
309 305 group += 1
310 306 res += '(?:'
311 307 elif c == '}' and group:
312 308 res += ')'
313 309 group -= 1
314 310 elif c == ',' and group:
315 311 res += '|'
316 312 elif c == '\\':
317 313 p = peek()
318 314 if p:
319 315 i += 1
320 316 res += re.escape(p)
321 317 else:
322 318 res += re.escape(c)
323 319 else:
324 320 res += re.escape(c)
325 321 return head + res + tail
326 322
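# A small usage sketch for globre(): glob syntax becomes an anchored
# regular expression; '**' may cross directory separators, '*' may not.
import re
assert re.match(globre('src/**/*.c'), 'src/lib/util.c')
assert not re.match(globre('src/*.c'), 'src/lib/util.c')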
327 323 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
328 324
329 325 def pathto(root, n1, n2):
330 326 '''return the relative path from one place to another.
331 327 root should use os.sep to separate directories
332 328 n1 should use os.sep to separate directories
333 329 n2 should use "/" to separate directories
334 330 returns an os.sep-separated path.
335 331
336 332 If n1 is a relative path, it's assumed it's
337 333 relative to root.
338 334 n2 should always be relative to root.
339 335 '''
340 336 if not n1: return localpath(n2)
341 337 if os.path.isabs(n1):
342 338 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
343 339 return os.path.join(root, localpath(n2))
344 340 n2 = '/'.join((pconvert(root), n2))
345 341 a, b = splitpath(n1), n2.split('/')
346 342 a.reverse()
347 343 b.reverse()
348 344 while a and b and a[-1] == b[-1]:
349 345 a.pop()
350 346 b.pop()
351 347 b.reverse()
352 348 return os.sep.join((['..'] * len(a)) + b) or '.'
353 349
354 350 def canonpath(root, cwd, myname):
355 351 """return the canonical path of myname, given cwd and root"""
356 352 if root == os.sep:
357 353 rootsep = os.sep
358 354 elif endswithsep(root):
359 355 rootsep = root
360 356 else:
361 357 rootsep = root + os.sep
362 358 name = myname
363 359 if not os.path.isabs(name):
364 360 name = os.path.join(root, cwd, name)
365 361 name = os.path.normpath(name)
366 362 audit_path = path_auditor(root)
367 363 if name != rootsep and name.startswith(rootsep):
368 364 name = name[len(rootsep):]
369 365 audit_path(name)
370 366 return pconvert(name)
371 367 elif name == root:
372 368 return ''
373 369 else:
374 370 # Determine whether `name' is in the hierarchy at or beneath `root',
375 371 # by iterating name=dirname(name) until that causes no change (can't
376 372 # check name == '/', because that doesn't work on windows). For each
377 373 # `name', compare dev/inode numbers. If they match, the list `rel'
378 374 # holds the reversed list of components making up the relative file
379 375 # name we want.
380 376 root_st = os.stat(root)
381 377 rel = []
382 378 while True:
383 379 try:
384 380 name_st = os.stat(name)
385 381 except OSError:
386 382 break
387 383 if samestat(name_st, root_st):
388 384 if not rel:
389 385 # name was actually the same as root (maybe a symlink)
390 386 return ''
391 387 rel.reverse()
392 388 name = os.path.join(*rel)
393 389 audit_path(name)
394 390 return pconvert(name)
395 391 dirname, basename = os.path.split(name)
396 392 rel.append(basename)
397 393 if dirname == name:
398 394 break
399 395 name = dirname
400 396
401 397 raise Abort('%s not under root' % myname)
402 398
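A hedged usage sketch (assumes a POSIX system where '/repo' exists and '/etc/passwd' lies outside it; all paths are illustrative):

    from mercurial import util

    util.canonpath('/repo', '/repo/src', 'main.c')     # -> 'src/main.c'
    util.canonpath('/repo', '/repo', 'src/../README')  # -> 'README'
    util.canonpath('/repo', '/repo', '/etc/passwd')    # raises Abort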
403 399 def matcher(canonroot, cwd='', names=[], inc=[], exc=[], src=None, dflt_pat='glob'):
404 400 """build a function to match a set of file patterns
405 401
406 402 arguments:
407 403 canonroot - the canonical root of the tree you're matching against
408 404 cwd - the current working directory, if relevant
409 405 names - patterns to find
410 406 inc - patterns to include
411 407 exc - patterns to exclude
412 408 dflt_pat - if a pattern in names has no explicit type, assume this one
413 409 src - where these patterns came from (e.g. .hgignore)
414 410
415 411 a pattern is one of:
416 412 'glob:<glob>' - a glob relative to cwd
417 413 're:<regexp>' - a regular expression
418 414 'path:<path>' - a path relative to canonroot
419 415 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
420 416 'relpath:<path>' - a path relative to cwd
421 417 'relre:<regexp>' - a regexp that doesn't have to match the start of a name
422 418 '<something>' - one of the cases above, selected by the dflt_pat argument
423 419
424 420 returns:
425 421 a 3-tuple containing
426 422 - list of roots (places where one should start a recursive walk of the fs);
427 423 this often matches the explicit non-pattern names passed in, but also
428 424 includes the initial part of glob: patterns that has no glob characters
429 425 - a bool match(filename) function
430 426 - a bool indicating if any patterns were passed in
431 427 """
432 428
433 429 # a common case: no patterns at all
434 430 if not names and not inc and not exc:
435 431 return [], always, False
436 432
437 433 def contains_glob(name):
438 434 for c in name:
439 435 if c in _globchars: return True
440 436 return False
441 437
442 438 def regex(kind, name, tail):
443 439 '''convert a pattern into a regular expression'''
444 440 if not name:
445 441 return ''
446 442 if kind == 're':
447 443 return name
448 444 elif kind == 'path':
449 445 return '^' + re.escape(name) + '(?:/|$)'
450 446 elif kind == 'relglob':
451 447 return globre(name, '(?:|.*/)', tail)
452 448 elif kind == 'relpath':
453 449 return re.escape(name) + '(?:/|$)'
454 450 elif kind == 'relre':
455 451 if name.startswith('^'):
456 452 return name
457 453 return '.*' + name
458 454 return globre(name, '', tail)
459 455
460 456 def matchfn(pats, tail):
461 457 """build a matching function from a set of patterns"""
462 458 if not pats:
463 459 return
464 460 try:
465 461 pat = '(?:%s)' % '|'.join([regex(k, p, tail) for (k, p) in pats])
466 462 if len(pat) > 20000:
467 463 raise OverflowError()
468 464 return re.compile(pat).match
469 465 except OverflowError:
470 466 # We're using a Python with a tiny regex engine and we
471 467 # made it explode, so we'll divide the pattern list in two
472 468 # until it works
473 469 l = len(pats)
474 470 if l < 2:
475 471 raise
476 472 a, b = matchfn(pats[:l//2], tail), matchfn(pats[l//2:], tail)
477 473 return lambda s: a(s) or b(s)
478 474 except re.error:
479 475 for k, p in pats:
480 476 try:
481 477 re.compile('(?:%s)' % regex(k, p, tail))
482 478 except re.error:
483 479 if src:
484 480 raise Abort("%s: invalid pattern (%s): %s" %
485 481 (src, k, p))
486 482 else:
487 483 raise Abort("invalid pattern (%s): %s" % (k, p))
488 484 raise Abort("invalid pattern")
489 485
490 486 def globprefix(pat):
491 487 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
492 488 root = []
493 489 for p in pat.split('/'):
494 490 if contains_glob(p): break
495 491 root.append(p)
496 492 return '/'.join(root) or '.'
497 493
498 494 def normalizepats(names, default):
499 495 pats = []
500 496 roots = []
501 497 anypats = False
502 498 for kind, name in [patkind(p, default) for p in names]:
503 499 if kind in ('glob', 'relpath'):
504 500 name = canonpath(canonroot, cwd, name)
505 501 elif kind in ('relglob', 'path'):
506 502 name = normpath(name)
507 503
508 504 pats.append((kind, name))
509 505
510 506 if kind in ('glob', 're', 'relglob', 'relre'):
511 507 anypats = True
512 508
513 509 if kind == 'glob':
514 510 root = globprefix(name)
515 511 roots.append(root)
516 512 elif kind in ('relpath', 'path'):
517 513 roots.append(name or '.')
518 514 elif kind == 'relglob':
519 515 roots.append('.')
520 516 return roots, pats, anypats
521 517
522 518 roots, pats, anypats = normalizepats(names, dflt_pat)
523 519
524 520 patmatch = matchfn(pats, '$') or always
525 521 incmatch = always
526 522 if inc:
527 523 dummy, inckinds, dummy = normalizepats(inc, 'glob')
528 524 incmatch = matchfn(inckinds, '(?:/|$)')
529 525 excmatch = never
530 526 if exc:
531 527 dummy, exckinds, dummy = normalizepats(exc, 'glob')
532 528 excmatch = matchfn(exckinds, '(?:/|$)')
533 529
534 530 if not names and inc and not exc:
535 531 # common case: hgignore patterns
536 532 match = incmatch
537 533 else:
538 534 match = lambda fn: incmatch(fn) and not excmatch(fn) and patmatch(fn)
539 535
540 536 return (roots, match, (inc or exc or anypats) and True)
541 537
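Putting the pieces together, a minimal sketch of matcher in use (the root and patterns are illustrative):

    from mercurial import util

    roots, match, anypats = util.matcher('/repo', names=['glob:src/*.c'])
    roots                       # ['src'], where a walk should start
    bool(match('src/main.c'))   # True
    bool(match('src/sub/x.c'))  # False: '*' does not cross '/'
    anypats                     # True, since a glob pattern was given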
542 538 _hgexecutable = None
543 539
544 540 def main_is_frozen():
545 541 """return True if we are a frozen executable.
546 542
547 543 The code supports py2exe (most common, Windows only) and tools/freeze
548 544 (portable, not much used).
549 545 """
550 546 return (hasattr(sys, "frozen") or # new py2exe
551 547 hasattr(sys, "importers") or # old py2exe
552 548 imp.is_frozen("__main__")) # tools/freeze
553 549
554 550 def hgexecutable():
555 551 """return location of the 'hg' executable.
556 552
557 553 Defaults to $HG or 'hg' in the search path.
558 554 """
559 555 if _hgexecutable is None:
560 556 hg = os.environ.get('HG')
561 557 if hg:
562 558 set_hgexecutable(hg)
563 559 elif main_is_frozen():
564 560 set_hgexecutable(sys.executable)
565 561 else:
566 562 set_hgexecutable(find_exe('hg') or 'hg')
567 563 return _hgexecutable
568 564
569 565 def set_hgexecutable(path):
570 566 """set location of the 'hg' executable"""
571 567 global _hgexecutable
572 568 _hgexecutable = path
573 569
574 570 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
575 571 '''enhanced shell command execution.
576 572 run with environment maybe modified, maybe in different dir.
577 573
578 574 if the command fails and onerr is None, return the exit status. if
579 575 onerr is a ui object, print an error message and return the status;
580 576 otherwise raise onerr with the error message as an exception.'''
581 577 def py2shell(val):
582 578 'convert python object into string that is useful to shell'
583 579 if val in (None, False):
584 580 return '0'
585 581 if val == True:
586 582 return '1'
587 583 return str(val)
588 584 oldenv = {}
589 585 for k in environ:
590 586 oldenv[k] = os.environ.get(k)
591 587 if cwd is not None:
592 588 oldcwd = os.getcwd()
593 589 origcmd = cmd
594 590 if os.name == 'nt':
595 591 cmd = '"%s"' % cmd
596 592 try:
597 593 for k, v in environ.iteritems():
598 594 os.environ[k] = py2shell(v)
599 595 os.environ['HG'] = hgexecutable()
600 596 if cwd is not None and oldcwd != cwd:
601 597 os.chdir(cwd)
602 598 rc = os.system(cmd)
603 599 if sys.platform == 'OpenVMS' and rc & 1:
604 600 rc = 0
605 601 if rc and onerr:
606 602 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
607 603 explain_exit(rc)[0])
608 604 if errprefix:
609 605 errmsg = '%s: %s' % (errprefix, errmsg)
610 606 try:
611 607 onerr.warn(errmsg + '\n')
612 608 except AttributeError:
613 609 raise onerr(errmsg)
614 610 return rc
615 611 finally:
616 612 for k, v in oldenv.iteritems():
617 613 if v is None:
618 614 del os.environ[k]
619 615 else:
620 616 os.environ[k] = v
621 617 if cwd is not None and oldcwd != cwd:
622 618 os.chdir(oldcwd)
623 619
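A brief usage sketch (the command, directory and environment values are illustrative; onerr=util.Abort mirrors how callers abort on failure):

    from mercurial import util

    rc = util.system('ls src', cwd='/repo', environ={'LANG': 'C'})
    # or turn a non-zero exit status into an Abort with a prefix:
    util.system('make', onerr=util.Abort, errprefix='build failed')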
624 620 def checksignature(func):
625 621 '''wrap a function with code to check for calling errors'''
626 622 def check(*args, **kwargs):
627 623 try:
628 624 return func(*args, **kwargs)
629 625 except TypeError:
630 626 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
631 627 raise error.SignatureError
632 628 raise
633 629
634 630 return check
635 631
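A small sketch of the wrapper's effect (the function name is illustrative):

    from mercurial import util

    def add(a, b):
        return a + b

    checked = util.checksignature(add)
    checked(1, 2)  # -> 3
    checked(1)     # raises error.SignatureError: the TypeError came from
                   # the call itself, not from inside add()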
636 632 # os.path.lexists is not available on python2.3
637 633 def lexists(filename):
638 634 "test whether a file with this name exists. does not follow symlinks"
639 635 try:
640 636 os.lstat(filename)
641 637 except:
642 638 return False
643 639 return True
644 640
645 641 def rename(src, dst):
646 642 """forcibly rename a file"""
647 643 try:
648 644 os.rename(src, dst)
649 645 except OSError, err: # FIXME: check err (EEXIST ?)
650 646 # on windows, rename to existing file is not allowed, so we
651 647 # must delete destination first. but if file is open, unlink
652 648 # schedules it for delete but does not delete it. rename
653 649 # happens immediately even for open files, so we rename
654 650 # destination to a temporary name, then delete that. then
655 651 # rename is safe to do.
656 652 temp = dst + "-force-rename"
657 653 os.rename(dst, temp)
658 654 os.unlink(temp)
659 655 os.rename(src, dst)
660 656
661 657 def unlink(f):
662 658 """unlink and remove the directory if it is empty"""
663 659 os.unlink(f)
664 660 # try removing directories that might now be empty
665 661 try:
666 662 os.removedirs(os.path.dirname(f))
667 663 except OSError:
668 664 pass
669 665
670 666 def copyfile(src, dest):
671 667 "copy a file, preserving mode and atime/mtime"
672 668 if os.path.islink(src):
673 669 try:
674 670 os.unlink(dest)
675 671 except:
676 672 pass
677 673 os.symlink(os.readlink(src), dest)
678 674 else:
679 675 try:
680 676 shutil.copyfile(src, dest)
681 677 shutil.copystat(src, dest)
682 678 except shutil.Error, inst:
683 679 raise Abort(str(inst))
684 680
685 681 def copyfiles(src, dst, hardlink=None):
686 682 """Copy a directory tree using hardlinks if possible"""
687 683
688 684 if hardlink is None:
689 685 hardlink = (os.stat(src).st_dev ==
690 686 os.stat(os.path.dirname(dst)).st_dev)
691 687
692 688 if os.path.isdir(src):
693 689 os.mkdir(dst)
694 690 for name, kind in osutil.listdir(src):
695 691 srcname = os.path.join(src, name)
696 692 dstname = os.path.join(dst, name)
697 693 copyfiles(srcname, dstname, hardlink)
698 694 else:
699 695 if hardlink:
700 696 try:
701 697 os_link(src, dst)
702 698 except (IOError, OSError):
703 699 hardlink = False
704 700 shutil.copy(src, dst)
705 701 else:
706 702 shutil.copy(src, dst)
707 703
708 704 class path_auditor(object):
709 705 '''ensure that a filesystem path contains no banned components.
710 706 the following properties of a path are checked:
711 707
712 708 - under top-level .hg
713 709 - starts at the root of a windows drive
714 710 - contains ".."
715 711 - traverses a symlink (e.g. a/symlink_here/b)
716 712 - inside a nested repository'''
717 713
718 714 def __init__(self, root):
719 715 self.audited = set()
720 716 self.auditeddir = set()
721 717 self.root = root
722 718
723 719 def __call__(self, path):
724 720 if path in self.audited:
725 721 return
726 722 normpath = os.path.normcase(path)
727 723 parts = splitpath(normpath)
728 724 if (os.path.splitdrive(path)[0]
729 725 or parts[0].lower() in ('.hg', '.hg.', '')
730 726 or os.pardir in parts):
731 727 raise Abort(_("path contains illegal component: %s") % path)
732 728 if '.hg' in path.lower():
733 729 lparts = [p.lower() for p in parts]
734 730 for p in '.hg', '.hg.':
735 731 if p in lparts[1:]:
736 732 pos = lparts.index(p)
737 733 base = os.path.join(*parts[:pos])
738 734 raise Abort(_('path %r is inside repo %r') % (path, base))
739 735 def check(prefix):
740 736 curpath = os.path.join(self.root, prefix)
741 737 try:
742 738 st = os.lstat(curpath)
743 739 except OSError, err:
744 740 # EINVAL can be raised as invalid path syntax under win32.
745 741 # They must be ignored for patterns can be checked too.
746 742 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
747 743 raise
748 744 else:
749 745 if stat.S_ISLNK(st.st_mode):
750 746 raise Abort(_('path %r traverses symbolic link %r') %
751 747 (path, prefix))
752 748 elif (stat.S_ISDIR(st.st_mode) and
753 749 os.path.isdir(os.path.join(curpath, '.hg'))):
754 750 raise Abort(_('path %r is inside repo %r') %
755 751 (path, prefix))
756 752 parts.pop()
757 753 prefixes = []
758 754 for n in range(len(parts)):
759 755 prefix = os.sep.join(parts)
760 756 if prefix in self.auditeddir:
761 757 break
762 758 check(prefix)
763 759 prefixes.append(prefix)
764 760 parts.pop()
765 761
766 762 self.audited.add(path)
767 763 # only add prefixes to the cache after checking everything: we don't
768 764 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
769 765 self.auditeddir.update(prefixes)
770 766
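A minimal sketch of the auditor rejecting banned paths (root and paths are illustrative; nonexistent components simply pass the lstat checks):

    from mercurial import util

    audit = util.path_auditor('/repo')
    audit('src/main.c')     # ok, and cached for repeated calls
    audit('.hg/hgrc')       # raises Abort: illegal component
    audit('../etc/passwd')  # raises Abort: contains '..'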
771 767 def nlinks(pathname):
772 768 """Return number of hardlinks for the given file."""
773 769 return os.lstat(pathname).st_nlink
774 770
775 771 if hasattr(os, 'link'):
776 772 os_link = os.link
777 773 else:
778 774 def os_link(src, dst):
779 775 raise OSError(0, _("Hardlinks not supported"))
780 776
781 777 def lookup_reg(key, name=None, scope=None):
782 778 return None
783 779
784 780 if os.name == 'nt':
785 781 from windows import *
786 782 def expand_glob(pats):
787 783 '''On Windows, expand the implicit globs in a list of patterns'''
788 784 ret = []
789 785 for p in pats:
790 786 kind, name = patkind(p, None)
791 787 if kind is None:
792 788 globbed = glob.glob(name)
793 789 if globbed:
794 790 ret.extend(globbed)
795 791 continue
796 792 # if we couldn't expand the glob, just keep it around
797 793 ret.append(p)
798 794 return ret
799 795 else:
800 796 from posix import *
801 797
802 798 def makelock(info, pathname):
803 799 try:
804 800 return os.symlink(info, pathname)
805 801 except OSError, why:
806 802 if why.errno == errno.EEXIST:
807 803 raise
808 804 except AttributeError: # no symlink in os
809 805 pass
810 806
811 807 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
812 808 os.write(ld, info)
813 809 os.close(ld)
814 810
815 811 def readlock(pathname):
816 812 try:
817 813 return os.readlink(pathname)
818 814 except OSError, why:
819 815 if why.errno not in (errno.EINVAL, errno.ENOSYS):
820 816 raise
821 817 except AttributeError: # no symlink in os
822 818 pass
823 819 return posixfile(pathname).read()
824 820
825 821 def fstat(fp):
826 822 '''stat file object that may not have fileno method.'''
827 823 try:
828 824 return os.fstat(fp.fileno())
829 825 except AttributeError:
830 826 return os.stat(fp.name)
831 827
832 828 # File system features
833 829
834 830 def checkcase(path):
835 831 """
836 832 Check whether the given path is on a case-sensitive filesystem
837 833
838 834 Requires a path (like /foo/.hg) ending with a foldable final
839 835 directory component.
840 836 """
841 837 s1 = os.stat(path)
842 838 d, b = os.path.split(path)
843 839 p2 = os.path.join(d, b.upper())
844 840 if path == p2:
845 841 p2 = os.path.join(d, b.lower())
846 842 try:
847 843 s2 = os.stat(p2)
848 844 if s2 == s1:
849 845 return False
850 846 return True
851 847 except:
852 848 return True
853 849
854 850 _fspathcache = {}
855 851 def fspath(name, root):
856 852 '''Get name in the case stored in the filesystem
857 853
858 854 The name is either relative to root, or it is an absolute path starting
859 855 with root. Note that this function is unnecessary, and should not be
860 856 called, for case-sensitive filesystems (simply because it's expensive).
861 857 '''
862 858 # If name is absolute, make it relative
863 859 if name.lower().startswith(root.lower()):
864 860 l = len(root)
865 861 if name[l] == os.sep or name[l] == os.altsep:
866 862 l = l + 1
867 863 name = name[l:]
868 864
869 865 if not os.path.exists(os.path.join(root, name)):
870 866 return None
871 867
872 868 seps = os.sep
873 869 if os.altsep:
874 870 seps = seps + os.altsep
875 871 # Protect backslashes. This gets silly very quickly.
875 871 seps = seps.replace('\\','\\\\')
877 873 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
878 874 dir = os.path.normcase(os.path.normpath(root))
879 875 result = []
880 876 for part, sep in pattern.findall(name):
881 877 if sep:
882 878 result.append(sep)
883 879 continue
884 880
885 881 if dir not in _fspathcache:
886 882 _fspathcache[dir] = os.listdir(dir)
887 883 contents = _fspathcache[dir]
888 884
889 885 lpart = part.lower()
890 886 for n in contents:
891 887 if n.lower() == lpart:
892 888 result.append(n)
893 889 break
894 890 else:
895 891 # Cannot happen, as the file exists!
896 892 result.append(part)
897 893 dir = os.path.join(dir, lpart)
898 894
899 895 return ''.join(result)
900 896
901 897 def checkexec(path):
902 898 """
903 899 Check whether the given path is on a filesystem with UNIX-like exec flags
904 900
905 901 Requires a directory (like /foo/.hg)
906 902 """
907 903
908 904 # VFAT on some Linux versions can flip mode but it doesn't persist
909 905 # across a FS remount. Frequently we can detect it if files are
910 906 # created with exec bit on.
911 907
912 908 try:
913 909 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
914 910 fh, fn = tempfile.mkstemp("", "", path)
915 911 try:
916 912 os.close(fh)
917 913 m = os.stat(fn).st_mode & 0777
918 914 new_file_has_exec = m & EXECFLAGS
919 915 os.chmod(fn, m ^ EXECFLAGS)
920 916 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
921 917 finally:
922 918 os.unlink(fn)
923 919 except (IOError, OSError):
924 920 # we don't care, the user probably won't be able to commit anyway
925 921 return False
926 922 return not (new_file_has_exec or exec_flags_cannot_flip)
927 923
928 924 def checklink(path):
929 925 """check whether the given path is on a symlink-capable filesystem"""
930 926 # mktemp is not racy because symlink creation will fail if the
931 927 # file already exists
932 928 name = tempfile.mktemp(dir=path)
933 929 try:
934 930 os.symlink(".", name)
935 931 os.unlink(name)
936 932 return True
937 933 except (OSError, AttributeError):
938 934 return False
939 935
940 936 def needbinarypatch():
941 937 """return True if patches should be applied in binary mode by default."""
942 938 return os.name == 'nt'
943 939
944 940 def endswithsep(path):
945 941 '''Check path ends with os.sep or os.altsep.'''
946 942 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
947 943
948 944 def splitpath(path):
949 945 '''Split path by os.sep.
950 946 Note that this function does not use os.altsep because it is
951 947 meant as a simple alternative to "xxx.split(os.sep)".
952 948 It is recommended to run os.path.normpath() on the path before
953 949 using this function, if needed.'''
954 950 return path.split(os.sep)
955 951
956 952 def gui():
957 953 '''Are we running in a GUI?'''
958 954 return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")
959 955
960 956 def mktempcopy(name, emptyok=False, createmode=None):
961 957 """Create a temporary file with the same contents from name
962 958
963 959 The permission bits are copied from the original file.
964 960
965 961 If the temporary file is going to be truncated immediately, you
966 962 can use emptyok=True as an optimization.
967 963
968 964 Returns the name of the temporary file.
969 965 """
970 966 d, fn = os.path.split(name)
971 967 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
972 968 os.close(fd)
973 969 # Temporary files are created with mode 0600, which is usually not
974 970 # what we want. If the original file already exists, just copy
975 971 # its mode. Otherwise, manually obey umask.
976 972 try:
977 973 st_mode = os.lstat(name).st_mode & 0777
978 974 except OSError, inst:
979 975 if inst.errno != errno.ENOENT:
980 976 raise
981 977 st_mode = createmode
982 978 if st_mode is None:
983 979 st_mode = ~umask
984 980 st_mode &= 0666
985 981 os.chmod(temp, st_mode)
986 982 if emptyok:
987 983 return temp
988 984 try:
989 985 try:
990 986 ifp = posixfile(name, "rb")
991 987 except IOError, inst:
992 988 if inst.errno == errno.ENOENT:
993 989 return temp
994 990 if not getattr(inst, 'filename', None):
995 991 inst.filename = name
996 992 raise
997 993 ofp = posixfile(temp, "wb")
998 994 for chunk in filechunkiter(ifp):
999 995 ofp.write(chunk)
1000 996 ifp.close()
1001 997 ofp.close()
1002 998 except:
1003 999 try: os.unlink(temp)
1004 1000 except: pass
1005 1001 raise
1006 1002 return temp
1007 1003
1008 1004 class atomictempfile(posixfile):
1009 1005 """file-like object that atomically updates a file
1010 1006
1011 1007 All writes will be redirected to a temporary copy of the original
1012 1008 file. When rename is called, the copy is renamed to the original
1013 1009 name, making the changes visible.
1014 1010 """
1015 1011 def __init__(self, name, mode, createmode):
1016 1012 self.__name = name
1017 1013 self.temp = mktempcopy(name, emptyok=('w' in mode),
1018 1014 createmode=createmode)
1019 1015 posixfile.__init__(self, self.temp, mode)
1020 1016
1021 1017 def rename(self):
1022 1018 if not self.closed:
1023 1019 posixfile.close(self)
1024 1020 rename(self.temp, localpath(self.__name))
1025 1021
1026 1022 def __del__(self):
1027 1023 if not self.closed:
1028 1024 try:
1029 1025 os.unlink(self.temp)
1030 1026 except: pass
1031 1027 posixfile.close(self)
1032 1028
1033 1029 def makedirs(name, mode=None):
1034 1030 """recursive directory creation with parent mode inheritance"""
1035 1031 try:
1036 1032 os.mkdir(name)
1037 1033 if mode is not None:
1038 1034 os.chmod(name, mode)
1039 1035 return
1040 1036 except OSError, err:
1041 1037 if err.errno == errno.EEXIST:
1042 1038 return
1043 1039 if err.errno != errno.ENOENT:
1044 1040 raise
1045 1041 parent = os.path.abspath(os.path.dirname(name))
1046 1042 makedirs(parent, mode)
1047 1043 makedirs(name, mode)
1048 1044
1049 1045 class opener(object):
1050 1046 """Open files relative to a base directory
1051 1047
1052 1048 This class is used to hide the details of COW semantics and
1053 1049 remote file access from higher level code.
1054 1050 """
1055 1051 def __init__(self, base, audit=True):
1056 1052 self.base = base
1057 1053 if audit:
1058 1054 self.audit_path = path_auditor(base)
1059 1055 else:
1060 1056 self.audit_path = always
1061 1057 self.createmode = None
1062 1058
1063 1059 def __getattr__(self, name):
1064 1060 if name == '_can_symlink':
1065 1061 self._can_symlink = checklink(self.base)
1066 1062 return self._can_symlink
1067 1063 raise AttributeError(name)
1068 1064
1069 1065 def _fixfilemode(self, name):
1070 1066 if self.createmode is None:
1071 1067 return
1072 1068 os.chmod(name, self.createmode & 0666)
1073 1069
1074 1070 def __call__(self, path, mode="r", text=False, atomictemp=False):
1075 1071 self.audit_path(path)
1076 1072 f = os.path.join(self.base, path)
1077 1073
1078 1074 if not text and "b" not in mode:
1079 1075 mode += "b" # for that other OS
1080 1076
1081 1077 nlink = -1
1082 1078 if mode not in ("r", "rb"):
1083 1079 try:
1084 1080 nlink = nlinks(f)
1085 1081 except OSError:
1086 1082 nlink = 0
1087 1083 d = os.path.dirname(f)
1088 1084 if not os.path.isdir(d):
1089 1085 makedirs(d, self.createmode)
1090 1086 if atomictemp:
1091 1087 return atomictempfile(f, mode, self.createmode)
1092 1088 if nlink > 1:
1093 1089 rename(mktempcopy(f), f)
1094 1090 fp = posixfile(f, mode)
1095 1091 if nlink == 0:
1096 1092 self._fixfilemode(f)
1097 1093 return fp
1098 1094
1099 1095 def symlink(self, src, dst):
1100 1096 self.audit_path(dst)
1101 1097 linkname = os.path.join(self.base, dst)
1102 1098 try:
1103 1099 os.unlink(linkname)
1104 1100 except OSError:
1105 1101 pass
1106 1102
1107 1103 dirname = os.path.dirname(linkname)
1108 1104 if not os.path.exists(dirname):
1109 1105 makedirs(dirname, self.createmode)
1110 1106
1111 1107 if self._can_symlink:
1112 1108 try:
1113 1109 os.symlink(src, linkname)
1114 1110 except OSError, err:
1115 1111 raise OSError(err.errno, _('could not symlink to %r: %s') %
1116 1112 (src, err.strerror), linkname)
1117 1113 else:
1118 1114 f = self(dst, "w")
1119 1115 f.write(src)
1120 1116 f.close()
1121 1117 self._fixfilemode(dst)
1122 1118
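A hedged sketch of the opener with atomictemp (the base path is illustrative):

    from mercurial import util

    o = util.opener('/repo/.hg')  # relative paths are audited by default
    f = o('mydata', 'w', atomictemp=True)
    f.write('some bytes')
    f.rename()  # the temp copy atomically replaces /repo/.hg/mydata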
1123 1119 class chunkbuffer(object):
1124 1120 """Allow arbitrary sized chunks of data to be efficiently read from an
1125 1121 iterator over chunks of arbitrary size."""
1126 1122
1127 1123 def __init__(self, in_iter):
1128 1124 """in_iter is the iterator that's iterating over the input chunks.
1129 1125 self.targetsize is how big a buffer to try to maintain."""
1130 1126 self.iter = iter(in_iter)
1131 1127 self.buf = ''
1132 1128 self.targetsize = 2**16
1133 1129
1134 1130 def read(self, l):
1135 1131 """Read L bytes of data from the iterator of chunks of data.
1136 1132 Returns less than L bytes if the iterator runs dry."""
1137 1133 if l > len(self.buf) and self.iter:
1138 1134 # Clamp to a multiple of self.targetsize
1139 1135 targetsize = max(l, self.targetsize)
1140 1136 collector = cStringIO.StringIO()
1141 1137 collector.write(self.buf)
1142 1138 collected = len(self.buf)
1143 1139 for chunk in self.iter:
1144 1140 collector.write(chunk)
1145 1141 collected += len(chunk)
1146 1142 if collected >= targetsize:
1147 1143 break
1148 1144 if collected < targetsize:
1149 1145 self.iter = False
1150 1146 self.buf = collector.getvalue()
1151 1147 if len(self.buf) == l:
1152 1148 s, self.buf = str(self.buf), ''
1153 1149 else:
1154 1150 s, self.buf = self.buf[:l], buffer(self.buf, l)
1155 1151 return s
1156 1152
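For example (the chunk values are illustrative):

    from mercurial import util

    cb = util.chunkbuffer(iter(['abc', 'defg', 'h']))
    cb.read(2)   # 'ab'
    cb.read(5)   # 'cdefg', re-chunked across the input boundaries
    cb.read(10)  # 'h': the iterator ran dry, so we get what is left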
1157 1153 def filechunkiter(f, size=65536, limit=None):
1158 1154 """Create a generator that produces the data in the file size
1159 1155 (default 65536) bytes at a time, up to optional limit (default is
1160 1156 to read all data). Chunks may be less than size bytes if the
1161 1157 chunk is the last chunk in the file, or the file is a socket or
1162 1158 some other type of file that sometimes reads less data than is
1163 1159 requested."""
1164 1160 assert size >= 0
1165 1161 assert limit is None or limit >= 0
1166 1162 while True:
1167 1163 if limit is None: nbytes = size
1168 1164 else: nbytes = min(limit, size)
1169 1165 s = nbytes and f.read(nbytes)
1170 1166 if not s: break
1171 1167 if limit: limit -= len(s)
1172 1168 yield s
1173 1169
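A short usage sketch (the file name and the 1 MB cap are illustrative):

    from mercurial import util

    fp = open('some.bin', 'rb')
    total = 0
    for chunk in util.filechunkiter(fp, size=8192, limit=1 << 20):
        total += len(chunk)  # at most 1 MB in total, 8 KB at a time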
1174 1170 def makedate():
1175 1171 lt = time.localtime()
1176 1172 if lt[8] == 1 and time.daylight:
1177 1173 tz = time.altzone
1178 1174 else:
1179 1175 tz = time.timezone
1180 1176 return time.mktime(lt), tz
1181 1177
1182 1178 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1183 1179 """represent a (unixtime, offset) tuple as a localized time.
1184 1180 unixtime is seconds since the epoch, and offset is the time zone's
1186 1182 number of seconds away from UTC. "%1" and "%2" in the format are
1187 1183 replaced by the offset's sign plus hours and by its minutes."""
1187 1183 t, tz = date or makedate()
1188 1184 if "%1" in format or "%2" in format:
1189 1185 sign = (tz > 0) and "-" or "+"
1190 1186 minutes = abs(tz) / 60
1191 1187 format = format.replace("%1", "%c%02d" % (sign, minutes / 60))
1192 1188 format = format.replace("%2", "%02d" % (minutes % 60))
1193 1189 s = time.strftime(format, time.gmtime(float(t) - tz))
1194 1190 return s
1195 1191
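For example (offsets are in seconds; a negative offset means east of UTC):

    from mercurial import util

    util.datestr((0, 0))      # 'Thu Jan 01 00:00:00 1970 +0000'
    util.datestr((0, -3600))  # 'Thu Jan 01 01:00:00 1970 +0100'
    util.datestr((0, 0), format='%Y-%m-%d')  # '1970-01-01'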
1196 1192 def shortdate(date=None):
1197 1193 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1198 1194 return datestr(date, format='%Y-%m-%d')
1199 1195
1200 1196 def strdate(string, format, defaults=[]):
1201 1197 """parse a localized time string and return a (unixtime, offset) tuple.
1202 1198 if the string cannot be parsed, ValueError is raised."""
1203 1199 def timezone(string):
1204 1200 tz = string.split()[-1]
1205 1201 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1206 1202 sign = (tz[0] == "+") and 1 or -1
1207 1203 hours = int(tz[1:3])
1208 1204 minutes = int(tz[3:5])
1209 1205 return -sign * (hours * 60 + minutes) * 60
1210 1206 if tz == "GMT" or tz == "UTC":
1211 1207 return 0
1212 1208 return None
1213 1209
1214 1210 # NOTE: unixtime = localunixtime + offset
1215 1211 offset, date = timezone(string), string
1216 1212 if offset is not None:
1217 1213 date = " ".join(string.split()[:-1])
1218 1214
1219 1215 # add missing elements from defaults
1220 1216 for part in defaults:
1221 1217 found = [True for p in part if ("%"+p) in format]
1222 1218 if not found:
1223 1219 date += "@" + defaults[part]
1224 1220 format += "@%" + part[0]
1225 1221
1226 1222 timetuple = time.strptime(date, format)
1227 1223 localunixtime = int(calendar.timegm(timetuple))
1228 1224 if offset is None:
1229 1225 # local timezone
1230 1226 unixtime = int(time.mktime(timetuple))
1231 1227 offset = unixtime - localunixtime
1232 1228 else:
1233 1229 unixtime = localunixtime + offset
1234 1230 return unixtime, offset
1235 1231
1236 1232 def parsedate(date, formats=None, defaults=None):
1237 1233 """parse a localized date/time string and return a (unixtime, offset) tuple.
1238 1234
1239 1235 The date may be a "unixtime offset" string or in one of the specified
1240 1236 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1241 1237 """
1242 1238 if not date:
1243 1239 return 0, 0
1244 1240 if isinstance(date, tuple) and len(date) == 2:
1245 1241 return date
1246 1242 if not formats:
1247 1243 formats = defaultdateformats
1248 1244 date = date.strip()
1249 1245 try:
1250 1246 when, offset = map(int, date.split(' '))
1251 1247 except ValueError:
1252 1248 # fill out defaults
1253 1249 if not defaults:
1254 1250 defaults = {}
1255 1251 now = makedate()
1256 1252 for part in "d mb yY HI M S".split():
1257 1253 if part not in defaults:
1258 1254 if part[0] in "HMS":
1259 1255 defaults[part] = "00"
1260 1256 else:
1261 1257 defaults[part] = datestr(now, "%" + part[0])
1262 1258
1263 1259 for format in formats:
1264 1260 try:
1265 1261 when, offset = strdate(date, format, defaults)
1266 1262 except (ValueError, OverflowError):
1267 1263 pass
1268 1264 else:
1269 1265 break
1270 1266 else:
1271 1267 raise Abort(_('invalid date: %r') % date)
1272 1268 # validate explicit (probably user-specified) date and
1273 1269 # time zone offset. values must fit in signed 32 bits for
1274 1270 # current 32-bit linux runtimes. timezones go from UTC-12
1275 1271 # to UTC+14
1276 1272 if abs(when) > 0x7fffffff:
1277 1273 raise Abort(_('date exceeds 32 bits: %d') % when)
1278 1274 if offset < -50400 or offset > 43200:
1279 1275 raise Abort(_('impossible time zone offset: %d') % offset)
1280 1276 return when, offset
1281 1277
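For example (the second call assumes '%Y-%m-%d %H:%M' is among the defaultdateformats tried above):

    from mercurial import util

    util.parsedate('1230725100 -3600')        # -> (1230725100, -3600)
    util.parsedate('2008-12-31 13:05 +0100')  # the same instant, parsed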
1282 1278 def matchdate(date):
1283 1279 """Return a function that matches a given date match specifier
1284 1280
1285 1281 Formats include:
1286 1282
1287 1283 '{date}' match a given date to the accuracy provided
1288 1284
1289 1285 '<{date}' on or before a given date
1290 1286
1291 1287 '>{date}' on or after a given date
1292 1288
1293 1289 """
1294 1290
1295 1291 def lower(date):
1296 1292 d = dict(mb="1", d="1")
1297 1293 return parsedate(date, extendeddateformats, d)[0]
1298 1294
1299 1295 def upper(date):
1300 1296 d = dict(mb="12", HI="23", M="59", S="59")
1301 1297 for days in "31 30 29".split():
1302 1298 try:
1303 1299 d["d"] = days
1304 1300 return parsedate(date, extendeddateformats, d)[0]
1305 1301 except:
1306 1302 pass
1307 1303 d["d"] = "28"
1308 1304 return parsedate(date, extendeddateformats, d)[0]
1309 1305
1310 1306 date = date.strip()
1311 1307 if date[0] == "<":
1312 1308 when = upper(date[1:])
1313 1309 return lambda x: x <= when
1314 1310 elif date[0] == ">":
1315 1311 when = lower(date[1:])
1316 1312 return lambda x: x >= when
1317 1313 elif date[0] == "-":
1318 1314 try:
1319 1315 days = int(date[1:])
1320 1316 except ValueError:
1321 1317 raise Abort(_("invalid day spec: %s") % date[1:])
1322 1318 when = makedate()[0] - days * 3600 * 24
1323 1319 return lambda x: x >= when
1324 1320 elif " to " in date:
1325 1321 a, b = date.split(" to ")
1326 1322 start, stop = lower(a), upper(b)
1327 1323 return lambda x: x >= start and x <= stop
1328 1324 else:
1329 1325 start, stop = lower(date), upper(date)
1330 1326 return lambda x: x >= start and x <= stop
1331 1327
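Note that the code also accepts '-{days}' (within so many days of now) and '{date} to {date}' ranges beyond what the docstring lists. A sketch:

    from mercurial import util

    m = util.matchdate('>2008-12-01')
    m(util.parsedate('2008-12-31 13:05 +0100')[0])  # True
    util.matchdate('-7')(util.makedate()[0])        # True: within 7 days
    inrange = util.matchdate('2008-01-01 to 2008-12-31')  # inclusive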
1332 1328 def shortuser(user):
1333 1329 """Return a short representation of a user name or email address."""
1334 1330 f = user.find('@')
1335 1331 if f >= 0:
1336 1332 user = user[:f]
1337 1333 f = user.find('<')
1338 1334 if f >= 0:
1339 1335 user = user[f+1:]
1340 1336 f = user.find(' ')
1341 1337 if f >= 0:
1342 1338 user = user[:f]
1343 1339 f = user.find('.')
1344 1340 if f >= 0:
1345 1341 user = user[:f]
1346 1342 return user
1347 1343
1348 1344 def email(author):
1349 1345 '''get email of author.'''
1350 1346 r = author.find('>')
1351 1347 if r == -1: r = None
1352 1348 return author[author.find('<')+1:r]
1353 1349
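For example:

    from mercurial import util

    util.shortuser('John Doe <john.doe@example.com>')  # -> 'john'
    util.email('John Doe <john.doe@example.com>')      # -> 'john.doe@example.com'
    util.email('john@example.com')                     # -> 'john@example.com'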
1354 1350 def ellipsis(text, maxlength=400):
1355 1351 """Trim string to at most maxlength (default: 400) characters."""
1356 1352 if len(text) <= maxlength:
1357 1353 return text
1358 1354 else:
1359 1355 return "%s..." % (text[:maxlength-3])
1360 1356
1361 1357 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
1362 1358 '''yield every hg repository under path, recursively.'''
1363 1359 def errhandler(err):
1364 1360 if err.filename == path:
1365 1361 raise err
1366 1362 if followsym and hasattr(os.path, 'samestat'):
1367 1363 def _add_dir_if_not_there(dirlst, dirname):
1368 1364 match = False
1369 1365 samestat = os.path.samestat
1370 1366 dirstat = os.stat(dirname)
1371 1367 for lstdirstat in dirlst:
1372 1368 if samestat(dirstat, lstdirstat):
1373 1369 match = True
1374 1370 break
1375 1371 if not match:
1376 1372 dirlst.append(dirstat)
1377 1373 return not match
1378 1374 else:
1379 1375 followsym = False
1380 1376
1381 1377 if (seen_dirs is None) and followsym:
1382 1378 seen_dirs = []
1383 1379 _add_dir_if_not_there(seen_dirs, path)
1384 1380 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
1385 1381 if '.hg' in dirs:
1386 1382 yield root # found a repository
1387 1383 qroot = os.path.join(root, '.hg', 'patches')
1388 1384 if os.path.isdir(os.path.join(qroot, '.hg')):
1389 1385 yield qroot # we have a patch queue repo here
1390 1386 if recurse:
1391 1387 # avoid recursing inside the .hg directory
1392 1388 dirs.remove('.hg')
1393 1389 else:
1394 1390 dirs[:] = [] # don't descend further
1395 1391 elif followsym:
1396 1392 newdirs = []
1397 1393 for d in dirs:
1398 1394 fname = os.path.join(root, d)
1399 1395 if _add_dir_if_not_there(seen_dirs, fname):
1400 1396 if os.path.islink(fname):
1401 1397 for hgname in walkrepos(fname, True, seen_dirs):
1402 1398 yield hgname
1403 1399 else:
1404 1400 newdirs.append(d)
1405 1401 dirs[:] = newdirs
1406 1402
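A brief sketch (the starting directory is illustrative):

    import os
    from mercurial import util

    for repo in util.walkrepos(os.path.expanduser('~/src'), followsym=True):
        print repo  # each repository found, including patch queue repos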
1407 1403 _rcpath = None
1408 1404
1409 1405 def os_rcpath():
1410 1406 '''return default os-specific hgrc search path'''
1411 1407 path = system_rcpath()
1412 1408 path.extend(user_rcpath())
1413 1409 path = [os.path.normpath(f) for f in path]
1414 1410 return path
1415 1411
1416 1412 def rcpath():
1417 1413 '''return hgrc search path. if env var HGRCPATH is set, use it.
1418 1414 for each item in path, if directory, use files ending in .rc,
1419 1415 else use item.
1420 1416 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1421 1417 if no HGRCPATH, use default os-specific path.'''
1422 1418 global _rcpath
1423 1419 if _rcpath is None:
1424 1420 if 'HGRCPATH' in os.environ:
1425 1421 _rcpath = []
1426 1422 for p in os.environ['HGRCPATH'].split(os.pathsep):
1427 1423 if not p: continue
1428 1424 if os.path.isdir(p):
1429 1425 for f, kind in osutil.listdir(p):
1430 1426 if f.endswith('.rc'):
1431 1427 _rcpath.append(os.path.join(p, f))
1432 1428 else:
1433 1429 _rcpath.append(p)
1434 1430 else:
1435 1431 _rcpath = os_rcpath()
1436 1432 return _rcpath
1437 1433
1438 1434 def bytecount(nbytes):
1439 1435 '''return byte count formatted as readable string, with units'''
1440 1436
1441 1437 units = (
1442 1438 (100, 1<<30, _('%.0f GB')),
1443 1439 (10, 1<<30, _('%.1f GB')),
1444 1440 (1, 1<<30, _('%.2f GB')),
1445 1441 (100, 1<<20, _('%.0f MB')),
1446 1442 (10, 1<<20, _('%.1f MB')),
1447 1443 (1, 1<<20, _('%.2f MB')),
1448 1444 (100, 1<<10, _('%.0f KB')),
1449 1445 (10, 1<<10, _('%.1f KB')),
1450 1446 (1, 1<<10, _('%.2f KB')),
1451 1447 (1, 1, _('%.0f bytes')),
1452 1448 )
1453 1449
1454 1450 for multiplier, divisor, format in units:
1455 1451 if nbytes >= divisor * multiplier:
1456 1452 return format % (nbytes / float(divisor))
1457 1453 return units[-1][2] % nbytes
1458 1454
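For example:

    from mercurial import util

    util.bytecount(0)        # '0 bytes'
    util.bytecount(12345)    # '12.1 KB'
    util.bytecount(1 << 30)  # '1.00 GB'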
1459 1455 def drop_scheme(scheme, path):
1460 1456 sc = scheme + ':'
1461 1457 if path.startswith(sc):
1462 1458 path = path[len(sc):]
1463 1459 if path.startswith('//'):
1464 1460 path = path[2:]
1465 1461 return path
1466 1462
1467 1463 def uirepr(s):
1468 1464 # Avoid double backslash in Windows path repr()
1469 1465 return repr(s).replace('\\\\', '\\')
1470 1466
1471 1467 def termwidth():
1472 1468 if 'COLUMNS' in os.environ:
1473 1469 try:
1474 1470 return int(os.environ['COLUMNS'])
1475 1471 except ValueError:
1476 1472 pass
1477 1473 try:
1478 1474 import termios, array, fcntl
1479 1475 for dev in (sys.stdout, sys.stdin):
1480 1476 try:
1481 1477 fd = dev.fileno()
1482 1478 if not os.isatty(fd):
1483 1479 continue
1484 1480 arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
1485 1481 return array.array('h', arri)[1]
1486 1482 except ValueError:
1487 1483 pass
1488 1484 except ImportError:
1489 1485 pass
1490 1486 return 80
1491 1487
1492 1488 def iterlines(iterator):
1493 1489 for chunk in iterator:
1494 1490 for line in chunk.splitlines():
1495 1491 yield line
@@ -1,245 +1,245 b''
1 1 # verify.py - repository integrity checking for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import nullid, short
9 9 from i18n import _
10 10 import revlog, util, error
11 11
12 12 def verify(repo):
13 13 lock = repo.lock()
14 14 try:
15 15 return _verify(repo)
16 16 finally:
17 17 lock.release()
18 18
19 19 def _verify(repo):
20 20 mflinkrevs = {}
21 21 filelinkrevs = {}
22 22 filenodes = {}
23 23 revisions = 0
24 24 badrevs = {}
25 25 errors = [0]
26 26 warnings = [0]
27 27 ui = repo.ui
28 28 cl = repo.changelog
29 29 mf = repo.manifest
30 30
31 31 if not repo.cancopy():
32 32 raise util.Abort(_("cannot verify bundle or remote repos"))
33 33
34 34 def err(linkrev, msg, filename=None):
35 35 if linkrev is not None:
36 36 badrevs[linkrev] = True
37 37 else:
38 38 linkrev = '?'
39 39 msg = "%s: %s" % (linkrev, msg)
40 40 if filename:
41 41 msg = "%s@%s" % (filename, msg)
42 42 ui.warn(" " + msg + "\n")
43 43 errors[0] += 1
44 44
45 45 def exc(linkrev, msg, inst, filename=None):
46 46 if isinstance(inst, KeyboardInterrupt):
47 47 ui.warn(_("interrupted"))
48 48 raise
49 49 err(linkrev, "%s: %s" % (msg, inst), filename)
50 50
51 51 def warn(msg):
52 52 ui.warn(msg + "\n")
53 53 warnings[0] += 1
54 54
55 55 def checklog(obj, name):
56 56 if not len(obj) and (havecl or havemf):
57 57 err(0, _("empty or missing %s") % name)
58 58 return
59 59
60 60 d = obj.checksize()
61 61 if d[0]:
62 62 err(None, _("data length off by %d bytes") % d[0], name)
63 63 if d[1]:
64 64 err(None, _("index contains %d extra bytes") % d[1], name)
65 65
66 66 if obj.version != revlog.REVLOGV0:
67 67 if not revlogv1:
68 68 warn(_("warning: `%s' uses revlog format 1") % name)
69 69 elif revlogv1:
70 70 warn(_("warning: `%s' uses revlog format 0") % name)
71 71
72 72 def checkentry(obj, i, node, seen, linkrevs, f):
73 73 lr = obj.linkrev(obj.rev(node))
74 74 if lr < 0 or (havecl and lr not in linkrevs):
75 75 if lr < 0 or lr >= len(cl):
76 76 msg = _("rev %d points to nonexistent changeset %d")
77 77 else:
78 78 msg = _("rev %d points to unexpected changeset %d")
79 79 err(None, msg % (i, lr), f)
80 80 if linkrevs:
81 81 warn(_(" (expected %s)") % " ".join(map(str,linkrevs)))
82 82 lr = None # can't be trusted
83 83
84 84 try:
85 85 p1, p2 = obj.parents(node)
86 86 if p1 not in seen and p1 != nullid:
87 87 err(lr, _("unknown parent 1 %s of %s") %
88 88 (short(p1), short(node)), f)
89 89 if p2 not in seen and p2 != nullid:
90 90 err(lr, _("unknown parent 2 %s of %s") %
91 91 (short(p2), short(node)), f)
92 92 except Exception, inst:
93 93 exc(lr, _("checking parents of %s") % short(node), inst, f)
94 94
95 95 if node in seen:
96 96 err(lr, _("duplicate revision %d (%d)") % (i, seen[node]), f)
97 97 seen[node] = i
98 98 return lr
99 99
100 100 revlogv1 = cl.version != revlog.REVLOGV0
101 101 if ui.verbose or not revlogv1:
102 102 ui.status(_("repository uses revlog format %d\n") %
103 103 (revlogv1 and 1 or 0))
104 104
105 105 havecl = len(cl) > 0
106 106 havemf = len(mf) > 0
107 107
108 108 ui.status(_("checking changesets\n"))
109 109 seen = {}
110 110 checklog(cl, "changelog")
111 111 for i in repo:
112 112 n = cl.node(i)
113 113 checkentry(cl, i, n, seen, [i], "changelog")
114 114
115 115 try:
116 116 changes = cl.read(n)
117 117 mflinkrevs.setdefault(changes[0], []).append(i)
118 118 for f in changes[3]:
119 119 filelinkrevs.setdefault(f, []).append(i)
120 120 except Exception, inst:
121 121 exc(i, _("unpacking changeset %s") % short(n), inst)
122 122
123 123 ui.status(_("checking manifests\n"))
124 124 seen = {}
125 125 checklog(mf, "manifest")
126 126 for i in mf:
127 127 n = mf.node(i)
128 128 lr = checkentry(mf, i, n, seen, mflinkrevs.get(n, []), "manifest")
129 129 if n in mflinkrevs:
130 130 del mflinkrevs[n]
131 131
132 132 try:
133 133 for f, fn in mf.readdelta(n).iteritems():
134 134 if not f:
135 135 err(lr, _("file without name in manifest"))
136 136 elif f != "/dev/null":
137 137 fns = filenodes.setdefault(f, {})
138 138 if fn not in fns:
139 139 fns[fn] = i
140 140 except Exception, inst:
141 141 exc(lr, _("reading manifest delta %s") % short(n), inst)
142 142
143 143 ui.status(_("crosschecking files in changesets and manifests\n"))
144 144
145 145 if havemf:
146 146 for c, m in util.sort([(c, m) for m in mflinkrevs for c in mflinkrevs[m]]):
147 147 err(c, _("changeset refers to unknown manifest %s") % short(m))
148 148 del mflinkrevs
149 149
150 150 for f in util.sort(filelinkrevs):
151 151 if f not in filenodes:
152 152 lr = filelinkrevs[f][0]
153 153 err(lr, _("in changeset but not in manifest"), f)
154 154
155 155 if havecl:
156 156 for f in util.sort(filenodes):
157 157 if f not in filelinkrevs:
158 158 try:
159 159 fl = repo.file(f)
160 160 lr = min([fl.linkrev(fl.rev(n)) for n in filenodes[f]])
161 161 except:
162 162 lr = None
163 163 err(lr, _("in manifest but not in changeset"), f)
164 164
165 165 ui.status(_("checking files\n"))
166 166
167 167 storefiles = {}
168 168 for f, f2, size in repo.store.datafiles():
169 169 if not f:
170 170 err(None, _("cannot decode filename '%s'") % f2)
171 171 elif size > 0:
172 172 storefiles[f] = True
173 173
174 files = util.sort(util.unique(filenodes.keys() + filelinkrevs.keys()))
174 files = util.sort(set(filenodes.keys() + filelinkrevs.keys()))
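# set() takes over what the removed util.unique() call did: it
# deduplicates the concatenated key lists before sorting, e.g.
#   util.sort(set(['a', 'b'] + ['b', 'c']))  # -> ['a', 'b', 'c']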
175 175 for f in files:
176 176 lr = filelinkrevs[f][0]
177 177 try:
178 178 fl = repo.file(f)
179 179 except error.RevlogError, e:
180 180 err(lr, _("broken revlog! (%s)") % e, f)
181 181 continue
182 182
183 183 for ff in fl.files():
184 184 try:
185 185 del storefiles[ff]
186 186 except KeyError:
187 187 err(lr, _("missing revlog!"), ff)
188 188
189 189 checklog(fl, f)
190 190 seen = {}
191 191 for i in fl:
192 192 revisions += 1
193 193 n = fl.node(i)
194 194 lr = checkentry(fl, i, n, seen, filelinkrevs.get(f, []), f)
195 195 if f in filenodes:
196 196 if havemf and n not in filenodes[f]:
197 197 err(lr, _("%s not in manifests") % (short(n)), f)
198 198 else:
199 199 del filenodes[f][n]
200 200
201 201 # verify contents
202 202 try:
203 203 t = fl.read(n)
204 204 rp = fl.renamed(n)
205 205 if len(t) != fl.size(i):
206 206 if len(fl.revision(n)) != fl.size(i):
207 207 err(lr, _("unpacked size is %s, %s expected") %
208 208 (len(t), fl.size(i)), f)
209 209 except Exception, inst:
210 210 exc(lr, _("unpacking %s") % short(n), inst, f)
211 211
212 212 # check renames
213 213 try:
214 214 if rp:
215 215 fl2 = repo.file(rp[0])
216 216 if not len(fl2):
217 217 err(lr, _("empty or missing copy source revlog %s:%s")
218 218 % (rp[0], short(rp[1])), f)
219 219 elif rp[1] == nullid:
220 220 warn(_("warning: %s@%s: copy source revision is nullid %s:%s")
221 221 % (f, lr, rp[0], short(rp[1])))
222 222 else:
223 223 fl2.rev(rp[1])
224 224 except Exception, inst:
225 225 exc(lr, _("checking rename of %s") % short(n), inst, f)
226 226
227 227 # cross-check
228 228 if f in filenodes:
229 229 fns = [(mf.linkrev(l), n) for n,l in filenodes[f].iteritems()]
230 230 for lr, node in util.sort(fns):
231 231 err(lr, _("%s in manifests not found") % short(node), f)
232 232
233 233 for f in storefiles:
234 234 warn(_("warning: orphan revlog '%s'") % f)
235 235
236 236 ui.status(_("%d files, %d changesets, %d total revisions\n") %
237 237 (len(files), len(cl), revisions))
238 238 if warnings[0]:
239 239 ui.warn(_("%d warnings encountered!\n") % warnings[0])
240 240 if errors[0]:
241 241 ui.warn(_("%d integrity errors encountered!\n") % errors[0])
242 242 if badrevs:
243 243 ui.warn(_("(first damaged changeset appears to be %d)\n")
244 244 % min(badrevs))
245 245 return 1