Simulating erratic failures in Python

Imagine you’re using some library to interface with MySQL from Python. And MySQL keeps dying out from under your application. And you’re not handling it gracefully. So you get greeted with:

OperationalError: (2006, 'MySQL server has gone away')

If you restart your app, the problem goes away for a bit. So how can you test (unit test, even) that you’re degrading gracefully?

Here’s one way to do it. It comes out rather cleaner, more generic, and simpler than what you’d typically end up with in something statically typed like Java.

import random
from MySQLdb import OperationalError

class Store:
    """Stand-in for a real MySQL-backed store."""
    def get_val(self, a):
        return a.lower()
    def execute(self, *args, **kwargs):
        return "OK"

class ErroringStore:
    """Wrapper around something that has an execute(…) to make it error."""
    def __init__(self, delegate):
        self.delegate = delegate
    def __getattr__(self, name):
        # Only called for attributes not found on the wrapper itself.
        if name == "execute":
            # Fail roughly half the time.
            if int(random.random() * 2) == 1:
                def execute_error(*args, **kwargs):
                    raise OperationalError(2006, 'MySQL server has gone away')
                return execute_error
            else:
                return getattr(self.delegate, name)
        elif hasattr(self.delegate, name):
            return getattr(self.delegate, name)
        raise AttributeError(name)
    def __setattr__(self, name, value):
        # Keep "delegate" on the wrapper itself (anything else would recurse
        # through __getattr__ before the delegate exists); forward all other
        # attribute writes to the wrapped object.
        if name == "delegate":
            self.__dict__[name] = value
        else:
            setattr(self.delegate, name, value)

store = Store()
erroring = ErroringStore(store)

# this goes straight through to the delegate
erroring.get_val("BLAAT")

# this doesn’t exist
try:
    erroring.nomethod()
    raise "should result in AttributeError"
except AttributeError:
    pass

# this sometimes throws an exception
errors = 0
for i in range(100):
    try:
        erroring.execute("SELECT TRUE;")
    except OperationalError:
        errors += 1
print("Failed %d times out of 100" % errors)