Localized String Templating in .NET

I’ve been building a mustache-style string template system for my SaaS app. It will mainly be used for e-mail notifications sent to users via Amazon’s SES. The idea is simple: you have a text template in which you want to substitute the {{…}} tokens with send-time-specific data:

{{Title}}
Here's a sample template for {{Person.Firstname}}! 
Generated on {{CreationDate:d}}

There are a couple of important features to note here. Firstly, you can reference nested properties in the tokens – handy for passing existing business entities. Secondly, you can add format strings to determine how the token value should be formatted. This is a nice-to-have which means that if you have a locale associated with the user, you can format dates in e-mails to the user’s locale, not the sending server’s locale (i.e. mm/dd/yy vs. dd/mm/yy).

Here’s a simple example of how it would be called:

String template = @"{{Title}}
Here's a sample template for {{Person.Firstname}}! 
Generated on {{CreationDate:d}}";

PersonEntity Person = new PersonEntity();
Person.Firstname = "Brendan";
Person.Surname = "Whelan";
Person.Locale = "en-IE";

String localizedConcreteString = template.Inject(new {
                                                      Title = "Sample Injected Title", 
                                                      Person,
                                                      CreationDate = DateTime.UtcNow}, 
                                                      CultureInfo.GetCultureInfo(Person.Locale));

This generates localizedConcreteString as follows:

Sample Injected Title
Here's a sample template for Brendan!
Generated on 23/08/2013 

All the work for this is done by the Inject extension method, which means that it can be used generally, on any String where templating might be needed.

public static class StringInjectExtension
{
    public static string Inject(this string TemplateString, object InjectionObject)
    {
         return Inject(TemplateString, InjectionObject, CultureInfo.InvariantCulture);
    }

    public static string Inject(this string TemplateString, object InjectionObject, CultureInfo Culture)
    {
         return Inject(TemplateString, GetPropertyHash(InjectionObject), Culture);
    }

    public static string Inject(this string TemplateString, Hashtable values, CultureInfo Culture)
    {
         string result = TemplateString;

         //Assemble all tokens to replace
         Regex tokenRegex = new Regex("{{((?<noprops>\\w+(?:}}|(?<hasformat>:(.[^}]*))}}))|(?<hasprops>(\\w|\\.)+(?:}}|(?<hasformat>:(.[^}]*))}})))",
                                         RegexOptions.IgnoreCase | RegexOptions.Compiled);
            
         foreach (Match match in tokenRegex.Matches(TemplateString))
         {
             string replacement = match.ToString();

             //Get token version without mustache braces
             string shavenToken = match.ToString();
             shavenToken = shavenToken.Substring(2, shavenToken.Length - 4);

             //Formatted?
             string format = null;
             if (match.Groups["hasformat"].Length > 0)
             {
                 format = match.Groups["hasformat"].ToString();
                 shavenToken = shavenToken.Replace(format, null);
                 format = format.Substring(1);
             }
                
             if (match.Groups["noprops"].Length > 0) //matched {{foo}}
             {
                 replacement = FormatValue(values, shavenToken, format, Culture);
             }
             else //matched {{foo.bar[...]}}
             {
                 //Get the value of the nested property from the token and
                 //store it in the values hashtable to avoid having to get it again (in case it's reused in the current template)
                 if (!values.ContainsKey(shavenToken))
                 {
                     string[] properties = shavenToken.Split(new char[] { '.' });
                     object propertyObject = values[properties[0]];
                     for (int propIdx = 1; propIdx < properties.Length; propIdx++)
                     {
                         if (propertyObject == null) break;
                         propertyObject = GetPropValue(propertyObject, properties[propIdx]);
                     }
                     values.Add(shavenToken, propertyObject);
                 }
                 replacement = FormatValue(values, shavenToken, format, Culture);
             }
                
              result = result.Replace(match.ToString(), replacement);
          }
          return result;
     }

    private static string FormatValue(Hashtable values, string key, string format, CultureInfo culture)
    {
        var value = values[key];

        if (format != null)
        {
            //do a double string.Format - first to build the proper format string, and then to format the replacement value
            string attributeFormatString = string.Format(culture, "{{0:{0}}}", format);
            return string.Format(culture, attributeFormatString, value);
        }
        else
        {
            return (value ?? String.Empty).ToString();
        }
    }

    private static object GetPropValue(object PropertyObject, string PropertyName)
    {
        PropertyDescriptorCollection props = TypeDescriptor.GetProperties(PropertyObject);
        PropertyDescriptor prop = props.Find(PropertyName, true);

        //Guard against tokens that reference a property that doesn't exist
        return prop == null ? null : prop.GetValue(PropertyObject);
    }

    private static Hashtable GetPropertyHash(object properties)
    {
        Hashtable values = new Hashtable();
        if (properties != null)
        {
            PropertyDescriptorCollection props = TypeDescriptor.GetProperties(properties);
            foreach (PropertyDescriptor prop in props)
            {
                values.Add(prop.Name, prop.GetValue(properties));
            }
        }
        return values;
    }
}
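
To see the localization at work, here's a quick sketch rendering the same date token under two cultures (assuming the usual System and System.Globalization usings; the output values assume the standard .NET short-date patterns for en-IE and en-US):

string template = "Generated on {{CreationDate:d}}";
var tokens = new { CreationDate = new DateTime(2013, 8, 23) };

//Irish English renders short dates day-first...
string irish = template.Inject(tokens, CultureInfo.GetCultureInfo("en-IE"));
//irish == "Generated on 23/08/2013"

//...while US English renders them month-first
string us = template.Inject(tokens, CultureInfo.GetCultureInfo("en-US"));
//us == "Generated on 8/23/2013"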

.NET WCF Custom Headers

I use server-side error logging to trap and record any exceptions an end-user might be receiving. It’s handy for pro-active debugging and it’s also useful for tracking any potential intrusion attempts. To that end, I need the end-user’s IP address to see if the intrusion attempts are all coming from an IP address or range that can potentially be blocked. In an N-tiered SOA app, though, the service call that logs the exception will be in a different tier (and potentially on a different server) to the end-user. That means that the caller IP address seen by the service’s Log function will actually be the web server’s IP address, rather than the end-user’s browser IP address.

WCF allows for custom headers and it provides an ideal way to pass the end-user’s IP address (or any metadata) to the service layer from the web layer.

Firstly, we need to add the user’s IP address to every WCF call. This is done using a custom IClientMessageInspector to add a message header.

public class ClientMessageInspector : IClientMessageInspector
    {
        private const string HEADER_URI_NAMESPACE = "http://tempuri.org";
        private const string HEADER_SOURCE_ADDRESS = "SOURCE_ADDRESS";

        public ClientMessageInspector()
        {
        }

        public void AfterReceiveReply(ref System.ServiceModel.Channels.Message reply, object correlationState)
        {
        }

        public object BeforeSendRequest(ref System.ServiceModel.Channels.Message request, System.ServiceModel.IClientChannel channel)
        {
            if (HttpContext.Current != null)
            {
                MessageHeader header = null;
                try
                {
                    header = MessageHeader.CreateHeader(HEADER_SOURCE_ADDRESS, HEADER_URI_NAMESPACE, HttpContext.Current.Request.UserHostAddress);
                }
                catch (Exception)
                {
                    //If the address can't be read, still send the header so the service sees a consistent shape
                    header = MessageHeader.CreateHeader(HEADER_SOURCE_ADDRESS, HEADER_URI_NAMESPACE, null);
                }
                request.Headers.Add(header);
            }
            else if (OperationContext.Current != null)
            {
                //If service layer does a nested call to another service layer method, ensure that original web caller IP is passed through also 
                MessageHeader header = null;
                int index = OperationContext.Current.IncomingMessageHeaders.FindHeader(HEADER_SOURCE_ADDRESS, HEADER_URI_NAMESPACE);
                if (index > -1)
                {
                    string remoteAddress = OperationContext.Current.IncomingMessageHeaders.GetHeader<string>(index);
                    header = MessageHeader.CreateHeader(HEADER_SOURCE_ADDRESS, HEADER_URI_NAMESPACE, remoteAddress);
                }
                else
                {
                    header = MessageHeader.CreateHeader(HEADER_SOURCE_ADDRESS, HEADER_URI_NAMESPACE, null);
                }

                request.Headers.Add(header);
            }
            
            return null;

        }
    }

To make WCF service calls use this inspector, a behavior and behavior extension is needed:

 public class EndpointBehavior : IEndpointBehavior
    {
        public EndpointBehavior() {}

        public void AddBindingParameters(ServiceEndpoint endpoint, System.ServiceModel.Channels.BindingParameterCollection bindingParameters) {}

        public void ApplyClientBehavior(ServiceEndpoint endpoint, System.ServiceModel.Dispatcher.ClientRuntime clientRuntime)
        {
            ClientMessageInspector inspector = new ClientMessageInspector();
            clientRuntime.MessageInspectors.Add(inspector);
        }

        public void ApplyDispatchBehavior(ServiceEndpoint endpoint, System.ServiceModel.Dispatcher.EndpointDispatcher endpointDispatcher) {}

        public void Validate(ServiceEndpoint endpoint) {}

    }
   public class BehaviorExtension : BehaviorExtensionElement
    {
        public override Type BehaviorType
        {
            get { return typeof(EndpointBehavior); }
        }

        protected override object CreateBehavior()
        {
            return new EndpointBehavior();
        }
    }

Now we can use the extension in the config file for the client endpoints.

<system.serviceModel>
    <extensions>
      <behaviorExtensions>
        <add name="CustomExtension" type="Example.Service.BehaviorExtension, Example.Service, Version=1.0.0.0, Culture=neutral, PublicKeyToken=null" />
      </behaviorExtensions>
    </extensions>
    <behaviors>
      <endpointBehaviors>
        <behavior name="ClientEndpointBehavior">
          <CustomExtension/>
        </behavior>
      </endpointBehaviors>
    </behaviors>
    <bindings>
      <netTcpBinding>
        <binding name="ExampleServiceClientBinding" />
      </netTcpBinding>
    </bindings>
    <client>
      <endpoint address="net.tcp://localhost:8091/CustomExample/DataService" binding="netTcpBinding" bindingConfiguration="ExampleServiceClientBinding" contract="Example.Service.Contract.IDataService" name="ExampleDataServiceClientEndpoint" behaviorConfiguration="ClientEndpointBehavior">
      </endpoint>
    </client>
  </system.serviceModel>
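
As an aside, the same behavior can be attached in code rather than config – a minimal sketch using a ChannelFactory (IDataService being the sample contract from the config above):

//Attach the custom behavior to a programmatically-created client channel
var binding = new NetTcpBinding();
var address = new EndpointAddress("net.tcp://localhost:8091/CustomExample/DataService");

var factory = new ChannelFactory<IDataService>(binding, address);
factory.Endpoint.Behaviors.Add(new EndpointBehavior()); //our IEndpointBehavior from above

IDataService client = factory.CreateChannel();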

Using SvcTraceViewer we can see the new header being passed on the SOAP call:

<s:Envelope xmlns:a="http://www.w3.org/2005/08/addressing" xmlns:s="http://www.w3.org/2003/05/soap-envelope">
<s:Header>

[...]

<SOURCE_ADDRESS xmlns="http://tempuri.org">192.168.1.1</SOURCE_ADDRESS>
</s:Header>

Finally, to access this in the service code, I add a helper method to the service base class. A call to GetServiceCallerRemoteAddress() anywhere in service code will always give the IP address of the end-user caller of the service method.

    public abstract class BaseDataService 
    {
        //[...]

        protected string GetServiceCallerRemoteAddress()
        {
            //ServiceSecurityContext.Current is also available here if the caller's identity is needed
            //Name and namespace must match those used by the client message inspector
            int index = OperationContext.Current.IncomingMessageHeaders.FindHeader("SOURCE_ADDRESS", "http://tempuri.org");
            string remoteAddress = null;
            if (index > -1)
            {
                remoteAddress = OperationContext.Current.IncomingMessageHeaders.GetHeader<string>(index);
            }
            return remoteAddress;
        }        
    }
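
For illustration, here's how a service method might use it when logging – a sketch only (the Log method and _logger field are hypothetical, not part of the base class above):

public void Log(Exception ex)
{
    //Thanks to the custom header, this is the browser's IP, not the web tier's
    string callerIp = GetServiceCallerRemoteAddress();

    _logger.Error(String.Format("Exception from caller {0}: {1}", callerIp, ex));
}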

.NET Scalable Server Push Notifications with SignalR and Redis

Modern web applications sometimes need to notify a logged-in user of an event that occurs on the server. Doing so involves sending data to the browser when the event happens, which is not easily achieved with the standard request-response model of the HTTP protocol. A notification to the browser needs what’s known as “server push” technology: the server cannot “push” a notification unless there is an open, dedicated connection to the client. HTML5-capable browsers provide the WebSocket mechanism for this, but it is not yet widely available. Other browsers have to mimic push behaviour, for example with a long-polling technique in JavaScript, where the client repeatedly issues AJAX-style requests that the server holds open until it has data to send.

To reduce the complexity of coding for the different browser capabilities, the excellent SignalR library is available for .NET projects – it supports the transport mechanisms mentioned above, and some others, and automatically selects the best (i.e. most performant) transport for the capabilities of the given browser and server combination. Crucially, it exposes enough configuration that the developer can tune it for performance and scalability. Using it for server-initiated notifications is a no-brainer.

Here’s an example of how to set up such a notification mechanism.

To begin with, install required libraries into the project using NuGet.

PM> Install-Package Microsoft.AspNet.SignalR
PM> Install-Package ServiceStack.Redis
PM> Install-Package Microsoft.AspNet.SignalR.Redis

You can see that Redis is used too; this is what allows for web-farm scaling. Redis serves as the shared backplane for SignalR messages, and in this example it also stores the per-user connection lists, so the data stays available and synchronized no matter which web server a given SignalR polling request arrives at. This can be achieved (depending on architectural demands) using just one Redis server instance, or by running multiple replicated Redis instances (outside the scope of this example, but easy to set up).

Next configure SignalR to use Redis as the backing store and map the signalr route. This is done as part of RegisterRoutes (Global.asax.cs).

public static void RegisterRoutes(RouteCollection routes)
{
      //Use redis for signalr connections - set redis server connection details here
      GlobalHost.DependencyResolver.UseRedis("localhost", 6379, null, "WBSignalR");

      // Register the default SignalR hubs route: ~/signalr
      // Has to be defined before the default route, or it is overridden 
      RouteTable.Routes.MapHubs(new HubConfiguration { EnableDetailedErrors = true });

      //All other mvc routes are defined here            
}

A SignalR Hub subclass is needed to contain the server side code that both the SignalR client and server will use.

public class NotificationHub : Hub
{
}

We also use this class to keep the server aware of the open SignalR connections and – more importantly – which connections relate to which user. The events on the Hub class allow us to keep this up-to-date connection list.

There’s a lot to consider in the code for this class. The full code can be downloaded – NotificationHub.cs. Let’s look at it piece-by-piece.

The first thing is the nested ConnectionDetail class that is used to store the details of the connection in Redis.

[ProtoBuf.ProtoContract]
public class ConnectionDetail
{
    public ConnectionDetail() { }

    [ProtoBuf.ProtoMember(1)]
    public string ConnectionId { get; set; }

    public override bool Equals(object obj)
    {
        if (obj == null) return false;
        if (obj.GetType() != this.GetType()) return false;

        return (obj as ConnectionDetail).ConnectionId.Equals(this.ConnectionId);
    }

    //An Equals override should be paired with GetHashCode
    public override int GetHashCode()
    {
        return ConnectionId == null ? 0 : ConnectionId.GetHashCode();
    }
}

This class only has one property – the SignalR ConnectionId string. It is better to use a class instead of just the connection id string because we can extend it to store other details about the connection that might later affect what message we send, or how it should be treated on the client. For example, we could record and store the type of browser associated with the connection (mobile, etc.).

The Equals implementation is needed to check if the connection object is already part of the user’s connection collection or not.

To store the connection detail object in Redis it will be serialized to a byte array using protocol buffers – hence the ProtoBuf attributes. Protocol buffers are a highly performant way of serializing/deserializing data. If you’re not familiar with protobuf.net, you really should check it out.

Next, we use the ServiceStack.Redis client to make all calls to Redis to store the list of connections per user. This is fairly trivial to set up.

private RedisClient client;

public NotificationHub()
{
    client = new RedisClient();   //Default connection - localhost:6379
}

The connection to Redis is made when we want to add or remove a connection from the user’s connection list. Two methods provide that functionality – AddNotificationConnection and RemoveNotificationConnection. They are very similar, so I’ll just explain the first one.

public void AddNotificationConnection(string username, string connectionid)
{
    //REDIS_NOTIF_PREFIX is a class-level constant (see the downloadable NotificationHub.cs)
    string key = String.Format("{0}:{1}", REDIS_NOTIF_PREFIX, username);

    //WATCH makes the transaction below abort if another server changes this key first
    client.Watch(key);
    try
    {
        List<ConnectionDetail> list = new List<ConnectionDetail>();
        byte[] data = client.Get(key);
        MemoryStream stream;
        if (data != null)
        {
            stream = new MemoryStream(data);
            list = ProtoBuf.Serializer.Deserialize<List<ConnectionDetail>>(stream);
        }
        ConnectionDetail cdetail = new ConnectionDetail() { ConnectionId = connectionid };
        if (!list.Contains(cdetail))
        {
            list.Add(cdetail);
        }
        stream = new MemoryStream();
        ProtoBuf.Serializer.Serialize<List<ConnectionDetail>>(stream, list);
        stream.Seek(0, SeekOrigin.Begin);
        data = new byte[stream.Length];
        stream.Read(data, 0, data.Length);

        using (var t = client.CreateTransaction())
        {
            t.QueueCommand(c => c.Set(key, data));
            t.Commit();
        }
    }
    finally
    {
        client.UnWatch();
    }
}

The code looks for data in Redis under a unique key which is a combination of a constant prefix and the username. It is keyed this way because we can do a fast key lookup, retrieve and lock a small block of data, and so keep the operation atomic, maintaining the integrity of the user’s connection list in an environment where the user could open a new connection via a different web server at any time. Keying it per user, rather than storing the connections for all users under one key, also avoids creating locking bottlenecks at scale.
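
Since RemoveNotificationConnection is, as mentioned, very similar, here's a minimal sketch of what it might look like under the same key scheme and serialization (the downloadable NotificationHub.cs has the real version):

public void RemoveNotificationConnection(string username, string connectionid)
{
    string key = String.Format("{0}:{1}", REDIS_NOTIF_PREFIX, username);

    client.Watch(key);
    try
    {
        byte[] data = client.Get(key);
        if (data == null) return; //nothing stored for this user

        var list = ProtoBuf.Serializer.Deserialize<List<ConnectionDetail>>(new MemoryStream(data));
        list.RemoveAll(d => d.ConnectionId == connectionid);

        var stream = new MemoryStream();
        ProtoBuf.Serializer.Serialize<List<ConnectionDetail>>(stream, list);

        using (var t = client.CreateTransaction())
        {
            t.QueueCommand(c => c.Set(key, stream.ToArray()));
            t.Commit();
        }
    }
    finally
    {
        client.UnWatch();
    }
}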

Next, we use the connection events of the Hub class to maintain the user’s list, e.g.:

public override Task OnConnected()
{
    string Username = GetConnectionUser();

    if (Username != null)
    {
        AddNotificationConnection(Username, Context.ConnectionId);
    }

    return base.OnConnected();
}
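
The disconnect event can be handled symmetrically – a sketch, assuming the SignalR 1.x OnDisconnected override and the RemoveNotificationConnection method above:

public override Task OnDisconnected()
{
    string username = GetConnectionUser();

    if (username != null)
    {
        //Remove the stale connection from the user's list in Redis
        RemoveNotificationConnection(username, Context.ConnectionId);
    }

    return base.OnDisconnected();
}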

It’s fairly simple – the ConnectionId is taken from the Hub Context object and stored. The main issue here is how to get the user name associated with the connection. The usual HttpContext.User is not available in the SignalR Hub implementation. SignalR uses OWIN for its HTTP pipeline, not the usual MVC pipeline, and one of the consequences of this is that SignalR does not load the session (based on the session cookie). However, the browser cookies are sent with the SignalR request. In this case, I use FormsAuthentication in the web application, so the user’s name is stored encrypted in the ticket when the user logs in. GetConnectionUser gets this data from the FormsAuthentication cookie.

private string GetConnectionUser(){
    if (Context.RequestCookies.ContainsKey(FormsAuthentication.FormsCookieName))
    {
        string cookie = Context.RequestCookies[FormsAuthentication.FormsCookieName].Value;

        FormsAuthenticationTicket ticket = FormsAuthentication.Decrypt(cookie);
        return ticket.UserData;
    }

    return null;
}
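
One caveat worth flagging: the standard FormsAuthentication.SetAuthCookie call leaves UserData empty, so for GetConnectionUser to work the login code has to build the ticket itself. A sketch (model.UserName is a hypothetical login view-model property):

//At login, store the username in the ticket's UserData field so the hub can read it later
var ticket = new FormsAuthenticationTicket(
    1,                          //version
    model.UserName,             //cookie name
    DateTime.Now,
    DateTime.Now.AddMinutes(30),
    false,                      //not persistent
    model.UserName);            //UserData - what GetConnectionUser reads

string encrypted = FormsAuthentication.Encrypt(ticket);
Response.Cookies.Add(new HttpCookie(FormsAuthentication.FormsCookieName, encrypted));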

The final piece of the Hub code is the function that actually sends the message to the user’s client browser sessions. It will invoke the corresponding ReceiveNotification function in JavaScript on the client.

public bool SendNotificationToUser(string username, string message){

    List<ConnectionDetail> list = GetNotificationConnections(username);

    foreach(ConnectionDetail detail in list){
        Clients.Client(detail.ConnectionId).receiveNotification(message);
    }

    //Report whether the user had any live connections to notify
    return list.Count > 0;
}
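
GetNotificationConnections isn't shown in the post body (it's in the downloadable file); a minimal sketch, assuming it simply reads back and deserializes the same Redis key:

private List<ConnectionDetail> GetNotificationConnections(string username)
{
    string key = String.Format("{0}:{1}", REDIS_NOTIF_PREFIX, username);

    byte[] data = client.Get(key);
    if (data == null) return new List<ConnectionDetail>(); //no known connections

    return ProtoBuf.Serializer.Deserialize<List<ConnectionDetail>>(new MemoryStream(data));
}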

To test this, we will call it from a controller action on a test page.


[HttpPost]
public ActionResult NotfTest(string touser, string message)
{            
    var hubConnection = new HubConnection("http://localhost/SignalR.Notification.Sample");
    IHubProxy hubProxy = hubConnection.CreateHubProxy("NotificationHub");
    try
    {
        hubConnection.Start().Wait(2000); //Async call, 2s wait - should use await in C# 5

        //Invoke also returns a Task - wait for the send to complete before stopping the connection
        hubProxy.Invoke("SendNotificationToUser", new object[] { touser, message }).Wait(2000);
    }
    finally
    {
        hubConnection.Stop();
    }
    return View("NotfTestSent");
}

The call is made by the server creating a SignalR hub connection of its own and then sending a request to the Hub’s SendNotificationToUser function (similar to an RPC call).

That’s all the server side code, now for the client side.

To use the client-side features of SignalR, we need to include the SignalR JavaScript file and the server-side generated hubs JavaScript (plus jQuery, which both SignalR and the qTip plugin below depend on).

How you want to display the notification in the browser is application-dependent, and so up to you. For this, I use the jQuery qTip2 plugin to show it as a tooltip pop-up.

<html>
<head>
    <!-- Add script includes - jQuery must come first, as both SignalR and qTip2 depend on it
         (adjust the jQuery version/path to whatever your project ships) -->
    <script src="@Url.Content("~/Scripts/jquery-1.9.1.min.js")" type="text/javascript"></script>
    <script src="http://cdnjs.cloudflare.com/ajax/libs/qtip2/2.1.1/jquery.qtip.min.js" type="text/javascript"></script>
    <script src="@Url.Content("~/Scripts/jquery.signalR-1.1.2.js")" type="text/javascript"></script>
    <script src="@Url.Content("~/signalr/hubs")" type="text/javascript"></script>
</head>

Near the end of the HTML page (or of the layout HTML), some JavaScript sets up the hub proxy, defines the client-side ReceiveNotification implementation that displays the message, and then opens the connection once the page has loaded. (The client-side handler must be registered before the connection is started, or SignalR won’t subscribe it.)

<script type="text/javascript">
    $(function () {

        // Declare a proxy to reference the server-side SignalR hub class
        var notfHub = $.connection.notificationHub;

        //Link a client-side function to the server hub event
        //(must be registered before hub.start() so SignalR subscribes it)
        notfHub.client.receiveNotification = function (message) {

            //Use qtip library to show a tooltip message
            $('#message-icon').qtip({
                content: {
                    text: message,
                    title: 'Notification',
                    button: true
                },
                position: {
                    at: 'top right',
                    my: 'bottom left'
                },
                show: {
                    delay: 0,
                    ready: true,
                    effect: function (offset) {
                        $(this).fadeIn(250);
                    }
                }
            }).show();
        };

        //Make the connection to the server hubs
        $.connection.hub.start();

    });
</script>

Voilà. Server-side push notification to any number of users, no matter how many places each is logged in, and whatever browser they use.


SELinux Policies for Mono (.NET) Web Applications

I’m curious to see how production-ready Mono on Linux is. What better way to find out than to take an existing .NET web app and run it on Mono? I tried – the web app wouldn’t even run, reporting an internal server error. After a little delving it turned out that SELinux was blocking Apache’s mod_mono from spawning the mono process. From /var/log/messages:

May 14 15:18:29 slave kernel: type=1400 audit(1374675509.274:34550): avc:  denied  { search } for  pid=5744 comm="mono" name="/" dev=dm-2 ino=2 scontext=unconfined_u:system_r:httpd_sys_script_t:s0 tcontext=system_u:object_r:file_t:s0 tclass=dir
May 14 15:18:29 slave kernel: type=1400 audit(1374675509.325:34551): avc:  denied  { search } for  pid=5744 comm="mono" name="/" dev=dm-2 ino=2 scontext=unconfined_u:system_r:httpd_sys_script_t:s0 tcontext=system_u:object_r:file_t:s0 tclass=dir
May 14 15:18:29 slave kernel: type=1400 audit(1374675509.325:34552): avc:  denied  { search } for  pid=5744 comm="mono" name="/" dev=dm-2 ino=2 scontext=unconfined_u:system_r:httpd_sys_script_t:s0 tcontext=system_u:object_r:file_t:s0 tclass=dir
May 14 15:18:29 slave kernel: type=1400 audit(1374675509.610:34553): avc:  denied  { search } for  pid=5744 comm="mono" name="/" dev=dm-2 ino=2 scontext=unconfined_u:system_r:httpd_sys_script_t:s0 tcontext=system_u:object_r:file_t:s0 tclass=dir
May 14 15:18:29 slave kernel: type=1400 audit(1374675509.610:34554): avc:  denied  { search } for  pid=5744 comm="mono" name="/" dev=dm-2 ino=2 scontext=unconfined_u:system_r:httpd_sys_script_t:s0 tcontext=system_u:object_r:file_t:s0 tclass=dir
May 14 15:18:29 slave kernel: type=1400 audit(1374675509.611:34555): avc:  denied  { search } for  pid=5744 comm="mono" name="/" dev=dm-2 ino=2 scontext=unconfined_u:system_r:httpd_sys_script_t:s0 tcontext=system_u:object_r:file_t:s0 tclass=dir
May 14 15:18:29 slave kernel: type=1400 audit(1374675509.657:34556): avc:  denied  { execmem } for  pid=5744 comm="mono" scontext=unconfined_u:system_r:httpd_sys_script_t:s0 tcontext=unconfined_u:system_r:httpd_sys_script_t:s0 tclass=process
May 14 15:18:29 slave kernel: type=1400 audit(1374675509.658:34557): avc:  denied  { execmem } for  pid=5744 comm="mono" scontext=unconfined_u:system_r:httpd_sys_script_t:s0 tcontext=unconfined_u:system_r:httpd_sys_script_t:s0 tclass=process
May 14 15:18:29 slave kernel: type=1400 audit(1374675509.863:34558): avc:  denied  { ptrace } for  pid=29877 comm="gdb" scontext=unconfined_u:system_r:httpd_sys_script_t:s0 tcontext=unconfined_u:system_r:httpd_sys_script_t:s0 tclass=process

I was later to find out that this was only the first thing SELinux would prevent it from doing!

The mono guys recommend that you switch SELinux off for mono apps, but that won’t really cut it for a production system. I wanted to create an SELinux policy that could be applied to allow the mono web app to work. SELinux is very fine-grained in the permissions that can be applied, so I can see why the mono guys don’t provide a standard mono SELinux policy to install: the policy depends very much on what the individual mono app needs to be able to do. If you tried to create an overall policy for mono apps it would have to allow so many permissions that you might as well switch SELinux off.

So how to derive a custom SELinux policy for a mono app?

To start with, try to run the app. Any SELinux enforcement messages are logged to the system log. The SELinux audit2allow command is helpful in deriving policy rules to resolve the enforcement errors.

cat /var/log/messages | audit2allow

The output will give you a list of rules to apply, e.g.:

[Bren@slave devel]$ sudo cat /var/log/messages | audit2allow

#============= httpd_sys_script_t ==============
allow httpd_sys_script_t file_t:dir search;
allow httpd_sys_script_t self:process execmem;
allow httpd_sys_script_t self:process ptrace;
allow httpd_sys_script_t tmpfs_t:file unlink;

To create a policy module, start by creating a type enforcement (.te) file and enter the permissions discovered by audit2allow into that file.

vi /usr/share/selinux/devel/mod_mono.te

After making any alteration to the .te file, the SELinux policy module needs to be re-compiled (using the Makefile that ships with the selinux-policy devel package):

make -f /usr/share/selinux/devel/Makefile mod_mono.pp

Install the new policy module as follows (you’ll need root permissions to do these things).

sudo semodule -i /usr/share/selinux/devel/mod_mono.pp

When you do all that and run the app again, it should get further, but will only encounter new enforcement errors. Unfortunately, I found no easy way of discovering all the permissions an app needs other than to iteratively test it, run audit2allow, add the new rules to the .te file, re-compile, and try the app again.

Some rules will be common to all mono web apps, and some will be as unique as the web app itself; the process is driven entirely by the individual app’s requirements. For example, does it use pipes or TCP ports to connect to a database server? If new features are added to the app in later releases, it could require additional policy rule changes to work.

In the end, here is the full policy I needed to get my web app working as expected. (Note that the samba_share_t permissions are needed because my web app views are located on a development Samba share.)

policy_module(mod_mono,1.0.0)

gen_require(`
   type lib_t;
   type tmp_t;
   type file_t;
   type tmpfs_t;
   type mono_exec_t;
   type samba_share_t;
   type inotifyfs_t;
   type httpd_t;
   type httpd_sys_script_t;
   type httpd_sys_rw_content_t;
   type postgresql_port_t;
   type port_t;
   class process { execmem ptrace };
   class capability { sys_admin ipc_owner };
   class sock_file { write create unlink };
   class sem create;
   class dir { open search getattr read write add_name };
   class filesystem getattr;
   class file { open read getattr execute_no_trans };
   class tcp_socket { name_connect };
')

#============ httpd_sys_script_t =============
allow httpd_sys_script_t self:process execmem;
allow httpd_sys_script_t file_t:dir search;
allow httpd_sys_script_t inotifyfs_t:dir { search read };
allow httpd_sys_script_t samba_share_t:dir { open search getattr read };
allow httpd_sys_script_t samba_share_t:file { open read getattr };
allow httpd_sys_script_t tmpfs_t:dir { search read write open add_name remove_name };
allow httpd_sys_script_t tmpfs_t:filesystem getattr;
allow httpd_sys_script_t tmpfs_t:file { read write open create unlink } ;
allow httpd_sys_script_t postgresql_port_t:tcp_socket name_connect;
allow httpd_sys_script_t port_t:tcp_socket name_connect;

allow httpd_t lib_t:file execute_no_trans;
allow httpd_t mono_exec_t:file { read execute_no_trans };
allow httpd_t self:process ptrace;
allow httpd_t tmp_t:sock_file { write create };
allow httpd_t httpd_sys_rw_content_t:sock_file unlink;
allow httpd_t self:capability { sys_admin ipc_owner };
allow httpd_t file_t:dir search;
allow httpd_t samba_share_t:dir search;
allow httpd_t samba_share_t:file { read open getattr };